author    Linus Torvalds <torvalds@linux-foundation.org>  2017-03-19 21:06:31 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-03-19 21:06:31 -0400
commit    8aa3417255fababc0cab7128dd7520d3af344ab8 (patch)
tree      521faa00d95dcf17c69482136c86b427beb826ca /drivers
parent    1b8df61908bde12946877b4c079bb73fc0bd3409 (diff)
parent    6c611d18f386d37cce3afbd921568e2a895bd86e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target fixes from Nicholas Bellinger:
 "The bulk of the changes are in qla2xxx target driver code to address
  various issues found during Cavium/QLogic's internal testing (stable
  CC's included), along with a few other stability and smaller
  miscellaneous improvements.

  There are also a couple of different patch sets from Mike Christie,
  which have been a result of his work to use target-core ALUA logic
  together with tcm-user backend driver.

  Finally, a patch to address some long standing issues with
  pass-through SCSI export of TYPE_TAPE + TYPE_MEDIUM_CHANGER devices,
  which will make folks using physical (or virtual) magnetic tape
  happy"

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (28 commits)
  qla2xxx: Update driver version to 9.00.00.00-k
  qla2xxx: Fix delayed response to command for loop mode/direct connect.
  qla2xxx: Change scsi host lookup method.
  qla2xxx: Add DebugFS node to display Port Database
  qla2xxx: Use IOCB interface to submit non-critical MBX.
  qla2xxx: Add async new target notification
  qla2xxx: Export DIF stats via debugfs
  qla2xxx: Improve T10-DIF/PI handling in driver.
  qla2xxx: Allow relogin to proceed if remote login did not finish
  qla2xxx: Fix sess_lock & hardware_lock lock order problem.
  qla2xxx: Fix inadequate lock protection for ABTS.
  qla2xxx: Fix request queue corruption.
  qla2xxx: Fix memory leak for abts processing
  qla2xxx: Allow vref count to timeout on vport delete.
  tcmu: Convert cmd_time_out into backend device attribute
  tcmu: make cmd timeout configurable
  tcmu: add helper to check if dev was configured
  target: fix race during implicit transition work flushes
  target: allow userspace to set state to transitioning
  target: fix ALUA transition timeout handling
  ...
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/scsi/qla2xxx/Kconfig            |   1
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c         |   4
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.h          |   1
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h          |  56
-rw-r--r-- drivers/scsi/qla2xxx/qla_dfs.c          | 107
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h          |  18
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c         |  85
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c         |  13
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c          |  41
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c          | 304
-rw-r--r-- drivers/scsi/qla2xxx/qla_mid.c          |  14
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c           |  23
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c       | 748
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.h       |  39
-rw-r--r-- drivers/scsi/qla2xxx/qla_version.h      |   6
-rw-r--r-- drivers/scsi/qla2xxx/tcm_qla2xxx.c      |  49
-rw-r--r-- drivers/target/target_core_alua.c       |  82
-rw-r--r-- drivers/target/target_core_configfs.c   |   4
-rw-r--r-- drivers/target/target_core_pscsi.c      |  50
-rw-r--r-- drivers/target/target_core_sbc.c        |  10
-rw-r--r-- drivers/target/target_core_tpg.c        |   3
-rw-r--r-- drivers/target/target_core_transport.c  |   3
-rw-r--r-- drivers/target/target_core_user.c       | 152
23 files changed, 1267 insertions, 546 deletions
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 67c0d5aa3212..de952935b5d2 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -3,6 +3,7 @@ config SCSI_QLA_FC
 depends on PCI && SCSI
 depends on SCSI_FC_ATTRS
 select FW_LOADER
+select BTREE
 ---help---
 This qla2xxx driver supports all QLogic Fibre Channel
 PCI and PCIe host adapters.
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index f610103994af..435ff7fd6384 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2154,8 +2154,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
2154 "Timer for the VP[%d] has stopped\n", vha->vp_idx); 2154 "Timer for the VP[%d] has stopped\n", vha->vp_idx);
2155 } 2155 }
2156 2156
2157 BUG_ON(atomic_read(&vha->vref_count));
2158
2159 qla2x00_free_fcports(vha); 2157 qla2x00_free_fcports(vha);
2160 2158
2161 mutex_lock(&ha->vport_lock); 2159 mutex_lock(&ha->vport_lock);
@@ -2166,7 +2164,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
     dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
         vha->gnl.ldma);

-    if (vha->qpair->vp_idx == vha->vp_idx) {
+    if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
         if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
             ql_log(ql_log_warn, vha, 0x7087,
                 "Queue Pair delete failed.\n");
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index e1fc4e66966a..c6bffe929fe7 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -348,6 +348,7 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
 #define ql_dbg_tgt      0x00004000 /* Target mode */
 #define ql_dbg_tgt_mgt  0x00002000 /* Target mode management */
 #define ql_dbg_tgt_tmr  0x00001000 /* Target mode task management */
+#define ql_dbg_tgt_dif  0x00000800 /* Target mode dif */

 extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
     uint32_t, void **);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 625d438e3cce..ae119018dfaa 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -25,6 +25,7 @@
 #include <linux/firmware.h>
 #include <linux/aer.h>
 #include <linux/mutex.h>
+#include <linux/btree.h>

 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -395,11 +396,15 @@ struct srb_iocb {
         struct completion comp;
     } abt;
     struct ct_arg ctarg;
+#define MAX_IOCB_MB_REG 28
+#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t))
     struct {
-        __le16 in_mb[28];   /* fr fw */
-        __le16 out_mb[28];  /* to fw */
+        __le16 in_mb[MAX_IOCB_MB_REG];  /* from FW */
+        __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */
         void *out, *in;
         dma_addr_t out_dma, in_dma;
+        struct completion comp;
+        int rc;
     } mbx;
     struct {
         struct imm_ntfy_from_isp *ntfy;
@@ -437,7 +442,7 @@ typedef struct srb {
     uint32_t handle;
     uint16_t flags;
     uint16_t type;
-    char *name;
+    const char *name;
     int iocbs;
     struct qla_qpair *qpair;
     u32 gen1;   /* scratch */
@@ -2300,6 +2305,8 @@ typedef struct fc_port {
     struct ct_sns_desc ct_desc;
     enum discovery_state disc_state;
     enum login_state fw_login_state;
+    unsigned long plogi_nack_done_deadline;
+
     u32 login_gen, last_login_gen;
     u32 rscn_gen, last_rscn_gen;
     u32 chip_reset;
@@ -3106,6 +3113,16 @@ struct qla_chip_state_84xx {
     uint32_t gold_fw_version;
 };

+struct qla_dif_statistics {
+    uint64_t dif_input_bytes;
+    uint64_t dif_output_bytes;
+    uint64_t dif_input_requests;
+    uint64_t dif_output_requests;
+    uint32_t dif_guard_err;
+    uint32_t dif_ref_tag_err;
+    uint32_t dif_app_tag_err;
+};
+
 struct qla_statistics {
     uint32_t total_isp_aborts;
     uint64_t input_bytes;
@@ -3118,6 +3135,8 @@ struct qla_statistics {
     uint32_t stat_max_pend_cmds;
     uint32_t stat_max_qfull_cmds_alloc;
     uint32_t stat_max_qfull_cmds_dropped;
+
+    struct qla_dif_statistics qla_dif_stats;
 };

 struct bidi_statistics {
@@ -3125,6 +3144,16 @@ struct bidi_statistics {
     unsigned long long transfer_bytes;
 };

+struct qla_tc_param {
+    struct scsi_qla_host *vha;
+    uint32_t blk_sz;
+    uint32_t bufflen;
+    struct scatterlist *sg;
+    struct scatterlist *prot_sg;
+    struct crc_context *ctx;
+    uint8_t *ctx_dsd_alloced;
+};
+
 /* Multi queue support */
 #define MBC_INITIALIZE_MULTIQ 0x1f
 #define QLA_QUE_PAGE 0X1000
@@ -3272,6 +3301,8 @@ struct qlt_hw_data {
     uint8_t tgt_node_name[WWN_SIZE];

     struct dentry *dfs_tgt_sess;
+    struct dentry *dfs_tgt_port_database;
+
     struct list_head q_full_list;
     uint32_t num_pend_cmds;
     uint32_t num_qfull_cmds_alloc;
@@ -3281,6 +3312,7 @@ struct qlt_hw_data {
     spinlock_t sess_lock;
     int rspq_vector_cpuid;
     spinlock_t atio_lock ____cacheline_aligned;
+    struct btree_head32 host_map;
 };

 #define MAX_QFULL_CMDS_ALLOC 8192
@@ -3290,6 +3322,10 @@ struct qlt_hw_data {

 #define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */

+#define QLA_EARLY_LINKUP(_ha) \
+    ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \
+     _ha->flags.fw_started && !_ha->flags.fw_init_done)
+
 /*
  * Qlogic host adapter specific data structure.
  */
@@ -3339,7 +3375,11 @@ struct qla_hw_data {
         uint32_t fawwpn_enabled:1;
         uint32_t exlogins_enabled:1;
         uint32_t exchoffld_enabled:1;
-        /* 35 bits */
+
+        uint32_t lip_ae:1;
+        uint32_t n2n_ae:1;
+        uint32_t fw_started:1;
+        uint32_t fw_init_done:1;
     } flags;

     /* This spinlock is used to protect "io transactions", you must
@@ -3432,7 +3472,6 @@ struct qla_hw_data {
 #define P2P_LOOP 3
     uint8_t interrupts_on;
     uint32_t isp_abort_cnt;
-
 #define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
 #define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
 #define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001
@@ -3913,6 +3952,7 @@ typedef struct scsi_qla_host {
     struct list_head vp_fcports;    /* list of fcports */
     struct list_head work_list;
     spinlock_t work_lock;
+    struct work_struct iocb_work;

     /* Commonly used flags and state information. */
     struct Scsi_Host *host;
@@ -4076,6 +4116,7 @@ typedef struct scsi_qla_host {
     /* Count of active session/fcport */
     int fcport_count;
     wait_queue_head_t fcport_waitQ;
+    wait_queue_head_t vref_waitq;
 } scsi_qla_host_t;

 struct qla27xx_image_status {
@@ -4131,14 +4172,17 @@ struct qla2_sgx {
     mb(); \
     if (__vha->flags.delete_progress) { \
         atomic_dec(&__vha->vref_count); \
+        wake_up(&__vha->vref_waitq); \
         __bail = 1; \
     } else { \
         __bail = 0; \
     } \
 } while (0)

-#define QLA_VHA_MARK_NOT_BUSY(__vha) \
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
     atomic_dec(&__vha->vref_count); \
+    wake_up(&__vha->vref_waitq); \
+} while (0) \

 #define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \
     atomic_inc(&__qpair->ref_count); \
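
The vref_count/vref_waitq pairing added above is a standard kernel teardown
idiom: every path that drops a reference also wakes a waitqueue, so a delete
path can sleep until the count drains instead of polling. A minimal sketch of
the idiom outside this driver -- the obj/obj_put/obj_teardown names are
illustrative, not qla2xxx code:

  #include <linux/atomic.h>
  #include <linux/wait.h>
  #include <linux/jiffies.h>

  struct obj {
      atomic_t ref_count;           /* in-flight users */
      wait_queue_head_t ref_waitq;  /* teardown sleeps here */
  };

  static void obj_put(struct obj *o)
  {
      /* Pair every decrement with a wake-up so a sleeping
       * teardown re-checks the count immediately. */
      atomic_dec(&o->ref_count);
      wake_up(&o->ref_waitq);
  }

  static void obj_teardown(struct obj *o)
  {
      /* Sleep until the last user is gone, but give up after 10s
       * rather than hang forever, as the vport delete path in
       * qla_mid.c below now does. */
      wait_event_timeout(o->ref_waitq,
                         !atomic_read(&o->ref_count), 10 * HZ);
  }
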
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index b48cce696bac..989e17b0758c 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -19,11 +19,11 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
     struct qla_hw_data *ha = vha->hw;
     unsigned long flags;
     struct fc_port *sess = NULL;
-    struct qla_tgt *tgt= vha->vha_tgt.qla_tgt;
+    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

-    seq_printf(s, "%s\n",vha->host_str);
+    seq_printf(s, "%s\n", vha->host_str);
     if (tgt) {
-        seq_printf(s, "Port ID   Port Name                Handle\n");
+        seq_puts(s, "Port ID   Port Name                Handle\n");

         spin_lock_irqsave(&ha->tgt.sess_lock, flags);
         list_for_each_entry(sess, &vha->vp_fcports, list)
@@ -44,7 +44,6 @@ qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
     return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
 }

-
 static const struct file_operations dfs_tgt_sess_ops = {
     .open = qla2x00_dfs_tgt_sess_open,
     .read = seq_read,
@@ -53,6 +52,78 @@ static const struct file_operations dfs_tgt_sess_ops = {
 };

 static int
+qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
+{
+    scsi_qla_host_t *vha = s->private;
+    struct qla_hw_data *ha = vha->hw;
+    struct gid_list_info *gid_list;
+    dma_addr_t gid_list_dma;
+    fc_port_t fc_port;
+    char *id_iter;
+    int rc, i;
+    uint16_t entries, loop_id;
+    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
+    seq_printf(s, "%s\n", vha->host_str);
+    if (tgt) {
+        gid_list = dma_alloc_coherent(&ha->pdev->dev,
+            qla2x00_gid_list_size(ha),
+            &gid_list_dma, GFP_KERNEL);
+        if (!gid_list) {
+            ql_dbg(ql_dbg_user, vha, 0x705c,
+                "DMA allocation failed for %u\n",
+                qla2x00_gid_list_size(ha));
+            return 0;
+        }
+
+        rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
+            &entries);
+        if (rc != QLA_SUCCESS)
+            goto out_free_id_list;
+
+        id_iter = (char *)gid_list;
+
+        seq_puts(s, "Port Name       Port ID   Loop ID\n");
+
+        for (i = 0; i < entries; i++) {
+            struct gid_list_info *gid =
+                (struct gid_list_info *)id_iter;
+            loop_id = le16_to_cpu(gid->loop_id);
+            memset(&fc_port, 0, sizeof(fc_port_t));
+
+            fc_port.loop_id = loop_id;
+
+            rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
+            seq_printf(s, "%8phC  %02x%02x%02x  %d\n",
+                fc_port.port_name, fc_port.d_id.b.domain,
+                fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
+                fc_port.loop_id);
+            id_iter += ha->gid_list_info_size;
+        }
+out_free_id_list:
+        dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+            gid_list, gid_list_dma);
+    }
+
+    return 0;
+}
+
+static int
+qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
+{
+    scsi_qla_host_t *vha = inode->i_private;
+
+    return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
+}
+
+static const struct file_operations dfs_tgt_port_database_ops = {
+    .open = qla2x00_dfs_tgt_port_database_open,
+    .read = seq_read,
+    .llseek = seq_lseek,
+    .release = single_release,
+};
+
+static int
 qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
 {
     struct scsi_qla_host *vha = s->private;
@@ -114,6 +185,21 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
     seq_printf(s, "num Q full sent = %lld\n",
         vha->tgt_counters.num_q_full_sent);

+    /* DIF stats */
+    seq_printf(s, "DIF Inp Bytes = %lld\n",
+        vha->qla_stats.qla_dif_stats.dif_input_bytes);
+    seq_printf(s, "DIF Outp Bytes = %lld\n",
+        vha->qla_stats.qla_dif_stats.dif_output_bytes);
+    seq_printf(s, "DIF Inp Req = %lld\n",
+        vha->qla_stats.qla_dif_stats.dif_input_requests);
+    seq_printf(s, "DIF Outp Req = %lld\n",
+        vha->qla_stats.qla_dif_stats.dif_output_requests);
+    seq_printf(s, "DIF Guard err = %d\n",
+        vha->qla_stats.qla_dif_stats.dif_guard_err);
+    seq_printf(s, "DIF Ref tag err = %d\n",
+        vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
+    seq_printf(s, "DIF App tag err = %d\n",
+        vha->qla_stats.qla_dif_stats.dif_app_tag_err);
     return 0;
 }

@@ -281,6 +367,14 @@ create_nodes:
         goto out;
     }

+    ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
+        S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
+    if (!ha->tgt.dfs_tgt_port_database) {
+        ql_log(ql_log_warn, vha, 0xffff,
+            "Unable to create debugFS tgt_port_database node.\n");
+        goto out;
+    }
+
     ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
         &dfs_fce_ops);
     if (!ha->dfs_fce) {
@@ -311,6 +405,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
         ha->tgt.dfs_tgt_sess = NULL;
     }

+    if (ha->tgt.dfs_tgt_port_database) {
+        debugfs_remove(ha->tgt.dfs_tgt_port_database);
+        ha->tgt.dfs_tgt_port_database = NULL;
+    }
+
     if (ha->dfs_fw_resource_cnt) {
         debugfs_remove(ha->dfs_fw_resource_cnt);
         ha->dfs_fw_resource_cnt = NULL;
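
The tgt_port_database node added above follows the stock seq_file/single_open
recipe used for every node in this file: a show() callback prints into the
seq_file, open() binds it to the data stashed in inode->i_private, and the
dentry returned by debugfs_create_file() is kept so the remove path can delete
it. A stripped-down sketch of the recipe -- the demo_* names are illustrative:

  #include <linux/debugfs.h>
  #include <linux/seq_file.h>

  static int demo_show(struct seq_file *s, void *unused)
  {
      /* s->private is the pointer passed to debugfs_create_file() */
      seq_puts(s, "hello from debugfs\n");
      return 0;
  }

  static int demo_open(struct inode *inode, struct file *file)
  {
      return single_open(file, demo_show, inode->i_private);
  }

  static const struct file_operations demo_fops = {
      .open    = demo_open,
      .read    = seq_read,
      .llseek  = seq_lseek,
      .release = single_release,
  };

  static struct dentry *demo_dentry;

  static void demo_create(struct dentry *parent, void *data)
  {
      /* Keep the dentry so teardown can remove it, as the driver
       * does with dfs_tgt_port_database. */
      demo_dentry = debugfs_create_file("demo", S_IRUSR, parent,
          data, &demo_fops);
  }

  static void demo_destroy(void)
  {
      if (demo_dentry) {
          debugfs_remove(demo_dentry);
          demo_dentry = NULL;
      }
  }
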
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index b3d6441d1d90..5b2451745e9f 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -193,6 +193,7 @@ extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
 void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_abort_cmd(srb_t *);

 /*
  * Global Functions in qla_mid.c source file.
@@ -256,11 +257,11 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
 extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
 extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
 extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
-    uint32_t *, uint16_t, struct qla_tgt_cmd *);
+    uint32_t *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
-    uint32_t *, uint16_t, struct qla_tgt_cmd *);
+    uint32_t *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
-    uint32_t *, uint16_t, struct qla_tgt_cmd *);
+    uint32_t *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
 extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
 extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
@@ -368,7 +369,7 @@ qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,

 extern int
 qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
-    dma_addr_t, uint);
+    dma_addr_t, uint16_t);

 extern int qla24xx_abort_command(srb_t *);
 extern int qla24xx_async_abort_command(srb_t *);
@@ -472,6 +473,13 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
 extern int
 qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint);

+int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *);
+int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8);
+int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
+    uint16_t *);
+int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *,
+    struct port_database_24xx *);
+
 /*
  * Global Function Prototypes in qla_isr.c source file.
  */
@@ -846,5 +854,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
     uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
 void qla24xx_delete_sess_fn(struct work_struct *);
 void qlt_unknown_atio_work_fn(struct work_struct *);
+void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
+void qlt_remove_target_resources(struct qla_hw_data *);

 #endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 32fb9007f137..f9d2fe7b1ade 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -629,7 +629,6 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
     struct srb *sp = s;
     struct scsi_qla_host *vha = sp->vha;
     struct qla_hw_data *ha = vha->hw;
-    uint64_t zero = 0;
     struct port_database_24xx *pd;
     fc_port_t *fcport = sp->fcport;
     u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
@@ -649,48 +648,7 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)

     pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

-    /* Check for logged in state. */
-    if (pd->current_login_state != PDS_PRLI_COMPLETE &&
-        pd->last_login_state != PDS_PRLI_COMPLETE) {
-        ql_dbg(ql_dbg_mbx, vha, 0xffff,
-            "Unable to verify login-state (%x/%x) for "
-            "loop_id %x.\n", pd->current_login_state,
-            pd->last_login_state, fcport->loop_id);
-        rval = QLA_FUNCTION_FAILED;
-        goto gpd_error_out;
-    }
-
-    if (fcport->loop_id == FC_NO_LOOP_ID ||
-        (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
-         memcmp(fcport->port_name, pd->port_name, 8))) {
-        /* We lost the device mid way. */
-        rval = QLA_NOT_LOGGED_IN;
-        goto gpd_error_out;
-    }
-
-    /* Names are little-endian. */
-    memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
-
-    /* Get port_id of device. */
-    fcport->d_id.b.domain = pd->port_id[0];
-    fcport->d_id.b.area = pd->port_id[1];
-    fcport->d_id.b.al_pa = pd->port_id[2];
-    fcport->d_id.b.rsvd_1 = 0;
-
-    /* If not target must be initiator or unknown type. */
-    if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
-        fcport->port_type = FCT_INITIATOR;
-    else
-        fcport->port_type = FCT_TARGET;
-
-    /* Passback COS information. */
-    fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
-        FC_COS_CLASS2 : FC_COS_CLASS3;
-
-    if (pd->prli_svc_param_word_3[0] & BIT_7) {
-        fcport->flags |= FCF_CONF_COMP_SUPPORTED;
-        fcport->conf_compl_supported = 1;
-    }
+    rval = __qla24xx_parse_gpdb(vha, fcport, pd);

 gpd_error_out:
     memset(&ea, 0, sizeof(ea));
@@ -876,10 +834,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
     fcport->login_retry--;

     if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
-        (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
         (fcport->fw_login_state == DSC_LS_PRLI_PEND))
         return 0;

+    if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+        if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+            return 0;
+    }
+
     /* for pure Target Mode. Login will not be initiated */
     if (vha->host->active_mode == MODE_TARGET)
         return 0;
@@ -1041,10 +1003,14 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
         fcport->flags);

     if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
-        (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
         (fcport->fw_login_state == DSC_LS_PRLI_PEND))
         return;

+    if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+        if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+            return;
+    }
+
     if (fcport->flags & FCF_ASYNC_SENT) {
         fcport->login_retry++;
         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
@@ -1258,7 +1224,7 @@ qla24xx_abort_sp_done(void *ptr, int res)
     complete(&abt->u.abt.comp);
 }

-static int
+int
 qla24xx_async_abort_cmd(srb_t *cmd_sp)
 {
     scsi_qla_host_t *vha = cmd_sp->vha;
@@ -3212,6 +3178,7 @@ next_check:
     } else {
         ql_dbg(ql_dbg_init, vha, 0x00d3,
             "Init Firmware -- success.\n");
+        ha->flags.fw_started = 1;
     }

     return (rval);
@@ -3374,8 +3341,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
     uint8_t domain;
     char connect_type[22];
     struct qla_hw_data *ha = vha->hw;
-    unsigned long flags;
     scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+    port_id_t id;

     /* Get host addresses. */
     rval = qla2x00_get_adapter_id(vha,
@@ -3453,13 +3420,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)

     /* Save Host port and loop ID. */
     /* byte order - Big Endian */
-    vha->d_id.b.domain = domain;
-    vha->d_id.b.area = area;
-    vha->d_id.b.al_pa = al_pa;
-
-    spin_lock_irqsave(&ha->vport_slock, flags);
-    qlt_update_vp_map(vha, SET_AL_PA);
-    spin_unlock_irqrestore(&ha->vport_slock, flags);
+    id.b.domain = domain;
+    id.b.area = area;
+    id.b.al_pa = al_pa;
+    id.b.rsvd_1 = 0;
+    qlt_update_host_map(vha, id);

     if (!vha->flags.init_done)
         ql_log(ql_log_info, vha, 0x2010,
@@ -4036,6 +4001,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
         atomic_set(&vha->loop_state, LOOP_READY);
         ql_dbg(ql_dbg_disc, vha, 0x2069,
             "LOOP READY.\n");
+        ha->flags.fw_init_done = 1;

         /*
          * Process any ATIO queue entries that came in
@@ -5148,6 +5114,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
             }
         }
         atomic_dec(&vha->vref_count);
+        wake_up(&vha->vref_waitq);
     }
     spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
@@ -5526,6 +5493,11 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
     if (!(IS_P3P_TYPE(ha)))
         ha->isp_ops->reset_chip(vha);

+    ha->flags.n2n_ae = 0;
+    ha->flags.lip_ae = 0;
+    ha->current_topology = 0;
+    ha->flags.fw_started = 0;
+    ha->flags.fw_init_done = 0;
     ha->chip_reset++;

     atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -6802,6 +6774,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
         return;
     if (!ha->fw_major_version)
         return;
+    if (!ha->flags.fw_started)
+        return;

     ret = qla2x00_stop_firmware(vha);
     for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
@@ -6815,6 +6789,9 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
6815 "Attempting retry of stop-firmware command.\n"); 6789 "Attempting retry of stop-firmware command.\n");
6816 ret = qla2x00_stop_firmware(vha); 6790 ret = qla2x00_stop_firmware(vha);
6817 } 6791 }
6792
6793 ha->flags.fw_started = 0;
6794 ha->flags.fw_init_done = 0;
6818} 6795}
6819 6796
6820int 6797int
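
The DSC_LS_PLOGI_COMP handling above trades an unconditional early return for
a deadline: fcport->plogi_nack_done_deadline (added to fc_port in qla_def.h)
holds a jiffies timestamp, and relogin is only deferred while
time_before_eq(jiffies, deadline) holds. This is the usual way to express
"back off for N seconds" without arming a timer; a sketch of the idiom with
illustrative names:

  #include <linux/jiffies.h>
  #include <linux/types.h>

  static unsigned long nack_deadline;

  /* Arm the backoff: defer relogin attempts for 5 seconds. */
  static void arm_backoff(void)
  {
      nack_deadline = jiffies + 5 * HZ;
  }

  /* time_before_eq() compares with wraparound safety, unlike a
   * plain "jiffies <= nack_deadline". */
  static bool still_backing_off(void)
  {
      return time_before_eq(jiffies, nack_deadline);
  }
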
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 535079280288..ea027f6a7fd4 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -889,7 +889,7 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,

 int
 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
-    uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+    uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
 {
     void *next_dsd;
     uint8_t avail_dsds = 0;
@@ -898,7 +898,6 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
     struct scatterlist *sg_prot;
     uint32_t *cur_dsd = dsd;
     uint16_t used_dsds = tot_dsds;
-
     uint32_t prot_int; /* protection interval */
     uint32_t partial;
     struct qla2_sgx sgx;
@@ -966,7 +965,7 @@ alloc_and_fill:
             } else {
                 list_add_tail(&dsd_ptr->list,
                     &(tc->ctx->dsd_list));
-                tc->ctx_dsd_alloced = 1;
+                *tc->ctx_dsd_alloced = 1;
             }


@@ -1005,7 +1004,7 @@ alloc_and_fill:

 int
 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
-    uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+    uint16_t tot_dsds, struct qla_tc_param *tc)
 {
     void *next_dsd;
     uint8_t avail_dsds = 0;
@@ -1066,7 +1065,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
         } else {
             list_add_tail(&dsd_ptr->list,
                 &(tc->ctx->dsd_list));
-            tc->ctx_dsd_alloced = 1;
+            *tc->ctx_dsd_alloced = 1;
         }

         /* add new list to cmd iocb or last list */
@@ -1092,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,

 int
 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
-    uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+    uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
 {
     void *next_dsd;
     uint8_t avail_dsds = 0;
@@ -1158,7 +1157,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
         } else {
             list_add_tail(&dsd_ptr->list,
                 &(tc->ctx->dsd_list));
-            tc->ctx_dsd_alloced = 1;
+            *tc->ctx_dsd_alloced = 1;
         }

         /* add new list to cmd iocb or last list */
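
The repeated `tc->ctx_dsd_alloced = 1` to `*tc->ctx_dsd_alloced = 1` change
follows from the new parameter type: struct qla_tc_param (qla_def.h) carries a
uint8_t *ctx_dsd_alloced pointing back into the caller's command, where struct
qla_tgt_cmd held the flag directly. Writing through the pointer lets these DIF
scatter-gather builders serve both initiator and target commands without
knowing either layout. A compact illustration, with illustrative names:

  #include <linux/types.h>

  /* Caller-owned command state the helper must update. */
  struct cmd {
      u8 ctx_dsd_alloced;
  };

  /* The parameter block decouples the helper from the command
   * type: it carries a pointer to the flag, not the command. */
  struct tc_param {
      u8 *ctx_dsd_alloced;
  };

  static void build_sglist(struct tc_param *tc)
  {
      /* ...allocate DSD context... */
      *tc->ctx_dsd_alloced = 1;   /* visible in the caller's cmd */
  }

  static void caller(void)
  {
      struct cmd c = { 0 };
      struct tc_param tc = { .ctx_dsd_alloced = &c.ctx_dsd_alloced };

      build_sglist(&tc);          /* c.ctx_dsd_alloced is now 1 */
  }
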
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 3c66ea29de27..3203367a4f42 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -708,6 +708,8 @@ skip_rio:
708 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); 708 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
709 709
710 ha->isp_ops->fw_dump(vha, 1); 710 ha->isp_ops->fw_dump(vha, 1);
711 ha->flags.fw_init_done = 0;
712 ha->flags.fw_started = 0;
711 713
712 if (IS_FWI2_CAPABLE(ha)) { 714 if (IS_FWI2_CAPABLE(ha)) {
713 if (mb[1] == 0 && mb[2] == 0) { 715 if (mb[1] == 0 && mb[2] == 0) {
@@ -761,6 +763,9 @@ skip_rio:
         break;

     case MBA_LIP_OCCURRED:      /* Loop Initialization Procedure */
+        ha->flags.lip_ae = 1;
+        ha->flags.n2n_ae = 0;
+
         ql_dbg(ql_dbg_async, vha, 0x5009,
             "LIP occurred (%x).\n", mb[1]);

@@ -797,6 +802,10 @@ skip_rio:
         break;

     case MBA_LOOP_DOWN:     /* Loop Down Event */
+        ha->flags.n2n_ae = 0;
+        ha->flags.lip_ae = 0;
+        ha->current_topology = 0;
+
         mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
             ? RD_REG_WORD(&reg24->mailbox4) : 0;
         mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
@@ -866,6 +875,9 @@ skip_rio:

     /* case MBA_DCBX_COMPLETE: */
     case MBA_POINT_TO_POINT:    /* Point-to-Point */
+        ha->flags.lip_ae = 0;
+        ha->flags.n2n_ae = 1;
+
         if (IS_QLA2100(ha))
             break;

@@ -1620,9 +1632,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
         QLA_LOGIO_LOGIN_RETRIED : 0;
     if (logio->entry_status) {
         ql_log(ql_log_warn, fcport->vha, 0x5034,
-            "Async-%s error entry - hdl=%x"
+            "Async-%s error entry - %8phC hdl=%x"
             "portid=%02x%02x%02x entry-status=%x.\n",
-            type, sp->handle, fcport->d_id.b.domain,
+            type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
             fcport->d_id.b.area, fcport->d_id.b.al_pa,
             logio->entry_status);
         ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
@@ -1633,8 +1645,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,

     if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
         ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
-            "Async-%s complete - hdl=%x portid=%02x%02x%02x "
-            "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+            "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
+            "iop0=%x.\n", type, fcport->port_name, sp->handle,
+            fcport->d_id.b.domain,
             fcport->d_id.b.area, fcport->d_id.b.al_pa,
             le32_to_cpu(logio->io_parameter[0]));

@@ -1674,6 +1687,17 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
     case LSC_SCODE_NPORT_USED:
         data[0] = MBS_LOOP_ID_USED;
         break;
+    case LSC_SCODE_CMD_FAILED:
+        if (iop[1] == 0x0606) {
+            /*
+             * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
+             * Target side acked.
+             */
+            data[0] = MBS_COMMAND_COMPLETE;
+            goto logio_done;
+        }
+        data[0] = MBS_COMMAND_ERROR;
+        break;
     case LSC_SCODE_NOXCB:
         vha->hw->exch_starvation++;
         if (vha->hw->exch_starvation > 5) {
@@ -1695,8 +1719,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
     }

     ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
-        "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
-        "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+        "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
+        "iop0=%x iop1=%x.\n", type, fcport->port_name,
+        sp->handle, fcport->d_id.b.domain,
         fcport->d_id.b.area, fcport->d_id.b.al_pa,
         le16_to_cpu(logio->comp_status),
         le32_to_cpu(logio->io_parameter[0]),
@@ -2679,7 +2704,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
         return;

     abt = &sp->u.iocb_cmd;
-    abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
+    abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
     sp->done(sp, 0);
 }

@@ -2693,7 +2718,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
     struct sts_entry_24xx *pkt;
     struct qla_hw_data *ha = vha->hw;

-    if (!vha->flags.online)
+    if (!ha->flags.fw_started)
         return;

     while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 35079f417417..a113ab3592a7 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -10,6 +10,28 @@
 #include <linux/delay.h>
 #include <linux/gfp.h>

+static struct mb_cmd_name {
+    uint16_t cmd;
+    const char *str;
+} mb_str[] = {
+    {MBC_GET_PORT_DATABASE,     "GPDB"},
+    {MBC_GET_ID_LIST,           "GIDList"},
+    {MBC_GET_LINK_PRIV_STATS,   "Stats"},
+};
+
+static const char *mb_to_str(uint16_t cmd)
+{
+    int i;
+    struct mb_cmd_name *e;
+
+    for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
+        e = mb_str + i;
+        if (cmd == e->cmd)
+            return e->str;
+    }
+    return "unknown";
+}
+
 static struct rom_cmd {
     uint16_t cmd;
 } rom_cmds[] = {
@@ -2818,7 +2840,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,

 int
 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
-    dma_addr_t stats_dma, uint options)
+    dma_addr_t stats_dma, uint16_t options)
 {
     int rval;
     mbx_cmd_t mc;
@@ -2828,19 +2850,17 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
     ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
         "Entered %s.\n", __func__);

-    mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
-    mcp->mb[2] = MSW(stats_dma);
-    mcp->mb[3] = LSW(stats_dma);
-    mcp->mb[6] = MSW(MSD(stats_dma));
-    mcp->mb[7] = LSW(MSD(stats_dma));
-    mcp->mb[8] = sizeof(struct link_statistics) / 4;
-    mcp->mb[9] = vha->vp_idx;
-    mcp->mb[10] = options;
-    mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
-    mcp->in_mb = MBX_2|MBX_1|MBX_0;
-    mcp->tov = MBX_TOV_SECONDS;
-    mcp->flags = IOCTL_CMD;
-    rval = qla2x00_mailbox_command(vha, mcp);
+    memset(&mc, 0, sizeof(mc));
+    mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
+    mc.mb[2] = MSW(stats_dma);
+    mc.mb[3] = LSW(stats_dma);
+    mc.mb[6] = MSW(MSD(stats_dma));
+    mc.mb[7] = LSW(MSD(stats_dma));
+    mc.mb[8] = sizeof(struct link_statistics) / 4;
+    mc.mb[9] = cpu_to_le16(vha->vp_idx);
+    mc.mb[10] = cpu_to_le16(options);
+
+    rval = qla24xx_send_mb_cmd(vha, &mc);

     if (rval == QLA_SUCCESS) {
         if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
@@ -3603,6 +3623,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
     scsi_qla_host_t *vp = NULL;
     unsigned long flags;
     int found;
+    port_id_t id;

     ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
         "Entered %s.\n", __func__);
@@ -3610,28 +3631,27 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
     if (rptid_entry->entry_status != 0)
         return;

+    id.b.domain = rptid_entry->port_id[2];
+    id.b.area = rptid_entry->port_id[1];
+    id.b.al_pa = rptid_entry->port_id[0];
+    id.b.rsvd_1 = 0;
+
     if (rptid_entry->format == 0) {
         /* loop */
-        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
+        ql_dbg(ql_dbg_async, vha, 0x10b7,
             "Format 0 : Number of VPs setup %d, number of "
             "VPs acquired %d.\n", rptid_entry->vp_setup,
             rptid_entry->vp_acquired);
-        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
+        ql_dbg(ql_dbg_async, vha, 0x10b8,
             "Primary port id %02x%02x%02x.\n",
             rptid_entry->port_id[2], rptid_entry->port_id[1],
             rptid_entry->port_id[0]);

-        vha->d_id.b.domain = rptid_entry->port_id[2];
-        vha->d_id.b.area = rptid_entry->port_id[1];
-        vha->d_id.b.al_pa = rptid_entry->port_id[0];
-
-        spin_lock_irqsave(&ha->vport_slock, flags);
-        qlt_update_vp_map(vha, SET_AL_PA);
-        spin_unlock_irqrestore(&ha->vport_slock, flags);
+        qlt_update_host_map(vha, id);

     } else if (rptid_entry->format == 1) {
         /* fabric */
-        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
+        ql_dbg(ql_dbg_async, vha, 0x10b9,
             "Format 1: VP[%d] enabled - status %d - with "
             "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
             rptid_entry->vp_status,
@@ -3653,12 +3673,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
             WWN_SIZE);
         }

-        vha->d_id.b.domain = rptid_entry->port_id[2];
-        vha->d_id.b.area = rptid_entry->port_id[1];
-        vha->d_id.b.al_pa = rptid_entry->port_id[0];
-        spin_lock_irqsave(&ha->vport_slock, flags);
-        qlt_update_vp_map(vha, SET_AL_PA);
-        spin_unlock_irqrestore(&ha->vport_slock, flags);
+        qlt_update_host_map(vha, id);
     }

     fc_host_port_name(vha->host) =
@@ -3694,12 +3709,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
         if (!found)
             return;

-        vp->d_id.b.domain = rptid_entry->port_id[2];
-        vp->d_id.b.area = rptid_entry->port_id[1];
-        vp->d_id.b.al_pa = rptid_entry->port_id[0];
-        spin_lock_irqsave(&ha->vport_slock, flags);
-        qlt_update_vp_map(vp, SET_AL_PA);
-        spin_unlock_irqrestore(&ha->vport_slock, flags);
+        qlt_update_host_map(vp, id);

         /*
          * Cannot configure here as we are still sitting on the
@@ -5827,3 +5837,225 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,

     return rval;
 }
+
+static void qla2x00_async_mb_sp_done(void *s, int res)
+{
+    struct srb *sp = s;
+
+    sp->u.iocb_cmd.u.mbx.rc = res;
+
+    complete(&sp->u.iocb_cmd.u.mbx.comp);
+    /* don't free sp here. Let the caller do the free */
+}
+
+/*
+ * This mailbox uses the IOCB interface to send MB commands.
+ * This allows non-critical (non chip setup) commands to go
+ * out in parallel.
+ */
+int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
+{
+    int rval = QLA_FUNCTION_FAILED;
+    srb_t *sp;
+    struct srb_iocb *c;
+
+    if (!vha->hw->flags.fw_started)
+        goto done;
+
+    sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+    if (!sp)
+        goto done;
+
+    sp->type = SRB_MB_IOCB;
+    sp->name = mb_to_str(mcp->mb[0]);
+
+    qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+    memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
+
+    c = &sp->u.iocb_cmd;
+    c->timeout = qla2x00_async_iocb_timeout;
+    init_completion(&c->u.mbx.comp);
+
+    sp->done = qla2x00_async_mb_sp_done;
+
+    rval = qla2x00_start_sp(sp);
+    if (rval != QLA_SUCCESS) {
+        ql_dbg(ql_dbg_mbx, vha, 0xffff,
+            "%s: %s Failed submission. %x.\n",
+            __func__, sp->name, rval);
+        goto done_free_sp;
+    }
+
+    ql_dbg(ql_dbg_mbx, vha, 0xffff, "MB:%s hndl %x submitted\n",
+        sp->name, sp->handle);
+
+    wait_for_completion(&c->u.mbx.comp);
+    memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
+
+    rval = c->u.mbx.rc;
+    switch (rval) {
+    case QLA_FUNCTION_TIMEOUT:
+        ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Timeout. %x.\n",
+            __func__, sp->name, rval);
+        break;
+    case QLA_SUCCESS:
+        ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s done.\n",
+            __func__, sp->name);
+        sp->free(sp);
+        break;
+    default:
+        ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Failed. %x.\n",
+            __func__, sp->name, rval);
+        sp->free(sp);
+        break;
+    }
+
+    return rval;
+
+done_free_sp:
+    sp->free(sp);
+done:
+    return rval;
+}
+
+/*
+ * qla24xx_gpdb_wait
+ * NOTE: Do not call this routine from DPC thread
+ */
+int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
+{
+    int rval = QLA_FUNCTION_FAILED;
+    dma_addr_t pd_dma;
+    struct port_database_24xx *pd;
+    struct qla_hw_data *ha = vha->hw;
+    mbx_cmd_t mc;
+
+    if (!vha->hw->flags.fw_started)
+        goto done;
+
+    pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+    if (pd == NULL) {
+        ql_log(ql_log_warn, vha, 0xffff,
+            "Failed to allocate port database structure.\n");
+        goto done_free_sp;
+    }
+    memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
+
+    memset(&mc, 0, sizeof(mc));
+    mc.mb[0] = MBC_GET_PORT_DATABASE;
+    mc.mb[1] = cpu_to_le16(fcport->loop_id);
+    mc.mb[2] = MSW(pd_dma);
+    mc.mb[3] = LSW(pd_dma);
+    mc.mb[6] = MSW(MSD(pd_dma));
+    mc.mb[7] = LSW(MSD(pd_dma));
+    mc.mb[9] = cpu_to_le16(vha->vp_idx);
+    mc.mb[10] = cpu_to_le16((uint16_t)opt);
+
+    rval = qla24xx_send_mb_cmd(vha, &mc);
+    if (rval != QLA_SUCCESS) {
+        ql_dbg(ql_dbg_mbx, vha, 0xffff,
+            "%s: %8phC fail\n", __func__, fcport->port_name);
+        goto done_free_sp;
+    }
+
+    rval = __qla24xx_parse_gpdb(vha, fcport, pd);
+
+    ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %8phC done\n",
+        __func__, fcport->port_name);
+
+done_free_sp:
+    if (pd)
+        dma_pool_free(ha->s_dma_pool, pd, pd_dma);
+done:
+    return rval;
+}
+
+int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
+    struct port_database_24xx *pd)
+{
+    int rval = QLA_SUCCESS;
+    uint64_t zero = 0;
+
+    /* Check for logged in state. */
+    if (pd->current_login_state != PDS_PRLI_COMPLETE &&
+        pd->last_login_state != PDS_PRLI_COMPLETE) {
+        ql_dbg(ql_dbg_mbx, vha, 0xffff,
+            "Unable to verify login-state (%x/%x) for "
+            "loop_id %x.\n", pd->current_login_state,
+            pd->last_login_state, fcport->loop_id);
+        rval = QLA_FUNCTION_FAILED;
+        goto gpd_error_out;
+    }
+
+    if (fcport->loop_id == FC_NO_LOOP_ID ||
+        (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+         memcmp(fcport->port_name, pd->port_name, 8))) {
+        /* We lost the device mid way. */
+        rval = QLA_NOT_LOGGED_IN;
+        goto gpd_error_out;
+    }
+
+    /* Names are little-endian. */
+    memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
+    memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
+
+    /* Get port_id of device. */
+    fcport->d_id.b.domain = pd->port_id[0];
+    fcport->d_id.b.area = pd->port_id[1];
+    fcport->d_id.b.al_pa = pd->port_id[2];
+    fcport->d_id.b.rsvd_1 = 0;
+
+    /* If not target must be initiator or unknown type. */
+    if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+        fcport->port_type = FCT_INITIATOR;
+    else
+        fcport->port_type = FCT_TARGET;
+
+    /* Passback COS information. */
+    fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
+        FC_COS_CLASS2 : FC_COS_CLASS3;
+
+    if (pd->prli_svc_param_word_3[0] & BIT_7) {
+        fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+        fcport->conf_compl_supported = 1;
+    }
+
+gpd_error_out:
+    return rval;
+}
+
+/*
+ * qla24xx_gidlist_wait
+ * NOTE: don't call this routine from DPC thread.
+ */
+int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
+    void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
+{
+    int rval = QLA_FUNCTION_FAILED;
+    mbx_cmd_t mc;
+
+    if (!vha->hw->flags.fw_started)
+        goto done;
+
+    memset(&mc, 0, sizeof(mc));
+    mc.mb[0] = MBC_GET_ID_LIST;
+    mc.mb[2] = MSW(id_list_dma);
+    mc.mb[3] = LSW(id_list_dma);
+    mc.mb[6] = MSW(MSD(id_list_dma));
+    mc.mb[7] = LSW(MSD(id_list_dma));
+    mc.mb[8] = 0;
+    mc.mb[9] = cpu_to_le16(vha->vp_idx);
+
+    rval = qla24xx_send_mb_cmd(vha, &mc);
+    if (rval != QLA_SUCCESS) {
+        ql_dbg(ql_dbg_mbx, vha, 0xffff,
+            "%s: fail\n", __func__);
+    } else {
+        *entries = mc.mb[1];
+        ql_dbg(ql_dbg_mbx, vha, 0xffff,
+            "%s: done\n", __func__);
+    }
+done:
+    return rval;
+}
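
qla24xx_send_mb_cmd() above is a synchronous wrapper over the driver's async
IOCB path: a struct completion and a result field ride in the SRB, the
interrupt-side done() callback records the status and calls complete(), and
the submitter blocks in wait_for_completion(). That is the stock kernel
pattern for making an async submit/callback API blocking, and it is why the
helpers carry "do not call from DPC thread" notes -- the waiter must be
allowed to sleep. A minimal sketch with illustrative names:

  #include <linux/completion.h>

  struct request {
      struct completion comp;
      int rc;
  };

  /* Runs in the async completion context (IRQ or bottom half). */
  static void request_done(struct request *req, int res)
  {
      req->rc = res;
      complete(&req->comp);
  }

  /* Runs in process context only: wait_for_completion() sleeps. */
  static int submit_and_wait(struct request *req)
  {
      init_completion(&req->comp);
      /* ...hand req to the async engine, which will invoke
       * request_done() when the hardware answers... */
      wait_for_completion(&req->comp);
      return req->rc;
  }
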
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c6d6f0d912ff..09a490c98763 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -74,13 +74,14 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
      * ensures no active vp_list traversal while the vport is removed
      * from the queue)
      */
-    spin_lock_irqsave(&ha->vport_slock, flags);
-    while (atomic_read(&vha->vref_count)) {
-        spin_unlock_irqrestore(&ha->vport_slock, flags);
-
-        msleep(500);
+    wait_event_timeout(vha->vref_waitq, atomic_read(&vha->vref_count),
+        10*HZ);

-        spin_lock_irqsave(&ha->vport_slock, flags);
+    spin_lock_irqsave(&ha->vport_slock, flags);
+    if (atomic_read(&vha->vref_count)) {
+        ql_dbg(ql_dbg_vport, vha, 0xfffa,
+            "vha->vref_count=%u timeout\n", vha->vref_count.counter);
+        vha->vref_count = (atomic_t)ATOMIC_INIT(0);
     }
     list_del(&vha->list);
     qlt_update_vp_map(vha, RESET_VP_IDX);
@@ -269,6 +270,7 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)

             spin_lock_irqsave(&ha->vport_slock, flags);
             atomic_dec(&vha->vref_count);
+            wake_up(&vha->vref_waitq);
         }
         i++;
     }
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1fed235a1b4a..41d5b09f7326 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2560,6 +2560,20 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
     return atomic_read(&vha->loop_state) == LOOP_READY;
 }

+static void qla2x00_iocb_work_fn(struct work_struct *work)
+{
+    struct scsi_qla_host *vha = container_of(work,
+        struct scsi_qla_host, iocb_work);
+    int cnt = 0;
+
+    while (!list_empty(&vha->work_list)) {
+        qla2x00_do_work(vha);
+        cnt++;
+        if (cnt > 10)
+            break;
+    }
+}
+
 /*
  * PCI driver interface
  */
@@ -3078,6 +3092,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
      */
     qla2xxx_wake_dpc(base_vha);

+    INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
     INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);

     if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
@@ -3469,6 +3484,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
     qla2x00_free_sysfs_attr(base_vha, true);

     fc_remove_host(base_vha->host);
+    qlt_remove_target_resources(ha);

     scsi_remove_host(base_vha->host);

@@ -4268,6 +4284,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
4268 spin_lock_init(&vha->work_lock); 4284 spin_lock_init(&vha->work_lock);
4269 spin_lock_init(&vha->cmd_list_lock); 4285 spin_lock_init(&vha->cmd_list_lock);
4270 init_waitqueue_head(&vha->fcport_waitQ); 4286 init_waitqueue_head(&vha->fcport_waitQ);
4287 init_waitqueue_head(&vha->vref_waitq);
4271 4288
4272 vha->gnl.size = sizeof(struct get_name_list_extended) * 4289 vha->gnl.size = sizeof(struct get_name_list_extended) *
4273 (ha->max_loop_id + 1); 4290 (ha->max_loop_id + 1);
@@ -4319,7 +4336,11 @@ qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
4319 spin_lock_irqsave(&vha->work_lock, flags); 4336 spin_lock_irqsave(&vha->work_lock, flags);
4320 list_add_tail(&e->list, &vha->work_list); 4337 list_add_tail(&e->list, &vha->work_list);
4321 spin_unlock_irqrestore(&vha->work_lock, flags); 4338 spin_unlock_irqrestore(&vha->work_lock, flags);
4322 qla2xxx_wake_dpc(vha); 4339
4340 if (QLA_EARLY_LINKUP(vha->hw))
4341 schedule_work(&vha->iocb_work);
4342 else
4343 qla2xxx_wake_dpc(vha);
4323 4344
4324 return QLA_SUCCESS; 4345 return QLA_SUCCESS;
4325} 4346}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 45f5077684f0..0e03ca2ab3e5 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -130,6 +130,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
130static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha, 130static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
131 fc_port_t *fcport, bool local); 131 fc_port_t *fcport, bool local);
132void qlt_unreg_sess(struct fc_port *sess); 132void qlt_unreg_sess(struct fc_port *sess);
133static void qlt_24xx_handle_abts(struct scsi_qla_host *,
134 struct abts_recv_from_24xx *);
135
133/* 136/*
134 * Global Variables 137 * Global Variables
135 */ 138 */
@@ -140,6 +143,20 @@ static struct workqueue_struct *qla_tgt_wq;
140static DEFINE_MUTEX(qla_tgt_mutex); 143static DEFINE_MUTEX(qla_tgt_mutex);
141static LIST_HEAD(qla_tgt_glist); 144static LIST_HEAD(qla_tgt_glist);
142 145
146static const char *prot_op_str(u32 prot_op)
147{
148 switch (prot_op) {
149 case TARGET_PROT_NORMAL: return "NORMAL";
150 case TARGET_PROT_DIN_INSERT: return "DIN_INSERT";
151 case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT";
152 case TARGET_PROT_DIN_STRIP: return "DIN_STRIP";
153 case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP";
154 case TARGET_PROT_DIN_PASS: return "DIN_PASS";
155 case TARGET_PROT_DOUT_PASS: return "DOUT_PASS";
156 default: return "UNKNOWN";
157 }
158}
159
143/* This API intentionally takes dest as a parameter, rather than returning 160/* This API intentionally takes dest as a parameter, rather than returning
144 * int value to avoid caller forgetting to issue wmb() after the store */ 161 * int value to avoid caller forgetting to issue wmb() after the store */
145void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest) 162void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
@@ -170,21 +187,23 @@ static inline
170struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, 187struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
171 uint8_t *d_id) 188 uint8_t *d_id)
172{ 189{
173 struct qla_hw_data *ha = vha->hw; 190 struct scsi_qla_host *host;
174 uint8_t vp_idx; 191 uint32_t key = 0;
175
176 if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
177 return NULL;
178 192
179 if (vha->d_id.b.al_pa == d_id[2]) 193 if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
194 (vha->d_id.b.al_pa == d_id[2]))
180 return vha; 195 return vha;
181 196
182 BUG_ON(ha->tgt.tgt_vp_map == NULL); 197 key = (uint32_t)d_id[0] << 16;
183 vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx; 198 key |= (uint32_t)d_id[1] << 8;
184 if (likely(test_bit(vp_idx, ha->vp_idx_map))) 199 key |= (uint32_t)d_id[2];
185 return ha->tgt.tgt_vp_map[vp_idx].vha;
186 200
187 return NULL; 201 host = btree_lookup32(&vha->hw->tgt.host_map, key);
202 if (!host)
203 ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
204 "Unable to find host %06x\n", key);
205
206 return host;
188} 207}
189 208
190static inline 209static inline
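
The lookup rewrite above drops the per-AL_PA vp_idx table in favor of a B+tree keyed by the full 24-bit FC port ID, so ports that share an AL_PA no longer collide. A sketch of the key packing and lookup, assuming a map already populated with btree_insert32() (names hypothetical):

#include <linux/btree.h>
#include <linux/types.h>

static u32 fc_did_to_key(const u8 *d_id)
{
        return ((u32)d_id[0] << 16) |   /* domain */
               ((u32)d_id[1] << 8)  |   /* area   */
                (u32)d_id[2];           /* al_pa  */
}

static void *host_for_d_id(struct btree_head32 *map, const u8 *d_id)
{
        return btree_lookup32(map, fc_did_to_key(d_id)); /* NULL if absent */
}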
@@ -389,6 +408,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
389 (struct abts_recv_from_24xx *)atio; 408 (struct abts_recv_from_24xx *)atio;
390 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, 409 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
391 entry->vp_index); 410 entry->vp_index);
411 unsigned long flags;
412
392 if (unlikely(!host)) { 413 if (unlikely(!host)) {
393 ql_dbg(ql_dbg_tgt, vha, 0xffff, 414 ql_dbg(ql_dbg_tgt, vha, 0xffff,
394 "qla_target(%d): Response pkt (ABTS_RECV_24XX) " 415 "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
@@ -396,9 +417,12 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
396 vha->vp_idx, entry->vp_index); 417 vha->vp_idx, entry->vp_index);
397 break; 418 break;
398 } 419 }
399 qlt_response_pkt(host, (response_t *)atio); 420 if (!ha_locked)
421 spin_lock_irqsave(&host->hw->hardware_lock, flags);
422 qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
423 if (!ha_locked)
424 spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
400 break; 425 break;
401
402 } 426 }
403 427
404 /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */ 428 /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */
@@ -554,6 +578,7 @@ void qla2x00_async_nack_sp_done(void *s, int res)
554 sp->fcport->login_gen++; 578 sp->fcport->login_gen++;
555 sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP; 579 sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
556 sp->fcport->logout_on_delete = 1; 580 sp->fcport->logout_on_delete = 1;
581 sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
557 break; 582 break;
558 583
559 case SRB_NACK_PRLI: 584 case SRB_NACK_PRLI:
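
plogi_nack_done_deadline stores an absolute jiffies value (now plus one second); later checks can then use the wraparound-safe time_after() macro. A tiny sketch with hypothetical helper names:

#include <linux/jiffies.h>
#include <linux/types.h>

static unsigned long deadline_in(unsigned int secs)
{
        return jiffies + secs * HZ;             /* absolute deadline */
}

static bool deadline_expired(unsigned long deadline)
{
        return time_after(jiffies, deadline);   /* wrap-safe compare */
}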
@@ -613,6 +638,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
613 break; 638 break;
614 case SRB_NACK_PRLI: 639 case SRB_NACK_PRLI:
615 fcport->fw_login_state = DSC_LS_PRLI_PEND; 640 fcport->fw_login_state = DSC_LS_PRLI_PEND;
641 fcport->deleted = 0;
616 c = "PRLI"; 642 c = "PRLI";
617 break; 643 break;
618 case SRB_NACK_LOGO: 644 case SRB_NACK_LOGO:
@@ -1215,7 +1241,7 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
1215 } 1241 }
1216 1242
1217 /* Get list of logged in devices */ 1243 /* Get list of logged in devices */
1218 rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries); 1244 rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
1219 if (rc != QLA_SUCCESS) { 1245 if (rc != QLA_SUCCESS) {
1220 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045, 1246 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
1221 "qla_target(%d): get_id_list() failed: %x\n", 1247 "qla_target(%d): get_id_list() failed: %x\n",
@@ -1551,6 +1577,9 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
1551 request_t *pkt; 1577 request_t *pkt;
1552 struct nack_to_isp *nack; 1578 struct nack_to_isp *nack;
1553 1579
1580 if (!ha->flags.fw_started)
1581 return;
1582
1554 ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha); 1583 ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
1555 1584
1556 /* Send marker if required */ 1585 /* Send marker if required */
@@ -2013,6 +2042,70 @@ void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
2013} 2042}
2014EXPORT_SYMBOL(qlt_free_mcmd); 2043EXPORT_SYMBOL(qlt_free_mcmd);
2015 2044
2045/*
2046 * ha->hardware_lock is supposed to be held on entry. Might drop it, then
2047 * reacquire.
2048 */
2049void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
2050 uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
2051{
2052 struct atio_from_isp *atio = &cmd->atio;
2053 struct ctio7_to_24xx *ctio;
2054 uint16_t temp;
2055
2056 ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
2057 "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
2058 "sense_key=%02x, asc=%02x, ascq=%02x",
2059 vha, atio, scsi_status, sense_key, asc, ascq);
2060
2061 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
2062 if (!ctio) {
2063 ql_dbg(ql_dbg_async, vha, 0x3067,
2064 "qla2x00t(%ld): %s failed: unable to allocate request packet",
2065 vha->host_no, __func__);
2066 goto out;
2067 }
2068
2069 ctio->entry_type = CTIO_TYPE7;
2070 ctio->entry_count = 1;
2071 ctio->handle = QLA_TGT_SKIP_HANDLE;
2072 ctio->nport_handle = cmd->sess->loop_id;
2073 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2074 ctio->vp_index = vha->vp_idx;
2075 ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2076 ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2077 ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2078 ctio->exchange_addr = atio->u.isp24.exchange_addr;
2079 ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
2080 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
2081 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2082 ctio->u.status1.ox_id = cpu_to_le16(temp);
2083 ctio->u.status1.scsi_status =
2084 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
2085 ctio->u.status1.response_len = cpu_to_le16(18);
2086 ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
2087
2088 if (ctio->u.status1.residual != 0)
2089 ctio->u.status1.scsi_status |=
2090 cpu_to_le16(SS_RESIDUAL_UNDER);
2091
2092 /* Response code and sense key */
2093 put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
2094 (&ctio->u.status1.sense_data)[0]);
2095 /* Additional sense length */
2096 put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]);
2097 /* ASC and ASCQ */
2098 put_unaligned_le32(((asc << 24) | (ascq << 16)),
2099 (&ctio->u.status1.sense_data)[3]);
2100
2101 /* Memory Barrier */
2102 wmb();
2103
2104 qla2x00_start_iocbs(vha, vha->req);
2105out:
2106 return;
2107}
2108
2016/* callback from target fabric module code */ 2109/* callback from target fabric module code */
2017void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) 2110void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
2018{ 2111{
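
The new qlt_send_resp_ctio() packs an 18-byte fixed-format sense payload (response code 0x70, additional sense length 0x0a) into the CTIO in little-endian 32-bit chunks. The equivalent byte layout, shown as a plain buffer per SPC fixed-format sense (hypothetical helper, not the driver's encoding path):

#include <linux/string.h>
#include <linux/types.h>

static void build_fixed_sense(u8 *buf, u8 sense_key, u8 asc, u8 ascq)
{
        memset(buf, 0, 18);
        buf[0]  = 0x70;                 /* current error, fixed format */
        buf[2]  = sense_key & 0x0f;
        buf[7]  = 0x0a;                 /* additional sense length (18 - 8) */
        buf[12] = asc;                  /* additional sense code */
        buf[13] = ascq;                 /* additional sense code qualifier */
}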
@@ -2261,7 +2354,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
2261 */ 2354 */
2262 return -EAGAIN; 2355 return -EAGAIN;
2263 } else 2356 } else
2264 ha->tgt.cmds[h-1] = prm->cmd; 2357 ha->tgt.cmds[h - 1] = prm->cmd;
2265 2358
2266 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; 2359 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
2267 pkt->nport_handle = prm->cmd->loop_id; 2360 pkt->nport_handle = prm->cmd->loop_id;
@@ -2391,6 +2484,50 @@ static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
2391 return cmd->bufflen > 0; 2484 return cmd->bufflen > 0;
2392} 2485}
2393 2486
2487static void qlt_print_dif_err(struct qla_tgt_prm *prm)
2488{
2489 struct qla_tgt_cmd *cmd;
2490 struct scsi_qla_host *vha;
2491
2492 /* asc 0x10=dif error */
2493 if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
2494 cmd = prm->cmd;
2495 vha = cmd->vha;
2496 /* ASCQ */
2497 switch (prm->sense_buffer[13]) {
2498 case 1:
2499 ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
2500 "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2501 "se_cmd=%p tag[%x]",
2502 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2503 cmd->atio.u.isp24.exchange_addr);
2504 break;
2505 case 2:
2506 ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
2507 "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2508 "se_cmd=%p tag[%x]",
2509 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2510 cmd->atio.u.isp24.exchange_addr);
2511 break;
2512 case 3:
2513 ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
2514 "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2515 "se_cmd=%p tag[%x]",
2516 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2517 cmd->atio.u.isp24.exchange_addr);
2518 break;
2519 default:
2520 ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
2521 "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
2522 "se_cmd=%p tag[%x]",
2523 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2524 cmd->atio.u.isp24.exchange_addr);
2525 break;
2526 }
2527 ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xffff, cmd->cdb, 16);
2528 }
2529}
2530
2394/* 2531/*
2395 * Called without ha->hardware_lock held 2532 * Called without ha->hardware_lock held
2396 */ 2533 */
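
qlt_print_dif_err() keys its messages on ASC 0x10 with ASCQ 1-3, the standard T10 protection-information sense codes. A small decoder for those codes (sketch; the strings are the SPC names):

#include <linux/types.h>

static const char *pi_err_name(u8 asc, u8 ascq)
{
        if (asc != 0x10)
                return "not a protection-information error";
        switch (ascq) {
        case 0x1: return "LOGICAL BLOCK GUARD CHECK FAILED";
        case 0x2: return "LOGICAL BLOCK APPLICATION TAG CHECK FAILED";
        case 0x3: return "LOGICAL BLOCK REFERENCE TAG CHECK FAILED";
        default:  return "unknown PI ASCQ";
        }
}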
@@ -2512,18 +2649,9 @@ skip_explict_conf:
2512 for (i = 0; i < prm->sense_buffer_len/4; i++) 2649 for (i = 0; i < prm->sense_buffer_len/4; i++)
2513 ((uint32_t *)ctio->u.status1.sense_data)[i] = 2650 ((uint32_t *)ctio->u.status1.sense_data)[i] =
2514 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); 2651 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
2515#if 0 2652
2516 if (unlikely((prm->sense_buffer_len % 4) != 0)) { 2653 qlt_print_dif_err(prm);
2517 static int q; 2654
2518 if (q < 10) {
2519 ql_dbg(ql_dbg_tgt, vha, 0xe04f,
2520 "qla_target(%d): %d bytes of sense "
2521 "lost", prm->tgt->ha->vp_idx,
2522 prm->sense_buffer_len % 4);
2523 q++;
2524 }
2525 }
2526#endif
2527 } else { 2655 } else {
2528 ctio->u.status1.flags &= 2656 ctio->u.status1.flags &=
2529 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 2657 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
@@ -2537,19 +2665,9 @@ skip_explict_conf:
2537 /* Sense with len > 24, is it possible ??? */ 2665 /* Sense with len > 24, is it possible ??? */
2538} 2666}
2539 2667
2540
2541
2542/* diff */
2543static inline int 2668static inline int
2544qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) 2669qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
2545{ 2670{
2546 /*
2547 * Uncomment when corresponding SCSI changes are done.
2548 *
2549 if (!sp->cmd->prot_chk)
2550 return 0;
2551 *
2552 */
2553 switch (se_cmd->prot_op) { 2671 switch (se_cmd->prot_op) {
2554 case TARGET_PROT_DOUT_INSERT: 2672 case TARGET_PROT_DOUT_INSERT:
2555 case TARGET_PROT_DIN_STRIP: 2673 case TARGET_PROT_DIN_STRIP:
@@ -2570,16 +2688,38 @@ qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
2570 return 0; 2688 return 0;
2571} 2689}
2572 2690
2691static inline int
2692qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
2693{
2694 switch (se_cmd->prot_op) {
2695 case TARGET_PROT_DIN_INSERT:
2696 case TARGET_PROT_DOUT_INSERT:
2697 case TARGET_PROT_DIN_STRIP:
2698 case TARGET_PROT_DOUT_STRIP:
2699 case TARGET_PROT_DIN_PASS:
2700 case TARGET_PROT_DOUT_PASS:
2701 return 1;
2702 default:
2703 return 0;
2704 }
2705 return 0;
2706}
2707
2573/* 2708/*
2574 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command 2709 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
2575 *
2576 */ 2710 */
2577static inline void 2711static void
2578qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx) 2712qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
2713 uint16_t *pfw_prot_opts)
2579{ 2714{
2715 struct se_cmd *se_cmd = &cmd->se_cmd;
2580 uint32_t lba = 0xffffffff & se_cmd->t_task_lba; 2716 uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
2717 scsi_qla_host_t *vha = cmd->tgt->vha;
2718 struct qla_hw_data *ha = vha->hw;
2719 uint32_t t32 = 0;
2581 2720
2582 /* wait til Mode Sense/Select cmd, modepage Ah, subpage 2 2721 /*
2722 * wait till Mode Sense/Select cmd, modepage Ah, subpage 2
2583 * have been implemented by TCM, before AppTag is avail. 2723 * have been implemented by TCM, before AppTag is avail.
2584 * Look for modesense_handlers[] 2724 * Look for modesense_handlers[]
2585 */ 2725 */
@@ -2587,65 +2727,73 @@ qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
2587 ctx->app_tag_mask[0] = 0x0; 2727 ctx->app_tag_mask[0] = 0x0;
2588 ctx->app_tag_mask[1] = 0x0; 2728 ctx->app_tag_mask[1] = 0x0;
2589 2729
2730 if (IS_PI_UNINIT_CAPABLE(ha)) {
2731 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
2732 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
2733 *pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
2734 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
2735 *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
2736 }
2737
2738 t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
2739
2590 switch (se_cmd->prot_type) { 2740 switch (se_cmd->prot_type) {
2591 case TARGET_DIF_TYPE0_PROT: 2741 case TARGET_DIF_TYPE0_PROT:
2592 /* 2742 /*
2593 * No check for ql2xenablehba_err_chk, as it would be an 2743 * No check for ql2xenablehba_err_chk, as it
2594 * I/O error if hba tag generation is not done. 2744 * would be an I/O error if hba tag generation
2745 * is not done.
2595 */ 2746 */
2596 ctx->ref_tag = cpu_to_le32(lba); 2747 ctx->ref_tag = cpu_to_le32(lba);
2597
2598 if (!qlt_hba_err_chk_enabled(se_cmd))
2599 break;
2600
2601 /* enable ALL bytes of the ref tag */ 2748 /* enable ALL bytes of the ref tag */
2602 ctx->ref_tag_mask[0] = 0xff; 2749 ctx->ref_tag_mask[0] = 0xff;
2603 ctx->ref_tag_mask[1] = 0xff; 2750 ctx->ref_tag_mask[1] = 0xff;
2604 ctx->ref_tag_mask[2] = 0xff; 2751 ctx->ref_tag_mask[2] = 0xff;
2605 ctx->ref_tag_mask[3] = 0xff; 2752 ctx->ref_tag_mask[3] = 0xff;
2606 break; 2753 break;
2607 /*
2608 * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
2609 * 16 bit app tag.
2610 */
2611 case TARGET_DIF_TYPE1_PROT: 2754 case TARGET_DIF_TYPE1_PROT:
2612 ctx->ref_tag = cpu_to_le32(lba); 2755 /*
2613 2756 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
2614 if (!qlt_hba_err_chk_enabled(se_cmd)) 2757 * REF tag, and 16 bit app tag.
2615 break; 2758 */
2616 2759 ctx->ref_tag = cpu_to_le32(lba);
2617 /* enable ALL bytes of the ref tag */ 2760 if (!qla_tgt_ref_mask_check(se_cmd) ||
2618 ctx->ref_tag_mask[0] = 0xff; 2761 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
2619 ctx->ref_tag_mask[1] = 0xff; 2762 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2620 ctx->ref_tag_mask[2] = 0xff; 2763 break;
2621 ctx->ref_tag_mask[3] = 0xff; 2764 }
2622 break; 2765 /* enable ALL bytes of the ref tag */
2623 /* 2766 ctx->ref_tag_mask[0] = 0xff;
2624 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to 2767 ctx->ref_tag_mask[1] = 0xff;
2625 * match LBA in CDB + N 2768 ctx->ref_tag_mask[2] = 0xff;
2626 */ 2769 ctx->ref_tag_mask[3] = 0xff;
2770 break;
2627 case TARGET_DIF_TYPE2_PROT: 2771 case TARGET_DIF_TYPE2_PROT:
2628 ctx->ref_tag = cpu_to_le32(lba); 2772 /*
2629 2773 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
2630 if (!qlt_hba_err_chk_enabled(se_cmd)) 2774 * tag has to match LBA in CDB + N
2631 break; 2775 */
2632 2776 ctx->ref_tag = cpu_to_le32(lba);
2633 /* enable ALL bytes of the ref tag */ 2777 if (!qla_tgt_ref_mask_check(se_cmd) ||
2634 ctx->ref_tag_mask[0] = 0xff; 2778 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
2635 ctx->ref_tag_mask[1] = 0xff; 2779 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2636 ctx->ref_tag_mask[2] = 0xff; 2780 break;
2637 ctx->ref_tag_mask[3] = 0xff; 2781 }
2638 break; 2782 /* enable ALL bytes of the ref tag */
2639 2783 ctx->ref_tag_mask[0] = 0xff;
2640 /* For Type 3 protection: 16 bit GUARD only */ 2784 ctx->ref_tag_mask[1] = 0xff;
2785 ctx->ref_tag_mask[2] = 0xff;
2786 ctx->ref_tag_mask[3] = 0xff;
2787 break;
2641 case TARGET_DIF_TYPE3_PROT: 2788 case TARGET_DIF_TYPE3_PROT:
2642 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = 2789 /* For TYPE 3 protection: 16 bit GUARD only */
2643 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; 2790 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2644 break; 2791 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
2792 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
2793 break;
2645 } 2794 }
2646} 2795}
2647 2796
2648
2649static inline int 2797static inline int
2650qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) 2798qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2651{ 2799{
@@ -2664,6 +2812,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2664 struct se_cmd *se_cmd = &cmd->se_cmd; 2812 struct se_cmd *se_cmd = &cmd->se_cmd;
2665 uint32_t h; 2813 uint32_t h;
2666 struct atio_from_isp *atio = &prm->cmd->atio; 2814 struct atio_from_isp *atio = &prm->cmd->atio;
2815 struct qla_tc_param tc;
2667 uint16_t t16; 2816 uint16_t t16;
2668 2817
2669 ha = vha->hw; 2818 ha = vha->hw;
@@ -2689,16 +2838,15 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2689 case TARGET_PROT_DIN_INSERT: 2838 case TARGET_PROT_DIN_INSERT:
2690 case TARGET_PROT_DOUT_STRIP: 2839 case TARGET_PROT_DOUT_STRIP:
2691 transfer_length = data_bytes; 2840 transfer_length = data_bytes;
2692 data_bytes += dif_bytes; 2841 if (cmd->prot_sg_cnt)
2842 data_bytes += dif_bytes;
2693 break; 2843 break;
2694
2695 case TARGET_PROT_DIN_STRIP: 2844 case TARGET_PROT_DIN_STRIP:
2696 case TARGET_PROT_DOUT_INSERT: 2845 case TARGET_PROT_DOUT_INSERT:
2697 case TARGET_PROT_DIN_PASS: 2846 case TARGET_PROT_DIN_PASS:
2698 case TARGET_PROT_DOUT_PASS: 2847 case TARGET_PROT_DOUT_PASS:
2699 transfer_length = data_bytes + dif_bytes; 2848 transfer_length = data_bytes + dif_bytes;
2700 break; 2849 break;
2701
2702 default: 2850 default:
2703 BUG(); 2851 BUG();
2704 break; 2852 break;
@@ -2734,7 +2882,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2734 break; 2882 break;
2735 } 2883 }
2736 2884
2737
2738 /* ---- PKT ---- */ 2885 /* ---- PKT ---- */
2739 /* Update entry type to indicate Command Type CRC_2 IOCB */ 2886 /* Update entry type to indicate Command Type CRC_2 IOCB */
2740 pkt->entry_type = CTIO_CRC2; 2887 pkt->entry_type = CTIO_CRC2;
@@ -2752,9 +2899,8 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2752 } else 2899 } else
2753 ha->tgt.cmds[h-1] = prm->cmd; 2900 ha->tgt.cmds[h-1] = prm->cmd;
2754 2901
2755
2756 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; 2902 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
2757 pkt->nport_handle = prm->cmd->loop_id; 2903 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
2758 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2904 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2759 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2905 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2760 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2906 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
@@ -2775,12 +2921,10 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2775 else if (cmd->dma_data_direction == DMA_FROM_DEVICE) 2921 else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
2776 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); 2922 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
2777 2923
2778
2779 pkt->dseg_count = prm->tot_dsds; 2924 pkt->dseg_count = prm->tot_dsds;
2780 /* Fibre channel byte count */ 2925 /* Fibre channel byte count */
2781 pkt->transfer_length = cpu_to_le32(transfer_length); 2926 pkt->transfer_length = cpu_to_le32(transfer_length);
2782 2927
2783
2784 /* ----- CRC context -------- */ 2928 /* ----- CRC context -------- */
2785 2929
2786 /* Allocate CRC context from global pool */ 2930 /* Allocate CRC context from global pool */
@@ -2800,13 +2944,12 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2800 /* Set handle */ 2944 /* Set handle */
2801 crc_ctx_pkt->handle = pkt->handle; 2945 crc_ctx_pkt->handle = pkt->handle;
2802 2946
2803 qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt); 2947 qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
2804 2948
2805 pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); 2949 pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
2806 pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma)); 2950 pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
2807 pkt->crc_context_len = CRC_CONTEXT_LEN_FW; 2951 pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
2808 2952
2809
2810 if (!bundling) { 2953 if (!bundling) {
2811 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; 2954 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
2812 } else { 2955 } else {
@@ -2827,16 +2970,24 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2827 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); 2970 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
2828 crc_ctx_pkt->guard_seed = cpu_to_le16(0); 2971 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
2829 2972
2973 memset((uint8_t *)&tc, 0 , sizeof(tc));
2974 tc.vha = vha;
2975 tc.blk_sz = cmd->blk_sz;
2976 tc.bufflen = cmd->bufflen;
2977 tc.sg = cmd->sg;
2978 tc.prot_sg = cmd->prot_sg;
2979 tc.ctx = crc_ctx_pkt;
2980 tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
2830 2981
2831 /* Walks data segments */ 2982 /* Walks data segments */
2832 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR); 2983 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
2833 2984
2834 if (!bundling && prm->prot_seg_cnt) { 2985 if (!bundling && prm->prot_seg_cnt) {
2835 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, 2986 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
2836 prm->tot_dsds, cmd)) 2987 prm->tot_dsds, &tc))
2837 goto crc_queuing_error; 2988 goto crc_queuing_error;
2838 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, 2989 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
2839 (prm->tot_dsds - prm->prot_seg_cnt), cmd)) 2990 (prm->tot_dsds - prm->prot_seg_cnt), &tc))
2840 goto crc_queuing_error; 2991 goto crc_queuing_error;
2841 2992
2842 if (bundling && prm->prot_seg_cnt) { 2993 if (bundling && prm->prot_seg_cnt) {
@@ -2845,18 +2996,18 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2845 2996
2846 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; 2997 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
2847 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, 2998 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
2848 prm->prot_seg_cnt, cmd)) 2999 prm->prot_seg_cnt, &tc))
2849 goto crc_queuing_error; 3000 goto crc_queuing_error;
2850 } 3001 }
2851 return QLA_SUCCESS; 3002 return QLA_SUCCESS;
2852 3003
2853crc_queuing_error: 3004crc_queuing_error:
2854 /* Cleanup will be performed by the caller */ 3005 /* Cleanup will be performed by the caller */
3006 vha->hw->tgt.cmds[h - 1] = NULL;
2855 3007
2856 return QLA_FUNCTION_FAILED; 3008 return QLA_FUNCTION_FAILED;
2857} 3009}
2858 3010
2859
2860/* 3011/*
2861 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * 3012 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
2862 * QLA_TGT_XMIT_STATUS for >= 24xx silicon 3013 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
@@ -2906,7 +3057,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2906 else 3057 else
2907 vha->tgt_counters.core_qla_que_buf++; 3058 vha->tgt_counters.core_qla_que_buf++;
2908 3059
2909 if (!vha->flags.online || cmd->reset_count != ha->chip_reset) { 3060 if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) {
2910 /* 3061 /*
2911 * Either the port is not online or this request was from 3062 * Either the port is not online or this request was from
2912 * previous life, just abort the processing. 3063 * previous life, just abort the processing.
@@ -3047,7 +3198,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
3047 3198
3048 spin_lock_irqsave(&ha->hardware_lock, flags); 3199 spin_lock_irqsave(&ha->hardware_lock, flags);
3049 3200
3050 if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) || 3201 if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) ||
3051 (cmd->sess && cmd->sess->deleted)) { 3202 (cmd->sess && cmd->sess->deleted)) {
3052 /* 3203 /*
3053 * Either the port is not online or this request was from 3204 * Either the port is not online or this request was from
@@ -3104,139 +3255,113 @@ EXPORT_SYMBOL(qlt_rdy_to_xfer);
3104 3255
3105 3256
3106/* 3257/*
3107 * Checks the guard or meta-data for the type of error 3258 * it is assumed either hardware_lock or qpair lock is held.
3108 * detected by the HBA.
3109 */ 3259 */
3110static inline int 3260static void
3111qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, 3261qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
3112 struct ctio_crc_from_fw *sts) 3262 struct ctio_crc_from_fw *sts)
3113{ 3263{
3114 uint8_t *ap = &sts->actual_dif[0]; 3264 uint8_t *ap = &sts->actual_dif[0];
3115 uint8_t *ep = &sts->expected_dif[0]; 3265 uint8_t *ep = &sts->expected_dif[0];
3116 uint32_t e_ref_tag, a_ref_tag;
3117 uint16_t e_app_tag, a_app_tag;
3118 uint16_t e_guard, a_guard;
3119 uint64_t lba = cmd->se_cmd.t_task_lba; 3266 uint64_t lba = cmd->se_cmd.t_task_lba;
3267 uint8_t scsi_status, sense_key, asc, ascq;
3268 unsigned long flags;
3120 3269
3121 a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); 3270 cmd->trc_flags |= TRC_DIF_ERR;
3122 a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
3123 a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
3124
3125 e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
3126 e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
3127 e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
3128
3129 ql_dbg(ql_dbg_tgt, vha, 0xe075,
3130 "iocb(s) %p Returned STATUS.\n", sts);
3131
3132 ql_dbg(ql_dbg_tgt, vha, 0xf075,
3133 "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
3134 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
3135 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
3136
3137 /*
3138 * Ignore sector if:
3139 * For type 3: ref & app tag is all 'f's
3140 * For type 0,1,2: app tag is all 'f's
3141 */
3142 if ((a_app_tag == 0xffff) &&
3143 ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
3144 (a_ref_tag == 0xffffffff))) {
3145 uint32_t blocks_done;
3146
3147 /* 2TB boundary case covered automatically with this */
3148 blocks_done = e_ref_tag - (uint32_t)lba + 1;
3149 cmd->se_cmd.bad_sector = e_ref_tag;
3150 cmd->se_cmd.pi_err = 0;
3151 ql_dbg(ql_dbg_tgt, vha, 0xf074,
3152 "need to return scsi good\n");
3153
3154 /* Update protection tag */
3155 if (cmd->prot_sg_cnt) {
3156 uint32_t i, k = 0, num_ent;
3157 struct scatterlist *sg, *sgl;
3158
3159
3160 sgl = cmd->prot_sg;
3161
3162 /* Patch the corresponding protection tags */
3163 for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
3164 num_ent = sg_dma_len(sg) / 8;
3165 if (k + num_ent < blocks_done) {
3166 k += num_ent;
3167 continue;
3168 }
3169 k = blocks_done;
3170 break;
3171 }
3172 3271
3173 if (k != blocks_done) { 3272 cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
3174 ql_log(ql_log_warn, vha, 0xf076, 3273 cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
3175 "unexpected tag values tag:lba=%u:%llu)\n", 3274 cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
3176 e_ref_tag, (unsigned long long)lba);
3177 goto out;
3178 }
3179 3275
3180#if 0 3276 cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
3181 struct sd_dif_tuple *spt; 3277 cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
3182 /* TODO: 3278 cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
3183 * This section came from initiator. Is it valid here?
3184 * should ulp be override with actual val???
3185 */
3186 spt = page_address(sg_page(sg)) + sg->offset;
3187 spt += j;
3188 3279
3189 spt->app_tag = 0xffff; 3280 ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
3190 if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3) 3281 "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
3191 spt->ref_tag = 0xffffffff;
3192#endif
3193 }
3194 3282
3195 return 0; 3283 scsi_status = sense_key = asc = ascq = 0;
3196 }
3197 3284
3198 /* check guard */ 3285 /* check appl tag */
3199 if (e_guard != a_guard) { 3286 if (cmd->e_app_tag != cmd->a_app_tag) {
3200 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 3287 ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
3201 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; 3288 "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
3202 3289 "Ref[%x|%x], App[%x|%x], "
3203 ql_log(ql_log_warn, vha, 0xe076, 3290 "Guard [%x|%x] cmd=%p ox_id[%04x]",
3204 "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", 3291 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3205 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 3292 cmd->a_ref_tag, cmd->e_ref_tag,
3206 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, 3293 cmd->a_app_tag, cmd->e_app_tag,
3207 a_guard, e_guard, cmd); 3294 cmd->a_guard, cmd->e_guard,
3208 goto out; 3295 cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
3296
3297 cmd->dif_err_code = DIF_ERR_APP;
3298 scsi_status = SAM_STAT_CHECK_CONDITION;
3299 sense_key = ABORTED_COMMAND;
3300 asc = 0x10;
3301 ascq = 0x2;
3209 } 3302 }
3210 3303
3211 /* check ref tag */ 3304 /* check ref tag */
3212 if (e_ref_tag != a_ref_tag) { 3305 if (cmd->e_ref_tag != cmd->a_ref_tag) {
3213 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 3306 ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
3214 cmd->se_cmd.bad_sector = e_ref_tag; 3307 "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
3215 3308 "Ref[%x|%x], App[%x|%x], "
3216 ql_log(ql_log_warn, vha, 0xe077, 3309 "Guard[%x|%x] cmd=%p ox_id[%04x] ",
3217 "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", 3310 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3218 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 3311 cmd->a_ref_tag, cmd->e_ref_tag,
3219 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, 3312 cmd->a_app_tag, cmd->e_app_tag,
3220 a_guard, e_guard, cmd); 3313 cmd->a_guard, cmd->e_guard,
3314 cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
3315
3316 cmd->dif_err_code = DIF_ERR_REF;
3317 scsi_status = SAM_STAT_CHECK_CONDITION;
3318 sense_key = ABORTED_COMMAND;
3319 asc = 0x10;
3320 ascq = 0x3;
3221 goto out; 3321 goto out;
3222 } 3322 }
3223 3323
3224 /* check appl tag */ 3324 /* check guard */
3225 if (e_app_tag != a_app_tag) { 3325 if (cmd->e_guard != cmd->a_guard) {
3226 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; 3326 ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
3227 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; 3327 "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
3228 3328 "Ref[%x|%x], App[%x|%x], "
3229 ql_log(ql_log_warn, vha, 0xe078, 3329 "Guard [%x|%x] cmd=%p ox_id[%04x]",
3230 "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", 3330 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3231 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 3331 cmd->a_ref_tag, cmd->e_ref_tag,
3232 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, 3332 cmd->a_app_tag, cmd->e_app_tag,
3233 a_guard, e_guard, cmd); 3333 cmd->a_guard, cmd->e_guard,
3234 goto out; 3334 cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
3335 cmd->dif_err_code = DIF_ERR_GRD;
3336 scsi_status = SAM_STAT_CHECK_CONDITION;
3337 sense_key = ABORTED_COMMAND;
3338 asc = 0x10;
3339 ascq = 0x1;
3235 } 3340 }
3236out: 3341out:
3237 return 1; 3342 switch (cmd->state) {
3238} 3343 case QLA_TGT_STATE_NEED_DATA:
3344 /* handle_data will load DIF error code */
3345 cmd->state = QLA_TGT_STATE_DATA_IN;
3346 vha->hw->tgt.tgt_ops->handle_data(cmd);
3347 break;
3348 default:
3349 spin_lock_irqsave(&cmd->cmd_lock, flags);
3350 if (cmd->aborted) {
3351 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3352 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3353 break;
3354 }
3355 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3239 3356
3357 qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq);
3358 /* assume scsi status gets out on the wire.
3359 * Will not wait for completion.
3360 */
3361 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3362 break;
3363 }
3364}
3240 3365
3241/* If hardware_lock held on entry, might drop it, then reacquire */ 3366/* If hardware_lock held on entry, might drop it, then reacquire */
3242/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 3367/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
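
The new qlt_handle_dif_error() caches the actual and expected guard, app and ref tags on the command, classifies the first mismatch, and answers with CHECK CONDITION / ABORTED COMMAND, ASC 0x10 and an ASCQ naming the failed tag. The classification step in isolation (sketch, hypothetical types):

#include <linux/errno.h>
#include <linux/types.h>

struct pi_tags {
        u16 guard;
        u16 app_tag;
        u32 ref_tag;
};

static int classify_pi_mismatch(const struct pi_tags *actual,
                                const struct pi_tags *expected, u8 *ascq)
{
        if (actual->app_tag != expected->app_tag)
                *ascq = 0x2;            /* app tag check failed */
        else if (actual->ref_tag != expected->ref_tag)
                *ascq = 0x3;            /* ref tag check failed */
        else if (actual->guard != expected->guard)
                *ascq = 0x1;            /* guard check failed */
        else
                return 0;               /* tags agree: no PI error */

        return -EIO;                    /* caller sends ASC 0x10 with *ascq */
}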
@@ -3251,7 +3376,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3251 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, 3376 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
3252 "Sending TERM ELS CTIO (ha=%p)\n", ha); 3377 "Sending TERM ELS CTIO (ha=%p)\n", ha);
3253 3378
3254 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL); 3379 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3255 if (pkt == NULL) { 3380 if (pkt == NULL) {
3256 ql_dbg(ql_dbg_tgt, vha, 0xe080, 3381 ql_dbg(ql_dbg_tgt, vha, 0xe080,
3257 "qla_target(%d): %s failed: unable to allocate " 3382 "qla_target(%d): %s failed: unable to allocate "
@@ -3543,6 +3668,16 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
3543{ 3668{
3544 int term = 0; 3669 int term = 0;
3545 3670
3671 if (cmd->se_cmd.prot_op)
3672 ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
3673 "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
3674 "se_cmd=%p tag[%x] op %#x/%s",
3675 cmd->lba, cmd->lba,
3676 cmd->num_blks, &cmd->se_cmd,
3677 cmd->atio.u.isp24.exchange_addr,
3678 cmd->se_cmd.prot_op,
3679 prot_op_str(cmd->se_cmd.prot_op));
3680
3546 if (ctio != NULL) { 3681 if (ctio != NULL) {
3547 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; 3682 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
3548 term = !(c->flags & 3683 term = !(c->flags &
@@ -3760,32 +3895,15 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3760 struct ctio_crc_from_fw *crc = 3895 struct ctio_crc_from_fw *crc =
3761 (struct ctio_crc_from_fw *)ctio; 3896 (struct ctio_crc_from_fw *)ctio;
3762 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, 3897 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
3763 "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n", 3898 "qla_target(%d): CTIO with DIF_ERROR status %x "
3899 "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
3900 "expect_dif[0x%llx]\n",
3764 vha->vp_idx, status, cmd->state, se_cmd, 3901 vha->vp_idx, status, cmd->state, se_cmd,
3765 *((u64 *)&crc->actual_dif[0]), 3902 *((u64 *)&crc->actual_dif[0]),
3766 *((u64 *)&crc->expected_dif[0])); 3903 *((u64 *)&crc->expected_dif[0]));
3767 3904
3768 if (qlt_handle_dif_error(vha, cmd, ctio)) { 3905 qlt_handle_dif_error(vha, cmd, ctio);
3769 if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3906 return;
3770 /* scsi Write/xfer rdy complete */
3771 goto skip_term;
3772 } else {
3773 /* scsi read/xmit respond complete
3774 * call handle dif to send scsi status
3775 * rather than terminate exchange.
3776 */
3777 cmd->state = QLA_TGT_STATE_PROCESSED;
3778 ha->tgt.tgt_ops->handle_dif_err(cmd);
3779 return;
3780 }
3781 } else {
3782 /* Need to generate a SCSI good completion.
3783 * because FW did not send scsi status.
3784 */
3785 status = 0;
3786 goto skip_term;
3787 }
3788 break;
3789 } 3907 }
3790 default: 3908 default:
3791 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, 3909 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
@@ -3808,7 +3926,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3808 return; 3926 return;
3809 } 3927 }
3810 } 3928 }
3811skip_term:
3812 3929
3813 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 3930 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3814 cmd->trc_flags |= TRC_CTIO_DONE; 3931 cmd->trc_flags |= TRC_CTIO_DONE;
@@ -4584,7 +4701,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4584 } 4701 }
4585 4702
4586 if (sess != NULL) { 4703 if (sess != NULL) {
4587 if (sess->fw_login_state == DSC_LS_PLOGI_PEND) { 4704 if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
4705 sess->fw_login_state != DSC_LS_PLOGI_COMP) {
4588 /* 4706 /*
4589 * Impatient initiator sent PRLI before last 4707 * Impatient initiator sent PRLI before last
4590 * PLOGI could finish. Will force him to re-try, 4708 * PLOGI could finish. Will force him to re-try,
@@ -4623,15 +4741,23 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4623 4741
4624 /* Make session global (not used in fabric mode) */ 4742 /* Make session global (not used in fabric mode) */
4625 if (ha->current_topology != ISP_CFG_F) { 4743 if (ha->current_topology != ISP_CFG_F) {
4626 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 4744 if (sess) {
4627 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 4745 ql_dbg(ql_dbg_disc, vha, 0xffff,
4628 qla2xxx_wake_dpc(vha); 4746 "%s %d %8phC post nack\n",
4747 __func__, __LINE__, sess->port_name);
4748 qla24xx_post_nack_work(vha, sess, iocb,
4749 SRB_NACK_PRLI);
4750 res = 0;
4751 } else {
4752 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4753 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4754 qla2xxx_wake_dpc(vha);
4755 }
4629 } else { 4756 } else {
4630 if (sess) { 4757 if (sess) {
4631 ql_dbg(ql_dbg_disc, vha, 0xffff, 4758 ql_dbg(ql_dbg_disc, vha, 0xffff,
4632 "%s %d %8phC post nack\n", 4759 "%s %d %8phC post nack\n",
4633 __func__, __LINE__, sess->port_name); 4760 __func__, __LINE__, sess->port_name);
4634
4635 qla24xx_post_nack_work(vha, sess, iocb, 4761 qla24xx_post_nack_work(vha, sess, iocb,
4636 SRB_NACK_PRLI); 4762 SRB_NACK_PRLI);
4637 res = 0; 4763 res = 0;
@@ -4639,7 +4765,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4639 } 4765 }
4640 break; 4766 break;
4641 4767
4642
4643 case ELS_TPRLO: 4768 case ELS_TPRLO:
4644 if (le16_to_cpu(iocb->u.isp24.flags) & 4769 if (le16_to_cpu(iocb->u.isp24.flags) &
4645 NOTIFY24XX_FLAGS_GLOBAL_TPRLO) { 4770 NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
@@ -5079,16 +5204,22 @@ qlt_send_busy(struct scsi_qla_host *vha,
5079 5204
5080static int 5205static int
5081qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, 5206qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
5082 struct atio_from_isp *atio) 5207 struct atio_from_isp *atio, bool ha_locked)
5083{ 5208{
5084 struct qla_hw_data *ha = vha->hw; 5209 struct qla_hw_data *ha = vha->hw;
5085 uint16_t status; 5210 uint16_t status;
5211 unsigned long flags;
5086 5212
5087 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) 5213 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
5088 return 0; 5214 return 0;
5089 5215
5216 if (!ha_locked)
5217 spin_lock_irqsave(&ha->hardware_lock, flags);
5090 status = temp_sam_status; 5218 status = temp_sam_status;
5091 qlt_send_busy(vha, atio, status); 5219 qlt_send_busy(vha, atio, status);
5220 if (!ha_locked)
5221 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5222
5092 return 1; 5223 return 1;
5093} 5224}
5094 5225
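
qlt_chk_qfull_thresh_hold() now takes an ha_locked flag so it can serve both ATIO paths: the callee grabs hardware_lock only when the caller does not already hold it. A sketch of the idiom (hypothetical function; splitting out a __locked variant is the lockdep-friendlier alternative):

#include <linux/spinlock.h>
#include <linux/types.h>

static void send_busy_maybe_locked(spinlock_t *lock, bool already_locked)
{
        unsigned long flags = 0;

        if (!already_locked)
                spin_lock_irqsave(lock, flags);

        /* ... emit the busy/queue-full response under the lock ... */

        if (!already_locked)
                spin_unlock_irqrestore(lock, flags);
}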
@@ -5103,7 +5234,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5103 unsigned long flags; 5234 unsigned long flags;
5104 5235
5105 if (unlikely(tgt == NULL)) { 5236 if (unlikely(tgt == NULL)) {
5106 ql_dbg(ql_dbg_io, vha, 0x3064, 5237 ql_dbg(ql_dbg_tgt, vha, 0x3064,
5107 "ATIO pkt, but no tgt (ha %p)", ha); 5238 "ATIO pkt, but no tgt (ha %p)", ha);
5108 return; 5239 return;
5109 } 5240 }
@@ -5133,7 +5264,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5133 5264
5134 5265
5135 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { 5266 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
5136 rc = qlt_chk_qfull_thresh_hold(vha, atio); 5267 rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
5137 if (rc != 0) { 5268 if (rc != 0) {
5138 tgt->atio_irq_cmd_count--; 5269 tgt->atio_irq_cmd_count--;
5139 return; 5270 return;
@@ -5256,7 +5387,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
5256 break; 5387 break;
5257 } 5388 }
5258 5389
5259 rc = qlt_chk_qfull_thresh_hold(vha, atio); 5390 rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
5260 if (rc != 0) { 5391 if (rc != 0) {
5261 tgt->irq_cmd_count--; 5392 tgt->irq_cmd_count--;
5262 return; 5393 return;
@@ -5531,7 +5662,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
5531 5662
5532 fcport->loop_id = loop_id; 5663 fcport->loop_id = loop_id;
5533 5664
5534 rc = qla2x00_get_port_database(vha, fcport, 0); 5665 rc = qla24xx_gpdb_wait(vha, fcport, 0);
5535 if (rc != QLA_SUCCESS) { 5666 if (rc != QLA_SUCCESS) {
5536 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, 5667 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
5537 "qla_target(%d): Failed to retrieve fcport " 5668 "qla_target(%d): Failed to retrieve fcport "
@@ -5713,30 +5844,23 @@ static void qlt_abort_work(struct qla_tgt *tgt,
5713 } 5844 }
5714 } 5845 }
5715 5846
5716 spin_lock_irqsave(&ha->hardware_lock, flags);
5717
5718 if (tgt->tgt_stop)
5719 goto out_term;
5720
5721 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); 5847 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
5848 ha->tgt.tgt_ops->put_sess(sess);
5849 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5850
5722 if (rc != 0) 5851 if (rc != 0)
5723 goto out_term; 5852 goto out_term;
5724 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5725 if (sess)
5726 ha->tgt.tgt_ops->put_sess(sess);
5727 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5728 return; 5853 return;
5729 5854
5730out_term2: 5855out_term2:
5731 spin_lock_irqsave(&ha->hardware_lock, flags); 5856 if (sess)
5857 ha->tgt.tgt_ops->put_sess(sess);
5858 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5732 5859
5733out_term: 5860out_term:
5861 spin_lock_irqsave(&ha->hardware_lock, flags);
5734 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); 5862 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
5735 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5863 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5736
5737 if (sess)
5738 ha->tgt.tgt_ops->put_sess(sess);
5739 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5740} 5864}
5741 5865
5742static void qlt_tmr_work(struct qla_tgt *tgt, 5866static void qlt_tmr_work(struct qla_tgt *tgt,
@@ -5756,7 +5880,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
5756 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 5880 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5757 5881
5758 if (tgt->tgt_stop) 5882 if (tgt->tgt_stop)
5759 goto out_term; 5883 goto out_term2;
5760 5884
5761 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id; 5885 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
5762 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 5886 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
@@ -5768,11 +5892,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
5768 5892
5769 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 5893 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5770 if (!sess) 5894 if (!sess)
5771 goto out_term; 5895 goto out_term2;
5772 } else { 5896 } else {
5773 if (sess->deleted) { 5897 if (sess->deleted) {
5774 sess = NULL; 5898 sess = NULL;
5775 goto out_term; 5899 goto out_term2;
5776 } 5900 }
5777 5901
5778 if (!kref_get_unless_zero(&sess->sess_kref)) { 5902 if (!kref_get_unless_zero(&sess->sess_kref)) {
@@ -5780,7 +5904,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
5780 "%s: kref_get fail %8phC\n", 5904 "%s: kref_get fail %8phC\n",
5781 __func__, sess->port_name); 5905 __func__, sess->port_name);
5782 sess = NULL; 5906 sess = NULL;
5783 goto out_term; 5907 goto out_term2;
5784 } 5908 }
5785 } 5909 }
5786 5910
@@ -5790,17 +5914,19 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
5790 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 5914 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
5791 5915
5792 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 5916 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
5793 if (rc != 0)
5794 goto out_term;
5795
5796 ha->tgt.tgt_ops->put_sess(sess); 5917 ha->tgt.tgt_ops->put_sess(sess);
5797 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5918 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5919
5920 if (rc != 0)
5921 goto out_term;
5798 return; 5922 return;
5799 5923
5924out_term2:
5925 if (sess)
5926 ha->tgt.tgt_ops->put_sess(sess);
5927 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5800out_term: 5928out_term:
5801 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); 5929 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
5802 ha->tgt.tgt_ops->put_sess(sess);
5803 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5804} 5930}
5805 5931
5806static void qlt_sess_work_fn(struct work_struct *work) 5932static void qlt_sess_work_fn(struct work_struct *work)
@@ -5893,13 +6019,13 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
5893 tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX; 6019 tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
5894 tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX; 6020 tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
5895 6021
5896 if (base_vha->fc_vport)
5897 return 0;
5898
5899 mutex_lock(&qla_tgt_mutex); 6022 mutex_lock(&qla_tgt_mutex);
5900 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); 6023 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
5901 mutex_unlock(&qla_tgt_mutex); 6024 mutex_unlock(&qla_tgt_mutex);
5902 6025
6026 if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
6027 ha->tgt.tgt_ops->add_target(base_vha);
6028
5903 return 0; 6029 return 0;
5904} 6030}
5905 6031
@@ -5928,6 +6054,17 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
5928 return 0; 6054 return 0;
5929} 6055}
5930 6056
6057void qlt_remove_target_resources(struct qla_hw_data *ha)
6058{
6059 struct scsi_qla_host *node;
6060 u32 key = 0;
6061
6062 btree_for_each_safe32(&ha->tgt.host_map, key, node)
6063 btree_remove32(&ha->tgt.host_map, key);
6064
6065 btree_destroy32(&ha->tgt.host_map);
6066}
6067
5931static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, 6068static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
5932 unsigned char *b) 6069 unsigned char *b)
5933{ 6070{
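
qlt_remove_target_resources() empties the host_map with the _safe iterator, which tolerates removal during the walk, and then destroys the tree. The same teardown in isolation (hypothetical wrapper):

#include <linux/btree.h>
#include <linux/types.h>

static void map_teardown(struct btree_head32 *map)
{
        void *val;
        u32 key = 0;

        btree_for_each_safe32(map, key, val)    /* removal-safe walk */
                btree_remove32(map, key);

        btree_destroy32(map);
}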
@@ -6234,7 +6371,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6234 struct atio_from_isp *pkt; 6371 struct atio_from_isp *pkt;
6235 int cnt, i; 6372 int cnt, i;
6236 6373
6237 if (!vha->flags.online) 6374 if (!ha->flags.fw_started)
6238 return; 6375 return;
6239 6376
6240 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || 6377 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
@@ -6581,6 +6718,8 @@ qlt_modify_vp_config(struct scsi_qla_host *vha,
6581void 6718void
6582qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) 6719qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
6583{ 6720{
6721 int rc;
6722
6584 if (!QLA_TGT_MODE_ENABLED()) 6723 if (!QLA_TGT_MODE_ENABLED())
6585 return; 6724 return;
6586 6725
@@ -6600,6 +6739,13 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
6600 qlt_unknown_atio_work_fn); 6739 qlt_unknown_atio_work_fn);
6601 6740
6602 qlt_clear_mode(base_vha); 6741 qlt_clear_mode(base_vha);
6742
6743 rc = btree_init32(&ha->tgt.host_map);
6744 if (rc)
6745 ql_log(ql_log_info, base_vha, 0xffff,
6746 "Unable to initialize ha->host_map btree\n");
6747
6748 qlt_update_vp_map(base_vha, SET_VP_IDX);
6603} 6749}
6604 6750
6605irqreturn_t 6751irqreturn_t
@@ -6642,6 +6788,8 @@ qlt_handle_abts_recv_work(struct work_struct *work)
6642 spin_lock_irqsave(&ha->hardware_lock, flags); 6788 spin_lock_irqsave(&ha->hardware_lock, flags);
6643 qlt_response_pkt_all_vps(vha, (response_t *)&op->atio); 6789 qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
6644 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6790 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6791
6792 kfree(op);
6645} 6793}
6646 6794
6647void 6795void
@@ -6706,25 +6854,69 @@ qlt_mem_free(struct qla_hw_data *ha)
6706void 6854void
6707qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) 6855qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
6708{ 6856{
6857 void *slot;
6858 u32 key;
6859 int rc;
6860
6709 if (!QLA_TGT_MODE_ENABLED()) 6861 if (!QLA_TGT_MODE_ENABLED())
6710 return; 6862 return;
6711 6863
6864 key = vha->d_id.b24;
6865
6712 switch (cmd) { 6866 switch (cmd) {
6713 case SET_VP_IDX: 6867 case SET_VP_IDX:
6714 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; 6868 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
6715 break; 6869 break;
6716 case SET_AL_PA: 6870 case SET_AL_PA:
6717 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx; 6871 slot = btree_lookup32(&vha->hw->tgt.host_map, key);
6872 if (!slot) {
6873 ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
6874 "Save vha in host_map %p %06x\n", vha, key);
6875 rc = btree_insert32(&vha->hw->tgt.host_map,
6876 key, vha, GFP_ATOMIC);
6877 if (rc)
6878 ql_log(ql_log_info, vha, 0xffff,
6879 "Unable to insert s_id into host_map: %06x\n",
6880 key);
6881 return;
6882 }
6883 ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
6884 "replace existing vha in host_map %p %06x\n", vha, key);
6885 btree_update32(&vha->hw->tgt.host_map, key, vha);
6718 break; 6886 break;
6719 case RESET_VP_IDX: 6887 case RESET_VP_IDX:
6720 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; 6888 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
6721 break; 6889 break;
6722 case RESET_AL_PA: 6890 case RESET_AL_PA:
6723 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0; 6891 ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
6892 "clear vha in host_map %p %06x\n", vha, key);
6893 slot = btree_lookup32(&vha->hw->tgt.host_map, key);
6894 if (slot)
6895 btree_remove32(&vha->hw->tgt.host_map, key);
6896 vha->d_id.b24 = 0;
6724 break; 6897 break;
6725 } 6898 }
6726} 6899}
6727 6900
6901void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
6902{
6903 unsigned long flags;
6904 struct qla_hw_data *ha = vha->hw;
6905
6906 if (!vha->d_id.b24) {
6907 spin_lock_irqsave(&ha->vport_slock, flags);
6908 vha->d_id = id;
6909 qlt_update_vp_map(vha, SET_AL_PA);
6910 spin_unlock_irqrestore(&ha->vport_slock, flags);
6911 } else if (vha->d_id.b24 != id.b24) {
6912 spin_lock_irqsave(&ha->vport_slock, flags);
6913 qlt_update_vp_map(vha, RESET_AL_PA);
6914 vha->d_id = id;
6915 qlt_update_vp_map(vha, SET_AL_PA);
6916 spin_unlock_irqrestore(&ha->vport_slock, flags);
6917 }
6918}
6919
6728static int __init qlt_parse_ini_mode(void) 6920static int __init qlt_parse_ini_mode(void)
6729{ 6921{
6730 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) 6922 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
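
qlt_update_host_map() re-keys a vha when its 24-bit port ID changes: the old mapping is removed and the new one stored, all under vport_slock so concurrent lookups never see a half-updated map. A condensed sketch of that sequence (hypothetical names; GFP_ATOMIC because the insert runs under a spinlock):

#include <linux/btree.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>

static void rekey_host(struct btree_head32 *map, spinlock_t *lock,
                       void *host, u32 old_key, u32 new_key)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        if (old_key && old_key != new_key)
                btree_remove32(map, old_key);
        if (!btree_lookup32(map, new_key))
                btree_insert32(map, new_key, host, GFP_ATOMIC);
        else
                btree_update32(map, new_key, host);
        spin_unlock_irqrestore(lock, flags);
}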
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index a7f90dcaae37..d64420251194 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -378,6 +378,14 @@ static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
378 atio->u.isp24.fcp_cmnd.add_cdb_len = 0; 378 atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
379} 379}
380 380
381static inline int get_datalen_for_atio(struct atio_from_isp *atio)
382{
383 int len = atio->u.isp24.fcp_cmnd.add_cdb_len;
384
385 return (be32_to_cpu(get_unaligned((uint32_t *)
386 &atio->u.isp24.fcp_cmnd.add_cdb[len * 4])));
387}
388
381#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ 389#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
382 390
383/* 391/*
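
get_datalen_for_atio() reads the big-endian FCP_DL field that sits after a variable number of additional CDB bytes; the offset is add_cdb_len 4-byte words past the fixed part, so the load may be unaligned. An equivalent hypothetical helper using the combined unaligned/byte-order accessor:

#include <asm/unaligned.h>
#include <linux/types.h>

static u32 fcp_dl_after_add_cdb(const u8 *fixed_end, u8 add_cdb_len)
{
        /* add_cdb_len counts 4-byte words of additional CDB */
        return get_unaligned_be32(fixed_end + add_cdb_len * 4);
}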
@@ -667,7 +675,6 @@ struct qla_tgt_func_tmpl {
667 int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, 675 int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
668 unsigned char *, uint32_t, int, int, int); 676 unsigned char *, uint32_t, int, int, int);
669 void (*handle_data)(struct qla_tgt_cmd *); 677 void (*handle_data)(struct qla_tgt_cmd *);
670 void (*handle_dif_err)(struct qla_tgt_cmd *);
671 int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t, 678 int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t,
672 uint32_t); 679 uint32_t);
673 void (*free_cmd)(struct qla_tgt_cmd *); 680 void (*free_cmd)(struct qla_tgt_cmd *);
@@ -684,6 +691,9 @@ struct qla_tgt_func_tmpl {
684 void (*clear_nacl_from_fcport_map)(struct fc_port *); 691 void (*clear_nacl_from_fcport_map)(struct fc_port *);
685 void (*put_sess)(struct fc_port *); 692 void (*put_sess)(struct fc_port *);
686 void (*shutdown_sess)(struct fc_port *); 693 void (*shutdown_sess)(struct fc_port *);
694 int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts);
695 int (*chk_dif_tags)(uint32_t tag);
696 void (*add_target)(struct scsi_qla_host *);
687}; 697};
688 698
689int qla2x00_wait_for_hba_online(struct scsi_qla_host *); 699int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
@@ -720,8 +730,8 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
720#define QLA_TGT_ABORT_ALL 0xFFFE 730#define QLA_TGT_ABORT_ALL 0xFFFE
721#define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD 731#define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD
722#define QLA_TGT_NEXUS_LOSS 0xFFFC 732#define QLA_TGT_NEXUS_LOSS 0xFFFC
723#define QLA_TGT_ABTS 0xFFFB 733#define QLA_TGT_ABTS 0xFFFB
724#define QLA_TGT_2G_ABORT_TASK 0xFFFA 734#define QLA_TGT_2G_ABORT_TASK 0xFFFA
725 735
726/* Notify Acknowledge flags */ 736/* Notify Acknowledge flags */
727#define NOTIFY_ACK_RES_COUNT BIT_8 737#define NOTIFY_ACK_RES_COUNT BIT_8
@@ -845,6 +855,7 @@ enum trace_flags {
845 TRC_CMD_FREE = BIT_17, 855 TRC_CMD_FREE = BIT_17,
846 TRC_DATA_IN = BIT_18, 856 TRC_DATA_IN = BIT_18,
847 TRC_ABORT = BIT_19, 857 TRC_ABORT = BIT_19,
858 TRC_DIF_ERR = BIT_20,
848}; 859};
849 860
850struct qla_tgt_cmd { 861struct qla_tgt_cmd {
@@ -862,7 +873,6 @@ struct qla_tgt_cmd {
862 unsigned int sg_mapped:1; 873 unsigned int sg_mapped:1;
863 unsigned int free_sg:1; 874 unsigned int free_sg:1;
864 unsigned int write_data_transferred:1; 875 unsigned int write_data_transferred:1;
865 unsigned int ctx_dsd_alloced:1;
866 unsigned int q_full:1; 876 unsigned int q_full:1;
867 unsigned int term_exchg:1; 877 unsigned int term_exchg:1;
868 unsigned int cmd_sent_to_fw:1; 878 unsigned int cmd_sent_to_fw:1;
@@ -885,11 +895,25 @@ struct qla_tgt_cmd {
885 struct list_head cmd_list; 895 struct list_head cmd_list;
886 896
887 struct atio_from_isp atio; 897 struct atio_from_isp atio;
888 /* t10dif */ 898
899 uint8_t ctx_dsd_alloced;
900
901 /* T10-DIF */
902#define DIF_ERR_NONE 0
903#define DIF_ERR_GRD 1
904#define DIF_ERR_REF 2
905#define DIF_ERR_APP 3
906 int8_t dif_err_code;
889 struct scatterlist *prot_sg; 907 struct scatterlist *prot_sg;
890 uint32_t prot_sg_cnt; 908 uint32_t prot_sg_cnt;
891 uint32_t blk_sz; 909 uint32_t blk_sz, num_blks;
910 uint8_t scsi_status, sense_key, asc, ascq;
911
892 struct crc_context *ctx; 912 struct crc_context *ctx;
913 uint8_t *cdb;
914 uint64_t lba;
915 uint16_t a_guard, e_guard, a_app_tag, e_app_tag;
916 uint32_t a_ref_tag, e_ref_tag;
893 917
894 uint64_t jiffies_at_alloc; 918 uint64_t jiffies_at_alloc;
895 uint64_t jiffies_at_free; 919 uint64_t jiffies_at_free;
@@ -1053,4 +1077,7 @@ extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
1053extern void qlt_logo_completion_handler(fc_port_t *, int); 1077extern void qlt_logo_completion_handler(fc_port_t *, int);
1054extern void qlt_do_generation_tick(struct scsi_qla_host *, int *); 1078extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
1055 1079
1080void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t,
1081 uint8_t, uint8_t, uint8_t);
1082
1056#endif /* __QLA_TARGET_H */ 1083#endif /* __QLA_TARGET_H */
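The get_datalen_for_atio() helper added above pulls the FCP data length out of the ATIO: in the FCP_CMND layout, the 32-bit length word sits immediately after add_cdb_len dwords of additional CDB, so it has to be located at runtime and may land on an unaligned address. A minimal userspace sketch of the same arithmetic follows; the struct layout and names are invented stand-ins, not the qla2xxx definitions.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct fcp_cmnd_sketch {
	uint8_t add_cdb_len;	/* count of additional 4-byte CDB words */
	uint8_t add_cdb[20];	/* extra CDB bytes, then a be32 data length */
};

static uint32_t datalen_sketch(const struct fcp_cmnd_sketch *c)
{
	uint32_t be_len;

	/* The length starts add_cdb_len * 4 bytes into add_cdb[];
	 * memcpy() sidesteps the unaligned read, the same problem
	 * get_unaligned() handles in the kernel helper. */
	memcpy(&be_len, &c->add_cdb[c->add_cdb_len * 4], sizeof(be_len));
	return ntohl(be_len);	/* userspace stand-in for be32_to_cpu() */
}

int main(void)
{
	struct fcp_cmnd_sketch c = { .add_cdb_len = 1 };

	c.add_cdb[6] = 0x10;	/* one extra CDB dword, then 0x00001000 */
	return datalen_sketch(&c) == 4096 ? 0 : 1;
}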
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 3cb1964b7786..45bc84e8e3bf 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION	"8.07.00.38-k"
+#define QLA2XXX_VERSION	"9.00.00.00-k"
 
-#define QLA_DRIVER_MAJOR_VER	8
-#define QLA_DRIVER_MINOR_VER	7
+#define QLA_DRIVER_MAJOR_VER	9
+#define QLA_DRIVER_MINOR_VER	0
 #define QLA_DRIVER_PATCH_VER	0
 #define QLA_DRIVER_BETA_VER	0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 8e8ab0fa9672..7443e4efa3ae 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -531,6 +531,24 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 		return;
 	}
 
+	switch (cmd->dif_err_code) {
+	case DIF_ERR_GRD:
+		cmd->se_cmd.pi_err =
+		    TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+		break;
+	case DIF_ERR_REF:
+		cmd->se_cmd.pi_err =
+		    TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+		break;
+	case DIF_ERR_APP:
+		cmd->se_cmd.pi_err =
+		    TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+		break;
+	case DIF_ERR_NONE:
+	default:
+		break;
+	}
+
 	if (cmd->se_cmd.pi_err)
 		transport_generic_request_failure(&cmd->se_cmd,
 			cmd->se_cmd.pi_err);
@@ -555,25 +573,23 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
 	queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
 }
 
-static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
+static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
 {
-	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
-
-	/* take an extra kref to prevent cmd free too early.
-	 * need to wait for SCSI status/check condition to
-	 * finish responding generate by transport_generic_request_failure.
-	 */
-	kref_get(&cmd->se_cmd.cmd_kref);
-	transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
+	return 0;
 }
 
-/*
- * Called from qla_target.c:qlt_do_ctio_completion()
- */
-static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
+static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd,
+    uint16_t *pfw_prot_opts)
 {
-	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
-	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+
+	if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
+		*pfw_prot_opts |= PO_DISABLE_GUARD_CHECK;
+
+	if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG))
+		*pfw_prot_opts |= PO_DIS_APP_TAG_VALD;
+
+	return 0;
 }
 
 /*
@@ -1610,7 +1626,6 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
 static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
 	.handle_cmd		= tcm_qla2xxx_handle_cmd,
 	.handle_data		= tcm_qla2xxx_handle_data,
-	.handle_dif_err		= tcm_qla2xxx_handle_dif_err,
 	.handle_tmr		= tcm_qla2xxx_handle_tmr,
 	.free_cmd		= tcm_qla2xxx_free_cmd,
 	.free_mcmd		= tcm_qla2xxx_free_mcmd,
@@ -1622,6 +1637,8 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
 	.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
 	.put_sess		= tcm_qla2xxx_put_sess,
 	.shutdown_sess		= tcm_qla2xxx_shutdown_sess,
+	.get_dif_tags		= tcm_qla2xxx_dif_tags,
+	.chk_dif_tags		= tcm_qla2xxx_chk_dif_tags,
 };
 
 static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
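tcm_qla2xxx_dif_tags() above inverts the sense of the backend's protection bits: se_cmd.prot_checks says which T10-DIF checks to perform, while the firmware word collects "disable" flags for the checks to skip. A small sketch of that mapping, with invented flag values standing in for TARGET_DIF_CHECK_* and the PO_* firmware bits:

#include <stdint.h>
#include <stdio.h>

#define CHK_GUARD_SKETCH	(1u << 0)	/* stand-in: TARGET_DIF_CHECK_GUARD */
#define CHK_APPTAG_SKETCH	(1u << 1)	/* stand-in: TARGET_DIF_CHECK_APPTAG */
#define PO_DIS_GUARD_SKETCH	(1u << 4)	/* stand-in: PO_DISABLE_GUARD_CHECK */
#define PO_DIS_APPTAG_SKETCH	(1u << 5)	/* stand-in: PO_DIS_APP_TAG_VALD */

static uint16_t fw_prot_opts_sketch(uint32_t prot_checks)
{
	uint16_t opts = 0;

	/* Any check the backend did not ask for becomes a "disable"
	 * bit handed to the HBA firmware. */
	if (!(prot_checks & CHK_GUARD_SKETCH))
		opts |= PO_DIS_GUARD_SKETCH;
	if (!(prot_checks & CHK_APPTAG_SKETCH))
		opts |= PO_DIS_APPTAG_SKETCH;
	return opts;
}

int main(void)
{
	/* Guard checking requested, app-tag checking not: only the
	 * app-tag disable bit is set. */
	printf("0x%x\n", fw_prot_opts_sketch(CHK_GUARD_SKETCH));
	return 0;
}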
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index f5e330099bfc..fd7c16a7ca6e 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -43,7 +43,7 @@
 #include "target_core_ua.h"
 
 static sense_reason_t core_alua_check_transition(int state, int valid,
-					 int *primary);
+					 int *primary, int explicit);
 static int core_alua_set_tg_pt_secondary_state(
 		struct se_lun *lun, int explicit, int offline);
 
@@ -335,8 +335,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 		 * the state is a primary or secondary target port asymmetric
 		 * access state.
 		 */
-		rc = core_alua_check_transition(alua_access_state,
-						valid_states, &primary);
+		rc = core_alua_check_transition(alua_access_state, valid_states,
+						&primary, 1);
 		if (rc) {
 			/*
 			 * If the SET TARGET PORT GROUPS attempts to establish
@@ -691,7 +691,7 @@ target_alua_state_check(struct se_cmd *cmd)
 
 	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
 		return 0;
-	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
 		return 0;
 
 	/*
@@ -762,7 +762,7 @@ target_alua_state_check(struct se_cmd *cmd)
  * Check implicit and explicit ALUA state change request.
  */
 static sense_reason_t
-core_alua_check_transition(int state, int valid, int *primary)
+core_alua_check_transition(int state, int valid, int *primary, int explicit)
 {
 	/*
 	 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
@@ -804,11 +804,14 @@ core_alua_check_transition(int state, int valid, int *primary)
 		*primary = 0;
 		break;
 	case ALUA_ACCESS_STATE_TRANSITION:
-		/*
-		 * Transitioning is set internally, and
-		 * cannot be selected manually.
-		 */
-		goto not_supported;
+		if (!(valid & ALUA_T_SUP) || explicit)
+			/*
+			 * Transitioning is set internally and by tcmu daemon,
+			 * and cannot be selected through a STPG.
+			 */
+			goto not_supported;
+		*primary = 0;
+		break;
 	default:
 		pr_err("Unknown ALUA access state: 0x%02x\n", state);
 		return TCM_INVALID_PARAMETER_LIST;
@@ -1013,7 +1016,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
 static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 {
 	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
-		struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+		struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
 	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
 	bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
 			 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
@@ -1070,32 +1073,19 @@ static int core_alua_do_transition_tg_pt(
 	if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
 		return 0;
 
-	if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+	if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION)
 		return -EAGAIN;
 
 	/*
 	 * Flush any pending transitions
 	 */
-	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
-	    atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
-	    ALUA_ACCESS_STATE_TRANSITION) {
-		/* Just in case */
-		tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
-		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-		flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
-		wait_for_completion(&wait);
-		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
-		return 0;
-	}
+	if (!explicit)
+		flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
 
 	/*
 	 * Save the old primary ALUA access state, and set the current state
 	 * to ALUA_ACCESS_STATE_TRANSITION.
 	 */
-	tg_pt_gp->tg_pt_gp_alua_previous_state =
-		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
-	tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
-
 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
 			ALUA_ACCESS_STATE_TRANSITION);
 	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
@@ -1104,6 +1094,13 @@ static int core_alua_do_transition_tg_pt(
 
 	core_alua_queue_state_change_ua(tg_pt_gp);
 
+	if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+		return 0;
+
+	tg_pt_gp->tg_pt_gp_alua_previous_state =
+		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+	tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+
 	/*
 	 * Check for the optional ALUA primary state transition delay
 	 */
@@ -1117,17 +1114,9 @@ static int core_alua_do_transition_tg_pt(
 	atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
-		unsigned long transition_tmo;
-
-		transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
-		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-				   &tg_pt_gp->tg_pt_gp_transition_work,
-				   transition_tmo);
-	} else {
+	schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
+	if (explicit) {
 		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-				   &tg_pt_gp->tg_pt_gp_transition_work, 0);
 		wait_for_completion(&wait);
 		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
 	}
@@ -1149,8 +1138,12 @@ int core_alua_do_port_transition(
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	int primary, valid_states, rc = 0;
 
+	if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
+		return -ENODEV;
+
 	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
-	if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
+	if (core_alua_check_transition(new_state, valid_states, &primary,
+				       explicit) != 0)
 		return -EINVAL;
 
 	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
@@ -1695,8 +1688,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
 	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
 	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
 	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-	INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
-			  core_alua_do_transition_tg_pt_work);
+	INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
+		  core_alua_do_transition_tg_pt_work);
 	tg_pt_gp->tg_pt_gp_dev = dev;
 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
 		   ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
@@ -1804,7 +1797,7 @@ void core_alua_free_tg_pt_gp(
 	dev->t10_alua.alua_tg_pt_gps_counter--;
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-	flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+	flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
 
 	/*
 	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
@@ -1973,7 +1966,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
 	unsigned char buf[TG_PT_GROUP_NAME_BUF];
 	int move = 0;
 
-	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
 	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
 		return -ENODEV;
 
@@ -2230,7 +2223,7 @@ ssize_t core_alua_store_offline_bit(
 	unsigned long tmp;
 	int ret;
 
-	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
 	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
 		return -ENODEV;
 
@@ -2316,7 +2309,8 @@ ssize_t core_alua_store_secondary_write_metadata(
 
 int core_setup_alua(struct se_device *dev)
 {
-	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+	if (!(dev->transport->transport_flags &
+	      TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
 	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
 		struct t10_alua_lu_gp_member *lu_gp_mem;
 
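The rework above moves ALUA transitions from delayed work to a plain work item: an implicit transition flushes any still-running work and returns, while an explicit STPG queues the work and then blocks on a completion until the worker signals it. A userspace sketch of that queue-then-optionally-wait shape, using a POSIX condition variable as the completion analogue (compile with -lpthread; all names invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
static bool work_done;

static void *transition_worker(void *arg)
{
	(void)arg;
	/* ... perform the state change and queue unit attentions ... */
	pthread_mutex_lock(&lock);
	work_done = true;		/* complete(&wait) analogue */
	pthread_cond_broadcast(&done_cv);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void do_transition_sketch(bool explicit_stpg)
{
	pthread_t worker;

	work_done = false;
	/* schedule_work() analogue */
	pthread_create(&worker, NULL, transition_worker, NULL);
	if (explicit_stpg) {
		/* wait_for_completion() analogue: only the explicit
		 * caller blocks until the worker finishes. */
		pthread_mutex_lock(&lock);
		while (!work_done)
			pthread_cond_wait(&done_cv, &lock);
		pthread_mutex_unlock(&lock);
	}
	pthread_detach(worker);
}

int main(void)
{
	do_transition_sketch(true);
	puts("explicit transition completed");
	return 0;
}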
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 54b36c9835be..38b5025e4c7a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -421,6 +421,10 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
 		pr_err("Missing tfo->aborted_task()\n");
 		return -EINVAL;
 	}
+	if (!tfo->check_stop_free) {
+		pr_err("Missing tfo->check_stop_free()\n");
+		return -EINVAL;
+	}
 	/*
 	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
 	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index a8f8e53f2f57..94cda7991e80 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
 
 	buf = kzalloc(12, GFP_KERNEL);
 	if (!buf)
-		return;
+		goto out_free;
 
 	memset(cdb, 0, MAX_COMMAND_SIZE);
 	cdb[0] = MODE_SENSE;
@@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
 	 * If MODE_SENSE still returns zero, set the default value to 1024.
 	 */
 	sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+out_free:
 	if (!sdev->sector_size)
 		sdev->sector_size = 1024;
-out_free:
+
 	kfree(buf);
 }
 
@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
 			sd->lun, sd->queue_depth);
 	}
 
-	dev->dev_attrib.hw_block_size = sd->sector_size;
+	dev->dev_attrib.hw_block_size =
+		min_not_zero((int)sd->sector_size, 512);
 	dev->dev_attrib.hw_max_sectors =
-		min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+		min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
 	dev->dev_attrib.hw_queue_depth = sd->queue_depth;
 
 	/*
@@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
 	/*
 	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
 	 */
-	if (sd->type == TYPE_TAPE)
+	if (sd->type == TYPE_TAPE) {
 		pscsi_tape_read_blocksize(dev, sd);
+		dev->dev_attrib.hw_block_size = sd->sector_size;
+	}
 	return 0;
 }
 
@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
 /*
  * Called with struct Scsi_Host->host_lock called.
  */
-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
 	__releases(sh->host_lock)
 {
 	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
 	return 0;
 }
 
-/*
- * Called with struct Scsi_Host->host_lock called.
- */
-static int pscsi_create_type_other(struct se_device *dev,
-		struct scsi_device *sd)
-	__releases(sh->host_lock)
-{
-	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
-	struct Scsi_Host *sh = sd->host;
-	int ret;
-
-	spin_unlock_irq(sh->host_lock);
-	ret = pscsi_add_device_to_list(dev, sd);
-	if (ret)
-		return ret;
-
-	pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
-		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
-		sd->channel, sd->id, sd->lun);
-	return 0;
-}
-
 static int pscsi_configure_device(struct se_device *dev)
 {
 	struct se_hba *hba = dev->se_hba;
@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev)
 	case TYPE_DISK:
 		ret = pscsi_create_type_disk(dev, sd);
 		break;
-	case TYPE_ROM:
-		ret = pscsi_create_type_rom(dev, sd);
-		break;
 	default:
-		ret = pscsi_create_type_other(dev, sd);
+		ret = pscsi_create_type_nondisk(dev, sd);
 		break;
 	}
 
@@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev)
 		else if (pdv->pdv_lld_host)
 			scsi_host_put(pdv->pdv_lld_host);
 
-		if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
-			scsi_device_put(sd);
+		scsi_device_put(sd);
 
 		pdv->pdv_sd = NULL;
 	}
@@ -1064,7 +1042,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
 	if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
 		return pdv->pdv_bd->bd_part->nr_sects;
 
-	dump_stack();
 	return 0;
 }
 
@@ -1103,7 +1080,8 @@ static void pscsi_req_done(struct request *req, int uptodate)
 static const struct target_backend_ops pscsi_ops = {
 	.name			= "pscsi",
 	.owner			= THIS_MODULE,
-	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
+	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH |
+				  TRANSPORT_FLAG_PASSTHROUGH_ALUA,
 	.attach_hba		= pscsi_attach_hba,
 	.detach_hba		= pscsi_detach_hba,
 	.pmode_enable_hba	= pscsi_pmode_enable_hba,
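pscsi_add_device_to_list() now clamps hw_block_size and hw_max_sectors with min_not_zero(), so a zero (unprobed) value from the SCSI device can no longer propagate into the attribute. A sketch of the min_not_zero() semantics relied on above, assuming its usual kernel meaning of "smaller of the two, treating zero as unset":

#include <stdio.h>

static unsigned int min_not_zero_sketch(unsigned int a, unsigned int b)
{
	if (a == 0)
		return b;
	if (b == 0)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	/* An unprobed sector size of 0 falls back to the other bound. */
	printf("%u\n", min_not_zero_sketch(0, 512));	/* 512 */
	printf("%u\n", min_not_zero_sketch(4096, 512));	/* 512 */
	return 0;
}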
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 68d8aef7ab78..c194063f169b 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1105,9 +1105,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 			return ret;
 		break;
 	case VERIFY:
+	case VERIFY_16:
 		size = 0;
-		sectors = transport_get_sectors_10(cdb);
-		cmd->t_task_lba = transport_lba_32(cdb);
+		if (cdb[0] == VERIFY) {
+			sectors = transport_get_sectors_10(cdb);
+			cmd->t_task_lba = transport_lba_32(cdb);
+		} else {
+			sectors = transport_get_sectors_16(cdb);
+			cmd->t_task_lba = transport_lba_64(cdb);
+		}
 		cmd->execute_cmd = sbc_emulate_noop;
 		goto check_lba;
 	case REZERO_UNIT:
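The VERIFY hunk above branches on the opcode because the two CDBs carry their fields at different offsets: VERIFY(10) has a 32-bit LBA at bytes 2-5 and a 16-bit length at bytes 7-8, while VERIFY(16) has a 64-bit LBA at bytes 2-9 and a 32-bit length at bytes 10-13. A standalone sketch of that extraction (helper names invented; offsets per SBC):

#include <stdint.h>

static uint64_t get_be_sketch(const uint8_t *p, int n)
{
	uint64_t v = 0;

	while (n--)
		v = (v << 8) | *p++;
	return v;
}

static void parse_verify_sketch(const uint8_t *cdb, uint64_t *lba,
				uint32_t *sectors)
{
	if (cdb[0] == 0x2f) {			/* VERIFY(10) */
		*lba = get_be_sketch(&cdb[2], 4);
		*sectors = get_be_sketch(&cdb[7], 2);
	} else {				/* VERIFY(16), opcode 0x8f */
		*lba = get_be_sketch(&cdb[2], 8);
		*sectors = get_be_sketch(&cdb[10], 4);
	}
}

int main(void)
{
	const uint8_t v16[16] = { 0x8f, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 8 };
	uint64_t lba;
	uint32_t sectors;

	parse_verify_sketch(v16, &lba, &sectors);
	return (lba == 1 && sectors == 8) ? 0 : 1;
}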
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index c0dbfa016575..6fb191914f45 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -602,7 +602,8 @@ int core_tpg_add_lun(
 	if (ret)
 		goto out_kill_ref;
 
-	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+	if (!(dev->transport->transport_flags &
+	      TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
 	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
 		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
 
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 434d9d693989..b1a3cdb29468 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -636,8 +636,7 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
 	 * Fabric modules are expected to return '1' here if the se_cmd being
 	 * passed is released at this point, or zero if not being released.
 	 */
-	return cmd->se_tfo->check_stop_free ? cmd->se_tfo->check_stop_free(cmd)
-	       : 0;
+	return cmd->se_tfo->check_stop_free(cmd);
 }
 
 static void transport_lun_remove_cmd(struct se_cmd *cmd)
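Together with the target_core_configfs.c hunk earlier, this change trades a NULL test on every command completion for a one-time check at fabric registration: check_stop_free() becomes mandatory, so the hot path may call it unconditionally. A compact sketch of that register-time-check / unconditional-dispatch pattern (all names invented):

#include <stdio.h>

struct fabric_ops_sketch {
	int (*check_stop_free)(void *cmd);
};

static int register_fabric_sketch(const struct fabric_ops_sketch *tfo)
{
	/* One-time check at registration, as target_fabric_tf_ops_check()
	 * now does for check_stop_free()... */
	if (!tfo->check_stop_free) {
		fprintf(stderr, "Missing tfo->check_stop_free()\n");
		return -1;
	}
	return 0;
}

static int check_stop_sketch(const struct fabric_ops_sketch *tfo, void *cmd)
{
	/* ...so the per-command path may call it unconditionally. */
	return tfo->check_stop_free(cmd);
}

static int always_released(void *cmd)
{
	(void)cmd;
	return 1;	/* "the se_cmd was released here" */
}

int main(void)
{
	struct fabric_ops_sketch good = { .check_stop_free = always_released };

	if (register_fabric_sketch(&good))
		return 1;
	return check_stop_sketch(&good, NULL) == 1 ? 0 : 1;
}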
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index c3adefe95e50..c6874c38a10b 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -28,6 +28,7 @@
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/highmem.h>
+#include <linux/configfs.h>
 #include <net/genetlink.h>
 #include <scsi/scsi_common.h>
 #include <scsi/scsi_proto.h>
@@ -112,6 +113,7 @@ struct tcmu_dev {
 	spinlock_t commands_lock;
 
 	struct timer_list timeout;
+	unsigned int cmd_time_out;
 
 	char dev_config[TCMU_CONFIG_LEN];
 };
@@ -172,7 +174,9 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
 
 	tcmu_cmd->se_cmd = se_cmd;
 	tcmu_cmd->tcmu_dev = udev;
-	tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
+	if (udev->cmd_time_out)
+		tcmu_cmd->deadline = jiffies +
+					msecs_to_jiffies(udev->cmd_time_out);
 
 	idr_preload(GFP_KERNEL);
 	spin_lock_irq(&udev->commands_lock);
@@ -451,7 +455,11 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 
 		pr_debug("sleeping for ring space\n");
 		spin_unlock_irq(&udev->cmdr_lock);
-		ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
+		if (udev->cmd_time_out)
+			ret = schedule_timeout(
+					msecs_to_jiffies(udev->cmd_time_out));
+		else
+			ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
 		finish_wait(&udev->wait_cmdr, &__wait);
 		if (!ret) {
 			pr_warn("tcmu: command timed out\n");
@@ -526,8 +534,9 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	/* TODO: only if FLUSH and FUA? */
 	uio_event_notify(&udev->uio_info);
 
-	mod_timer(&udev->timeout,
-		round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
+	if (udev->cmd_time_out)
+		mod_timer(&udev->timeout, round_jiffies_up(jiffies +
+			msecs_to_jiffies(udev->cmd_time_out)));
 
 	return TCM_NO_SENSE;
 }
@@ -742,6 +751,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
 	}
 
 	udev->hba = hba;
+	udev->cmd_time_out = TCMU_TIME_OUT;
 
 	init_waitqueue_head(&udev->wait_cmdr);
 	spin_lock_init(&udev->cmdr_lock);
@@ -960,7 +970,8 @@ static int tcmu_configure_device(struct se_device *dev)
 	if (dev->dev_attrib.hw_block_size == 0)
 		dev->dev_attrib.hw_block_size = 512;
 	/* Other attributes can be configured in userspace */
-	dev->dev_attrib.hw_max_sectors = 128;
+	if (!dev->dev_attrib.hw_max_sectors)
+		dev->dev_attrib.hw_max_sectors = 128;
 	dev->dev_attrib.hw_queue_depth = 128;
 
 	ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
@@ -997,6 +1008,11 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
 	kfree(udev);
 }
 
+static bool tcmu_dev_configured(struct tcmu_dev *udev)
+{
+	return udev->uio_info.uio_dev ? true : false;
+}
+
 static void tcmu_free_device(struct se_device *dev)
 {
 	struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1018,8 +1034,7 @@ static void tcmu_free_device(struct se_device *dev)
 	spin_unlock_irq(&udev->commands_lock);
 	WARN_ON(!all_expired);
 
-	/* Device was configured */
-	if (udev->uio_info.uio_dev) {
+	if (tcmu_dev_configured(udev)) {
 		tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
 				   udev->uio_info.uio_dev->minor);
 
@@ -1031,16 +1046,42 @@ static void tcmu_free_device(struct se_device *dev)
 }
 
 enum {
-	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
+	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
+	Opt_err,
 };
 
 static match_table_t tokens = {
 	{Opt_dev_config, "dev_config=%s"},
 	{Opt_dev_size, "dev_size=%u"},
 	{Opt_hw_block_size, "hw_block_size=%u"},
+	{Opt_hw_max_sectors, "hw_max_sectors=%u"},
 	{Opt_err, NULL}
 };
 
+static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
+{
+	unsigned long tmp_ul;
+	char *arg_p;
+	int ret;
+
+	arg_p = match_strdup(arg);
+	if (!arg_p)
+		return -ENOMEM;
+
+	ret = kstrtoul(arg_p, 0, &tmp_ul);
+	kfree(arg_p);
+	if (ret < 0) {
+		pr_err("kstrtoul() failed for dev attrib\n");
+		return ret;
+	}
+	if (!tmp_ul) {
+		pr_err("dev attrib must be nonzero\n");
+		return -EINVAL;
+	}
+	*dev_attrib = tmp_ul;
+	return 0;
+}
+
 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
 		const char *page, ssize_t count)
 {
@@ -1048,7 +1089,6 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
 	char *orig, *ptr, *opts, *arg_p;
 	substring_t args[MAX_OPT_ARGS];
 	int ret = 0, token;
-	unsigned long tmp_ul;
 
 	opts = kstrdup(page, GFP_KERNEL);
 	if (!opts)
@@ -1082,26 +1122,19 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
 				pr_err("kstrtoul() failed for dev_size=\n");
 			break;
 		case Opt_hw_block_size:
-			arg_p = match_strdup(&args[0]);
-			if (!arg_p) {
-				ret = -ENOMEM;
-				break;
-			}
-			ret = kstrtoul(arg_p, 0, &tmp_ul);
-			kfree(arg_p);
-			if (ret < 0) {
-				pr_err("kstrtoul() failed for hw_block_size=\n");
-				break;
-			}
-			if (!tmp_ul) {
-				pr_err("hw_block_size must be nonzero\n");
-				break;
-			}
-			dev->dev_attrib.hw_block_size = tmp_ul;
+			ret = tcmu_set_dev_attrib(&args[0],
+					&(dev->dev_attrib.hw_block_size));
+			break;
+		case Opt_hw_max_sectors:
+			ret = tcmu_set_dev_attrib(&args[0],
+					&(dev->dev_attrib.hw_max_sectors));
 			break;
 		default:
 			break;
 		}
+
+		if (ret)
+			break;
 	}
 
 	kfree(orig);
@@ -1134,7 +1167,48 @@ tcmu_parse_cdb(struct se_cmd *cmd)
 	return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
 }
 
-static const struct target_backend_ops tcmu_ops = {
+static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
+{
+	struct se_dev_attrib *da = container_of(to_config_group(item),
+					struct se_dev_attrib, da_group);
+	struct tcmu_dev *udev = container_of(da->da_dev,
+					struct tcmu_dev, se_dev);
+
+	return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
+}
+
+static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
+				       size_t count)
+{
+	struct se_dev_attrib *da = container_of(to_config_group(item),
+					struct se_dev_attrib, da_group);
+	struct tcmu_dev *udev = container_of(da->da_dev,
+					struct tcmu_dev, se_dev);
+	u32 val;
+	int ret;
+
+	if (da->da_dev->export_count) {
+		pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
+		return -EINVAL;
+	}
+
+	ret = kstrtou32(page, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (!val) {
+		pr_err("Illegal value for cmd_time_out\n");
+		return -EINVAL;
+	}
+
+	udev->cmd_time_out = val * MSEC_PER_SEC;
+	return count;
+}
+CONFIGFS_ATTR(tcmu_, cmd_time_out);
+
+static struct configfs_attribute **tcmu_attrs;
+
+static struct target_backend_ops tcmu_ops = {
 	.name			= "user",
 	.owner			= THIS_MODULE,
 	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
@@ -1148,12 +1222,12 @@ static const struct target_backend_ops tcmu_ops = {
 	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
 	.get_device_type	= sbc_get_device_type,
 	.get_blocks		= tcmu_get_blocks,
-	.tb_dev_attrib_attrs	= passthrough_attrib_attrs,
+	.tb_dev_attrib_attrs	= NULL,
 };
 
 static int __init tcmu_module_init(void)
 {
-	int ret;
+	int ret, i, len = 0;
 
 	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
 
@@ -1175,12 +1249,31 @@ static int __init tcmu_module_init(void)
 		goto out_unreg_device;
 	}
 
+	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+		len += sizeof(struct configfs_attribute *);
+	}
+	len += sizeof(struct configfs_attribute *) * 2;
+
+	tcmu_attrs = kzalloc(len, GFP_KERNEL);
+	if (!tcmu_attrs) {
+		ret = -ENOMEM;
+		goto out_unreg_genl;
+	}
+
+	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+		tcmu_attrs[i] = passthrough_attrib_attrs[i];
+	}
+	tcmu_attrs[i] = &tcmu_attr_cmd_time_out;
+	tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
+
 	ret = transport_backend_register(&tcmu_ops);
 	if (ret)
-		goto out_unreg_genl;
+		goto out_attrs;
 
 	return 0;
 
+out_attrs:
+	kfree(tcmu_attrs);
 out_unreg_genl:
 	genl_unregister_family(&tcmu_genl_family);
 out_unreg_device:
@@ -1194,6 +1287,7 @@ out_free_cache:
 static void __exit tcmu_module_exit(void)
 {
 	target_backend_unregister(&tcmu_ops);
+	kfree(tcmu_attrs);
 	genl_unregister_family(&tcmu_genl_family);
 	root_device_unregister(tcmu_root_device);
 	kmem_cache_destroy(tcmu_cmd_cache);
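The new cmd_time_out attribute is entered in seconds, stored in milliseconds, and rejects zero as well as changes while the device is exported. A userspace sketch of the store path's unit conversion and validation (constants and names invented):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MSEC_PER_SEC_SKETCH 1000UL

static unsigned long cmd_time_out_ms = 30 * MSEC_PER_SEC_SKETCH;

static int store_cmd_time_out_sketch(const char *page)
{
	char *end;
	unsigned long val = strtoul(page, &end, 0);

	if (end == page || val == 0)
		return -EINVAL;	/* mirrors the "Illegal value" rejection */
	cmd_time_out_ms = val * MSEC_PER_SEC_SKETCH;	/* seconds -> ms */
	return 0;
}

int main(void)
{
	store_cmd_time_out_sketch("45");
	/* the show path divides back down to seconds */
	printf("%lu\n", cmd_time_out_ms / MSEC_PER_SEC_SKETCH);	/* 45 */
	return 0;
}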