author     Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-17 16:15:55 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-17 16:15:55 -0500
commit     8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree       a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/scsi/qla2xxx
parent     406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'drivers/scsi/qla2xxx')
28 files changed, 3353 insertions, 17027 deletions
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 317a7fdc3b8..6208d562890 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -25,12 +25,3 @@ config SCSI_QLA_FC | |||
25 | Firmware images can be retrieved from: | 25 | Firmware images can be retrieved from: |
26 | 26 | ||
27 | ftp://ftp.qlogic.com/outgoing/linux/firmware/ | 27 | ftp://ftp.qlogic.com/outgoing/linux/firmware/ |
28 | |||
29 | config TCM_QLA2XXX | ||
30 | tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs" | ||
31 | depends on SCSI_QLA_FC && TARGET_CORE | ||
32 | select LIBFC | ||
33 | select BTREE | ||
34 | default n | ||
35 | ---help--- | ||
36 | Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs | ||
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index dce7d788cdc..5df782f4a09 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,6 +1,5 @@ | |||
1 | qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ | 1 | qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ |
2 | qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \ | 2 | qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \ |
3 | qla_nx.o qla_target.o | 3 | qla_nx.o |
4 | 4 | ||
5 | obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o | 5 | obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o |
6 | obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o | ||
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 83d798428c1..a31e05f3bfd 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1,11 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | #include "qla_def.h" | 7 | #include "qla_def.h" |
8 | #include "qla_target.h" | ||
9 | 8 | ||
10 | #include <linux/kthread.h> | 9 | #include <linux/kthread.h> |
11 | #include <linux/vmalloc.h> | 10 | #include <linux/vmalloc.h> |
@@ -24,29 +23,12 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj, | |||
24 | struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, | 23 | struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, |
25 | struct device, kobj))); | 24 | struct device, kobj))); |
26 | struct qla_hw_data *ha = vha->hw; | 25 | struct qla_hw_data *ha = vha->hw; |
27 | int rval = 0; | ||
28 | 26 | ||
29 | if (!(ha->fw_dump_reading || ha->mctp_dump_reading)) | 27 | if (ha->fw_dump_reading == 0) |
30 | return 0; | 28 | return 0; |
31 | 29 | ||
32 | if (IS_QLA82XX(ha)) { | 30 | return memory_read_from_buffer(buf, count, &off, ha->fw_dump, |
33 | if (off < ha->md_template_size) { | ||
34 | rval = memory_read_from_buffer(buf, count, | ||
35 | &off, ha->md_tmplt_hdr, ha->md_template_size); | ||
36 | return rval; | ||
37 | } | ||
38 | off -= ha->md_template_size; | ||
39 | rval = memory_read_from_buffer(buf, count, | ||
40 | &off, ha->md_dump, ha->md_dump_size); | ||
41 | return rval; | ||
42 | } else if (ha->mctp_dumped && ha->mctp_dump_reading) | ||
43 | return memory_read_from_buffer(buf, count, &off, ha->mctp_dump, | ||
44 | MCTP_DUMP_SIZE); | ||
45 | else if (ha->fw_dump_reading) | ||
46 | return memory_read_from_buffer(buf, count, &off, ha->fw_dump, | ||
47 | ha->fw_dump_len); | 31 | ha->fw_dump_len); |
48 | else | ||
49 | return 0; | ||
50 | } | 32 | } |
51 | 33 | ||
52 | static ssize_t | 34 | static ssize_t |
@@ -59,6 +41,12 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, | |||
59 | struct qla_hw_data *ha = vha->hw; | 41 | struct qla_hw_data *ha = vha->hw; |
60 | int reading; | 42 | int reading; |
61 | 43 | ||
44 | if (IS_QLA82XX(ha)) { | ||
45 | ql_dbg(ql_dbg_user, vha, 0x705b, | ||
46 | "Firmware dump not supported for ISP82xx\n"); | ||
47 | return count; | ||
48 | } | ||
49 | |||
62 | if (off != 0) | 50 | if (off != 0) |
63 | return (0); | 51 | return (0); |
64 | 52 | ||
@@ -71,10 +59,6 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, | |||
71 | ql_log(ql_log_info, vha, 0x705d, | 59 | ql_log(ql_log_info, vha, 0x705d, |
72 | "Firmware dump cleared on (%ld).\n", vha->host_no); | 60 | "Firmware dump cleared on (%ld).\n", vha->host_no); |
73 | 61 | ||
74 | if (IS_QLA82XX(vha->hw)) { | ||
75 | qla82xx_md_free(vha); | ||
76 | qla82xx_md_prep(vha); | ||
77 | } | ||
78 | ha->fw_dump_reading = 0; | 62 | ha->fw_dump_reading = 0; |
79 | ha->fw_dumped = 0; | 63 | ha->fw_dumped = 0; |
80 | break; | 64 | break; |
@@ -91,45 +75,10 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, | |||
91 | qla2x00_alloc_fw_dump(vha); | 75 | qla2x00_alloc_fw_dump(vha); |
92 | break; | 76 | break; |
93 | case 3: | 77 | case 3: |
94 | if (IS_QLA82XX(ha)) { | 78 | qla2x00_system_error(vha); |
95 | qla82xx_idc_lock(ha); | ||
96 | qla82xx_set_reset_owner(vha); | ||
97 | qla82xx_idc_unlock(ha); | ||
98 | } else | ||
99 | qla2x00_system_error(vha); | ||
100 | break; | ||
101 | case 4: | ||
102 | if (IS_QLA82XX(ha)) { | ||
103 | if (ha->md_tmplt_hdr) | ||
104 | ql_dbg(ql_dbg_user, vha, 0x705b, | ||
105 | "MiniDump supported with this firmware.\n"); | ||
106 | else | ||
107 | ql_dbg(ql_dbg_user, vha, 0x709d, | ||
108 | "MiniDump not supported with this firmware.\n"); | ||
109 | } | ||
110 | break; | ||
111 | case 5: | ||
112 | if (IS_QLA82XX(ha)) | ||
113 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | ||
114 | break; | ||
115 | case 6: | ||
116 | if (!ha->mctp_dump_reading) | ||
117 | break; | ||
118 | ql_log(ql_log_info, vha, 0x70c1, | ||
119 | "MCTP dump cleared on (%ld).\n", vha->host_no); | ||
120 | ha->mctp_dump_reading = 0; | ||
121 | ha->mctp_dumped = 0; | ||
122 | break; | ||
123 | case 7: | ||
124 | if (ha->mctp_dumped && !ha->mctp_dump_reading) { | ||
125 | ha->mctp_dump_reading = 1; | ||
126 | ql_log(ql_log_info, vha, 0x70c2, | ||
127 | "Raw mctp dump ready for read on (%ld).\n", | ||
128 | vha->host_no); | ||
129 | } | ||
130 | break; | 79 | break; |
131 | } | 80 | } |
132 | return count; | 81 | return (count); |
133 | } | 82 | } |
134 | 83 | ||
135 | static struct bin_attribute sysfs_fw_dump_attr = { | 84 | static struct bin_attribute sysfs_fw_dump_attr = { |
@@ -173,7 +122,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, | |||
173 | 122 | ||
174 | if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size || | 123 | if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size || |
175 | !ha->isp_ops->write_nvram) | 124 | !ha->isp_ops->write_nvram) |
176 | return -EINVAL; | 125 | return 0; |
177 | 126 | ||
178 | /* Checksum NVRAM. */ | 127 | /* Checksum NVRAM. */ |
179 | if (IS_FWI2_CAPABLE(ha)) { | 128 | if (IS_FWI2_CAPABLE(ha)) { |
@@ -216,7 +165,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, | |||
216 | qla2xxx_wake_dpc(vha); | 165 | qla2xxx_wake_dpc(vha); |
217 | qla2x00_wait_for_chip_reset(vha); | 166 | qla2x00_wait_for_chip_reset(vha); |
218 | 167 | ||
219 | return count; | 168 | return (count); |
220 | } | 169 | } |
221 | 170 | ||
222 | static struct bin_attribute sysfs_nvram_attr = { | 171 | static struct bin_attribute sysfs_nvram_attr = { |
@@ -290,10 +239,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, | |||
290 | int val, valid; | 239 | int val, valid; |
291 | 240 | ||
292 | if (off) | 241 | if (off) |
293 | return -EINVAL; | 242 | return 0; |
294 | 243 | ||
295 | if (unlikely(pci_channel_offline(ha->pdev))) | 244 | if (unlikely(pci_channel_offline(ha->pdev))) |
296 | return -EAGAIN; | 245 | return 0; |
297 | 246 | ||
298 | if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) | 247 | if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) |
299 | return -EINVAL; | 248 | return -EINVAL; |
@@ -304,7 +253,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, | |||
304 | case 0: | 253 | case 0: |
305 | if (ha->optrom_state != QLA_SREADING && | 254 | if (ha->optrom_state != QLA_SREADING && |
306 | ha->optrom_state != QLA_SWRITING) | 255 | ha->optrom_state != QLA_SWRITING) |
307 | return -EINVAL; | 256 | break; |
308 | 257 | ||
309 | ha->optrom_state = QLA_SWAITING; | 258 | ha->optrom_state = QLA_SWAITING; |
310 | 259 | ||
@@ -317,7 +266,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, | |||
317 | break; | 266 | break; |
318 | case 1: | 267 | case 1: |
319 | if (ha->optrom_state != QLA_SWAITING) | 268 | if (ha->optrom_state != QLA_SWAITING) |
320 | return -EINVAL; | 269 | break; |
321 | 270 | ||
322 | ha->optrom_region_start = start; | 271 | ha->optrom_region_start = start; |
323 | ha->optrom_region_size = start + size > ha->optrom_size ? | 272 | ha->optrom_region_size = start + size > ha->optrom_size ? |
@@ -331,7 +280,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, | |||
331 | "(%x).\n", ha->optrom_region_size); | 280 | "(%x).\n", ha->optrom_region_size); |
332 | 281 | ||
333 | ha->optrom_state = QLA_SWAITING; | 282 | ha->optrom_state = QLA_SWAITING; |
334 | return -ENOMEM; | 283 | return count; |
335 | } | 284 | } |
336 | 285 | ||
337 | if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { | 286 | if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { |
@@ -350,7 +299,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, | |||
350 | break; | 299 | break; |
351 | case 2: | 300 | case 2: |
352 | if (ha->optrom_state != QLA_SWAITING) | 301 | if (ha->optrom_state != QLA_SWAITING) |
353 | return -EINVAL; | 302 | break; |
354 | 303 | ||
355 | /* | 304 | /* |
356 | * We need to be more restrictive on which FLASH regions are | 305 | * We need to be more restrictive on which FLASH regions are |
@@ -378,8 +327,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, | |||
378 | else if (start == (ha->flt_region_boot * 4) || | 327 | else if (start == (ha->flt_region_boot * 4) || |
379 | start == (ha->flt_region_fw * 4)) | 328 | start == (ha->flt_region_fw * 4)) |
380 | valid = 1; | 329 | valid = 1; |
381 | else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) | 330 | else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha)) |
382 | || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) | ||
383 | valid = 1; | 331 | valid = 1; |
384 | if (!valid) { | 332 | if (!valid) { |
385 | ql_log(ql_log_warn, vha, 0x7065, | 333 | ql_log(ql_log_warn, vha, 0x7065, |
@@ -399,7 +347,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, | |||
399 | "(%x)\n", ha->optrom_region_size); | 347 | "(%x)\n", ha->optrom_region_size); |
400 | 348 | ||
401 | ha->optrom_state = QLA_SWAITING; | 349 | ha->optrom_state = QLA_SWAITING; |
402 | return -ENOMEM; | 350 | return count; |
403 | } | 351 | } |
404 | 352 | ||
405 | ql_dbg(ql_dbg_user, vha, 0x7067, | 353 | ql_dbg(ql_dbg_user, vha, 0x7067, |
@@ -410,7 +358,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, | |||
410 | break; | 358 | break; |
411 | case 3: | 359 | case 3: |
412 | if (ha->optrom_state != QLA_SWRITING) | 360 | if (ha->optrom_state != QLA_SWRITING) |
413 | return -EINVAL; | 361 | break; |
414 | 362 | ||
415 | if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { | 363 | if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { |
416 | ql_log(ql_log_warn, vha, 0x7068, | 364 | ql_log(ql_log_warn, vha, 0x7068, |
@@ -426,7 +374,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, | |||
426 | ha->optrom_region_start, ha->optrom_region_size); | 374 | ha->optrom_region_start, ha->optrom_region_size); |
427 | break; | 375 | break; |
428 | default: | 376 | default: |
429 | return -EINVAL; | 377 | count = -EINVAL; |
430 | } | 378 | } |
431 | return count; | 379 | return count; |
432 | } | 380 | } |
@@ -450,10 +398,10 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj, | |||
450 | struct qla_hw_data *ha = vha->hw; | 398 | struct qla_hw_data *ha = vha->hw; |
451 | 399 | ||
452 | if (unlikely(pci_channel_offline(ha->pdev))) | 400 | if (unlikely(pci_channel_offline(ha->pdev))) |
453 | return -EAGAIN; | 401 | return 0; |
454 | 402 | ||
455 | if (!capable(CAP_SYS_ADMIN)) | 403 | if (!capable(CAP_SYS_ADMIN)) |
456 | return -EINVAL; | 404 | return 0; |
457 | 405 | ||
458 | if (IS_NOCACHE_VPD_TYPE(ha)) | 406 | if (IS_NOCACHE_VPD_TYPE(ha)) |
459 | ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2, | 407 | ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2, |
@@ -490,17 +438,17 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj, | |||
490 | 438 | ||
491 | /* Update flash version information for 4Gb & above. */ | 439 | /* Update flash version information for 4Gb & above. */ |
492 | if (!IS_FWI2_CAPABLE(ha)) | 440 | if (!IS_FWI2_CAPABLE(ha)) |
493 | return -EINVAL; | 441 | goto done; |
494 | 442 | ||
495 | tmp_data = vmalloc(256); | 443 | tmp_data = vmalloc(256); |
496 | if (!tmp_data) { | 444 | if (!tmp_data) { |
497 | ql_log(ql_log_warn, vha, 0x706b, | 445 | ql_log(ql_log_warn, vha, 0x706b, |
498 | "Unable to allocate memory for VPD information update.\n"); | 446 | "Unable to allocate memory for VPD information update.\n"); |
499 | return -ENOMEM; | 447 | goto done; |
500 | } | 448 | } |
501 | ha->isp_ops->get_flash_version(vha, tmp_data); | 449 | ha->isp_ops->get_flash_version(vha, tmp_data); |
502 | vfree(tmp_data); | 450 | vfree(tmp_data); |
503 | 451 | done: | |
504 | return count; | 452 | return count; |
505 | } | 453 | } |
506 | 454 | ||
@@ -557,7 +505,8 @@ do_read: | |||
557 | "Unable to read SFP data (%x/%x/%x).\n", rval, | 505 | "Unable to read SFP data (%x/%x/%x).\n", rval, |
558 | addr, offset); | 506 | addr, offset); |
559 | 507 | ||
560 | return -EIO; | 508 | count = 0; |
509 | break; | ||
561 | } | 510 | } |
562 | memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE); | 511 | memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE); |
563 | buf += SFP_BLOCK_SIZE; | 512 | buf += SFP_BLOCK_SIZE; |
@@ -585,10 +534,9 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, | |||
585 | struct qla_hw_data *ha = vha->hw; | 534 | struct qla_hw_data *ha = vha->hw; |
586 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); | 535 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); |
587 | int type; | 536 | int type; |
588 | uint32_t idc_control; | ||
589 | 537 | ||
590 | if (off != 0) | 538 | if (off != 0) |
591 | return -EINVAL; | 539 | return 0; |
592 | 540 | ||
593 | type = simple_strtol(buf, NULL, 10); | 541 | type = simple_strtol(buf, NULL, 10); |
594 | switch (type) { | 542 | switch (type) { |
@@ -598,52 +546,32 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, | |||
598 | 546 | ||
599 | scsi_block_requests(vha->host); | 547 | scsi_block_requests(vha->host); |
600 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | 548 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
601 | if (IS_QLA82XX(ha)) { | ||
602 | ha->flags.isp82xx_no_md_cap = 1; | ||
603 | qla82xx_idc_lock(ha); | ||
604 | qla82xx_set_reset_owner(vha); | ||
605 | qla82xx_idc_unlock(ha); | ||
606 | } | ||
607 | qla2xxx_wake_dpc(vha); | 549 | qla2xxx_wake_dpc(vha); |
608 | qla2x00_wait_for_chip_reset(vha); | 550 | qla2x00_wait_for_chip_reset(vha); |
609 | scsi_unblock_requests(vha->host); | 551 | scsi_unblock_requests(vha->host); |
610 | break; | 552 | break; |
611 | case 0x2025d: | 553 | case 0x2025d: |
612 | if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) | 554 | if (!IS_QLA81XX(ha)) |
613 | return -EPERM; | 555 | break; |
614 | 556 | ||
615 | ql_log(ql_log_info, vha, 0x706f, | 557 | ql_log(ql_log_info, vha, 0x706f, |
616 | "Issuing MPI reset.\n"); | 558 | "Issuing MPI reset.\n"); |
617 | 559 | ||
618 | if (IS_QLA83XX(ha)) { | 560 | /* Make sure FC side is not in reset */ |
619 | uint32_t idc_control; | 561 | qla2x00_wait_for_hba_online(vha); |
620 | 562 | ||
621 | qla83xx_idc_lock(vha, 0); | 563 | /* Issue MPI reset */ |
622 | __qla83xx_get_idc_control(vha, &idc_control); | 564 | scsi_block_requests(vha->host); |
623 | idc_control |= QLA83XX_IDC_GRACEFUL_RESET; | 565 | if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS) |
624 | __qla83xx_set_idc_control(vha, idc_control); | 566 | ql_log(ql_log_warn, vha, 0x7070, |
625 | qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, | 567 | "MPI reset failed.\n"); |
626 | QLA8XXX_DEV_NEED_RESET); | 568 | scsi_unblock_requests(vha->host); |
627 | qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP); | 569 | break; |
628 | qla83xx_idc_unlock(vha, 0); | ||
629 | break; | ||
630 | } else { | ||
631 | /* Make sure FC side is not in reset */ | ||
632 | qla2x00_wait_for_hba_online(vha); | ||
633 | |||
634 | /* Issue MPI reset */ | ||
635 | scsi_block_requests(vha->host); | ||
636 | if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS) | ||
637 | ql_log(ql_log_warn, vha, 0x7070, | ||
638 | "MPI reset failed.\n"); | ||
639 | scsi_unblock_requests(vha->host); | ||
640 | break; | ||
641 | } | ||
642 | case 0x2025e: | 570 | case 0x2025e: |
643 | if (!IS_QLA82XX(ha) || vha != base_vha) { | 571 | if (!IS_QLA82XX(ha) || vha != base_vha) { |
644 | ql_log(ql_log_info, vha, 0x7071, | 572 | ql_log(ql_log_info, vha, 0x7071, |
645 | "FCoE ctx reset no supported.\n"); | 573 | "FCoE ctx reset no supported.\n"); |
646 | return -EPERM; | 574 | return count; |
647 | } | 575 | } |
648 | 576 | ||
649 | ql_log(ql_log_info, vha, 0x7072, | 577 | ql_log(ql_log_info, vha, 0x7072, |
@@ -652,29 +580,6 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, | |||
652 | qla2xxx_wake_dpc(vha); | 580 | qla2xxx_wake_dpc(vha); |
653 | qla2x00_wait_for_fcoe_ctx_reset(vha); | 581 | qla2x00_wait_for_fcoe_ctx_reset(vha); |
654 | break; | 582 | break; |
655 | case 0x2025f: | ||
656 | if (!IS_QLA8031(ha)) | ||
657 | return -EPERM; | ||
658 | ql_log(ql_log_info, vha, 0x70bc, | ||
659 | "Disabling Reset by IDC control\n"); | ||
660 | qla83xx_idc_lock(vha, 0); | ||
661 | __qla83xx_get_idc_control(vha, &idc_control); | ||
662 | idc_control |= QLA83XX_IDC_RESET_DISABLED; | ||
663 | __qla83xx_set_idc_control(vha, idc_control); | ||
664 | qla83xx_idc_unlock(vha, 0); | ||
665 | break; | ||
666 | case 0x20260: | ||
667 | if (!IS_QLA8031(ha)) | ||
668 | return -EPERM; | ||
669 | ql_log(ql_log_info, vha, 0x70bd, | ||
670 | "Enabling Reset by IDC control\n"); | ||
671 | qla83xx_idc_lock(vha, 0); | ||
672 | __qla83xx_get_idc_control(vha, &idc_control); | ||
673 | idc_control &= ~QLA83XX_IDC_RESET_DISABLED; | ||
674 | __qla83xx_set_idc_control(vha, idc_control); | ||
675 | qla83xx_idc_unlock(vha, 0); | ||
676 | break; | ||
677 | |||
678 | } | 583 | } |
679 | return count; | 584 | return count; |
680 | } | 585 | } |
@@ -689,6 +594,144 @@ static struct bin_attribute sysfs_reset_attr = { | |||
689 | }; | 594 | }; |
690 | 595 | ||
691 | static ssize_t | 596 | static ssize_t |
597 | qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj, | ||
598 | struct bin_attribute *bin_attr, | ||
599 | char *buf, loff_t off, size_t count) | ||
600 | { | ||
601 | struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, | ||
602 | struct device, kobj))); | ||
603 | struct qla_hw_data *ha = vha->hw; | ||
604 | uint16_t dev, adr, opt, len; | ||
605 | int rval; | ||
606 | |||
607 | ha->edc_data_len = 0; | ||
608 | |||
609 | if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8) | ||
610 | return 0; | ||
611 | |||
612 | if (!ha->edc_data) { | ||
613 | ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, | ||
614 | &ha->edc_data_dma); | ||
615 | if (!ha->edc_data) { | ||
616 | ql_log(ql_log_warn, vha, 0x7073, | ||
617 | "Unable to allocate memory for EDC write.\n"); | ||
618 | return 0; | ||
619 | } | ||
620 | } | ||
621 | |||
622 | dev = le16_to_cpup((void *)&buf[0]); | ||
623 | adr = le16_to_cpup((void *)&buf[2]); | ||
624 | opt = le16_to_cpup((void *)&buf[4]); | ||
625 | len = le16_to_cpup((void *)&buf[6]); | ||
626 | |||
627 | if (!(opt & BIT_0)) | ||
628 | if (len == 0 || len > DMA_POOL_SIZE || len > count - 8) | ||
629 | return -EINVAL; | ||
630 | |||
631 | memcpy(ha->edc_data, &buf[8], len); | ||
632 | |||
633 | rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data, | ||
634 | dev, adr, len, opt); | ||
635 | if (rval != QLA_SUCCESS) { | ||
636 | ql_log(ql_log_warn, vha, 0x7074, | ||
637 | "Unable to write EDC (%x) %02x:%04x:%02x:%02x\n", | ||
638 | rval, dev, adr, opt, len, buf[8]); | ||
639 | return 0; | ||
640 | } | ||
641 | |||
642 | return count; | ||
643 | } | ||
644 | |||
645 | static struct bin_attribute sysfs_edc_attr = { | ||
646 | .attr = { | ||
647 | .name = "edc", | ||
648 | .mode = S_IWUSR, | ||
649 | }, | ||
650 | .size = 0, | ||
651 | .write = qla2x00_sysfs_write_edc, | ||
652 | }; | ||
653 | |||
654 | static ssize_t | ||
655 | qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj, | ||
656 | struct bin_attribute *bin_attr, | ||
657 | char *buf, loff_t off, size_t count) | ||
658 | { | ||
659 | struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, | ||
660 | struct device, kobj))); | ||
661 | struct qla_hw_data *ha = vha->hw; | ||
662 | uint16_t dev, adr, opt, len; | ||
663 | int rval; | ||
664 | |||
665 | ha->edc_data_len = 0; | ||
666 | |||
667 | if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8) | ||
668 | return 0; | ||
669 | |||
670 | if (!ha->edc_data) { | ||
671 | ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, | ||
672 | &ha->edc_data_dma); | ||
673 | if (!ha->edc_data) { | ||
674 | ql_log(ql_log_warn, vha, 0x708c, | ||
675 | "Unable to allocate memory for EDC status.\n"); | ||
676 | return 0; | ||
677 | } | ||
678 | } | ||
679 | |||
680 | dev = le16_to_cpup((void *)&buf[0]); | ||
681 | adr = le16_to_cpup((void *)&buf[2]); | ||
682 | opt = le16_to_cpup((void *)&buf[4]); | ||
683 | len = le16_to_cpup((void *)&buf[6]); | ||
684 | |||
685 | if (!(opt & BIT_0)) | ||
686 | if (len == 0 || len > DMA_POOL_SIZE) | ||
687 | return -EINVAL; | ||
688 | |||
689 | memset(ha->edc_data, 0, len); | ||
690 | rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data, | ||
691 | dev, adr, len, opt); | ||
692 | if (rval != QLA_SUCCESS) { | ||
693 | ql_log(ql_log_info, vha, 0x7075, | ||
694 | "Unable to write EDC status (%x) %02x:%04x:%02x.\n", | ||
695 | rval, dev, adr, opt, len); | ||
696 | return 0; | ||
697 | } | ||
698 | |||
699 | ha->edc_data_len = len; | ||
700 | |||
701 | return count; | ||
702 | } | ||
703 | |||
704 | static ssize_t | ||
705 | qla2x00_sysfs_read_edc_status(struct file *filp, struct kobject *kobj, | ||
706 | struct bin_attribute *bin_attr, | ||
707 | char *buf, loff_t off, size_t count) | ||
708 | { | ||
709 | struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, | ||
710 | struct device, kobj))); | ||
711 | struct qla_hw_data *ha = vha->hw; | ||
712 | |||
713 | if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0) | ||
714 | return 0; | ||
715 | |||
716 | if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count) | ||
717 | return -EINVAL; | ||
718 | |||
719 | memcpy(buf, ha->edc_data, ha->edc_data_len); | ||
720 | |||
721 | return ha->edc_data_len; | ||
722 | } | ||
723 | |||
724 | static struct bin_attribute sysfs_edc_status_attr = { | ||
725 | .attr = { | ||
726 | .name = "edc_status", | ||
727 | .mode = S_IRUSR | S_IWUSR, | ||
728 | }, | ||
729 | .size = 0, | ||
730 | .write = qla2x00_sysfs_write_edc_status, | ||
731 | .read = qla2x00_sysfs_read_edc_status, | ||
732 | }; | ||
733 | |||
734 | static ssize_t | ||
692 | qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj, | 735 | qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj, |
693 | struct bin_attribute *bin_attr, | 736 | struct bin_attribute *bin_attr, |
694 | char *buf, loff_t off, size_t count) | 737 | char *buf, loff_t off, size_t count) |
@@ -762,7 +805,7 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj, | |||
762 | if (!ha->dcbx_tlv) { | 805 | if (!ha->dcbx_tlv) { |
763 | ql_log(ql_log_warn, vha, 0x7078, | 806 | ql_log(ql_log_warn, vha, 0x7078, |
764 | "Unable to allocate memory for DCBX TLV read-data.\n"); | 807 | "Unable to allocate memory for DCBX TLV read-data.\n"); |
765 | return -ENOMEM; | 808 | return 0; |
766 | } | 809 | } |
767 | 810 | ||
768 | do_read: | 811 | do_read: |
@@ -774,7 +817,7 @@ do_read: | |||
774 | if (rval != QLA_SUCCESS) { | 817 | if (rval != QLA_SUCCESS) { |
775 | ql_log(ql_log_warn, vha, 0x7079, | 818 | ql_log(ql_log_warn, vha, 0x7079, |
776 | "Unable to read DCBX TLV (%x).\n", rval); | 819 | "Unable to read DCBX TLV (%x).\n", rval); |
777 | return -EIO; | 820 | count = 0; |
778 | } | 821 | } |
779 | 822 | ||
780 | memcpy(buf, ha->dcbx_tlv, count); | 823 | memcpy(buf, ha->dcbx_tlv, count); |
@@ -803,6 +846,8 @@ static struct sysfs_entry { | |||
803 | { "vpd", &sysfs_vpd_attr, 1 }, | 846 | { "vpd", &sysfs_vpd_attr, 1 }, |
804 | { "sfp", &sysfs_sfp_attr, 1 }, | 847 | { "sfp", &sysfs_sfp_attr, 1 }, |
805 | { "reset", &sysfs_reset_attr, }, | 848 | { "reset", &sysfs_reset_attr, }, |
849 | { "edc", &sysfs_edc_attr, 2 }, | ||
850 | { "edc_status", &sysfs_edc_status_attr, 2 }, | ||
806 | { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 }, | 851 | { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 }, |
807 | { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 }, | 852 | { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 }, |
808 | { NULL }, | 853 | { NULL }, |
@@ -820,7 +865,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha) | |||
820 | continue; | 865 | continue; |
821 | if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw)) | 866 | if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw)) |
822 | continue; | 867 | continue; |
823 | if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw))) | 868 | if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw))) |
824 | continue; | 869 | continue; |
825 | 870 | ||
826 | ret = sysfs_create_bin_file(&host->shost_gendev.kobj, | 871 | ret = sysfs_create_bin_file(&host->shost_gendev.kobj, |
@@ -848,7 +893,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha) | |||
848 | continue; | 893 | continue; |
849 | if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha)) | 894 | if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha)) |
850 | continue; | 895 | continue; |
851 | if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw))) | 896 | if (iter->is4GBp_only == 3 && !!(IS_QLA8XXX_TYPE(vha->hw))) |
852 | continue; | 897 | continue; |
853 | 898 | ||
854 | sysfs_remove_bin_file(&host->shost_gendev.kobj, | 899 | sysfs_remove_bin_file(&host->shost_gendev.kobj, |
@@ -958,7 +1003,8 @@ qla2x00_link_state_show(struct device *dev, struct device_attribute *attr, | |||
958 | vha->device_flags & DFLG_NO_CABLE) | 1003 | vha->device_flags & DFLG_NO_CABLE) |
959 | len = snprintf(buf, PAGE_SIZE, "Link Down\n"); | 1004 | len = snprintf(buf, PAGE_SIZE, "Link Down\n"); |
960 | else if (atomic_read(&vha->loop_state) != LOOP_READY || | 1005 | else if (atomic_read(&vha->loop_state) != LOOP_READY || |
961 | qla2x00_reset_active(vha)) | 1006 | test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || |
1007 | test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) | ||
962 | len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n"); | 1008 | len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n"); |
963 | else { | 1009 | else { |
964 | len = snprintf(buf, PAGE_SIZE, "Link Up - "); | 1010 | len = snprintf(buf, PAGE_SIZE, "Link Up - "); |
@@ -1153,7 +1199,7 @@ qla2x00_optrom_gold_fw_version_show(struct device *dev, | |||
1153 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); | 1199 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); |
1154 | struct qla_hw_data *ha = vha->hw; | 1200 | struct qla_hw_data *ha = vha->hw; |
1155 | 1201 | ||
1156 | if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) | 1202 | if (!IS_QLA81XX(ha)) |
1157 | return snprintf(buf, PAGE_SIZE, "\n"); | 1203 | return snprintf(buf, PAGE_SIZE, "\n"); |
1158 | 1204 | ||
1159 | return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n", | 1205 | return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n", |
@@ -1166,8 +1212,9 @@ qla2x00_total_isp_aborts_show(struct device *dev, | |||
1166 | struct device_attribute *attr, char *buf) | 1212 | struct device_attribute *attr, char *buf) |
1167 | { | 1213 | { |
1168 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); | 1214 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); |
1215 | struct qla_hw_data *ha = vha->hw; | ||
1169 | return snprintf(buf, PAGE_SIZE, "%d\n", | 1216 | return snprintf(buf, PAGE_SIZE, "%d\n", |
1170 | vha->qla_stats.total_isp_aborts); | 1217 | ha->qla_stats.total_isp_aborts); |
1171 | } | 1218 | } |
1172 | 1219 | ||
1173 | static ssize_t | 1220 | static ssize_t |
@@ -1199,7 +1246,7 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr, | |||
1199 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); | 1246 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); |
1200 | struct qla_hw_data *ha = vha->hw; | 1247 | struct qla_hw_data *ha = vha->hw; |
1201 | 1248 | ||
1202 | if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) | 1249 | if (!IS_QLA81XX(ha)) |
1203 | return snprintf(buf, PAGE_SIZE, "\n"); | 1250 | return snprintf(buf, PAGE_SIZE, "\n"); |
1204 | 1251 | ||
1205 | return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", | 1252 | return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", |
@@ -1214,7 +1261,7 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr, | |||
1214 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); | 1261 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); |
1215 | struct qla_hw_data *ha = vha->hw; | 1262 | struct qla_hw_data *ha = vha->hw; |
1216 | 1263 | ||
1217 | if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) | 1264 | if (!IS_QLA81XX(ha)) |
1218 | return snprintf(buf, PAGE_SIZE, "\n"); | 1265 | return snprintf(buf, PAGE_SIZE, "\n"); |
1219 | 1266 | ||
1220 | return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", | 1267 | return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", |
@@ -1237,7 +1284,7 @@ qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr, | |||
1237 | { | 1284 | { |
1238 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); | 1285 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); |
1239 | 1286 | ||
1240 | if (!IS_CNA_CAPABLE(vha->hw)) | 1287 | if (!IS_QLA8XXX_TYPE(vha->hw)) |
1241 | return snprintf(buf, PAGE_SIZE, "\n"); | 1288 | return snprintf(buf, PAGE_SIZE, "\n"); |
1242 | 1289 | ||
1243 | return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id); | 1290 | return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id); |
@@ -1249,7 +1296,7 @@ qla2x00_vn_port_mac_address_show(struct device *dev, | |||
1249 | { | 1296 | { |
1250 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); | 1297 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); |
1251 | 1298 | ||
1252 | if (!IS_CNA_CAPABLE(vha->hw)) | 1299 | if (!IS_QLA8XXX_TYPE(vha->hw)) |
1253 | return snprintf(buf, PAGE_SIZE, "\n"); | 1300 | return snprintf(buf, PAGE_SIZE, "\n"); |
1254 | 1301 | ||
1255 | return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n", | 1302 | return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n", |
@@ -1279,13 +1326,14 @@ qla2x00_thermal_temp_show(struct device *dev, | |||
1279 | return snprintf(buf, PAGE_SIZE, "\n"); | 1326 | return snprintf(buf, PAGE_SIZE, "\n"); |
1280 | 1327 | ||
1281 | temp = frac = 0; | 1328 | temp = frac = 0; |
1282 | if (qla2x00_reset_active(vha)) | 1329 | if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || |
1330 | test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) | ||
1283 | ql_log(ql_log_warn, vha, 0x707b, | 1331 | ql_log(ql_log_warn, vha, 0x707b, |
1284 | "ISP reset active.\n"); | 1332 | "ISP reset active.\n"); |
1285 | else if (!vha->hw->flags.eeh_busy) | 1333 | else if (!vha->hw->flags.eeh_busy) |
1286 | rval = qla2x00_get_thermal_temp(vha, &temp, &frac); | 1334 | rval = qla2x00_get_thermal_temp(vha, &temp, &frac); |
1287 | if (rval != QLA_SUCCESS) | 1335 | if (rval != QLA_SUCCESS) |
1288 | return snprintf(buf, PAGE_SIZE, "\n"); | 1336 | temp = frac = 0; |
1289 | 1337 | ||
1290 | return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac); | 1338 | return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac); |
1291 | } | 1339 | } |
@@ -1298,7 +1346,8 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, | |||
1298 | int rval = QLA_FUNCTION_FAILED; | 1346 | int rval = QLA_FUNCTION_FAILED; |
1299 | uint16_t state[5]; | 1347 | uint16_t state[5]; |
1300 | 1348 | ||
1301 | if (qla2x00_reset_active(vha)) | 1349 | if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || |
1350 | test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) | ||
1302 | ql_log(ql_log_warn, vha, 0x707c, | 1351 | ql_log(ql_log_warn, vha, 0x707c, |
1303 | "ISP reset active.\n"); | 1352 | "ISP reset active.\n"); |
1304 | else if (!vha->hw->flags.eeh_busy) | 1353 | else if (!vha->hw->flags.eeh_busy) |
@@ -1310,49 +1359,6 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, | |||
1310 | state[1], state[2], state[3], state[4]); | 1359 | state[1], state[2], state[3], state[4]); |
1311 | } | 1360 | } |
1312 | 1361 | ||
1313 | static ssize_t | ||
1314 | qla2x00_diag_requests_show(struct device *dev, | ||
1315 | struct device_attribute *attr, char *buf) | ||
1316 | { | ||
1317 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); | ||
1318 | |||
1319 | if (!IS_BIDI_CAPABLE(vha->hw)) | ||
1320 | return snprintf(buf, PAGE_SIZE, "\n"); | ||
1321 | |||
1322 | return snprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count); | ||
1323 | } | ||
1324 | |||
1325 | static ssize_t | ||
1326 | qla2x00_diag_megabytes_show(struct device *dev, | ||
1327 | struct device_attribute *attr, char *buf) | ||
1328 | { | ||
1329 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); | ||
1330 | |||
1331 | if (!IS_BIDI_CAPABLE(vha->hw)) | ||
1332 | return snprintf(buf, PAGE_SIZE, "\n"); | ||
1333 | |||
1334 | return snprintf(buf, PAGE_SIZE, "%llu\n", | ||
1335 | vha->bidi_stats.transfer_bytes >> 20); | ||
1336 | } | ||
1337 | |||
1338 | static ssize_t | ||
1339 | qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr, | ||
1340 | char *buf) | ||
1341 | { | ||
1342 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); | ||
1343 | struct qla_hw_data *ha = vha->hw; | ||
1344 | uint32_t size; | ||
1345 | |||
1346 | if (!ha->fw_dumped) | ||
1347 | size = 0; | ||
1348 | else if (IS_QLA82XX(ha)) | ||
1349 | size = ha->md_template_size + ha->md_dump_size; | ||
1350 | else | ||
1351 | size = ha->fw_dump_len; | ||
1352 | |||
1353 | return snprintf(buf, PAGE_SIZE, "%d\n", size); | ||
1354 | } | ||
1355 | |||
1356 | static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); | 1362 | static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); |
1357 | static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); | 1363 | static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); |
1358 | static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); | 1364 | static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); |
@@ -1391,9 +1397,6 @@ static DEVICE_ATTR(vn_port_mac_address, S_IRUGO, | |||
1391 | static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL); | 1397 | static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL); |
1392 | static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL); | 1398 | static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL); |
1393 | static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL); | 1399 | static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL); |
1394 | static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL); | ||
1395 | static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL); | ||
1396 | static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL); | ||
1397 | 1400 | ||
1398 | struct device_attribute *qla2x00_host_attrs[] = { | 1401 | struct device_attribute *qla2x00_host_attrs[] = { |
1399 | &dev_attr_driver_version, | 1402 | &dev_attr_driver_version, |
@@ -1423,9 +1426,6 @@ struct device_attribute *qla2x00_host_attrs[] = { | |||
1423 | &dev_attr_fw_state, | 1426 | &dev_attr_fw_state, |
1424 | &dev_attr_optrom_gold_fw_version, | 1427 | &dev_attr_optrom_gold_fw_version, |
1425 | &dev_attr_thermal_temp, | 1428 | &dev_attr_thermal_temp, |
1426 | &dev_attr_diag_requests, | ||
1427 | &dev_attr_diag_megabytes, | ||
1428 | &dev_attr_fw_dump_size, | ||
1429 | NULL, | 1429 | NULL, |
1430 | }; | 1430 | }; |
1431 | 1431 | ||
@@ -1463,9 +1463,6 @@ qla2x00_get_host_speed(struct Scsi_Host *shost) | |||
1463 | case PORT_SPEED_10GB: | 1463 | case PORT_SPEED_10GB: |
1464 | speed = FC_PORTSPEED_10GBIT; | 1464 | speed = FC_PORTSPEED_10GBIT; |
1465 | break; | 1465 | break; |
1466 | case PORT_SPEED_16GB: | ||
1467 | speed = FC_PORTSPEED_16GBIT; | ||
1468 | break; | ||
1469 | } | 1466 | } |
1470 | fc_host_speed(shost) = speed; | 1467 | fc_host_speed(shost) = speed; |
1471 | } | 1468 | } |
@@ -1615,14 +1612,11 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) | |||
1615 | * At this point all fcport's software-states are cleared. Perform any | 1612 | * At this point all fcport's software-states are cleared. Perform any |
1616 | * final cleanup of firmware resources (PCBs and XCBs). | 1613 | * final cleanup of firmware resources (PCBs and XCBs). |
1617 | */ | 1614 | */ |
1618 | if (fcport->loop_id != FC_NO_LOOP_ID) { | 1615 | if (fcport->loop_id != FC_NO_LOOP_ID && |
1619 | if (IS_FWI2_CAPABLE(fcport->vha->hw)) | 1616 | !test_bit(UNLOADING, &fcport->vha->dpc_flags)) |
1620 | fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, | 1617 | fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, |
1621 | fcport->loop_id, fcport->d_id.b.domain, | 1618 | fcport->loop_id, fcport->d_id.b.domain, |
1622 | fcport->d_id.b.area, fcport->d_id.b.al_pa); | 1619 | fcport->d_id.b.area, fcport->d_id.b.al_pa); |
1623 | else | ||
1624 | qla2x00_port_logout(fcport->vha, fcport); | ||
1625 | } | ||
1626 | } | 1620 | } |
1627 | 1621 | ||
1628 | static int | 1622 | static int |
@@ -1645,7 +1639,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) | |||
1645 | dma_addr_t stats_dma; | 1639 | dma_addr_t stats_dma; |
1646 | struct fc_host_statistics *pfc_host_stat; | 1640 | struct fc_host_statistics *pfc_host_stat; |
1647 | 1641 | ||
1648 | pfc_host_stat = &vha->fc_host_stat; | 1642 | pfc_host_stat = &ha->fc_host_stat; |
1649 | memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); | 1643 | memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); |
1650 | 1644 | ||
1651 | if (test_bit(UNLOADING, &vha->dpc_flags)) | 1645 | if (test_bit(UNLOADING, &vha->dpc_flags)) |
@@ -1666,7 +1660,9 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) | |||
1666 | if (IS_FWI2_CAPABLE(ha)) { | 1660 | if (IS_FWI2_CAPABLE(ha)) { |
1667 | rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma); | 1661 | rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma); |
1668 | } else if (atomic_read(&base_vha->loop_state) == LOOP_READY && | 1662 | } else if (atomic_read(&base_vha->loop_state) == LOOP_READY && |
1669 | !qla2x00_reset_active(vha) && !ha->dpc_active) { | 1663 | !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) && |
1664 | !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) && | ||
1665 | !ha->dpc_active) { | ||
1670 | /* Must be in a 'READY' state for statistics retrieval. */ | 1666 | /* Must be in a 'READY' state for statistics retrieval. */ |
1671 | rval = qla2x00_get_link_status(base_vha, base_vha->loop_id, | 1667 | rval = qla2x00_get_link_status(base_vha, base_vha->loop_id, |
1672 | stats, stats_dma); | 1668 | stats, stats_dma); |
@@ -1688,8 +1684,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) | |||
1688 | pfc_host_stat->dumped_frames = stats->dumped_frames; | 1684 | pfc_host_stat->dumped_frames = stats->dumped_frames; |
1689 | pfc_host_stat->nos_count = stats->nos_rcvd; | 1685 | pfc_host_stat->nos_count = stats->nos_rcvd; |
1690 | } | 1686 | } |
1691 | pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20; | 1687 | pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20; |
1692 | pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20; | 1688 | pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20; |
1693 | 1689 | ||
1694 | done_free: | 1690 | done_free: |
1695 | dma_pool_free(ha->s_dma_pool, stats, stats_dma); | 1691 | dma_pool_free(ha->s_dma_pool, stats, stats_dma); |
@@ -1733,31 +1729,12 @@ qla2x00_get_host_port_state(struct Scsi_Host *shost) | |||
1733 | scsi_qla_host_t *vha = shost_priv(shost); | 1729 | scsi_qla_host_t *vha = shost_priv(shost); |
1734 | struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev); | 1730 | struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev); |
1735 | 1731 | ||
1736 | if (!base_vha->flags.online) { | 1732 | if (!base_vha->flags.online) |
1737 | fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; | 1733 | fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; |
1738 | return; | 1734 | else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT) |
1739 | } | ||
1740 | |||
1741 | switch (atomic_read(&base_vha->loop_state)) { | ||
1742 | case LOOP_UPDATE: | ||
1743 | fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS; | ||
1744 | break; | ||
1745 | case LOOP_DOWN: | ||
1746 | if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags)) | ||
1747 | fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS; | ||
1748 | else | ||
1749 | fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; | ||
1750 | break; | ||
1751 | case LOOP_DEAD: | ||
1752 | fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; | ||
1753 | break; | ||
1754 | case LOOP_READY: | ||
1755 | fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; | ||
1756 | break; | ||
1757 | default: | ||
1758 | fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; | 1735 | fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; |
1759 | break; | 1736 | else |
1760 | } | 1737 | fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; |
1761 | } | 1738 | } |
1762 | 1739 | ||
1763 | static int | 1740 | static int |
@@ -1811,7 +1788,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) | |||
1811 | 1788 | ||
1812 | if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { | 1789 | if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { |
1813 | if (ha->fw_attributes & BIT_4) { | 1790 | if (ha->fw_attributes & BIT_4) { |
1814 | int prot = 0, guard; | 1791 | int prot = 0; |
1815 | vha->flags.difdix_supported = 1; | 1792 | vha->flags.difdix_supported = 1; |
1816 | ql_dbg(ql_dbg_user, vha, 0x7082, | 1793 | ql_dbg(ql_dbg_user, vha, 0x7082, |
1817 | "Registered for DIF/DIX type 1 and 3 protection.\n"); | 1794 | "Registered for DIF/DIX type 1 and 3 protection.\n"); |
@@ -1824,14 +1801,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) | |||
1824 | | SHOST_DIX_TYPE1_PROTECTION | 1801 | | SHOST_DIX_TYPE1_PROTECTION |
1825 | | SHOST_DIX_TYPE2_PROTECTION | 1802 | | SHOST_DIX_TYPE2_PROTECTION |
1826 | | SHOST_DIX_TYPE3_PROTECTION); | 1803 | | SHOST_DIX_TYPE3_PROTECTION); |
1827 | 1804 | scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC); | |
1828 | guard = SHOST_DIX_GUARD_CRC; | ||
1829 | |||
1830 | if (IS_PI_IPGUARD_CAPABLE(ha) && | ||
1831 | (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha))) | ||
1832 | guard |= SHOST_DIX_GUARD_IP; | ||
1833 | |||
1834 | scsi_host_set_guard(vha->host, guard); | ||
1835 | } else | 1805 | } else |
1836 | vha->flags.difdix_supported = 0; | 1806 | vha->flags.difdix_supported = 0; |
1837 | } | 1807 | } |
@@ -1852,7 +1822,6 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) | |||
1852 | fc_host_supported_speeds(vha->host) = | 1822 | fc_host_supported_speeds(vha->host) = |
1853 | fc_host_supported_speeds(base_vha->host); | 1823 | fc_host_supported_speeds(base_vha->host); |
1854 | 1824 | ||
1855 | qlt_vport_create(vha, ha); | ||
1856 | qla24xx_vport_disable(fc_vport, disable); | 1825 | qla24xx_vport_disable(fc_vport, disable); |
1857 | 1826 | ||
1858 | if (ha->flags.cpu_affinity_enabled) { | 1827 | if (ha->flags.cpu_affinity_enabled) { |
@@ -1873,7 +1842,6 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) | |||
1873 | break; | 1842 | break; |
1874 | } | 1843 | } |
1875 | } | 1844 | } |
1876 | |||
1877 | if (qos) { | 1845 | if (qos) { |
1878 | ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0, | 1846 | ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0, |
1879 | qos); | 1847 | qos); |
@@ -1951,8 +1919,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) | |||
1951 | "Queue delete failed.\n"); | 1919 | "Queue delete failed.\n"); |
1952 | } | 1920 | } |
1953 | 1921 | ||
1954 | ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id); | ||
1955 | scsi_host_put(vha->host); | 1922 | scsi_host_put(vha->host); |
1923 | ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id); | ||
1956 | return 0; | 1924 | return 0; |
1957 | } | 1925 | } |
1958 | 1926 | ||
@@ -2067,16 +2035,12 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha) | |||
2067 | fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; | 2035 | fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; |
2068 | fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); | 2036 | fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); |
2069 | fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); | 2037 | fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); |
2070 | fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ? | 2038 | fc_host_supported_classes(vha->host) = FC_COS_CLASS3; |
2071 | (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3; | ||
2072 | fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports; | 2039 | fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports; |
2073 | fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count; | 2040 | fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count; |
2074 | 2041 | ||
2075 | if (IS_CNA_CAPABLE(ha)) | 2042 | if (IS_QLA8XXX_TYPE(ha)) |
2076 | speed = FC_PORTSPEED_10GBIT; | 2043 | speed = FC_PORTSPEED_10GBIT; |
2077 | else if (IS_QLA2031(ha)) | ||
2078 | speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT | | ||
2079 | FC_PORTSPEED_4GBIT; | ||
2080 | else if (IS_QLA25XX(ha)) | 2044 | else if (IS_QLA25XX(ha)) |
2081 | speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | | 2045 | speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | |
2082 | FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; | 2046 | FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; |
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 9f34dedcdad..07d1767cd26 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -11,36 +11,28 @@ | |||
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
12 | 12 | ||
13 | /* BSG support for ELS/CT pass through */ | 13 | /* BSG support for ELS/CT pass through */ |
14 | void | 14 | inline srb_t * |
15 | qla2x00_bsg_job_done(void *data, void *ptr, int res) | 15 | qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size) |
16 | { | 16 | { |
17 | srb_t *sp = (srb_t *)ptr; | 17 | srb_t *sp; |
18 | struct scsi_qla_host *vha = (scsi_qla_host_t *)data; | ||
19 | struct fc_bsg_job *bsg_job = sp->u.bsg_job; | ||
20 | |||
21 | bsg_job->reply->result = res; | ||
22 | bsg_job->job_done(bsg_job); | ||
23 | sp->free(vha, sp); | ||
24 | } | ||
25 | |||
26 | void | ||
27 | qla2x00_bsg_sp_free(void *data, void *ptr) | ||
28 | { | ||
29 | srb_t *sp = (srb_t *)ptr; | ||
30 | struct scsi_qla_host *vha = (scsi_qla_host_t *)data; | ||
31 | struct fc_bsg_job *bsg_job = sp->u.bsg_job; | ||
32 | struct qla_hw_data *ha = vha->hw; | 18 | struct qla_hw_data *ha = vha->hw; |
19 | struct srb_ctx *ctx; | ||
33 | 20 | ||
34 | dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, | 21 | sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL); |
35 | bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); | 22 | if (!sp) |
36 | 23 | goto done; | |
37 | dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, | 24 | ctx = kzalloc(size, GFP_KERNEL); |
38 | bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | 25 | if (!ctx) { |
26 | mempool_free(sp, ha->srb_mempool); | ||
27 | sp = NULL; | ||
28 | goto done; | ||
29 | } | ||
39 | 30 | ||
40 | if (sp->type == SRB_CT_CMD || | 31 | memset(sp, 0, sizeof(*sp)); |
41 | sp->type == SRB_ELS_CMD_HST) | 32 | sp->fcport = fcport; |
42 | kfree(sp->fcport); | 33 | sp->ctx = ctx; |
43 | mempool_free(sp, vha->hw->srb_mempool); | 34 | done: |
35 | return sp; | ||
44 | } | 36 | } |
45 | 37 | ||
46 | int | 38 | int |
@@ -108,11 +100,20 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) | |||
108 | uint32_t len; | 100 | uint32_t len; |
109 | uint32_t oper; | 101 | uint32_t oper; |
110 | 102 | ||
111 | if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) { | 103 | bsg_job->reply->reply_payload_rcv_len = 0; |
104 | |||
105 | if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))) { | ||
112 | ret = -EINVAL; | 106 | ret = -EINVAL; |
113 | goto exit_fcp_prio_cfg; | 107 | goto exit_fcp_prio_cfg; |
114 | } | 108 | } |
115 | 109 | ||
110 | if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || | ||
111 | test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || | ||
112 | test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { | ||
113 | ret = -EBUSY; | ||
114 | goto exit_fcp_prio_cfg; | ||
115 | } | ||
116 | |||
116 | /* Get the sub command */ | 117 | /* Get the sub command */ |
117 | oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; | 118 | oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; |
118 | 119 | ||
@@ -219,11 +220,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) | |||
219 | break; | 220 | break; |
220 | } | 221 | } |
221 | exit_fcp_prio_cfg: | 222 | exit_fcp_prio_cfg: |
222 | if (!ret) | 223 | bsg_job->job_done(bsg_job); |
223 | bsg_job->job_done(bsg_job); | ||
224 | return ret; | 224 | return ret; |
225 | } | 225 | } |
226 | |||
227 | static int | 226 | static int |
228 | qla2x00_process_els(struct fc_bsg_job *bsg_job) | 227 | qla2x00_process_els(struct fc_bsg_job *bsg_job) |
229 | { | 228 | { |
@@ -237,6 +236,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) | |||
237 | int req_sg_cnt, rsp_sg_cnt; | 236 | int req_sg_cnt, rsp_sg_cnt; |
238 | int rval = (DRIVER_ERROR << 16); | 237 | int rval = (DRIVER_ERROR << 16); |
239 | uint16_t nextlid = 0; | 238 | uint16_t nextlid = 0; |
239 | struct srb_ctx *els; | ||
240 | 240 | ||
241 | if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { | 241 | if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { |
242 | rport = bsg_job->rport; | 242 | rport = bsg_job->rport; |
@@ -298,6 +298,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) | |||
298 | 298 | ||
299 | /* Initialize all required fields of fcport */ | 299 | /* Initialize all required fields of fcport */ |
300 | fcport->vha = vha; | 300 | fcport->vha = vha; |
301 | fcport->vp_idx = vha->vp_idx; | ||
301 | fcport->d_id.b.al_pa = | 302 | fcport->d_id.b.al_pa = |
302 | bsg_job->request->rqst_data.h_els.port_id[0]; | 303 | bsg_job->request->rqst_data.h_els.port_id[0]; |
303 | fcport->d_id.b.area = | 304 | fcport->d_id.b.area = |
@@ -342,21 +343,20 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) | |||
342 | } | 343 | } |
343 | 344 | ||
344 | /* Alloc SRB structure */ | 345 | /* Alloc SRB structure */ |
345 | sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); | 346 | sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx)); |
346 | if (!sp) { | 347 | if (!sp) { |
347 | rval = -ENOMEM; | 348 | rval = -ENOMEM; |
348 | goto done_unmap_sg; | 349 | goto done_unmap_sg; |
349 | } | 350 | } |
350 | 351 | ||
351 | sp->type = | 352 | els = sp->ctx; |
353 | els->type = | ||
352 | (bsg_job->request->msgcode == FC_BSG_RPT_ELS ? | 354 | (bsg_job->request->msgcode == FC_BSG_RPT_ELS ? |
353 | SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST); | 355 | SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST); |
354 | sp->name = | 356 | els->name = |
355 | (bsg_job->request->msgcode == FC_BSG_RPT_ELS ? | 357 | (bsg_job->request->msgcode == FC_BSG_RPT_ELS ? |
356 | "bsg_els_rpt" : "bsg_els_hst"); | 358 | "bsg_els_rpt" : "bsg_els_hst"); |
357 | sp->u.bsg_job = bsg_job; | 359 | els->u.bsg_job = bsg_job; |
358 | sp->free = qla2x00_bsg_sp_free; | ||
359 | sp->done = qla2x00_bsg_job_done; | ||
360 | 360 | ||
361 | ql_dbg(ql_dbg_user, vha, 0x700a, | 361 | ql_dbg(ql_dbg_user, vha, 0x700a, |
362 | "bsg rqst type: %s els type: %x - loop-id=%x " | 362 | "bsg rqst type: %s els type: %x - loop-id=%x " |
@@ -368,6 +368,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) | |||
368 | if (rval != QLA_SUCCESS) { | 368 | if (rval != QLA_SUCCESS) { |
369 | ql_log(ql_log_warn, vha, 0x700e, | 369 | ql_log(ql_log_warn, vha, 0x700e, |
370 | "qla2x00_start_sp failed = %d\n", rval); | 370 | "qla2x00_start_sp failed = %d\n", rval); |
371 | kfree(sp->ctx); | ||
371 | mempool_free(sp, ha->srb_mempool); | 372 | mempool_free(sp, ha->srb_mempool); |
372 | rval = -EIO; | 373 | rval = -EIO; |
373 | goto done_unmap_sg; | 374 | goto done_unmap_sg; |
@@ -388,20 +389,6 @@ done: | |||
388 | return rval; | 389 | return rval; |
389 | } | 390 | } |
390 | 391 | ||
391 | inline uint16_t | ||
392 | qla24xx_calc_ct_iocbs(uint16_t dsds) | ||
393 | { | ||
394 | uint16_t iocbs; | ||
395 | |||
396 | iocbs = 1; | ||
397 | if (dsds > 2) { | ||
398 | iocbs += (dsds - 2) / 5; | ||
399 | if ((dsds - 2) % 5) | ||
400 | iocbs++; | ||
401 | } | ||
402 | return iocbs; | ||
403 | } | ||
404 | |||
405 | static int | 392 | static int |
406 | qla2x00_process_ct(struct fc_bsg_job *bsg_job) | 393 | qla2x00_process_ct(struct fc_bsg_job *bsg_job) |
407 | { | 394 | { |
@@ -414,6 +401,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job) | |||
414 | uint16_t loop_id; | 401 | uint16_t loop_id; |
415 | struct fc_port *fcport; | 402 | struct fc_port *fcport; |
416 | char *type = "FC_BSG_HST_CT"; | 403 | char *type = "FC_BSG_HST_CT"; |
404 | struct srb_ctx *ct; | ||
417 | 405 | ||
418 | req_sg_cnt = | 406 | req_sg_cnt = |
419 | dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, | 407 | dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, |
@@ -483,26 +471,25 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job) | |||
483 | 471 | ||
484 | /* Initialize all required fields of fcport */ | 472 | /* Initialize all required fields of fcport */ |
485 | fcport->vha = vha; | 473 | fcport->vha = vha; |
474 | fcport->vp_idx = vha->vp_idx; | ||
486 | fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0]; | 475 | fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0]; |
487 | fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1]; | 476 | fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1]; |
488 | fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2]; | 477 | fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2]; |
489 | fcport->loop_id = loop_id; | 478 | fcport->loop_id = loop_id; |
490 | 479 | ||
491 | /* Alloc SRB structure */ | 480 | /* Alloc SRB structure */ |
492 | sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); | 481 | sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx)); |
493 | if (!sp) { | 482 | if (!sp) { |
494 | ql_log(ql_log_warn, vha, 0x7015, | 483 | ql_log(ql_log_warn, vha, 0x7015, |
495 | "qla2x00_get_sp failed.\n"); | 484 | "qla2x00_get_ctx_bsg_sp failed.\n"); |
496 | rval = -ENOMEM; | 485 | rval = -ENOMEM; |
497 | goto done_free_fcport; | 486 | goto done_free_fcport; |
498 | } | 487 | } |
499 | 488 | ||
500 | sp->type = SRB_CT_CMD; | 489 | ct = sp->ctx; |
501 | sp->name = "bsg_ct"; | 490 | ct->type = SRB_CT_CMD; |
502 | sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt); | 491 | ct->name = "bsg_ct"; |
503 | sp->u.bsg_job = bsg_job; | 492 | ct->u.bsg_job = bsg_job; |
504 | sp->free = qla2x00_bsg_sp_free; | ||
505 | sp->done = qla2x00_bsg_job_done; | ||
506 | 493 | ||
507 | ql_dbg(ql_dbg_user, vha, 0x7016, | 494 | ql_dbg(ql_dbg_user, vha, 0x7016, |
508 | "bsg rqst type: %s else type: %x - " | 495 | "bsg rqst type: %s else type: %x - " |
@@ -515,6 +502,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job) | |||
515 | if (rval != QLA_SUCCESS) { | 502 | if (rval != QLA_SUCCESS) { |
516 | ql_log(ql_log_warn, vha, 0x7017, | 503 | ql_log(ql_log_warn, vha, 0x7017, |
517 | "qla2x00_start_sp failed=%d.\n", rval); | 504 | "qla2x00_start_sp failed=%d.\n", rval); |
505 | kfree(sp->ctx); | ||
518 | mempool_free(sp, ha->srb_mempool); | 506 | mempool_free(sp, ha->srb_mempool); |
519 | rval = -EIO; | 507 | rval = -EIO; |
520 | goto done_free_fcport; | 508 | goto done_free_fcport; |
@@ -531,29 +519,23 @@ done_unmap_sg: | |||
531 | done: | 519 | done: |
532 | return rval; | 520 | return rval; |
533 | } | 521 | } |
534 | /* | 522 | |
535 | * Set the port configuration to enable the internal or external loopback | 523 | /* Set the port configuration to enable the |
536 | * depending on the loopback mode. | 524 | * internal loopback on ISP81XX |
537 | */ | 525 | */ |
538 | static inline int | 526 | static inline int |
539 | qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, | 527 | qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config, |
540 | uint16_t *new_config, uint16_t mode) | 528 | uint16_t *new_config) |
541 | { | 529 | { |
542 | int ret = 0; | 530 | int ret = 0; |
543 | int rval = 0; | 531 | int rval = 0; |
544 | struct qla_hw_data *ha = vha->hw; | 532 | struct qla_hw_data *ha = vha->hw; |
545 | 533 | ||
546 | if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) | 534 | if (!IS_QLA81XX(ha)) |
547 | goto done_set_internal; | 535 | goto done_set_internal; |
548 | 536 | ||
549 | if (mode == INTERNAL_LOOPBACK) | 537 | new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1); |
550 | new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1); | 538 | memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ; |
551 | else if (mode == EXTERNAL_LOOPBACK) | ||
552 | new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1); | ||
553 | ql_dbg(ql_dbg_user, vha, 0x70be, | ||
554 | "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK)); | ||
555 | |||
556 | memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3); | ||
557 | 539 | ||
558 | ha->notify_dcbx_comp = 1; | 540 | ha->notify_dcbx_comp = 1; |
559 | ret = qla81xx_set_port_config(vha, new_config); | 541 | ret = qla81xx_set_port_config(vha, new_config); |
@@ -569,17 +551,9 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, | |||
569 | if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) { | 551 | if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) { |
570 | ql_dbg(ql_dbg_user, vha, 0x7022, | 552 | ql_dbg(ql_dbg_user, vha, 0x7022, |
571 | "State change notification not received.\n"); | 553 | "State change notification not received.\n"); |
572 | rval = -EINVAL; | 554 | } else |
573 | } else { | 555 | ql_dbg(ql_dbg_user, vha, 0x7023, |
574 | if (ha->flags.idc_compl_status) { | 556 | "State change received.\n"); |
575 | ql_dbg(ql_dbg_user, vha, 0x70c3, | ||
576 | "Bad status in IDC Completion AEN\n"); | ||
577 | rval = -EINVAL; | ||
578 | ha->flags.idc_compl_status = 0; | ||
579 | } else | ||
580 | ql_dbg(ql_dbg_user, vha, 0x7023, | ||
581 | "State change received.\n"); | ||
582 | } | ||
583 | 557 | ||
584 | ha->notify_dcbx_comp = 0; | 558 | ha->notify_dcbx_comp = 0; |
585 | 559 | ||
@@ -587,9 +561,11 @@ done_set_internal: | |||
587 | return rval; | 561 | return rval; |
588 | } | 562 | } |
589 | 563 | ||
590 | /* Disable loopback mode */ | 564 | /* Set the port configuration to disable the |
565 | * internal loopback on ISP81XX | ||
566 | */ | ||
591 | static inline int | 567 | static inline int |
592 | qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, | 568 | qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config, |
593 | int wait) | 569 | int wait) |
594 | { | 570 | { |
595 | int ret = 0; | 571 | int ret = 0; |
@@ -597,17 +573,13 @@ qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, | |||
597 | uint16_t new_config[4]; | 573 | uint16_t new_config[4]; |
598 | struct qla_hw_data *ha = vha->hw; | 574 | struct qla_hw_data *ha = vha->hw; |
599 | 575 | ||
600 | if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) | 576 | if (!IS_QLA81XX(ha)) |
601 | goto done_reset_internal; | 577 | goto done_reset_internal; |
602 | 578 | ||
603 | memset(new_config, 0 , sizeof(new_config)); | 579 | memset(new_config, 0 , sizeof(new_config)); |
604 | if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 == | 580 | if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 == |
605 | ENABLE_INTERNAL_LOOPBACK || | 581 | ENABLE_INTERNAL_LOOPBACK) { |
606 | (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 == | ||
607 | ENABLE_EXTERNAL_LOOPBACK) { | ||
608 | new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK; | 582 | new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK; |
609 | ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n", | ||
610 | (new_config[0] & INTERNAL_LOOPBACK_MASK)); | ||
611 | memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ; | 583 | memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ; |
612 | 584 | ||
613 | ha->notify_dcbx_comp = wait; | 585 | ha->notify_dcbx_comp = wait; |
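The two loopback helpers above only manipulate bits in the first port-config word. A minimal userspace sketch of that bit handling, assuming the qla_bsg.h values shown later in this patch (ENABLE_INTERNAL_LOOPBACK 0x02, INTERNAL_LOOPBACK_MASK 0x000E); the starting config value is made up:

        #include <stdio.h>
        #include <stdint.h>

        #define ENABLE_INTERNAL_LOOPBACK 0x02
        #define ENABLE_EXTERNAL_LOOPBACK 0x04
        #define INTERNAL_LOOPBACK_MASK   0x000E

        int main(void)
        {
                uint16_t config = 0x0020;  /* arbitrary starting config word */
                uint16_t set    = config | (ENABLE_INTERNAL_LOOPBACK << 1);  /* enable loopback */
                uint16_t mode   = (set & INTERNAL_LOOPBACK_MASK) >> 1;       /* read mode back */
                uint16_t clear  = set & ~INTERNAL_LOOPBACK_MASK;             /* disable loopback */

                printf("set=%#06x mode=%#04x clear=%#06x\n", set, mode, clear);
                /* prints set=0x0024 mode=0x02 clear=0x0020 */
                return 0;
        }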
@@ -658,6 +630,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) | |||
658 | dma_addr_t rsp_data_dma; | 630 | dma_addr_t rsp_data_dma; |
659 | uint32_t rsp_data_len; | 631 | uint32_t rsp_data_len; |
660 | 632 | ||
633 | if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || | ||
634 | test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || | ||
635 | test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { | ||
636 | ql_log(ql_log_warn, vha, 0x7018, "Abort active or needed.\n"); | ||
637 | return -EBUSY; | ||
638 | } | ||
639 | |||
661 | if (!vha->flags.online) { | 640 | if (!vha->flags.online) { |
662 | ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n"); | 641 | ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n"); |
663 | return -EIO; | 642 | return -EIO; |
@@ -724,9 +703,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) | |||
724 | 703 | ||
725 | elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; | 704 | elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; |
726 | 705 | ||
727 | if (atomic_read(&vha->loop_state) == LOOP_READY && | 706 | if ((ha->current_topology == ISP_CFG_F || |
728 | (ha->current_topology == ISP_CFG_F || | 707 | (IS_QLA81XX(ha) && |
729 | ((IS_QLA81XX(ha) || IS_QLA8031(ha)) && | ||
730 | le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE | 708 | le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE |
731 | && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && | 709 | && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && |
732 | elreq.options == EXTERNAL_LOOPBACK) { | 710 | elreq.options == EXTERNAL_LOOPBACK) { |
@@ -736,33 +714,46 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) | |||
736 | command_sent = INT_DEF_LB_ECHO_CMD; | 714 | command_sent = INT_DEF_LB_ECHO_CMD; |
737 | rval = qla2x00_echo_test(vha, &elreq, response); | 715 | rval = qla2x00_echo_test(vha, &elreq, response); |
738 | } else { | 716 | } else { |
739 | if (IS_QLA81XX(ha) || IS_QLA8031(ha)) { | 717 | if (IS_QLA81XX(ha)) { |
740 | memset(config, 0, sizeof(config)); | 718 | memset(config, 0, sizeof(config)); |
741 | memset(new_config, 0, sizeof(new_config)); | 719 | memset(new_config, 0, sizeof(new_config)); |
742 | if (qla81xx_get_port_config(vha, config)) { | 720 | if (qla81xx_get_port_config(vha, config)) { |
743 | ql_log(ql_log_warn, vha, 0x701f, | 721 | ql_log(ql_log_warn, vha, 0x701f, |
744 | "Get port config failed.\n"); | 722 | "Get port config failed.\n"); |
723 | bsg_job->reply->reply_payload_rcv_len = 0; | ||
724 | bsg_job->reply->result = (DID_ERROR << 16); | ||
745 | rval = -EPERM; | 725 | rval = -EPERM; |
746 | goto done_free_dma_rsp; | 726 | goto done_free_dma_req; |
747 | } | 727 | } |
748 | 728 | ||
749 | ql_dbg(ql_dbg_user, vha, 0x70c0, | 729 | if (elreq.options != EXTERNAL_LOOPBACK) { |
750 | "elreq.options=%04x\n", elreq.options); | 730 | ql_dbg(ql_dbg_user, vha, 0x7020, |
751 | 731 | "Internal: current port config = %x\n", | |
752 | if (elreq.options == EXTERNAL_LOOPBACK) | 732 | config[0]); |
753 | if (IS_QLA8031(ha)) | 733 | if (qla81xx_set_internal_loopback(vha, config, |
754 | rval = qla81xx_set_loopback_mode(vha, | 734 | new_config)) { |
755 | config, new_config, elreq.options); | 735 | ql_log(ql_log_warn, vha, 0x7024, |
756 | else | 736 | "Internal loopback failed.\n"); |
757 | rval = qla81xx_reset_loopback_mode(vha, | 737 | bsg_job->reply->reply_payload_rcv_len = |
758 | config, 1); | 738 | 0; |
759 | else | 739 | bsg_job->reply->result = |
760 | rval = qla81xx_set_loopback_mode(vha, config, | 740 | (DID_ERROR << 16); |
761 | new_config, elreq.options); | 741 | rval = -EPERM; |
762 | 742 | goto done_free_dma_req; | |
763 | if (rval) { | 743 | } |
764 | rval = -EPERM; | 744 | } else { |
765 | goto done_free_dma_rsp; | 745 | /* For external loopback to work |
746 | * ensure internal loopback is disabled | ||
747 | */ | ||
748 | if (qla81xx_reset_internal_loopback(vha, | ||
749 | config, 1)) { | ||
750 | bsg_job->reply->reply_payload_rcv_len = | ||
751 | 0; | ||
752 | bsg_job->reply->result = | ||
753 | (DID_ERROR << 16); | ||
754 | rval = -EPERM; | ||
755 | goto done_free_dma_req; | ||
756 | } | ||
766 | } | 757 | } |
767 | 758 | ||
768 | type = "FC_BSG_HST_VENDOR_LOOPBACK"; | 759 | type = "FC_BSG_HST_VENDOR_LOOPBACK"; |
@@ -776,7 +767,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) | |||
776 | /* Revert back to original port config | 767 | /* Revert back to original port config |
777 | * Also clear internal loopback | 768 | * Also clear internal loopback |
778 | */ | 769 | */ |
779 | qla81xx_reset_loopback_mode(vha, | 770 | qla81xx_reset_internal_loopback(vha, |
780 | new_config, 0); | 771 | new_config, 0); |
781 | } | 772 | } |
782 | 773 | ||
@@ -794,8 +785,10 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) | |||
794 | "MPI reset failed.\n"); | 785 | "MPI reset failed.\n"); |
795 | } | 786 | } |
796 | 787 | ||
788 | bsg_job->reply->reply_payload_rcv_len = 0; | ||
789 | bsg_job->reply->result = (DID_ERROR << 16); | ||
797 | rval = -EIO; | 790 | rval = -EIO; |
798 | goto done_free_dma_rsp; | 791 | goto done_free_dma_req; |
799 | } | 792 | } |
800 | } else { | 793 | } else { |
801 | type = "FC_BSG_HST_VENDOR_LOOPBACK"; | 794 | type = "FC_BSG_HST_VENDOR_LOOPBACK"; |
@@ -810,27 +803,35 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) | |||
810 | ql_log(ql_log_warn, vha, 0x702c, | 803 | ql_log(ql_log_warn, vha, 0x702c, |
811 | "Vendor request %s failed.\n", type); | 804 | "Vendor request %s failed.\n", type); |
812 | 805 | ||
806 | fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + | ||
807 | sizeof(struct fc_bsg_reply); | ||
808 | |||
809 | memcpy(fw_sts_ptr, response, sizeof(response)); | ||
810 | fw_sts_ptr += sizeof(response); | ||
811 | *fw_sts_ptr = command_sent; | ||
813 | rval = 0; | 812 | rval = 0; |
814 | bsg_job->reply->result = (DID_ERROR << 16); | ||
815 | bsg_job->reply->reply_payload_rcv_len = 0; | 813 | bsg_job->reply->reply_payload_rcv_len = 0; |
814 | bsg_job->reply->result = (DID_ERROR << 16); | ||
816 | } else { | 815 | } else { |
817 | ql_dbg(ql_dbg_user, vha, 0x702d, | 816 | ql_dbg(ql_dbg_user, vha, 0x702d, |
818 | "Vendor request %s completed.\n", type); | 817 | "Vendor request %s completed.\n", type); |
819 | bsg_job->reply->result = (DID_OK << 16); | 818 | |
819 | bsg_job->reply_len = sizeof(struct fc_bsg_reply) + | ||
820 | sizeof(response) + sizeof(uint8_t); | ||
821 | bsg_job->reply->reply_payload_rcv_len = | ||
822 | bsg_job->reply_payload.payload_len; | ||
823 | fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + | ||
824 | sizeof(struct fc_bsg_reply); | ||
825 | memcpy(fw_sts_ptr, response, sizeof(response)); | ||
826 | fw_sts_ptr += sizeof(response); | ||
827 | *fw_sts_ptr = command_sent; | ||
828 | bsg_job->reply->result = DID_OK; | ||
820 | sg_copy_from_buffer(bsg_job->reply_payload.sg_list, | 829 | sg_copy_from_buffer(bsg_job->reply_payload.sg_list, |
821 | bsg_job->reply_payload.sg_cnt, rsp_data, | 830 | bsg_job->reply_payload.sg_cnt, rsp_data, |
822 | rsp_data_len); | 831 | rsp_data_len); |
823 | } | 832 | } |
833 | bsg_job->job_done(bsg_job); | ||
824 | 834 | ||
825 | bsg_job->reply_len = sizeof(struct fc_bsg_reply) + | ||
826 | sizeof(response) + sizeof(uint8_t); | ||
827 | fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + | ||
828 | sizeof(struct fc_bsg_reply); | ||
829 | memcpy(fw_sts_ptr, response, sizeof(response)); | ||
830 | fw_sts_ptr += sizeof(response); | ||
831 | *fw_sts_ptr = command_sent; | ||
832 | |||
833 | done_free_dma_rsp: | ||
834 | dma_free_coherent(&ha->pdev->dev, rsp_data_len, | 835 | dma_free_coherent(&ha->pdev->dev, rsp_data_len, |
835 | rsp_data, rsp_data_dma); | 836 | rsp_data, rsp_data_dma); |
836 | done_free_dma_req: | 837 | done_free_dma_req: |
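Both branches above pack extra status behind the fc_bsg_reply in the request's sense buffer: the raw firmware response words first, then a single byte recording which command was issued. A standalone layout sketch; the stub struct, array sizes, and values are illustrative, not the driver's definitions:

        #include <stdio.h>
        #include <string.h>
        #include <stdint.h>

        struct fc_bsg_reply_stub { uint32_t result; uint32_t reply_payload_rcv_len; };

        int main(void)
        {
                uint8_t  sense[96];                         /* stand-in for bsg_job->req->sense */
                uint16_t response[8] = { 0x4005, 0x0017 };  /* e.g. mailbox status words */
                uint8_t  command_sent = 1;                  /* INT_DEF_LB_ECHO_CMD */

                uint8_t *fw_sts_ptr = sense + sizeof(struct fc_bsg_reply_stub);
                memcpy(fw_sts_ptr, response, sizeof(response));  /* firmware status words */
                fw_sts_ptr += sizeof(response);
                *fw_sts_ptr = command_sent;                      /* trailing command identifier */

                printf("reply_len = %zu\n", sizeof(struct fc_bsg_reply_stub) +
                    sizeof(response) + sizeof(uint8_t));         /* 8 + 16 + 1 = 25 */
                return 0;
        }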
@@ -844,8 +845,6 @@ done_unmap_req_sg: | |||
844 | dma_unmap_sg(&ha->pdev->dev, | 845 | dma_unmap_sg(&ha->pdev->dev, |
845 | bsg_job->request_payload.sg_list, | 846 | bsg_job->request_payload.sg_list, |
846 | bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); | 847 | bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); |
847 | if (!rval) | ||
848 | bsg_job->job_done(bsg_job); | ||
849 | return rval; | 848 | return rval; |
850 | } | 849 | } |
851 | 850 | ||
@@ -858,6 +857,13 @@ qla84xx_reset(struct fc_bsg_job *bsg_job) | |||
858 | int rval = 0; | 857 | int rval = 0; |
859 | uint32_t flag; | 858 | uint32_t flag; |
860 | 859 | ||
860 | if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || | ||
861 | test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || | ||
862 | test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { | ||
863 | ql_log(ql_log_warn, vha, 0x702e, "Abort active or needed.\n"); | ||
864 | return -EBUSY; | ||
865 | } | ||
866 | |||
861 | if (!IS_QLA84XX(ha)) { | 867 | if (!IS_QLA84XX(ha)) { |
862 | ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n"); | 868 | ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n"); |
863 | return -EINVAL; | 869 | return -EINVAL; |
@@ -870,15 +876,16 @@ qla84xx_reset(struct fc_bsg_job *bsg_job) | |||
870 | if (rval) { | 876 | if (rval) { |
871 | ql_log(ql_log_warn, vha, 0x7030, | 877 | ql_log(ql_log_warn, vha, 0x7030, |
872 | "Vendor request 84xx reset failed.\n"); | 878 | "Vendor request 84xx reset failed.\n"); |
873 | rval = (DID_ERROR << 16); | 879 | rval = bsg_job->reply->reply_payload_rcv_len = 0; |
880 | bsg_job->reply->result = (DID_ERROR << 16); | ||
874 | 881 | ||
875 | } else { | 882 | } else { |
876 | ql_dbg(ql_dbg_user, vha, 0x7031, | 883 | ql_dbg(ql_dbg_user, vha, 0x7031, |
877 | "Vendor request 84xx reset completed.\n"); | 884 | "Vendor request 84xx reset completed.\n"); |
878 | bsg_job->reply->result = DID_OK; | 885 | bsg_job->reply->result = DID_OK; |
879 | bsg_job->job_done(bsg_job); | ||
880 | } | 886 | } |
881 | 887 | ||
888 | bsg_job->job_done(bsg_job); | ||
882 | return rval; | 889 | return rval; |
883 | } | 890 | } |
884 | 891 | ||
@@ -898,6 +905,11 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job) | |||
898 | uint32_t flag; | 905 | uint32_t flag; |
899 | uint32_t fw_ver; | 906 | uint32_t fw_ver; |
900 | 907 | ||
908 | if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || | ||
909 | test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || | ||
910 | test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) | ||
911 | return -EBUSY; | ||
912 | |||
901 | if (!IS_QLA84XX(ha)) { | 913 | if (!IS_QLA84XX(ha)) { |
902 | ql_dbg(ql_dbg_user, vha, 0x7032, | 914 | ql_dbg(ql_dbg_user, vha, 0x7032, |
903 | "Not 84xx, exiting.\n"); | 915 | "Not 84xx, exiting.\n"); |
@@ -968,7 +980,9 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job) | |||
968 | ql_log(ql_log_warn, vha, 0x7037, | 980 | ql_log(ql_log_warn, vha, 0x7037, |
969 | "Vendor request 84xx updatefw failed.\n"); | 981 | "Vendor request 84xx updatefw failed.\n"); |
970 | 982 | ||
971 | rval = (DID_ERROR << 16); | 983 | rval = bsg_job->reply->reply_payload_rcv_len = 0; |
984 | bsg_job->reply->result = (DID_ERROR << 16); | ||
985 | |||
972 | } else { | 986 | } else { |
973 | ql_dbg(ql_dbg_user, vha, 0x7038, | 987 | ql_dbg(ql_dbg_user, vha, 0x7038, |
974 | "Vendor request 84xx updatefw completed.\n"); | 988 | "Vendor request 84xx updatefw completed.\n"); |
@@ -977,6 +991,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job) | |||
977 | bsg_job->reply->result = DID_OK; | 991 | bsg_job->reply->result = DID_OK; |
978 | } | 992 | } |
979 | 993 | ||
994 | bsg_job->job_done(bsg_job); | ||
980 | dma_pool_free(ha->s_dma_pool, mn, mn_dma); | 995 | dma_pool_free(ha->s_dma_pool, mn, mn_dma); |
981 | 996 | ||
982 | done_free_fw_buf: | 997 | done_free_fw_buf: |
@@ -986,8 +1001,6 @@ done_unmap_sg: | |||
986 | dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, | 1001 | dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, |
987 | bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); | 1002 | bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); |
988 | 1003 | ||
989 | if (!rval) | ||
990 | bsg_job->job_done(bsg_job); | ||
991 | return rval; | 1004 | return rval; |
992 | } | 1005 | } |
993 | 1006 | ||
@@ -1006,6 +1019,14 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) | |||
1006 | uint32_t data_len = 0; | 1019 | uint32_t data_len = 0; |
1007 | uint32_t dma_direction = DMA_NONE; | 1020 | uint32_t dma_direction = DMA_NONE; |
1008 | 1021 | ||
1022 | if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || | ||
1023 | test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || | ||
1024 | test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { | ||
1025 | ql_log(ql_log_warn, vha, 0x7039, | ||
1026 | "Abort active or needed.\n"); | ||
1027 | return -EBUSY; | ||
1028 | } | ||
1029 | |||
1009 | if (!IS_QLA84XX(ha)) { | 1030 | if (!IS_QLA84XX(ha)) { |
1010 | ql_log(ql_log_warn, vha, 0x703a, | 1031 | ql_log(ql_log_warn, vha, 0x703a, |
1011 | "Not 84xx, exiting.\n"); | 1032 | "Not 84xx, exiting.\n"); |
@@ -1155,7 +1176,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) | |||
1155 | ql_log(ql_log_warn, vha, 0x7043, | 1176 | ql_log(ql_log_warn, vha, 0x7043, |
1156 | "Vendor request 84xx mgmt failed.\n"); | 1177 | "Vendor request 84xx mgmt failed.\n"); |
1157 | 1178 | ||
1158 | rval = (DID_ERROR << 16); | 1179 | rval = bsg_job->reply->reply_payload_rcv_len = 0; |
1180 | bsg_job->reply->result = (DID_ERROR << 16); | ||
1159 | 1181 | ||
1160 | } else { | 1182 | } else { |
1161 | ql_dbg(ql_dbg_user, vha, 0x7044, | 1183 | ql_dbg(ql_dbg_user, vha, 0x7044, |
@@ -1175,6 +1197,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) | |||
1175 | } | 1197 | } |
1176 | } | 1198 | } |
1177 | 1199 | ||
1200 | bsg_job->job_done(bsg_job); | ||
1201 | |||
1178 | done_unmap_sg: | 1202 | done_unmap_sg: |
1179 | if (mgmt_b) | 1203 | if (mgmt_b) |
1180 | dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma); | 1204 | dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma); |
@@ -1189,8 +1213,6 @@ done_unmap_sg: | |||
1189 | exit_mgmt: | 1213 | exit_mgmt: |
1190 | dma_pool_free(ha->s_dma_pool, mn, mn_dma); | 1214 | dma_pool_free(ha->s_dma_pool, mn, mn_dma); |
1191 | 1215 | ||
1192 | if (!rval) | ||
1193 | bsg_job->job_done(bsg_job); | ||
1194 | return rval; | 1216 | return rval; |
1195 | } | 1217 | } |
1196 | 1218 | ||
@@ -1205,6 +1227,15 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) | |||
1205 | uint16_t mb[MAILBOX_REGISTER_COUNT]; | 1227 | uint16_t mb[MAILBOX_REGISTER_COUNT]; |
1206 | uint8_t *rsp_ptr = NULL; | 1228 | uint8_t *rsp_ptr = NULL; |
1207 | 1229 | ||
1230 | bsg_job->reply->reply_payload_rcv_len = 0; | ||
1231 | |||
1232 | if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || | ||
1233 | test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || | ||
1234 | test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { | ||
1235 | ql_log(ql_log_warn, vha, 0x7045, "abort active or needed.\n"); | ||
1236 | return -EBUSY; | ||
1237 | } | ||
1238 | |||
1208 | if (!IS_IIDMA_CAPABLE(vha->hw)) { | 1239 | if (!IS_IIDMA_CAPABLE(vha->hw)) { |
1209 | ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n"); | 1240 | ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n"); |
1210 | return -EINVAL; | 1241 | return -EINVAL; |
@@ -1267,7 +1298,9 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) | |||
1267 | fcport->port_name[3], fcport->port_name[4], | 1298 | fcport->port_name[3], fcport->port_name[4], |
1268 | fcport->port_name[5], fcport->port_name[6], | 1299 | fcport->port_name[5], fcport->port_name[6], |
1269 | fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]); | 1300 | fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]); |
1270 | rval = (DID_ERROR << 16); | 1301 | rval = 0; |
1302 | bsg_job->reply->result = (DID_ERROR << 16); | ||
1303 | |||
1271 | } else { | 1304 | } else { |
1272 | if (!port_param->mode) { | 1305 | if (!port_param->mode) { |
1273 | bsg_job->reply_len = sizeof(struct fc_bsg_reply) + | 1306 | bsg_job->reply_len = sizeof(struct fc_bsg_reply) + |
@@ -1281,9 +1314,9 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) | |||
1281 | } | 1314 | } |
1282 | 1315 | ||
1283 | bsg_job->reply->result = DID_OK; | 1316 | bsg_job->reply->result = DID_OK; |
1284 | bsg_job->job_done(bsg_job); | ||
1285 | } | 1317 | } |
1286 | 1318 | ||
1319 | bsg_job->job_done(bsg_job); | ||
1287 | return rval; | 1320 | return rval; |
1288 | } | 1321 | } |
1289 | 1322 | ||
@@ -1295,6 +1328,8 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha, | |||
1295 | int valid = 0; | 1328 | int valid = 0; |
1296 | struct qla_hw_data *ha = vha->hw; | 1329 | struct qla_hw_data *ha = vha->hw; |
1297 | 1330 | ||
1331 | bsg_job->reply->reply_payload_rcv_len = 0; | ||
1332 | |||
1298 | if (unlikely(pci_channel_offline(ha->pdev))) | 1333 | if (unlikely(pci_channel_offline(ha->pdev))) |
1299 | return -EINVAL; | 1334 | return -EINVAL; |
1300 | 1335 | ||
@@ -1320,7 +1355,7 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha, | |||
1320 | start == (ha->flt_region_fw * 4)) | 1355 | start == (ha->flt_region_fw * 4)) |
1321 | valid = 1; | 1356 | valid = 1; |
1322 | else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || | 1357 | else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || |
1323 | IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) | 1358 | IS_QLA8XXX_TYPE(ha)) |
1324 | valid = 1; | 1359 | valid = 1; |
1325 | if (!valid) { | 1360 | if (!valid) { |
1326 | ql_log(ql_log_warn, vha, 0x7058, | 1361 | ql_log(ql_log_warn, vha, 0x7058, |
@@ -1364,9 +1399,6 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job) | |||
1364 | struct qla_hw_data *ha = vha->hw; | 1399 | struct qla_hw_data *ha = vha->hw; |
1365 | int rval = 0; | 1400 | int rval = 0; |
1366 | 1401 | ||
1367 | if (ha->flags.nic_core_reset_hdlr_active) | ||
1368 | return -EBUSY; | ||
1369 | |||
1370 | rval = qla2x00_optrom_setup(bsg_job, vha, 0); | 1402 | rval = qla2x00_optrom_setup(bsg_job, vha, 0); |
1371 | if (rval) | 1403 | if (rval) |
1372 | return rval; | 1404 | return rval; |
@@ -1399,9 +1431,6 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job) | |||
1399 | if (rval) | 1431 | if (rval) |
1400 | return rval; | 1432 | return rval; |
1401 | 1433 | ||
1402 | /* Set the isp82xx_no_md_cap not to capture minidump */ | ||
1403 | ha->flags.isp82xx_no_md_cap = 1; | ||
1404 | |||
1405 | sg_copy_to_buffer(bsg_job->request_payload.sg_list, | 1434 | sg_copy_to_buffer(bsg_job->request_payload.sg_list, |
1406 | bsg_job->request_payload.sg_cnt, ha->optrom_buffer, | 1435 | bsg_job->request_payload.sg_cnt, ha->optrom_buffer, |
1407 | ha->optrom_region_size); | 1436 | ha->optrom_region_size); |
@@ -1418,418 +1447,6 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job) | |||
1418 | } | 1447 | } |
1419 | 1448 | ||
1420 | static int | 1449 | static int |
1421 | qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job) | ||
1422 | { | ||
1423 | struct Scsi_Host *host = bsg_job->shost; | ||
1424 | scsi_qla_host_t *vha = shost_priv(host); | ||
1425 | struct qla_hw_data *ha = vha->hw; | ||
1426 | int rval = 0; | ||
1427 | uint8_t bsg[DMA_POOL_SIZE]; | ||
1428 | struct qla_image_version_list *list = (void *)bsg; | ||
1429 | struct qla_image_version *image; | ||
1430 | uint32_t count; | ||
1431 | dma_addr_t sfp_dma; | ||
1432 | void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); | ||
1433 | if (!sfp) { | ||
1434 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = | ||
1435 | EXT_STATUS_NO_MEMORY; | ||
1436 | goto done; | ||
1437 | } | ||
1438 | |||
1439 | sg_copy_to_buffer(bsg_job->request_payload.sg_list, | ||
1440 | bsg_job->request_payload.sg_cnt, list, sizeof(bsg)); | ||
1441 | |||
1442 | image = list->version; | ||
1443 | count = list->count; | ||
1444 | while (count--) { | ||
1445 | memcpy(sfp, &image->field_info, sizeof(image->field_info)); | ||
1446 | rval = qla2x00_write_sfp(vha, sfp_dma, sfp, | ||
1447 | image->field_address.device, image->field_address.offset, | ||
1448 | sizeof(image->field_info), image->field_address.option); | ||
1449 | if (rval) { | ||
1450 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = | ||
1451 | EXT_STATUS_MAILBOX; | ||
1452 | goto dealloc; | ||
1453 | } | ||
1454 | image++; | ||
1455 | } | ||
1456 | |||
1457 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; | ||
1458 | |||
1459 | dealloc: | ||
1460 | dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); | ||
1461 | |||
1462 | done: | ||
1463 | bsg_job->reply_len = sizeof(struct fc_bsg_reply); | ||
1464 | bsg_job->reply->result = DID_OK << 16; | ||
1465 | bsg_job->job_done(bsg_job); | ||
1466 | |||
1467 | return 0; | ||
1468 | } | ||
1469 | |||
1470 | static int | ||
1471 | qla2x00_read_fru_status(struct fc_bsg_job *bsg_job) | ||
1472 | { | ||
1473 | struct Scsi_Host *host = bsg_job->shost; | ||
1474 | scsi_qla_host_t *vha = shost_priv(host); | ||
1475 | struct qla_hw_data *ha = vha->hw; | ||
1476 | int rval = 0; | ||
1477 | uint8_t bsg[DMA_POOL_SIZE]; | ||
1478 | struct qla_status_reg *sr = (void *)bsg; | ||
1479 | dma_addr_t sfp_dma; | ||
1480 | uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); | ||
1481 | if (!sfp) { | ||
1482 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = | ||
1483 | EXT_STATUS_NO_MEMORY; | ||
1484 | goto done; | ||
1485 | } | ||
1486 | |||
1487 | sg_copy_to_buffer(bsg_job->request_payload.sg_list, | ||
1488 | bsg_job->request_payload.sg_cnt, sr, sizeof(*sr)); | ||
1489 | |||
1490 | rval = qla2x00_read_sfp(vha, sfp_dma, sfp, | ||
1491 | sr->field_address.device, sr->field_address.offset, | ||
1492 | sizeof(sr->status_reg), sr->field_address.option); | ||
1493 | sr->status_reg = *sfp; | ||
1494 | |||
1495 | if (rval) { | ||
1496 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = | ||
1497 | EXT_STATUS_MAILBOX; | ||
1498 | goto dealloc; | ||
1499 | } | ||
1500 | |||
1501 | sg_copy_from_buffer(bsg_job->reply_payload.sg_list, | ||
1502 | bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr)); | ||
1503 | |||
1504 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; | ||
1505 | |||
1506 | dealloc: | ||
1507 | dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); | ||
1508 | |||
1509 | done: | ||
1510 | bsg_job->reply_len = sizeof(struct fc_bsg_reply); | ||
1511 | bsg_job->reply->reply_payload_rcv_len = sizeof(*sr); | ||
1512 | bsg_job->reply->result = DID_OK << 16; | ||
1513 | bsg_job->job_done(bsg_job); | ||
1514 | |||
1515 | return 0; | ||
1516 | } | ||
1517 | |||
1518 | static int | ||
1519 | qla2x00_write_fru_status(struct fc_bsg_job *bsg_job) | ||
1520 | { | ||
1521 | struct Scsi_Host *host = bsg_job->shost; | ||
1522 | scsi_qla_host_t *vha = shost_priv(host); | ||
1523 | struct qla_hw_data *ha = vha->hw; | ||
1524 | int rval = 0; | ||
1525 | uint8_t bsg[DMA_POOL_SIZE]; | ||
1526 | struct qla_status_reg *sr = (void *)bsg; | ||
1527 | dma_addr_t sfp_dma; | ||
1528 | uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); | ||
1529 | if (!sfp) { | ||
1530 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = | ||
1531 | EXT_STATUS_NO_MEMORY; | ||
1532 | goto done; | ||
1533 | } | ||
1534 | |||
1535 | sg_copy_to_buffer(bsg_job->request_payload.sg_list, | ||
1536 | bsg_job->request_payload.sg_cnt, sr, sizeof(*sr)); | ||
1537 | |||
1538 | *sfp = sr->status_reg; | ||
1539 | rval = qla2x00_write_sfp(vha, sfp_dma, sfp, | ||
1540 | sr->field_address.device, sr->field_address.offset, | ||
1541 | sizeof(sr->status_reg), sr->field_address.option); | ||
1542 | |||
1543 | if (rval) { | ||
1544 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = | ||
1545 | EXT_STATUS_MAILBOX; | ||
1546 | goto dealloc; | ||
1547 | } | ||
1548 | |||
1549 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; | ||
1550 | |||
1551 | dealloc: | ||
1552 | dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); | ||
1553 | |||
1554 | done: | ||
1555 | bsg_job->reply_len = sizeof(struct fc_bsg_reply); | ||
1556 | bsg_job->reply->result = DID_OK << 16; | ||
1557 | bsg_job->job_done(bsg_job); | ||
1558 | |||
1559 | return 0; | ||
1560 | } | ||
1561 | |||
1562 | static int | ||
1563 | qla2x00_write_i2c(struct fc_bsg_job *bsg_job) | ||
1564 | { | ||
1565 | struct Scsi_Host *host = bsg_job->shost; | ||
1566 | scsi_qla_host_t *vha = shost_priv(host); | ||
1567 | struct qla_hw_data *ha = vha->hw; | ||
1568 | int rval = 0; | ||
1569 | uint8_t bsg[DMA_POOL_SIZE]; | ||
1570 | struct qla_i2c_access *i2c = (void *)bsg; | ||
1571 | dma_addr_t sfp_dma; | ||
1572 | uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); | ||
1573 | if (!sfp) { | ||
1574 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = | ||
1575 | EXT_STATUS_NO_MEMORY; | ||
1576 | goto done; | ||
1577 | } | ||
1578 | |||
1579 | sg_copy_to_buffer(bsg_job->request_payload.sg_list, | ||
1580 | bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c)); | ||
1581 | |||
1582 | memcpy(sfp, i2c->buffer, i2c->length); | ||
1583 | rval = qla2x00_write_sfp(vha, sfp_dma, sfp, | ||
1584 | i2c->device, i2c->offset, i2c->length, i2c->option); | ||
1585 | |||
1586 | if (rval) { | ||
1587 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = | ||
1588 | EXT_STATUS_MAILBOX; | ||
1589 | goto dealloc; | ||
1590 | } | ||
1591 | |||
1592 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; | ||
1593 | |||
1594 | dealloc: | ||
1595 | dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); | ||
1596 | |||
1597 | done: | ||
1598 | bsg_job->reply_len = sizeof(struct fc_bsg_reply); | ||
1599 | bsg_job->reply->result = DID_OK << 16; | ||
1600 | bsg_job->job_done(bsg_job); | ||
1601 | |||
1602 | return 0; | ||
1603 | } | ||
1604 | |||
1605 | static int | ||
1606 | qla2x00_read_i2c(struct fc_bsg_job *bsg_job) | ||
1607 | { | ||
1608 | struct Scsi_Host *host = bsg_job->shost; | ||
1609 | scsi_qla_host_t *vha = shost_priv(host); | ||
1610 | struct qla_hw_data *ha = vha->hw; | ||
1611 | int rval = 0; | ||
1612 | uint8_t bsg[DMA_POOL_SIZE]; | ||
1613 | struct qla_i2c_access *i2c = (void *)bsg; | ||
1614 | dma_addr_t sfp_dma; | ||
1615 | uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); | ||
1616 | if (!sfp) { | ||
1617 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = | ||
1618 | EXT_STATUS_NO_MEMORY; | ||
1619 | goto done; | ||
1620 | } | ||
1621 | |||
1622 | sg_copy_to_buffer(bsg_job->request_payload.sg_list, | ||
1623 | bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c)); | ||
1624 | |||
1625 | rval = qla2x00_read_sfp(vha, sfp_dma, sfp, | ||
1626 | i2c->device, i2c->offset, i2c->length, i2c->option); | ||
1627 | |||
1628 | if (rval) { | ||
1629 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = | ||
1630 | EXT_STATUS_MAILBOX; | ||
1631 | goto dealloc; | ||
1632 | } | ||
1633 | |||
1634 | memcpy(i2c->buffer, sfp, i2c->length); | ||
1635 | sg_copy_from_buffer(bsg_job->reply_payload.sg_list, | ||
1636 | bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c)); | ||
1637 | |||
1638 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; | ||
1639 | |||
1640 | dealloc: | ||
1641 | dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); | ||
1642 | |||
1643 | done: | ||
1644 | bsg_job->reply_len = sizeof(struct fc_bsg_reply); | ||
1645 | bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c); | ||
1646 | bsg_job->reply->result = DID_OK << 16; | ||
1647 | bsg_job->job_done(bsg_job); | ||
1648 | |||
1649 | return 0; | ||
1650 | } | ||
1651 | |||
1652 | static int | ||
1653 | qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job) | ||
1654 | { | ||
1655 | struct Scsi_Host *host = bsg_job->shost; | ||
1656 | scsi_qla_host_t *vha = shost_priv(host); | ||
1657 | struct qla_hw_data *ha = vha->hw; | ||
1658 | uint16_t thread_id; | ||
1659 | uint32_t rval = EXT_STATUS_OK; | ||
1660 | uint16_t req_sg_cnt = 0; | ||
1661 | uint16_t rsp_sg_cnt = 0; | ||
1662 | uint16_t nextlid = 0; | ||
1663 | uint32_t tot_dsds; | ||
1664 | srb_t *sp = NULL; | ||
1665 | uint32_t req_data_len = 0; | ||
1666 | uint32_t rsp_data_len = 0; | ||
1667 | |||
1668 | /* Check the type of the adapter */ | ||
1669 | if (!IS_BIDI_CAPABLE(ha)) { | ||
1670 | ql_log(ql_log_warn, vha, 0x70a0, | ||
1671 | "This adapter is not supported\n"); | ||
1672 | rval = EXT_STATUS_NOT_SUPPORTED; | ||
1673 | goto done; | ||
1674 | } | ||
1675 | |||
1676 | if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || | ||
1677 | test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || | ||
1678 | test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { | ||
1679 | rval = EXT_STATUS_BUSY; | ||
1680 | goto done; | ||
1681 | } | ||
1682 | |||
1683 | /* Check if host is online */ | ||
1684 | if (!vha->flags.online) { | ||
1685 | ql_log(ql_log_warn, vha, 0x70a1, | ||
1686 | "Host is not online\n"); | ||
1687 | rval = EXT_STATUS_DEVICE_OFFLINE; | ||
1688 | goto done; | ||
1689 | } | ||
1690 | |||
1691 | /* Check if cable is plugged in or not */ | ||
1692 | if (vha->device_flags & DFLG_NO_CABLE) { | ||
1693 | ql_log(ql_log_warn, vha, 0x70a2, | ||
1694 | "Cable is unplugged...\n"); | ||
1695 | rval = EXT_STATUS_INVALID_CFG; | ||
1696 | goto done; | ||
1697 | } | ||
1698 | |||
1699 | /* Check if the switch is connected or not */ | ||
1700 | if (ha->current_topology != ISP_CFG_F) { | ||
1701 | ql_log(ql_log_warn, vha, 0x70a3, | ||
1702 | "Host is not connected to the switch\n"); | ||
1703 | rval = EXT_STATUS_INVALID_CFG; | ||
1704 | goto done; | ||
1705 | } | ||
1706 | |||
1707 | /* Check if operating mode is P2P */ | ||
1708 | if (ha->operating_mode != P2P) { | ||
1709 | ql_log(ql_log_warn, vha, 0x70a4, | ||
1710 | "Host is operating mode is not P2p\n"); | ||
1711 | rval = EXT_STATUS_INVALID_CFG; | ||
1712 | goto done; | ||
1713 | } | ||
1714 | |||
1715 | thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; | ||
1716 | |||
1717 | mutex_lock(&ha->selflogin_lock); | ||
1718 | if (vha->self_login_loop_id == 0) { | ||
1719 | /* Initialize all required fields of fcport */ | ||
1720 | vha->bidir_fcport.vha = vha; | ||
1721 | vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa; | ||
1722 | vha->bidir_fcport.d_id.b.area = vha->d_id.b.area; | ||
1723 | vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain; | ||
1724 | vha->bidir_fcport.loop_id = vha->loop_id; | ||
1725 | |||
1726 | if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) { | ||
1727 | ql_log(ql_log_warn, vha, 0x70a7, | ||
1728 | "Failed to login port %06X for bidirectional IOCB\n", | ||
1729 | vha->bidir_fcport.d_id.b24); | ||
1730 | mutex_unlock(&ha->selflogin_lock); | ||
1731 | rval = EXT_STATUS_MAILBOX; | ||
1732 | goto done; | ||
1733 | } | ||
1734 | vha->self_login_loop_id = nextlid - 1; | ||
1735 | |||
1736 | } | ||
1737 | /* Assign the self login loop id to fcport */ | ||
1738 | mutex_unlock(&ha->selflogin_lock); | ||
1739 | |||
1740 | vha->bidir_fcport.loop_id = vha->self_login_loop_id; | ||
1741 | |||
1742 | req_sg_cnt = dma_map_sg(&ha->pdev->dev, | ||
1743 | bsg_job->request_payload.sg_list, | ||
1744 | bsg_job->request_payload.sg_cnt, | ||
1745 | DMA_TO_DEVICE); | ||
1746 | |||
1747 | if (!req_sg_cnt) { | ||
1748 | rval = EXT_STATUS_NO_MEMORY; | ||
1749 | goto done; | ||
1750 | } | ||
1751 | |||
1752 | rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, | ||
1753 | bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, | ||
1754 | DMA_FROM_DEVICE); | ||
1755 | |||
1756 | if (!rsp_sg_cnt) { | ||
1757 | rval = EXT_STATUS_NO_MEMORY; | ||
1758 | goto done_unmap_req_sg; | ||
1759 | } | ||
1760 | |||
1761 | if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || | ||
1762 | (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { | ||
1763 | ql_dbg(ql_dbg_user, vha, 0x70a9, | ||
1764 | "Dma mapping resulted in different sg counts " | ||
1765 | "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: " | ||
1766 | "%x dma_reply_sg_cnt: %x]\n", | ||
1767 | bsg_job->request_payload.sg_cnt, req_sg_cnt, | ||
1768 | bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); | ||
1769 | rval = EXT_STATUS_NO_MEMORY; | ||
1770 | goto done_unmap_sg; | ||
1771 | } | ||
1772 | |||
1773 | if (req_data_len != rsp_data_len) { | ||
1774 | rval = EXT_STATUS_BUSY; | ||
1775 | ql_log(ql_log_warn, vha, 0x70aa, | ||
1776 | "req_data_len != rsp_data_len\n"); | ||
1777 | goto done_unmap_sg; | ||
1778 | } | ||
1779 | |||
1780 | req_data_len = bsg_job->request_payload.payload_len; | ||
1781 | rsp_data_len = bsg_job->reply_payload.payload_len; | ||
1782 | |||
1783 | |||
1784 | /* Alloc SRB structure */ | ||
1785 | sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL); | ||
1786 | if (!sp) { | ||
1787 | ql_dbg(ql_dbg_user, vha, 0x70ac, | ||
1788 | "Alloc SRB structure failed\n"); | ||
1789 | rval = EXT_STATUS_NO_MEMORY; | ||
1790 | goto done_unmap_sg; | ||
1791 | } | ||
1792 | |||
1793 | /*Populate srb->ctx with bidir ctx*/ | ||
1794 | sp->u.bsg_job = bsg_job; | ||
1795 | sp->free = qla2x00_bsg_sp_free; | ||
1796 | sp->type = SRB_BIDI_CMD; | ||
1797 | sp->done = qla2x00_bsg_job_done; | ||
1798 | |||
1799 | /* Add the read and write sg count */ | ||
1800 | tot_dsds = rsp_sg_cnt + req_sg_cnt; | ||
1801 | |||
1802 | rval = qla2x00_start_bidir(sp, vha, tot_dsds); | ||
1803 | if (rval != EXT_STATUS_OK) | ||
1804 | goto done_free_srb; | ||
1805 | /* the bsg request will be completed in the interrupt handler */ | ||
1806 | return rval; | ||
1807 | |||
1808 | done_free_srb: | ||
1809 | mempool_free(sp, ha->srb_mempool); | ||
1810 | done_unmap_sg: | ||
1811 | dma_unmap_sg(&ha->pdev->dev, | ||
1812 | bsg_job->reply_payload.sg_list, | ||
1813 | bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
1814 | done_unmap_req_sg: | ||
1815 | dma_unmap_sg(&ha->pdev->dev, | ||
1816 | bsg_job->request_payload.sg_list, | ||
1817 | bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
1818 | done: | ||
1819 | |||
1820 | /* Return an error vendor specific response | ||
1821 | * and complete the bsg request | ||
1822 | */ | ||
1823 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval; | ||
1824 | bsg_job->reply_len = sizeof(struct fc_bsg_reply); | ||
1825 | bsg_job->reply->reply_payload_rcv_len = 0; | ||
1826 | bsg_job->reply->result = (DID_OK) << 16; | ||
1827 | bsg_job->job_done(bsg_job); | ||
1828 | /* Always return success, vendor rsp carries correct status */ | ||
1829 | return 0; | ||
1830 | } | ||
1831 | |||
1832 | static int | ||
1833 | qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) | 1450 | qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) |
1834 | { | 1451 | { |
1835 | switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { | 1452 | switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { |
@@ -1857,25 +1474,9 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) | |||
1857 | case QL_VND_UPDATE_FLASH: | 1474 | case QL_VND_UPDATE_FLASH: |
1858 | return qla2x00_update_optrom(bsg_job); | 1475 | return qla2x00_update_optrom(bsg_job); |
1859 | 1476 | ||
1860 | case QL_VND_SET_FRU_VERSION: | ||
1861 | return qla2x00_update_fru_versions(bsg_job); | ||
1862 | |||
1863 | case QL_VND_READ_FRU_STATUS: | ||
1864 | return qla2x00_read_fru_status(bsg_job); | ||
1865 | |||
1866 | case QL_VND_WRITE_FRU_STATUS: | ||
1867 | return qla2x00_write_fru_status(bsg_job); | ||
1868 | |||
1869 | case QL_VND_WRITE_I2C: | ||
1870 | return qla2x00_write_i2c(bsg_job); | ||
1871 | |||
1872 | case QL_VND_READ_I2C: | ||
1873 | return qla2x00_read_i2c(bsg_job); | ||
1874 | |||
1875 | case QL_VND_DIAG_IO_CMD: | ||
1876 | return qla24xx_process_bidir_cmd(bsg_job); | ||
1877 | |||
1878 | default: | 1477 | default: |
1478 | bsg_job->reply->result = (DID_ERROR << 16); | ||
1479 | bsg_job->job_done(bsg_job); | ||
1879 | return -ENOSYS; | 1480 | return -ENOSYS; |
1880 | } | 1481 | } |
1881 | } | 1482 | } |
@@ -1889,9 +1490,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job) | |||
1889 | struct Scsi_Host *host; | 1490 | struct Scsi_Host *host; |
1890 | scsi_qla_host_t *vha; | 1491 | scsi_qla_host_t *vha; |
1891 | 1492 | ||
1892 | /* In case no data transferred. */ | ||
1893 | bsg_job->reply->reply_payload_rcv_len = 0; | ||
1894 | |||
1895 | if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { | 1493 | if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { |
1896 | rport = bsg_job->rport; | 1494 | rport = bsg_job->rport; |
1897 | fcport = *(fc_port_t **) rport->dd_data; | 1495 | fcport = *(fc_port_t **) rport->dd_data; |
@@ -1902,15 +1500,8 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job) | |||
1902 | vha = shost_priv(host); | 1500 | vha = shost_priv(host); |
1903 | } | 1501 | } |
1904 | 1502 | ||
1905 | if (qla2x00_reset_active(vha)) { | ||
1906 | ql_dbg(ql_dbg_user, vha, 0x709f, | ||
1907 | "BSG: ISP abort active/needed -- cmd=%d.\n", | ||
1908 | bsg_job->request->msgcode); | ||
1909 | return -EBUSY; | ||
1910 | } | ||
1911 | |||
1912 | ql_dbg(ql_dbg_user, vha, 0x7000, | 1503 | ql_dbg(ql_dbg_user, vha, 0x7000, |
1913 | "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode); | 1504 | "Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode); |
1914 | 1505 | ||
1915 | switch (bsg_job->request->msgcode) { | 1506 | switch (bsg_job->request->msgcode) { |
1916 | case FC_BSG_RPT_ELS: | 1507 | case FC_BSG_RPT_ELS: |
@@ -1942,6 +1533,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) | |||
1942 | int cnt, que; | 1533 | int cnt, que; |
1943 | unsigned long flags; | 1534 | unsigned long flags; |
1944 | struct req_que *req; | 1535 | struct req_que *req; |
1536 | struct srb_ctx *sp_bsg; | ||
1945 | 1537 | ||
1946 | /* find the bsg job from the active list of commands */ | 1538 | /* find the bsg job from the active list of commands */ |
1947 | spin_lock_irqsave(&ha->hardware_lock, flags); | 1539 | spin_lock_irqsave(&ha->hardware_lock, flags); |
@@ -1953,9 +1545,11 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) | |||
1953 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { | 1545 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { |
1954 | sp = req->outstanding_cmds[cnt]; | 1546 | sp = req->outstanding_cmds[cnt]; |
1955 | if (sp) { | 1547 | if (sp) { |
1956 | if (((sp->type == SRB_CT_CMD) || | 1548 | sp_bsg = sp->ctx; |
1957 | (sp->type == SRB_ELS_CMD_HST)) | 1549 | |
1958 | && (sp->u.bsg_job == bsg_job)) { | 1550 | if (((sp_bsg->type == SRB_CT_CMD) || |
1551 | (sp_bsg->type == SRB_ELS_CMD_HST)) | ||
1552 | && (sp_bsg->u.bsg_job == bsg_job)) { | ||
1959 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1553 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
1960 | if (ha->isp_ops->abort_command(sp)) { | 1554 | if (ha->isp_ops->abort_command(sp)) { |
1961 | ql_log(ql_log_warn, vha, 0x7089, | 1555 | ql_log(ql_log_warn, vha, 0x7089, |
@@ -1985,6 +1579,7 @@ done: | |||
1985 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1579 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
1986 | if (bsg_job->request->msgcode == FC_BSG_HST_CT) | 1580 | if (bsg_job->request->msgcode == FC_BSG_HST_CT) |
1987 | kfree(sp->fcport); | 1581 | kfree(sp->fcport); |
1582 | kfree(sp->ctx); | ||
1988 | mempool_free(sp, ha->srb_mempool); | 1583 | mempool_free(sp, ha->srb_mempool); |
1989 | return 0; | 1584 | return 0; |
1990 | } | 1585 | } |
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h index 37b8b7ba742..0f0f54e35f0 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.h +++ b/drivers/scsi/qla2xxx/qla_bsg.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -16,44 +16,14 @@ | |||
16 | #define QL_VND_FCP_PRIO_CFG_CMD 0x06 | 16 | #define QL_VND_FCP_PRIO_CFG_CMD 0x06 |
17 | #define QL_VND_READ_FLASH 0x07 | 17 | #define QL_VND_READ_FLASH 0x07 |
18 | #define QL_VND_UPDATE_FLASH 0x08 | 18 | #define QL_VND_UPDATE_FLASH 0x08 |
19 | #define QL_VND_SET_FRU_VERSION 0x0B | ||
20 | #define QL_VND_READ_FRU_STATUS 0x0C | ||
21 | #define QL_VND_WRITE_FRU_STATUS 0x0D | ||
22 | #define QL_VND_DIAG_IO_CMD 0x0A | ||
23 | #define QL_VND_WRITE_I2C 0x10 | ||
24 | #define QL_VND_READ_I2C 0x11 | ||
25 | |||
26 | /* BSG Vendor specific subcode returns */ | ||
27 | #define EXT_STATUS_OK 0 | ||
28 | #define EXT_STATUS_ERR 1 | ||
29 | #define EXT_STATUS_BUSY 2 | ||
30 | #define EXT_STATUS_INVALID_PARAM 6 | ||
31 | #define EXT_STATUS_DATA_OVERRUN 7 | ||
32 | #define EXT_STATUS_DATA_UNDERRUN 8 | ||
33 | #define EXT_STATUS_MAILBOX 11 | ||
34 | #define EXT_STATUS_NO_MEMORY 17 | ||
35 | #define EXT_STATUS_DEVICE_OFFLINE 22 | ||
36 | |||
37 | /* | ||
38 | * To support bidirectional iocb | ||
39 | * BSG Vendor specific returns | ||
40 | */ | ||
41 | #define EXT_STATUS_NOT_SUPPORTED 27 | ||
42 | #define EXT_STATUS_INVALID_CFG 28 | ||
43 | #define EXT_STATUS_DMA_ERR 29 | ||
44 | #define EXT_STATUS_TIMEOUT 30 | ||
45 | #define EXT_STATUS_THREAD_FAILED 31 | ||
46 | #define EXT_STATUS_DATA_CMP_FAILED 32 | ||
47 | 19 | ||
48 | /* BSG definitions for interpreting CommandSent field */ | 20 | /* BSG definitions for interpreting CommandSent field */ |
49 | #define INT_DEF_LB_LOOPBACK_CMD 0 | 21 | #define INT_DEF_LB_LOOPBACK_CMD 0 |
50 | #define INT_DEF_LB_ECHO_CMD 1 | 22 | #define INT_DEF_LB_ECHO_CMD 1 |
51 | 23 | ||
52 | /* Loopback related definitions */ | 24 | /* Loopback related definitions */ |
53 | #define INTERNAL_LOOPBACK 0xF1 | ||
54 | #define EXTERNAL_LOOPBACK 0xF2 | 25 | #define EXTERNAL_LOOPBACK 0xF2 |
55 | #define ENABLE_INTERNAL_LOOPBACK 0x02 | 26 | #define ENABLE_INTERNAL_LOOPBACK 0x02 |
56 | #define ENABLE_EXTERNAL_LOOPBACK 0x04 | ||
57 | #define INTERNAL_LOOPBACK_MASK 0x000E | 27 | #define INTERNAL_LOOPBACK_MASK 0x000E |
58 | #define MAX_ELS_FRAME_PAYLOAD 252 | 28 | #define MAX_ELS_FRAME_PAYLOAD 252 |
59 | #define ELS_OPCODE_BYTE 0x10 | 29 | #define ELS_OPCODE_BYTE 0x10 |
@@ -171,44 +141,4 @@ struct qla_port_param { | |||
171 | uint16_t mode; | 141 | uint16_t mode; |
172 | uint16_t speed; | 142 | uint16_t speed; |
173 | } __attribute__ ((packed)); | 143 | } __attribute__ ((packed)); |
174 | |||
175 | |||
176 | /* FRU VPD */ | ||
177 | |||
178 | #define MAX_FRU_SIZE 36 | ||
179 | |||
180 | struct qla_field_address { | ||
181 | uint16_t offset; | ||
182 | uint16_t device; | ||
183 | uint16_t option; | ||
184 | } __packed; | ||
185 | |||
186 | struct qla_field_info { | ||
187 | uint8_t version[MAX_FRU_SIZE]; | ||
188 | } __packed; | ||
189 | |||
190 | struct qla_image_version { | ||
191 | struct qla_field_address field_address; | ||
192 | struct qla_field_info field_info; | ||
193 | } __packed; | ||
194 | |||
195 | struct qla_image_version_list { | ||
196 | uint32_t count; | ||
197 | struct qla_image_version version[0]; | ||
198 | } __packed; | ||
199 | |||
200 | struct qla_status_reg { | ||
201 | struct qla_field_address field_address; | ||
202 | uint8_t status_reg; | ||
203 | uint8_t reserved[7]; | ||
204 | } __packed; | ||
205 | |||
206 | struct qla_i2c_access { | ||
207 | uint16_t device; | ||
208 | uint16_t offset; | ||
209 | uint16_t option; | ||
210 | uint16_t length; | ||
211 | uint8_t buffer[0x40]; | ||
212 | } __packed; | ||
213 | |||
214 | #endif | 144 | #endif |
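The FRU structures deleted above use a count followed by a zero-length array, which the removed qla2x00_update_fru_versions() walked entry by entry. A userspace sketch of that layout and walk; the field names and sizes are simplified stand-ins:

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>
        #include <stdint.h>

        struct image_version { uint16_t offset, device, option; uint8_t version[36]; };
        struct image_version_list { uint32_t count; struct image_version version[]; };

        int main(void)
        {
                uint32_t n = 3;
                struct image_version_list *list =
                    malloc(sizeof(*list) + n * sizeof(struct image_version));

                list->count = n;
                memset(list->version, 0, n * sizeof(struct image_version));

                /* Same walk as the removed helper: image = list->version; while (count--) ... */
                struct image_version *image = list->version;
                uint32_t count = list->count;
                while (count--) {
                        printf("entry at byte offset %ld\n",
                            (long)((char *)image - (char *)list));
                        image++;
                }
                free(list);
                return 0;
        }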
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 53f9e492f9d..d79cd8a5f83 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -11,36 +11,20 @@ | |||
11 | * ---------------------------------------------------------------------- | 11 | * ---------------------------------------------------------------------- |
12 | * | Level | Last Value Used | Holes | | 12 | * | Level | Last Value Used | Holes | |
13 | * ---------------------------------------------------------------------- | 13 | * ---------------------------------------------------------------------- |
14 | * | Module Init and Probe | 0x0125 | 0x4b,0xba,0xfa | | 14 | * | Module Init and Probe | 0x0116 | | |
15 | * | Mailbox commands | 0x114f | 0x111a-0x111b | | 15 | * | Mailbox commands | 0x1126 | | |
16 | * | | | 0x112c-0x112e | | 16 | * | Device Discovery | 0x2083 | | |
17 | * | | | 0x113a | | 17 | * | Queue Command and IO tracing | 0x302e | 0x3008 | |
18 | * | Device Discovery | 0x2087 | 0x2020-0x2022, | | 18 | * | DPC Thread | 0x401c | | |
19 | * | | | 0x2016 | | 19 | * | Async Events | 0x5059 | | |
20 | * | Queue Command and IO tracing | 0x3030 | 0x3006-0x300b | | 20 | * | Timer Routines | 0x600d | | |
21 | * | | | 0x3027-0x3028 | | 21 | * | User Space Interactions | 0x709d | | |
22 | * | | | 0x302d-0x302e | | 22 | * | Task Management | 0x8041 | | |
23 | * | DPC Thread | 0x401d | 0x4002,0x4013 | | 23 | * | AER/EEH | 0x900f | | |
24 | * | Async Events | 0x5071 | 0x502b-0x502f | | ||
25 | * | | | 0x5047,0x5052 | | ||
26 | * | Timer Routines | 0x6011 | | | ||
27 | * | User Space Interactions | 0x70c3 | 0x7018,0x702e, | | ||
28 | * | | | 0x7039,0x7045, | | ||
29 | * | | | 0x7073-0x7075, | | ||
30 | * | | | 0x708c, | | ||
31 | * | | | 0x70a5,0x70a6, | | ||
32 | * | | | 0x70a8,0x70ab, | | ||
33 | * | | | 0x70ad-0x70ae | | ||
34 | * | Task Management | 0x803c | 0x8025-0x8026 | | ||
35 | * | | | 0x800b,0x8039 | | ||
36 | * | AER/EEH | 0x9011 | | | ||
37 | * | Virtual Port | 0xa007 | | | 24 | * | Virtual Port | 0xa007 | | |
38 | * | ISP82XX Specific | 0xb084 | 0xb002,0xb024 | | 25 | * | ISP82XX Specific | 0xb04f | | |
39 | * | MultiQ | 0xc00c | | | 26 | * | MultiQ | 0xc00b | | |
40 | * | Misc | 0xd010 | | | 27 | * | Misc | 0xd00b | | |
41 | * | Target Mode | 0xe06f | | | ||
42 | * | Target Mode Management | 0xf071 | | | ||
43 | * | Target Mode Task Management | 0x1000b | | | ||
44 | * ---------------------------------------------------------------------- | 28 | * ---------------------------------------------------------------------- |
45 | */ | 29 | */ |
46 | 30 | ||
@@ -98,7 +82,7 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, | |||
98 | WRT_REG_WORD(®->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED); | 82 | WRT_REG_WORD(®->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED); |
99 | clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); | 83 | clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); |
100 | 84 | ||
101 | dwords = qla2x00_gid_list_size(ha) / 4; | 85 | dwords = GID_LIST_SIZE / 4; |
102 | for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS; | 86 | for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS; |
103 | cnt += dwords, addr += dwords) { | 87 | cnt += dwords, addr += dwords) { |
104 | if (cnt + dwords > ram_dwords) | 88 | if (cnt + dwords > ram_dwords) |
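The dump loop above (and the 2xxx word-sized variant below) reads external RAM in fixed-size blocks and clamps the final block to whatever is left. A small userspace sketch of the same chunking, with made-up sizes:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t ram_dwords = 10;      /* total words to dump */
                uint32_t addr = 0x100000;      /* illustrative start address */
                uint32_t dwords = 4;           /* words per mailbox transfer */

                for (uint32_t cnt = 0; cnt < ram_dwords; cnt += dwords, addr += dwords) {
                        if (cnt + dwords > ram_dwords)
                                dwords = ram_dwords - cnt;   /* short final block */
                        printf("read %u words at %#x\n", dwords, addr);
                }
                /* reads 4 @ 0x100000, 4 @ 0x100004, 2 @ 0x100008 */
                return 0;
        }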
@@ -273,7 +257,7 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram, | |||
273 | WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED); | 257 | WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED); |
274 | clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); | 258 | clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); |
275 | 259 | ||
276 | words = qla2x00_gid_list_size(ha) / 2; | 260 | words = GID_LIST_SIZE / 2; |
277 | for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS; | 261 | for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS; |
278 | cnt += words, addr += words) { | 262 | cnt += words, addr += words) { |
279 | if (cnt + words > ram_words) | 263 | if (cnt + words > ram_words) |
@@ -384,126 +368,7 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) | |||
384 | 368 | ||
385 | memcpy(iter_reg, ha->fce, ntohl(fcec->size)); | 369 | memcpy(iter_reg, ha->fce, ntohl(fcec->size)); |
386 | 370 | ||
387 | return (char *)iter_reg + ntohl(fcec->size); | 371 | return iter_reg; |
388 | } | ||
389 | |||
390 | static inline void * | ||
391 | qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr, | ||
392 | uint32_t **last_chain) | ||
393 | { | ||
394 | struct qla2xxx_mqueue_chain *q; | ||
395 | struct qla2xxx_mqueue_header *qh; | ||
396 | uint32_t num_queues; | ||
397 | int que; | ||
398 | struct { | ||
399 | int length; | ||
400 | void *ring; | ||
401 | } aq, *aqp; | ||
402 | |||
403 | if (!ha->tgt.atio_q_length) | ||
404 | return ptr; | ||
405 | |||
406 | num_queues = 1; | ||
407 | aqp = &aq; | ||
408 | aqp->length = ha->tgt.atio_q_length; | ||
409 | aqp->ring = ha->tgt.atio_ring; | ||
410 | |||
411 | for (que = 0; que < num_queues; que++) { | ||
412 | /* aqp = ha->atio_q_map[que]; */ | ||
413 | q = ptr; | ||
414 | *last_chain = &q->type; | ||
415 | q->type = __constant_htonl(DUMP_CHAIN_QUEUE); | ||
416 | q->chain_size = htonl( | ||
417 | sizeof(struct qla2xxx_mqueue_chain) + | ||
418 | sizeof(struct qla2xxx_mqueue_header) + | ||
419 | (aqp->length * sizeof(request_t))); | ||
420 | ptr += sizeof(struct qla2xxx_mqueue_chain); | ||
421 | |||
422 | /* Add header. */ | ||
423 | qh = ptr; | ||
424 | qh->queue = __constant_htonl(TYPE_ATIO_QUEUE); | ||
425 | qh->number = htonl(que); | ||
426 | qh->size = htonl(aqp->length * sizeof(request_t)); | ||
427 | ptr += sizeof(struct qla2xxx_mqueue_header); | ||
428 | |||
429 | /* Add data. */ | ||
430 | memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t)); | ||
431 | |||
432 | ptr += aqp->length * sizeof(request_t); | ||
433 | } | ||
434 | |||
435 | return ptr; | ||
436 | } | ||
437 | |||
438 | static inline void * | ||
439 | qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) | ||
440 | { | ||
441 | struct qla2xxx_mqueue_chain *q; | ||
442 | struct qla2xxx_mqueue_header *qh; | ||
443 | struct req_que *req; | ||
444 | struct rsp_que *rsp; | ||
445 | int que; | ||
446 | |||
447 | if (!ha->mqenable) | ||
448 | return ptr; | ||
449 | |||
450 | /* Request queues */ | ||
451 | for (que = 1; que < ha->max_req_queues; que++) { | ||
452 | req = ha->req_q_map[que]; | ||
453 | if (!req) | ||
454 | break; | ||
455 | |||
456 | /* Add chain. */ | ||
457 | q = ptr; | ||
458 | *last_chain = &q->type; | ||
459 | q->type = __constant_htonl(DUMP_CHAIN_QUEUE); | ||
460 | q->chain_size = htonl( | ||
461 | sizeof(struct qla2xxx_mqueue_chain) + | ||
462 | sizeof(struct qla2xxx_mqueue_header) + | ||
463 | (req->length * sizeof(request_t))); | ||
464 | ptr += sizeof(struct qla2xxx_mqueue_chain); | ||
465 | |||
466 | /* Add header. */ | ||
467 | qh = ptr; | ||
468 | qh->queue = __constant_htonl(TYPE_REQUEST_QUEUE); | ||
469 | qh->number = htonl(que); | ||
470 | qh->size = htonl(req->length * sizeof(request_t)); | ||
471 | ptr += sizeof(struct qla2xxx_mqueue_header); | ||
472 | |||
473 | /* Add data. */ | ||
474 | memcpy(ptr, req->ring, req->length * sizeof(request_t)); | ||
475 | ptr += req->length * sizeof(request_t); | ||
476 | } | ||
477 | |||
478 | /* Response queues */ | ||
479 | for (que = 1; que < ha->max_rsp_queues; que++) { | ||
480 | rsp = ha->rsp_q_map[que]; | ||
481 | if (!rsp) | ||
482 | break; | ||
483 | |||
484 | /* Add chain. */ | ||
485 | q = ptr; | ||
486 | *last_chain = &q->type; | ||
487 | q->type = __constant_htonl(DUMP_CHAIN_QUEUE); | ||
488 | q->chain_size = htonl( | ||
489 | sizeof(struct qla2xxx_mqueue_chain) + | ||
490 | sizeof(struct qla2xxx_mqueue_header) + | ||
491 | (rsp->length * sizeof(response_t))); | ||
492 | ptr += sizeof(struct qla2xxx_mqueue_chain); | ||
493 | |||
494 | /* Add header. */ | ||
495 | qh = ptr; | ||
496 | qh->queue = __constant_htonl(TYPE_RESPONSE_QUEUE); | ||
497 | qh->number = htonl(que); | ||
498 | qh->size = htonl(rsp->length * sizeof(response_t)); | ||
499 | ptr += sizeof(struct qla2xxx_mqueue_header); | ||
500 | |||
501 | /* Add data. */ | ||
502 | memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t)); | ||
503 | ptr += rsp->length * sizeof(response_t); | ||
504 | } | ||
505 | |||
506 | return ptr; | ||
507 | } | 372 | } |
508 | 373 | ||
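The qla25xx_copy_mqueues() being removed here emits the same framing for the secondary request and response rings, and the caller ORs DUMP_CHAIN_LAST into the type of the final record. A hedged sketch of how a post-processing tool might walk those records, assuming only that chain_size covers the chain header plus its payload, as the queue helpers above arrange:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>			/* ntohl() */

#define DUMP_CHAIN_LAST		0x80000000u

struct dump_chain { uint32_t type, chain_size; };

/* Walk chain records until one carries the DUMP_CHAIN_LAST flag. */
static void walk_chains(const void *chain_area)
{
	const char *p = chain_area;

	for (;;) {
		const struct dump_chain *ch = (const struct dump_chain *)p;
		uint32_t type = ntohl(ch->type);
		uint32_t size = ntohl(ch->chain_size);

		printf("chain 0x%08x, %u bytes%s\n",
		       (unsigned)(type & ~DUMP_CHAIN_LAST), (unsigned)size,
		       (type & DUMP_CHAIN_LAST) ? " (last)" : "");
		if (type & DUMP_CHAIN_LAST)
			break;
		p += size;	/* chain_size covers header and payload */
	}
}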
509 | static inline void * | 374 | static inline void * |
@@ -514,7 +379,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) | |||
514 | struct qla2xxx_mq_chain *mq = ptr; | 379 | struct qla2xxx_mq_chain *mq = ptr; |
515 | struct device_reg_25xxmq __iomem *reg; | 380 | struct device_reg_25xxmq __iomem *reg; |
516 | 381 | ||
517 | if (!ha->mqenable || IS_QLA83XX(ha)) | 382 | if (!ha->mqenable) |
518 | return ptr; | 383 | return ptr; |
519 | 384 | ||
520 | mq = ptr; | 385 | mq = ptr; |
@@ -526,8 +391,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) | |||
526 | ha->max_req_queues : ha->max_rsp_queues; | 391 | ha->max_req_queues : ha->max_rsp_queues; |
527 | mq->count = htonl(que_cnt); | 392 | mq->count = htonl(que_cnt); |
528 | for (cnt = 0; cnt < que_cnt; cnt++) { | 393 | for (cnt = 0; cnt < que_cnt; cnt++) { |
529 | reg = (struct device_reg_25xxmq __iomem *) | 394 | reg = (struct device_reg_25xxmq *) ((void *) |
530 | (ha->mqiobase + cnt * QLA_QUE_PAGE); | 395 | ha->mqiobase + cnt * QLA_QUE_PAGE); |
531 | que_idx = cnt * 4; | 396 | que_idx = cnt * 4; |
532 | mq->qregs[que_idx] = htonl(RD_REG_DWORD(®->req_q_in)); | 397 | mq->qregs[que_idx] = htonl(RD_REG_DWORD(®->req_q_in)); |
533 | mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(®->req_q_out)); | 398 | mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(®->req_q_out)); |
@@ -538,7 +403,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) | |||
538 | return ptr + sizeof(struct qla2xxx_mq_chain); | 403 | return ptr + sizeof(struct qla2xxx_mq_chain); |
539 | } | 404 | } |
540 | 405 | ||
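The hunk above restores (left column) the __iomem annotation on the register cast so sparse can keep checking the MMIO access; the indexing itself is byte-based, stepping one QLA_QUE_PAGE per queue and capturing four 32-bit values per queue. A plain user-space model of that indexing, not driver code: the struct layout is modelled on the fields visible in the diff, the page stride is an assumption, and the driver additionally byte-swaps each value with htonl() and reads through RD_REG_DWORD():

#include <stdint.h>

#define QLA_QUE_PAGE	4096u	/* per-queue register page stride, assumed */

struct mq_regs {		/* modelled on device_reg_25xxmq */
	uint32_t req_q_in;
	uint32_t req_q_out;
	uint32_t rsp_q_in;
	uint32_t rsp_q_out;
};

static void capture_mq(uint32_t *out, const uint8_t *mqiobase,
		       unsigned int que_cnt)
{
	for (unsigned int cnt = 0; cnt < que_cnt; cnt++) {
		const struct mq_regs *reg =
			(const struct mq_regs *)(mqiobase + cnt * QLA_QUE_PAGE);

		out[cnt * 4 + 0] = reg->req_q_in;
		out[cnt * 4 + 1] = reg->req_q_out;
		out[cnt * 4 + 2] = reg->rsp_q_in;
		out[cnt * 4 + 3] = reg->rsp_q_out;
	}
}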
541 | void | 406 | static void |
542 | qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) | 407 | qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) |
543 | { | 408 | { |
544 | struct qla_hw_data *ha = vha->hw; | 409 | struct qla_hw_data *ha = vha->hw; |
@@ -930,8 +795,6 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
930 | struct qla24xx_fw_dump *fw; | 795 | struct qla24xx_fw_dump *fw; |
931 | uint32_t ext_mem_cnt; | 796 | uint32_t ext_mem_cnt; |
932 | void *nxt; | 797 | void *nxt; |
933 | void *nxt_chain; | ||
934 | uint32_t *last_chain = NULL; | ||
935 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); | 798 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); |
936 | 799 | ||
937 | if (IS_QLA82XX(ha)) | 800 | if (IS_QLA82XX(ha)) |
@@ -1150,16 +1013,6 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
1150 | 1013 | ||
1151 | qla24xx_copy_eft(ha, nxt); | 1014 | qla24xx_copy_eft(ha, nxt); |
1152 | 1015 | ||
1153 | nxt_chain = (void *)ha->fw_dump + ha->chain_offset; | ||
1154 | nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); | ||
1155 | if (last_chain) { | ||
1156 | ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); | ||
1157 | *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); | ||
1158 | } | ||
1159 | |||
1160 | /* Adjust valid length. */ | ||
1161 | ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); | ||
1162 | |||
1163 | qla24xx_fw_dump_failed_0: | 1016 | qla24xx_fw_dump_failed_0: |
1164 | qla2xxx_dump_post_process(base_vha, rval); | 1017 | qla2xxx_dump_post_process(base_vha, rval); |
1165 | 1018 | ||
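The block removed in this hunk finalizes the chain area: it ORs DUMP_CHAIN_VARIANT into the (big-endian) dump version, flags the last chain record with DUMP_CHAIN_LAST, and recomputes the valid dump length as the distance from the start of the buffer to the write cursor. A small sketch of that finalization with hypothetical names:

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>			/* htonl() */

#define DUMP_CHAIN_VARIANT	0x80000000u
#define DUMP_CHAIN_LAST		0x80000000u

/* Mark the final chain record and return the valid dump length in bytes. */
static size_t finalize_dump(void *dump_start, void *write_cursor,
			    uint32_t *version_be, uint32_t *last_chain_type_be)
{
	if (last_chain_type_be) {
		*version_be         |= htonl(DUMP_CHAIN_VARIANT);
		*last_chain_type_be |= htonl(DUMP_CHAIN_LAST);
	}
	return (size_t)((char *)write_cursor - (char *)dump_start);
}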
@@ -1466,17 +1319,12 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
1466 | nxt = qla24xx_copy_eft(ha, nxt); | 1319 | nxt = qla24xx_copy_eft(ha, nxt); |
1467 | 1320 | ||
1468 | /* Chain entries -- started with MQ. */ | 1321 | /* Chain entries -- started with MQ. */ |
1469 | nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); | 1322 | qla25xx_copy_fce(ha, nxt_chain, &last_chain); |
1470 | nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); | ||
1471 | nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); | ||
1472 | if (last_chain) { | 1323 | if (last_chain) { |
1473 | ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); | 1324 | ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); |
1474 | *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); | 1325 | *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); |
1475 | } | 1326 | } |
1476 | 1327 | ||
1477 | /* Adjust valid length. */ | ||
1478 | ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); | ||
1479 | |||
1480 | qla25xx_fw_dump_failed_0: | 1328 | qla25xx_fw_dump_failed_0: |
1481 | qla2xxx_dump_post_process(base_vha, rval); | 1329 | qla2xxx_dump_post_process(base_vha, rval); |
1482 | 1330 | ||
@@ -1785,17 +1633,12 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
1785 | nxt = qla24xx_copy_eft(ha, nxt); | 1633 | nxt = qla24xx_copy_eft(ha, nxt); |
1786 | 1634 | ||
1787 | /* Chain entries -- started with MQ. */ | 1635 | /* Chain entries -- started with MQ. */ |
1788 | nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); | 1636 | qla25xx_copy_fce(ha, nxt_chain, &last_chain); |
1789 | nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); | ||
1790 | nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); | ||
1791 | if (last_chain) { | 1637 | if (last_chain) { |
1792 | ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); | 1638 | ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); |
1793 | *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); | 1639 | *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); |
1794 | } | 1640 | } |
1795 | 1641 | ||
1796 | /* Adjust valid length. */ | ||
1797 | ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); | ||
1798 | |||
1799 | qla81xx_fw_dump_failed_0: | 1642 | qla81xx_fw_dump_failed_0: |
1800 | qla2xxx_dump_post_process(base_vha, rval); | 1643 | qla2xxx_dump_post_process(base_vha, rval); |
1801 | 1644 | ||
@@ -1804,520 +1647,9 @@ qla81xx_fw_dump_failed: | |||
1804 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1647 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
1805 | } | 1648 | } |
1806 | 1649 | ||
1807 | void | ||
1808 | qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | ||
1809 | { | ||
1810 | int rval; | ||
1811 | uint32_t cnt, reg_data; | ||
1812 | uint32_t risc_address; | ||
1813 | struct qla_hw_data *ha = vha->hw; | ||
1814 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | ||
1815 | uint32_t __iomem *dmp_reg; | ||
1816 | uint32_t *iter_reg; | ||
1817 | uint16_t __iomem *mbx_reg; | ||
1818 | unsigned long flags; | ||
1819 | struct qla83xx_fw_dump *fw; | ||
1820 | uint32_t ext_mem_cnt; | ||
1821 | void *nxt, *nxt_chain; | ||
1822 | uint32_t *last_chain = NULL; | ||
1823 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); | ||
1824 | |||
1825 | risc_address = ext_mem_cnt = 0; | ||
1826 | flags = 0; | ||
1827 | |||
1828 | if (!hardware_locked) | ||
1829 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
1830 | |||
1831 | if (!ha->fw_dump) { | ||
1832 | ql_log(ql_log_warn, vha, 0xd00c, | ||
1833 | "No buffer available for dump!!!\n"); | ||
1834 | goto qla83xx_fw_dump_failed; | ||
1835 | } | ||
1836 | |||
1837 | if (ha->fw_dumped) { | ||
1838 | ql_log(ql_log_warn, vha, 0xd00d, | ||
1839 | "Firmware has been previously dumped (%p) -- ignoring " | ||
1840 | "request...\n", ha->fw_dump); | ||
1841 | goto qla83xx_fw_dump_failed; | ||
1842 | } | ||
1843 | fw = &ha->fw_dump->isp.isp83; | ||
1844 | qla2xxx_prep_dump(ha, ha->fw_dump); | ||
1845 | |||
1846 | fw->host_status = htonl(RD_REG_DWORD(®->host_status)); | ||
1847 | |||
1848 | /* Pause RISC. */ | ||
1849 | rval = qla24xx_pause_risc(reg); | ||
1850 | if (rval != QLA_SUCCESS) | ||
1851 | goto qla83xx_fw_dump_failed_0; | ||
1852 | |||
1853 | WRT_REG_DWORD(®->iobase_addr, 0x6000); | ||
1854 | dmp_reg = ®->iobase_window; | ||
1855 | reg_data = RD_REG_DWORD(dmp_reg); | ||
1856 | WRT_REG_DWORD(dmp_reg, 0); | ||
1857 | |||
1858 | dmp_reg = ®->unused_4_1[0]; | ||
1859 | reg_data = RD_REG_DWORD(dmp_reg); | ||
1860 | WRT_REG_DWORD(dmp_reg, 0); | ||
1861 | |||
1862 | WRT_REG_DWORD(®->iobase_addr, 0x6010); | ||
1863 | dmp_reg = ®->unused_4_1[2]; | ||
1864 | reg_data = RD_REG_DWORD(dmp_reg); | ||
1865 | WRT_REG_DWORD(dmp_reg, 0); | ||
1866 | |||
1867 | /* select PCR and disable ecc checking and correction */ | ||
1868 | WRT_REG_DWORD(®->iobase_addr, 0x0F70); | ||
1869 | RD_REG_DWORD(®->iobase_addr); | ||
1870 | WRT_REG_DWORD(®->iobase_select, 0x60000000); /* write to F0h = PCR */ | ||
1871 | |||
1872 | /* Host/Risc registers. */ | ||
1873 | iter_reg = fw->host_risc_reg; | ||
1874 | iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg); | ||
1875 | iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg); | ||
1876 | qla24xx_read_window(reg, 0x7040, 16, iter_reg); | ||
1877 | |||
1878 | /* PCIe registers. */ | ||
1879 | WRT_REG_DWORD(®->iobase_addr, 0x7C00); | ||
1880 | RD_REG_DWORD(®->iobase_addr); | ||
1881 | WRT_REG_DWORD(®->iobase_window, 0x01); | ||
1882 | dmp_reg = ®->iobase_c4; | ||
1883 | fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++)); | ||
1884 | fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++)); | ||
1885 | fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); | ||
1886 | fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window)); | ||
1887 | |||
1888 | WRT_REG_DWORD(®->iobase_window, 0x00); | ||
1889 | RD_REG_DWORD(®->iobase_window); | ||
1890 | |||
1891 | /* Host interface registers. */ | ||
1892 | dmp_reg = ®->flash_addr; | ||
1893 | for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) | ||
1894 | fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); | ||
1895 | |||
1896 | /* Disable interrupts. */ | ||
1897 | WRT_REG_DWORD(®->ictrl, 0); | ||
1898 | RD_REG_DWORD(®->ictrl); | ||
1899 | |||
1900 | /* Shadow registers. */ | ||
1901 | WRT_REG_DWORD(®->iobase_addr, 0x0F70); | ||
1902 | RD_REG_DWORD(®->iobase_addr); | ||
1903 | WRT_REG_DWORD(®->iobase_select, 0xB0000000); | ||
1904 | fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); | ||
1905 | |||
1906 | WRT_REG_DWORD(®->iobase_select, 0xB0100000); | ||
1907 | fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); | ||
1908 | |||
1909 | WRT_REG_DWORD(®->iobase_select, 0xB0200000); | ||
1910 | fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); | ||
1911 | |||
1912 | WRT_REG_DWORD(®->iobase_select, 0xB0300000); | ||
1913 | fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); | ||
1914 | |||
1915 | WRT_REG_DWORD(®->iobase_select, 0xB0400000); | ||
1916 | fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); | ||
1917 | |||
1918 | WRT_REG_DWORD(®->iobase_select, 0xB0500000); | ||
1919 | fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); | ||
1920 | |||
1921 | WRT_REG_DWORD(®->iobase_select, 0xB0600000); | ||
1922 | fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); | ||
1923 | |||
1924 | WRT_REG_DWORD(®->iobase_select, 0xB0700000); | ||
1925 | fw->shadow_reg[7] = htonl(RD_REG_DWORD(®->iobase_sdata)); | ||
1926 | |||
1927 | WRT_REG_DWORD(®->iobase_select, 0xB0800000); | ||
1928 | fw->shadow_reg[8] = htonl(RD_REG_DWORD(®->iobase_sdata)); | ||
1929 | |||
1930 | WRT_REG_DWORD(®->iobase_select, 0xB0900000); | ||
1931 | fw->shadow_reg[9] = htonl(RD_REG_DWORD(®->iobase_sdata)); | ||
1932 | |||
1933 | WRT_REG_DWORD(®->iobase_select, 0xB0A00000); | ||
1934 | fw->shadow_reg[10] = htonl(RD_REG_DWORD(®->iobase_sdata)); | ||
1935 | |||
1936 | /* RISC I/O register. */ | ||
1937 | WRT_REG_DWORD(®->iobase_addr, 0x0010); | ||
1938 | fw->risc_io_reg = htonl(RD_REG_DWORD(®->iobase_window)); | ||
1939 | |||
1940 | /* Mailbox registers. */ | ||
1941 | mbx_reg = ®->mailbox0; | ||
1942 | for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) | ||
1943 | fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); | ||
1944 | |||
1945 | /* Transfer sequence registers. */ | ||
1946 | iter_reg = fw->xseq_gp_reg; | ||
1947 | iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg); | ||
1948 | iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg); | ||
1949 | iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg); | ||
1950 | iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg); | ||
1951 | iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg); | ||
1952 | iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg); | ||
1953 | iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg); | ||
1954 | iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg); | ||
1955 | iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); | ||
1956 | iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); | ||
1957 | iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); | ||
1958 | iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); | ||
1959 | iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); | ||
1960 | iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); | ||
1961 | iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); | ||
1962 | qla24xx_read_window(reg, 0xBF70, 16, iter_reg); | ||
1963 | |||
1964 | iter_reg = fw->xseq_0_reg; | ||
1965 | iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg); | ||
1966 | iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg); | ||
1967 | qla24xx_read_window(reg, 0xBFE0, 16, iter_reg); | ||
1968 | |||
1969 | qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); | ||
1970 | |||
1971 | qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg); | ||
1972 | |||
1973 | /* Receive sequence registers. */ | ||
1974 | iter_reg = fw->rseq_gp_reg; | ||
1975 | iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg); | ||
1976 | iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg); | ||
1977 | iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg); | ||
1978 | iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg); | ||
1979 | iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg); | ||
1980 | iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg); | ||
1981 | iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg); | ||
1982 | iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg); | ||
1983 | iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); | ||
1984 | iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); | ||
1985 | iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); | ||
1986 | iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); | ||
1987 | iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); | ||
1988 | iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); | ||
1989 | iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); | ||
1990 | qla24xx_read_window(reg, 0xFF70, 16, iter_reg); | ||
1991 | |||
1992 | iter_reg = fw->rseq_0_reg; | ||
1993 | iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); | ||
1994 | qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); | ||
1995 | |||
1996 | qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); | ||
1997 | qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); | ||
1998 | qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg); | ||
1999 | |||
2000 | /* Auxiliary sequence registers. */ | ||
2001 | iter_reg = fw->aseq_gp_reg; | ||
2002 | iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); | ||
2003 | iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); | ||
2004 | iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); | ||
2005 | iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); | ||
2006 | iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); | ||
2007 | iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); | ||
2008 | iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); | ||
2009 | iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg); | ||
2010 | iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg); | ||
2011 | iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg); | ||
2012 | iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg); | ||
2013 | iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg); | ||
2014 | iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg); | ||
2015 | iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg); | ||
2016 | iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg); | ||
2017 | qla24xx_read_window(reg, 0xB170, 16, iter_reg); | ||
2018 | |||
2019 | iter_reg = fw->aseq_0_reg; | ||
2020 | iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); | ||
2021 | qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); | ||
2022 | |||
2023 | qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); | ||
2024 | qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); | ||
2025 | qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg); | ||
2026 | |||
2027 | /* Command DMA registers. */ | ||
2028 | iter_reg = fw->cmd_dma_reg; | ||
2029 | iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg); | ||
2030 | iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg); | ||
2031 | iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg); | ||
2032 | qla24xx_read_window(reg, 0x71F0, 16, iter_reg); | ||
2033 | |||
2034 | /* Queues. */ | ||
2035 | iter_reg = fw->req0_dma_reg; | ||
2036 | iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); | ||
2037 | dmp_reg = ®->iobase_q; | ||
2038 | for (cnt = 0; cnt < 7; cnt++) | ||
2039 | *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); | ||
2040 | |||
2041 | iter_reg = fw->resp0_dma_reg; | ||
2042 | iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); | ||
2043 | dmp_reg = ®->iobase_q; | ||
2044 | for (cnt = 0; cnt < 7; cnt++) | ||
2045 | *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); | ||
2046 | |||
2047 | iter_reg = fw->req1_dma_reg; | ||
2048 | iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); | ||
2049 | dmp_reg = ®->iobase_q; | ||
2050 | for (cnt = 0; cnt < 7; cnt++) | ||
2051 | *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); | ||
2052 | |||
2053 | /* Transmit DMA registers. */ | ||
2054 | iter_reg = fw->xmt0_dma_reg; | ||
2055 | iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); | ||
2056 | qla24xx_read_window(reg, 0x7610, 16, iter_reg); | ||
2057 | |||
2058 | iter_reg = fw->xmt1_dma_reg; | ||
2059 | iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); | ||
2060 | qla24xx_read_window(reg, 0x7630, 16, iter_reg); | ||
2061 | |||
2062 | iter_reg = fw->xmt2_dma_reg; | ||
2063 | iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); | ||
2064 | qla24xx_read_window(reg, 0x7650, 16, iter_reg); | ||
2065 | |||
2066 | iter_reg = fw->xmt3_dma_reg; | ||
2067 | iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); | ||
2068 | qla24xx_read_window(reg, 0x7670, 16, iter_reg); | ||
2069 | |||
2070 | iter_reg = fw->xmt4_dma_reg; | ||
2071 | iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); | ||
2072 | qla24xx_read_window(reg, 0x7690, 16, iter_reg); | ||
2073 | |||
2074 | qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); | ||
2075 | |||
2076 | /* Receive DMA registers. */ | ||
2077 | iter_reg = fw->rcvt0_data_dma_reg; | ||
2078 | iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); | ||
2079 | qla24xx_read_window(reg, 0x7710, 16, iter_reg); | ||
2080 | |||
2081 | iter_reg = fw->rcvt1_data_dma_reg; | ||
2082 | iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); | ||
2083 | qla24xx_read_window(reg, 0x7730, 16, iter_reg); | ||
2084 | |||
2085 | /* RISC registers. */ | ||
2086 | iter_reg = fw->risc_gp_reg; | ||
2087 | iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); | ||
2088 | iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); | ||
2089 | iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); | ||
2090 | iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); | ||
2091 | iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); | ||
2092 | iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); | ||
2093 | iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); | ||
2094 | qla24xx_read_window(reg, 0x0F70, 16, iter_reg); | ||
2095 | |||
2096 | /* Local memory controller registers. */ | ||
2097 | iter_reg = fw->lmc_reg; | ||
2098 | iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); | ||
2099 | iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); | ||
2100 | iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); | ||
2101 | iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); | ||
2102 | iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); | ||
2103 | iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); | ||
2104 | iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); | ||
2105 | qla24xx_read_window(reg, 0x3070, 16, iter_reg); | ||
2106 | |||
2107 | /* Fibre Protocol Module registers. */ | ||
2108 | iter_reg = fw->fpm_hdw_reg; | ||
2109 | iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); | ||
2110 | iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); | ||
2111 | iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); | ||
2112 | iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); | ||
2113 | iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); | ||
2114 | iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); | ||
2115 | iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); | ||
2116 | iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); | ||
2117 | iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); | ||
2118 | iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); | ||
2119 | iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); | ||
2120 | iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg); | ||
2121 | iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg); | ||
2122 | iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg); | ||
2123 | iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg); | ||
2124 | qla24xx_read_window(reg, 0x40F0, 16, iter_reg); | ||
2125 | |||
2126 | /* RQ0 Array registers. */ | ||
2127 | iter_reg = fw->rq0_array_reg; | ||
2128 | iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg); | ||
2129 | iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg); | ||
2130 | iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg); | ||
2131 | iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg); | ||
2132 | iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg); | ||
2133 | iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg); | ||
2134 | iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg); | ||
2135 | iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg); | ||
2136 | iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg); | ||
2137 | iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg); | ||
2138 | iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg); | ||
2139 | iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg); | ||
2140 | iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg); | ||
2141 | iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg); | ||
2142 | iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg); | ||
2143 | qla24xx_read_window(reg, 0x5CF0, 16, iter_reg); | ||
2144 | |||
2145 | /* RQ1 Array registers. */ | ||
2146 | iter_reg = fw->rq1_array_reg; | ||
2147 | iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg); | ||
2148 | iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg); | ||
2149 | iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg); | ||
2150 | iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg); | ||
2151 | iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg); | ||
2152 | iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg); | ||
2153 | iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg); | ||
2154 | iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg); | ||
2155 | iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg); | ||
2156 | iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg); | ||
2157 | iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg); | ||
2158 | iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg); | ||
2159 | iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg); | ||
2160 | iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg); | ||
2161 | iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg); | ||
2162 | qla24xx_read_window(reg, 0x5DF0, 16, iter_reg); | ||
2163 | |||
2164 | /* RP0 Array registers. */ | ||
2165 | iter_reg = fw->rp0_array_reg; | ||
2166 | iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg); | ||
2167 | iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg); | ||
2168 | iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg); | ||
2169 | iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg); | ||
2170 | iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg); | ||
2171 | iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg); | ||
2172 | iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg); | ||
2173 | iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg); | ||
2174 | iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg); | ||
2175 | iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg); | ||
2176 | iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg); | ||
2177 | iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg); | ||
2178 | iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg); | ||
2179 | iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg); | ||
2180 | iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg); | ||
2181 | qla24xx_read_window(reg, 0x5EF0, 16, iter_reg); | ||
2182 | |||
2183 | /* RP1 Array registers. */ | ||
2184 | iter_reg = fw->rp1_array_reg; | ||
2185 | iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg); | ||
2186 | iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg); | ||
2187 | iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg); | ||
2188 | iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg); | ||
2189 | iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg); | ||
2190 | iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg); | ||
2191 | iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg); | ||
2192 | iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg); | ||
2193 | iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg); | ||
2194 | iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg); | ||
2195 | iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg); | ||
2196 | iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg); | ||
2197 | iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg); | ||
2198 | iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg); | ||
2199 | iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg); | ||
2200 | qla24xx_read_window(reg, 0x5FF0, 16, iter_reg); | ||
2201 | |||
2202 | iter_reg = fw->at0_array_reg; | ||
2203 | iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg); | ||
2204 | iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg); | ||
2205 | iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg); | ||
2206 | iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg); | ||
2207 | iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg); | ||
2208 | iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg); | ||
2209 | iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg); | ||
2210 | qla24xx_read_window(reg, 0x70F0, 16, iter_reg); | ||
2211 | |||
2212 | /* I/O Queue Control registers. */ | ||
2213 | qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg); | ||
2214 | |||
2215 | /* Frame Buffer registers. */ | ||
2216 | iter_reg = fw->fb_hdw_reg; | ||
2217 | iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); | ||
2218 | iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); | ||
2219 | iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); | ||
2220 | iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); | ||
2221 | iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); | ||
2222 | iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg); | ||
2223 | iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg); | ||
2224 | iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); | ||
2225 | iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); | ||
2226 | iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); | ||
2227 | iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); | ||
2228 | iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); | ||
2229 | iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); | ||
2230 | iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg); | ||
2231 | iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg); | ||
2232 | iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg); | ||
2233 | iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg); | ||
2234 | iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg); | ||
2235 | iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg); | ||
2236 | iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg); | ||
2237 | iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg); | ||
2238 | iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg); | ||
2239 | iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg); | ||
2240 | iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg); | ||
2241 | iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg); | ||
2242 | iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg); | ||
2243 | qla24xx_read_window(reg, 0x6F00, 16, iter_reg); | ||
2244 | |||
2245 | /* Multi queue registers */ | ||
2246 | nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, | ||
2247 | &last_chain); | ||
2248 | |||
2249 | rval = qla24xx_soft_reset(ha); | ||
2250 | if (rval != QLA_SUCCESS) { | ||
2251 | ql_log(ql_log_warn, vha, 0xd00e, | ||
2252 | "SOFT RESET FAILED, forcing continuation of dump!!!\n"); | ||
2253 | rval = QLA_SUCCESS; | ||
2254 | |||
2255 | ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n"); | ||
2256 | |||
2257 | WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET); | ||
2258 | RD_REG_DWORD(®->hccr); | ||
2259 | |||
2260 | WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE); | ||
2261 | RD_REG_DWORD(®->hccr); | ||
2262 | |||
2263 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_RESET); | ||
2264 | RD_REG_DWORD(®->hccr); | ||
2265 | |||
2266 | for (cnt = 30000; cnt && (RD_REG_WORD(®->mailbox0)); cnt--) | ||
2267 | udelay(5); | ||
2268 | |||
2269 | if (!cnt) { | ||
2270 | nxt = fw->code_ram; | ||
2271 | nxt += sizeof(fw->code_ram); | ||
2272 | nxt += (ha->fw_memory_size - 0x100000 + 1); | ||
2273 | goto copy_queue; | ||
2274 | } else | ||
2275 | ql_log(ql_log_warn, vha, 0xd010, | ||
2276 | "bigger hammer success?\n"); | ||
2277 | } | ||
2278 | |||
2279 | rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), | ||
2280 | &nxt); | ||
2281 | if (rval != QLA_SUCCESS) | ||
2282 | goto qla83xx_fw_dump_failed_0; | ||
2283 | |||
2284 | copy_queue: | ||
2285 | nxt = qla2xxx_copy_queues(ha, nxt); | ||
2286 | |||
2287 | nxt = qla24xx_copy_eft(ha, nxt); | ||
2288 | |||
2289 | /* Chain entries -- started with MQ. */ | ||
2290 | nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); | ||
2291 | nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); | ||
2292 | nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); | ||
2293 | if (last_chain) { | ||
2294 | ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); | ||
2295 | *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); | ||
2296 | } | ||
2297 | |||
2298 | /* Adjust valid length. */ | ||
2299 | ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); | ||
2300 | |||
2301 | qla83xx_fw_dump_failed_0: | ||
2302 | qla2xxx_dump_post_process(base_vha, rval); | ||
2303 | |||
2304 | qla83xx_fw_dump_failed: | ||
2305 | if (!hardware_locked) | ||
2306 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
2307 | } | ||
2308 | |||
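Most of qla83xx_fw_dump() above is repeated windowed register capture: qla24xx_read_window() presumably selects a register window by writing its base address and then copies a fixed count of 32-bit values into the dump, stored big-endian. A hedged user-space model of that loop; the real driver goes through WRT_REG_DWORD/RD_REG_DWORD on __iomem pointers, and these function names are stand-ins:

#include <stdint.h>
#include <arpa/inet.h>			/* htonl() */

/* Stand-ins for the MMIO accessors used by the driver. */
static void     mmio_write32(volatile uint32_t *reg, uint32_t val) { *reg = val; }
static uint32_t mmio_read32(volatile uint32_t *reg)                { return *reg; }

/*
 * Select the register window at 'base', then capture 'count' dwords from the
 * window data registers into the dump buffer (big-endian), returning the
 * advanced buffer pointer so captures can be chained as in the dump above.
 */
static uint32_t *read_window(volatile uint32_t *iobase_addr,
			     volatile uint32_t *window_data,
			     uint32_t base, int count, uint32_t *buf)
{
	mmio_write32(iobase_addr, base);
	for (int i = 0; i < count; i++)
		*buf++ = htonl(mmio_read32(window_data + i));
	return buf;
}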
2309 | /****************************************************************************/ | 1650 | /****************************************************************************/ |
2310 | /* Driver Debug Functions. */ | 1651 | /* Driver Debug Functions. */ |
2311 | /****************************************************************************/ | 1652 | /****************************************************************************/ |
2312 | |||
2313 | static inline int | ||
2314 | ql_mask_match(uint32_t level) | ||
2315 | { | ||
2316 | if (ql2xextended_error_logging == 1) | ||
2317 | ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; | ||
2318 | return (level & ql2xextended_error_logging) == level; | ||
2319 | } | ||
2320 | |||
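ql_mask_match() treats the level as a set of category bits: a message is emitted only when every bit of its level is also set in ql2xextended_error_logging, and the magic module-parameter value 1 expands to QL_DBG_DEFAULT1_MASK. A small standalone illustration using the mask values defined in qla_dbg.h:

#include <stdint.h>
#include <stdio.h>

#define QL_DBG_DEFAULT1_MASK	0x1e400000
#define ql_dbg_misc		0x00010000
#define ql_dbg_verbose		0x00008000

static uint32_t logging_mask = ql_dbg_misc;	/* stands in for ql2xextended_error_logging */

static int mask_match(uint32_t level)
{
	if (logging_mask == 1)			/* "1" means: use the default mask */
		logging_mask = QL_DBG_DEFAULT1_MASK;
	return (level & logging_mask) == level;
}

int main(void)
{
	printf("%d\n", mask_match(ql_dbg_misc));		   /* 1: its only bit is in the mask */
	printf("%d\n", mask_match(ql_dbg_misc | ql_dbg_verbose)); /* 0: ql_dbg_verbose is not enabled */
	return 0;
}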
2321 | /* | 1653 | /* |
2322 | * This function is for formatting and logging debug information. | 1654 | * This function is for formatting and logging debug information. |
2323 | * It is to be used when vha is available. It formats the message | 1655 | * It is to be used when vha is available. It formats the message |
@@ -2332,37 +1664,40 @@ ql_mask_match(uint32_t level) | |||
2332 | * msg: The message to be displayed. | 1664 | * msg: The message to be displayed. |
2333 | */ | 1665 | */ |
2334 | void | 1666 | void |
2335 | ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...) | 1667 | ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) { |
2336 | { | ||
2337 | va_list va; | ||
2338 | struct va_format vaf; | ||
2339 | 1668 | ||
2340 | if (!ql_mask_match(level)) | 1669 | char pbuf[QL_DBG_BUF_LEN]; |
2341 | return; | 1670 | va_list ap; |
1671 | uint32_t len; | ||
1672 | struct pci_dev *pdev = NULL; | ||
2342 | 1673 | ||
2343 | va_start(va, fmt); | 1674 | memset(pbuf, 0, QL_DBG_BUF_LEN); |
2344 | 1675 | ||
2345 | vaf.fmt = fmt; | 1676 | va_start(ap, msg); |
2346 | vaf.va = &va; | ||
2347 | 1677 | ||
2348 | if (vha != NULL) { | 1678 | if ((level & ql2xextended_error_logging) == level) { |
2349 | const struct pci_dev *pdev = vha->hw->pdev; | 1679 | if (vha != NULL) { |
2350 | /* <module-name> <pci-name> <msg-id>:<host> Message */ | 1680 | pdev = vha->hw->pdev; |
2351 | pr_warn("%s [%s]-%04x:%ld: %pV", | 1681 | /* <module-name> <pci-name> <msg-id>:<host> Message */ |
2352 | QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, | 1682 | sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR, |
2353 | vha->host_no, &vaf); | 1683 | dev_name(&(pdev->dev)), id + ql_dbg_offset, |
2354 | } else { | 1684 | vha->host_no); |
2355 | pr_warn("%s [%s]-%04x: : %pV", | 1685 | } else |
2356 | QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf); | 1686 | sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR, |
1687 | "0000:00:00.0", id + ql_dbg_offset); | ||
1688 | |||
1689 | len = strlen(pbuf); | ||
1690 | vsprintf(pbuf+len, msg, ap); | ||
1691 | pr_warning("%s", pbuf); | ||
2357 | } | 1692 | } |
2358 | 1693 | ||
2359 | va_end(va); | 1694 | va_end(ap); |
2360 | 1695 | ||
2361 | } | 1696 | } |
2362 | 1697 | ||
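The restored ql_dbg() (right column) formats into a fixed 512-byte stack buffer with sprintf()/vsprintf(), while the version being removed (left column) hands the va_list straight to printk through struct va_format and the %pV specifier, avoiding the intermediate buffer. A user-space sketch of the buffered approach with bounds checking added (snprintf/vsnprintf instead of the unbounded calls), just to show the prefix-then-message layout; the names are illustrative:

#include <stdarg.h>
#include <stdio.h>

#define QL_MSGHDR	"qla2xxx"
#define BUF_LEN		512

/* Illustrative only: prefix "<module> [<dev>]-<id>: " and append the message. */
static void dbg_printf(const char *dev_name, unsigned int id,
		       const char *fmt, ...)
{
	char pbuf[BUF_LEN];
	size_t len;
	va_list ap;

	len = (size_t)snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: ",
			       QL_MSGHDR, dev_name, id);
	if (len >= sizeof(pbuf))
		len = sizeof(pbuf) - 1;

	va_start(ap, fmt);
	vsnprintf(pbuf + len, sizeof(pbuf) - len, fmt, ap);
	va_end(ap);

	fprintf(stderr, "%s", pbuf);
}

A call such as dbg_printf("0000:05:00.0", 0x1234, "mbox[%d] 0x%04x\n", 0, 0x4953) then produces one prefixed line, mirroring the "<module-name> <pci-name> <msg-id>:<host> Message" comment above.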
2363 | /* | 1698 | /* |
2364 | * This function is for formatting and logging debug information. | 1699 | * This function is for formatting and logging debug information. |
2365 | * It is to be used when vha is not available and pci is available, | 1700 | * It is to be used when vha is not available and pci is available, |
2366 | * i.e., before host allocation. It formats the message and logs it | 1701 | * i.e., before host allocation. It formats the message and logs it |
2367 | * to the messages file. | 1702 | * to the messages file. |
2368 | * parameters: | 1703 | * parameters: |
@@ -2375,27 +1710,31 @@ ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...) | |||
2375 | * msg: The message to be displayed. | 1710 | * msg: The message to be displayed. |
2376 | */ | 1711 | */ |
2377 | void | 1712 | void |
2378 | ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, | 1713 | ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) { |
2379 | const char *fmt, ...) | 1714 | |
2380 | { | 1715 | char pbuf[QL_DBG_BUF_LEN]; |
2381 | va_list va; | 1716 | va_list ap; |
2382 | struct va_format vaf; | 1717 | uint32_t len; |
2383 | 1718 | ||
2384 | if (pdev == NULL) | 1719 | if (pdev == NULL) |
2385 | return; | 1720 | return; |
2386 | if (!ql_mask_match(level)) | ||
2387 | return; | ||
2388 | 1721 | ||
2389 | va_start(va, fmt); | 1722 | memset(pbuf, 0, QL_DBG_BUF_LEN); |
2390 | 1723 | ||
2391 | vaf.fmt = fmt; | 1724 | va_start(ap, msg); |
2392 | vaf.va = &va; | 1725 | |
1726 | if ((level & ql2xextended_error_logging) == level) { | ||
1727 | /* <module-name> <dev-name>:<msg-id> Message */ | ||
1728 | sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR, | ||
1729 | dev_name(&(pdev->dev)), id + ql_dbg_offset); | ||
1730 | |||
1731 | len = strlen(pbuf); | ||
1732 | vsprintf(pbuf+len, msg, ap); | ||
1733 | pr_warning("%s", pbuf); | ||
1734 | } | ||
2393 | 1735 | ||
2394 | /* <module-name> <dev-name>:<msg-id> Message */ | 1736 | va_end(ap); |
2395 | pr_warn("%s [%s]-%04x: : %pV", | ||
2396 | QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf); | ||
2397 | 1737 | ||
2398 | va_end(va); | ||
2399 | } | 1738 | } |
2400 | 1739 | ||
2401 | /* | 1740 | /* |
@@ -2412,52 +1751,52 @@ ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, | |||
2412 | * msg: The message to be displayed. | 1751 | * msg: The message to be displayed. |
2413 | */ | 1752 | */ |
2414 | void | 1753 | void |
2415 | ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...) | 1754 | ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) { |
2416 | { | ||
2417 | va_list va; | ||
2418 | struct va_format vaf; | ||
2419 | char pbuf[128]; | ||
2420 | 1755 | ||
2421 | if (level > ql_errlev) | 1756 | char pbuf[QL_DBG_BUF_LEN]; |
2422 | return; | 1757 | va_list ap; |
1758 | uint32_t len; | ||
1759 | struct pci_dev *pdev = NULL; | ||
2423 | 1760 | ||
2424 | if (vha != NULL) { | 1761 | memset(pbuf, 0, QL_DBG_BUF_LEN); |
2425 | const struct pci_dev *pdev = vha->hw->pdev; | 1762 | |
2426 | /* <module-name> <msg-id>:<host> Message */ | 1763 | va_start(ap, msg); |
2427 | snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ", | 1764 | |
2428 | QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no); | 1765 | if (level <= ql_errlev) { |
2429 | } else { | 1766 | if (vha != NULL) { |
2430 | snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ", | 1767 | pdev = vha->hw->pdev; |
2431 | QL_MSGHDR, "0000:00:00.0", id); | 1768 | /* <module-name> <msg-id>:<host> Message */ |
2432 | } | 1769 | sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR, |
2433 | pbuf[sizeof(pbuf) - 1] = 0; | 1770 | dev_name(&(pdev->dev)), id, vha->host_no); |
2434 | 1771 | } else | |
2435 | va_start(va, fmt); | 1772 | sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR, |
2436 | 1773 | "0000:00:00.0", id); | |
2437 | vaf.fmt = fmt; | 1774 | |
2438 | vaf.va = &va; | 1775 | len = strlen(pbuf); |
2439 | 1776 | vsprintf(pbuf+len, msg, ap); | |
2440 | switch (level) { | 1777 | |
2441 | case ql_log_fatal: /* FATAL LOG */ | 1778 | switch (level) { |
2442 | pr_crit("%s%pV", pbuf, &vaf); | 1779 | case 0: /* FATAL LOG */ |
2443 | break; | 1780 | pr_crit("%s", pbuf); |
2444 | case ql_log_warn: | 1781 | break; |
2445 | pr_err("%s%pV", pbuf, &vaf); | 1782 | case 1: |
2446 | break; | 1783 | pr_err("%s", pbuf); |
2447 | case ql_log_info: | 1784 | break; |
2448 | pr_warn("%s%pV", pbuf, &vaf); | 1785 | case 2: |
2449 | break; | 1786 | pr_warn("%s", pbuf); |
2450 | default: | 1787 | break; |
2451 | pr_info("%s%pV", pbuf, &vaf); | 1788 | default: |
2452 | break; | 1789 | pr_info("%s", pbuf); |
1790 | break; | ||
1791 | } | ||
2453 | } | 1792 | } |
2454 | 1793 | ||
2455 | va_end(va); | 1794 | va_end(ap); |
2456 | } | 1795 | } |
2457 | 1796 | ||
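Both versions of ql_log() gate on ql_errlev and then fan out to a printk severity per level (fatal, warn, info, everything else). A compact user-space sketch of that dispatch; ql_log_fatal and ql_log_warn are taken from the defines in qla_dbg.h, ql_log_info = 2 is assumed by analogy:

#include <stdint.h>
#include <stdio.h>

#define ql_log_fatal	0
#define ql_log_warn	1
#define ql_log_info	2	/* assumed, by analogy with the defines in qla_dbg.h */

static uint32_t errlev = ql_log_info;	/* stands in for ql_errlev */

static void log_at_level(uint32_t level, const char *msg)
{
	static const char *const sev[] = { "crit", "err", "warn", "info" };

	if (level > errlev)	/* more verbose than the configured threshold */
		return;
	printf("<%s> %s\n", level < 3 ? sev[level] : sev[3], msg);
}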
2458 | /* | 1797 | /* |
2459 | * This function is for formatting and logging log messages. | 1798 | * This function is for formatting and logging log messages. |
2460 | * It is to be used when vha is not available and pci is available, | 1799 | * It is to be used when vha is not available and pci is available, |
2461 | * i.e., before host allocation. It formats the message and logs | 1800 | * i.e., before host allocation. It formats the message and logs |
2462 | * it to the messages file. All the messages are logged irrespective | 1801 | * it to the messages file. All the messages are logged irrespective |
2463 | * of the value of ql2xextended_error_logging. | 1802 | * of the value of ql2xextended_error_logging. |
@@ -2470,44 +1809,43 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...) | |||
2470 | * msg: The message to be displayed. | 1809 | * msg: The message to be displayed. |
2471 | */ | 1810 | */ |
2472 | void | 1811 | void |
2473 | ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, | 1812 | ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) { |
2474 | const char *fmt, ...) | 1813 | |
2475 | { | 1814 | char pbuf[QL_DBG_BUF_LEN]; |
2476 | va_list va; | 1815 | va_list ap; |
2477 | struct va_format vaf; | 1816 | uint32_t len; |
2478 | char pbuf[128]; | ||
2479 | 1817 | ||
2480 | if (pdev == NULL) | 1818 | if (pdev == NULL) |
2481 | return; | 1819 | return; |
2482 | if (level > ql_errlev) | ||
2483 | return; | ||
2484 | 1820 | ||
2485 | /* <module-name> <dev-name>:<msg-id> Message */ | 1821 | memset(pbuf, 0, QL_DBG_BUF_LEN); |
2486 | snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ", | 1822 | |
2487 | QL_MSGHDR, dev_name(&(pdev->dev)), id); | 1823 | va_start(ap, msg); |
2488 | pbuf[sizeof(pbuf) - 1] = 0; | 1824 | |
2489 | 1825 | if (level <= ql_errlev) { | |
2490 | va_start(va, fmt); | 1826 | /* <module-name> <dev-name>:<msg-id> Message */ |
2491 | 1827 | sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR, | |
2492 | vaf.fmt = fmt; | 1828 | dev_name(&(pdev->dev)), id); |
2493 | vaf.va = &va; | 1829 | |
2494 | 1830 | len = strlen(pbuf); | |
2495 | switch (level) { | 1831 | vsprintf(pbuf+len, msg, ap); |
2496 | case ql_log_fatal: /* FATAL LOG */ | 1832 | switch (level) { |
2497 | pr_crit("%s%pV", pbuf, &vaf); | 1833 | case 0: /* FATAL LOG */ |
2498 | break; | 1834 | pr_crit("%s", pbuf); |
2499 | case ql_log_warn: | 1835 | break; |
2500 | pr_err("%s%pV", pbuf, &vaf); | 1836 | case 1: |
2501 | break; | 1837 | pr_err("%s", pbuf); |
2502 | case ql_log_info: | 1838 | break; |
2503 | pr_warn("%s%pV", pbuf, &vaf); | 1839 | case 2: |
2504 | break; | 1840 | pr_warn("%s", pbuf); |
2505 | default: | 1841 | break; |
2506 | pr_info("%s%pV", pbuf, &vaf); | 1842 | default: |
2507 | break; | 1843 | pr_info("%s", pbuf); |
1844 | break; | ||
1845 | } | ||
2508 | } | 1846 | } |
2509 | 1847 | ||
2510 | va_end(va); | 1848 | va_end(ap); |
2511 | } | 1849 | } |
2512 | 1850 | ||
2513 | void | 1851 | void |
@@ -2520,20 +1858,20 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id) | |||
2520 | struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; | 1858 | struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; |
2521 | uint16_t __iomem *mbx_reg; | 1859 | uint16_t __iomem *mbx_reg; |
2522 | 1860 | ||
2523 | if (!ql_mask_match(level)) | 1861 | if ((level & ql2xextended_error_logging) == level) { |
2524 | return; | ||
2525 | 1862 | ||
2526 | if (IS_QLA82XX(ha)) | 1863 | if (IS_QLA82XX(ha)) |
2527 | mbx_reg = ®82->mailbox_in[0]; | 1864 | mbx_reg = ®82->mailbox_in[0]; |
2528 | else if (IS_FWI2_CAPABLE(ha)) | 1865 | else if (IS_FWI2_CAPABLE(ha)) |
2529 | mbx_reg = ®24->mailbox0; | 1866 | mbx_reg = ®24->mailbox0; |
2530 | else | 1867 | else |
2531 | mbx_reg = MAILBOX_REG(ha, reg, 0); | 1868 | mbx_reg = MAILBOX_REG(ha, reg, 0); |
2532 | 1869 | ||
2533 | ql_dbg(level, vha, id, "Mailbox registers:\n"); | 1870 | ql_dbg(level, vha, id, "Mailbox registers:\n"); |
2534 | for (i = 0; i < 6; i++) | 1871 | for (i = 0; i < 6; i++) |
2535 | ql_dbg(level, vha, id, | 1872 | ql_dbg(level, vha, id, |
2536 | "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++)); | 1873 | "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++)); |
1874 | } | ||
2537 | } | 1875 | } |
2538 | 1876 | ||
2539 | 1877 | ||
@@ -2543,25 +1881,24 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id, | |||
2543 | { | 1881 | { |
2544 | uint32_t cnt; | 1882 | uint32_t cnt; |
2545 | uint8_t c; | 1883 | uint8_t c; |
2546 | 1884 | if ((level & ql2xextended_error_logging) == level) { | |
2547 | if (!ql_mask_match(level)) | 1885 | |
2548 | return; | 1886 | ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 " |
2549 | 1887 | "9 Ah Bh Ch Dh Eh Fh\n"); | |
2550 | ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 " | 1888 | ql_dbg(level, vha, id, "----------------------------------" |
2551 | "9 Ah Bh Ch Dh Eh Fh\n"); | 1889 | "----------------------------\n"); |
2552 | ql_dbg(level, vha, id, "----------------------------------" | 1890 | |
2553 | "----------------------------\n"); | 1891 | ql_dbg(level, vha, id, ""); |
2554 | 1892 | for (cnt = 0; cnt < size;) { | |
2555 | ql_dbg(level, vha, id, " "); | 1893 | c = *b++; |
2556 | for (cnt = 0; cnt < size;) { | 1894 | printk("%02x", (uint32_t) c); |
2557 | c = *b++; | 1895 | cnt++; |
2558 | printk("%02x", (uint32_t) c); | 1896 | if (!(cnt % 16)) |
2559 | cnt++; | 1897 | printk("\n"); |
2560 | if (!(cnt % 16)) | 1898 | else |
2561 | printk("\n"); | 1899 | printk(" "); |
2562 | else | 1900 | } |
2563 | printk(" "); | 1901 | if (cnt % 16) |
1902 | ql_dbg(level, vha, id, "\n"); | ||
2564 | } | 1903 | } |
2565 | if (cnt % 16) | ||
2566 | ql_dbg(level, vha, id, "\n"); | ||
2567 | } | 1904 | } |
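ql_dump_buffer() above prints the buffer as hex bytes, sixteen per line, with a space between bytes and a newline after each full row. A self-contained user-space sketch of the same layout, with hypothetical names:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Print 'size' bytes as hex, 16 per line, like the dump loop above. */
static void dump_buffer(const uint8_t *b, size_t size)
{
	for (size_t cnt = 0; cnt < size; cnt++) {
		printf("%02x", b[cnt]);
		putchar(((cnt + 1) % 16) ? ' ' : '\n');
	}
	if (size % 16)
		putchar('\n');
}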
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index 8f911c0b1e7..98a377b9901 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -165,54 +165,6 @@ struct qla81xx_fw_dump { | |||
165 | uint32_t ext_mem[1]; | 165 | uint32_t ext_mem[1]; |
166 | }; | 166 | }; |
167 | 167 | ||
168 | struct qla83xx_fw_dump { | ||
169 | uint32_t host_status; | ||
170 | uint32_t host_risc_reg[48]; | ||
171 | uint32_t pcie_regs[4]; | ||
172 | uint32_t host_reg[32]; | ||
173 | uint32_t shadow_reg[11]; | ||
174 | uint32_t risc_io_reg; | ||
175 | uint16_t mailbox_reg[32]; | ||
176 | uint32_t xseq_gp_reg[256]; | ||
177 | uint32_t xseq_0_reg[48]; | ||
178 | uint32_t xseq_1_reg[16]; | ||
179 | uint32_t xseq_2_reg[16]; | ||
180 | uint32_t rseq_gp_reg[256]; | ||
181 | uint32_t rseq_0_reg[32]; | ||
182 | uint32_t rseq_1_reg[16]; | ||
183 | uint32_t rseq_2_reg[16]; | ||
184 | uint32_t rseq_3_reg[16]; | ||
185 | uint32_t aseq_gp_reg[256]; | ||
186 | uint32_t aseq_0_reg[32]; | ||
187 | uint32_t aseq_1_reg[16]; | ||
188 | uint32_t aseq_2_reg[16]; | ||
189 | uint32_t aseq_3_reg[16]; | ||
190 | uint32_t cmd_dma_reg[64]; | ||
191 | uint32_t req0_dma_reg[15]; | ||
192 | uint32_t resp0_dma_reg[15]; | ||
193 | uint32_t req1_dma_reg[15]; | ||
194 | uint32_t xmt0_dma_reg[32]; | ||
195 | uint32_t xmt1_dma_reg[32]; | ||
196 | uint32_t xmt2_dma_reg[32]; | ||
197 | uint32_t xmt3_dma_reg[32]; | ||
198 | uint32_t xmt4_dma_reg[32]; | ||
199 | uint32_t xmt_data_dma_reg[16]; | ||
200 | uint32_t rcvt0_data_dma_reg[32]; | ||
201 | uint32_t rcvt1_data_dma_reg[32]; | ||
202 | uint32_t risc_gp_reg[128]; | ||
203 | uint32_t lmc_reg[128]; | ||
204 | uint32_t fpm_hdw_reg[256]; | ||
205 | uint32_t rq0_array_reg[256]; | ||
206 | uint32_t rq1_array_reg[256]; | ||
207 | uint32_t rp0_array_reg[256]; | ||
208 | uint32_t rp1_array_reg[256]; | ||
209 | uint32_t queue_control_reg[16]; | ||
210 | uint32_t fb_hdw_reg[432]; | ||
211 | uint32_t at0_array_reg[128]; | ||
212 | uint32_t code_ram[0x2400]; | ||
213 | uint32_t ext_mem[1]; | ||
214 | }; | ||
215 | |||
216 | #define EFT_NUM_BUFFERS 4 | 168 | #define EFT_NUM_BUFFERS 4 |
217 | #define EFT_BYTES_PER_BUFFER 0x4000 | 169 | #define EFT_BYTES_PER_BUFFER 0x4000 |
218 | #define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS)) | 170 | #define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS)) |
@@ -240,24 +192,9 @@ struct qla2xxx_mq_chain { | |||
240 | uint32_t qregs[4 * QLA_MQ_SIZE]; | 192 | uint32_t qregs[4 * QLA_MQ_SIZE]; |
241 | }; | 193 | }; |
242 | 194 | ||
243 | struct qla2xxx_mqueue_header { | ||
244 | uint32_t queue; | ||
245 | #define TYPE_REQUEST_QUEUE 0x1 | ||
246 | #define TYPE_RESPONSE_QUEUE 0x2 | ||
247 | #define TYPE_ATIO_QUEUE 0x3 | ||
248 | uint32_t number; | ||
249 | uint32_t size; | ||
250 | }; | ||
251 | |||
252 | struct qla2xxx_mqueue_chain { | ||
253 | uint32_t type; | ||
254 | uint32_t chain_size; | ||
255 | }; | ||
256 | |||
257 | #define DUMP_CHAIN_VARIANT 0x80000000 | 195 | #define DUMP_CHAIN_VARIANT 0x80000000 |
258 | #define DUMP_CHAIN_FCE 0x7FFFFAF0 | 196 | #define DUMP_CHAIN_FCE 0x7FFFFAF0 |
259 | #define DUMP_CHAIN_MQ 0x7FFFFAF1 | 197 | #define DUMP_CHAIN_MQ 0x7FFFFAF1 |
260 | #define DUMP_CHAIN_QUEUE 0x7FFFFAF2 | ||
261 | #define DUMP_CHAIN_LAST 0x80000000 | 198 | #define DUMP_CHAIN_LAST 0x80000000 |
262 | 199 | ||
263 | struct qla2xxx_fw_dump { | 200 | struct qla2xxx_fw_dump { |
@@ -291,12 +228,10 @@ struct qla2xxx_fw_dump { | |||
291 | struct qla24xx_fw_dump isp24; | 228 | struct qla24xx_fw_dump isp24; |
292 | struct qla25xx_fw_dump isp25; | 229 | struct qla25xx_fw_dump isp25; |
293 | struct qla81xx_fw_dump isp81; | 230 | struct qla81xx_fw_dump isp81; |
294 | struct qla83xx_fw_dump isp83; | ||
295 | } isp; | 231 | } isp; |
296 | }; | 232 | }; |
297 | 233 | ||
298 | #define QL_MSGHDR "qla2xxx" | 234 | #define QL_MSGHDR "qla2xxx" |
299 | #define QL_DBG_DEFAULT1_MASK 0x1e400000 | ||
300 | 235 | ||
301 | #define ql_log_fatal 0 /* display fatal errors */ | 236 | #define ql_log_fatal 0 /* display fatal errors */ |
302 | #define ql_log_warn 1 /* display critical errors */ | 237 | #define ql_log_warn 1 /* display critical errors */ |
@@ -309,15 +244,15 @@ struct qla2xxx_fw_dump { | |||
309 | 244 | ||
310 | extern int ql_errlev; | 245 | extern int ql_errlev; |
311 | 246 | ||
312 | void __attribute__((format (printf, 4, 5))) | 247 | void |
313 | ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...); | 248 | ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...); |
314 | void __attribute__((format (printf, 4, 5))) | 249 | void |
315 | ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...); | 250 | ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...); |
316 | 251 | ||
317 | void __attribute__((format (printf, 4, 5))) | 252 | void |
318 | ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...); | 253 | ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...); |
319 | void __attribute__((format (printf, 4, 5))) | 254 | void |
320 | ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...); | 255 | ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...); |
321 | 256 | ||
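The declarations being removed (left column) carry __attribute__((format(printf, 4, 5))), a GCC/Clang extension telling the compiler that argument 4 is a printf-style format string and the variadic arguments begin at position 5, so mismatched format arguments are flagged at build time. A standalone illustration with made-up names:

#include <stdio.h>
#include <stdarg.h>

/* Argument 2 is the format string, variadic arguments start at 3. */
static void __attribute__((format(printf, 2, 3)))
report(int id, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	printf("[%04x] ", id);
	vprintf(fmt, ap);
	va_end(ap);
}

/*
 * report(7, "%s %d\n", "loop id", 3);   compiles cleanly
 * report(7, "%s\n", 42);                gcc/clang warn: %s expects a string
 */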
322 | /* Debug Levels */ | 257 | /* Debug Levels */ |
323 | /* The 0x40000000 is the max value any debug level can have | 258 | /* The 0x40000000 is the max value any debug level can have |
@@ -340,11 +275,5 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...); | |||
340 | #define ql_dbg_misc 0x00010000 /* For dumping everything that is not | 275 | #define ql_dbg_misc 0x00010000 /* For dumping everything that is not |
341 | * covered by upper categories | 276 | * covered by upper categories |
342 | */ | 277 | */ |
343 | #define ql_dbg_verbose 0x00008000 /* More verbosity for each level | 278 | |
344 | * This is to be used with other levels where | 279 | #define QL_DBG_BUF_LEN 512 |
345 | * more verbosity is required. It might not | ||
346 | * be applicable to all the levels. | ||
347 | */ | ||
348 | #define ql_dbg_tgt 0x00004000 /* Target mode */ | ||
349 | #define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */ | ||
350 | #define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */ | ||
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 6e7727f46d4..a03eaf40f37 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -44,7 +44,6 @@ | |||
44 | * ISP2100 HBAs. | 44 | * ISP2100 HBAs. |
45 | */ | 45 | */ |
46 | #define MAILBOX_REGISTER_COUNT_2100 8 | 46 | #define MAILBOX_REGISTER_COUNT_2100 8 |
47 | #define MAILBOX_REGISTER_COUNT_2200 24 | ||
48 | #define MAILBOX_REGISTER_COUNT 32 | 47 | #define MAILBOX_REGISTER_COUNT 32 |
49 | 48 | ||
50 | #define QLA2200A_RISC_ROM_VER 4 | 49 | #define QLA2200A_RISC_ROM_VER 4 |
@@ -115,82 +114,6 @@ | |||
115 | #define WRT_REG_DWORD(addr, data) writel(data,addr) | 114 | #define WRT_REG_DWORD(addr, data) writel(data,addr) |
116 | 115 | ||
117 | /* | 116 | /* |
118 | * ISP83XX specific remote register addresses | ||
119 | */ | ||
120 | #define QLA83XX_LED_PORT0 0x00201320 | ||
121 | #define QLA83XX_LED_PORT1 0x00201328 | ||
122 | #define QLA83XX_IDC_DEV_STATE 0x22102384 | ||
123 | #define QLA83XX_IDC_MAJOR_VERSION 0x22102380 | ||
124 | #define QLA83XX_IDC_MINOR_VERSION 0x22102398 | ||
125 | #define QLA83XX_IDC_DRV_PRESENCE 0x22102388 | ||
126 | #define QLA83XX_IDC_DRIVER_ACK 0x2210238c | ||
127 | #define QLA83XX_IDC_CONTROL 0x22102390 | ||
128 | #define QLA83XX_IDC_AUDIT 0x22102394 | ||
129 | #define QLA83XX_IDC_LOCK_RECOVERY 0x2210239c | ||
130 | #define QLA83XX_DRIVER_LOCKID 0x22102104 | ||
131 | #define QLA83XX_DRIVER_LOCK 0x8111c028 | ||
132 | #define QLA83XX_DRIVER_UNLOCK 0x8111c02c | ||
133 | #define QLA83XX_FLASH_LOCKID 0x22102100 | ||
134 | #define QLA83XX_FLASH_LOCK 0x8111c010 | ||
135 | #define QLA83XX_FLASH_UNLOCK 0x8111c014 | ||
136 | #define QLA83XX_DEV_PARTINFO1 0x221023e0 | ||
137 | #define QLA83XX_DEV_PARTINFO2 0x221023e4 | ||
138 | #define QLA83XX_FW_HEARTBEAT 0x221020b0 | ||
139 | #define QLA83XX_PEG_HALT_STATUS1 0x221020a8 | ||
140 | #define QLA83XX_PEG_HALT_STATUS2 0x221020ac | ||
141 | |||
142 | /* 83XX: Macros defining 8200 AEN Reason codes */ | ||
143 | #define IDC_DEVICE_STATE_CHANGE BIT_0 | ||
144 | #define IDC_PEG_HALT_STATUS_CHANGE BIT_1 | ||
145 | #define IDC_NIC_FW_REPORTED_FAILURE BIT_2 | ||
146 | #define IDC_HEARTBEAT_FAILURE BIT_3 | ||
147 | |||
148 | /* 83XX: Macros defining 8200 AEN Error-levels */ | ||
149 | #define ERR_LEVEL_NON_FATAL 0x1 | ||
150 | #define ERR_LEVEL_RECOVERABLE_FATAL 0x2 | ||
151 | #define ERR_LEVEL_UNRECOVERABLE_FATAL 0x4 | ||
152 | |||
153 | /* 83XX: Macros for IDC Version */ | ||
154 | #define QLA83XX_SUPP_IDC_MAJOR_VERSION 0x01 | ||
155 | #define QLA83XX_SUPP_IDC_MINOR_VERSION 0x0 | ||
156 | |||
157 | /* 83XX: Macros for scheduling dpc tasks */ | ||
158 | #define QLA83XX_NIC_CORE_RESET 0x1 | ||
159 | #define QLA83XX_IDC_STATE_HANDLER 0x2 | ||
160 | #define QLA83XX_NIC_CORE_UNRECOVERABLE 0x3 | ||
161 | |||
162 | /* 83XX: Macros for defining IDC-Control bits */ | ||
163 | #define QLA83XX_IDC_RESET_DISABLED BIT_0 | ||
164 | #define QLA83XX_IDC_GRACEFUL_RESET BIT_1 | ||
165 | |||
166 | /* 83XX: Macros for different timeouts */ | ||
167 | #define QLA83XX_IDC_INITIALIZATION_TIMEOUT 30 | ||
168 | #define QLA83XX_IDC_RESET_ACK_TIMEOUT 10 | ||
169 | #define QLA83XX_MAX_LOCK_RECOVERY_WAIT (2 * HZ) | ||
170 | |||
171 | /* 83XX: Macros for defining class in DEV-Partition Info register */ | ||
172 | #define QLA83XX_CLASS_TYPE_NONE 0x0 | ||
173 | #define QLA83XX_CLASS_TYPE_NIC 0x1 | ||
174 | #define QLA83XX_CLASS_TYPE_FCOE 0x2 | ||
175 | #define QLA83XX_CLASS_TYPE_ISCSI 0x3 | ||
176 | |||
177 | /* 83XX: Macros for IDC Lock-Recovery stages */ | ||
178 | #define IDC_LOCK_RECOVERY_STAGE1 0x1 /* Stage1: Intent for | ||
179 | * lock-recovery | ||
180 | */ | ||
181 | #define IDC_LOCK_RECOVERY_STAGE2 0x2 /* Stage2: Perform lock-recovery */ | ||
182 | |||
183 | /* 83XX: Macros for IDC Audit type */ | ||
184 | #define IDC_AUDIT_TIMESTAMP 0x0 /* IDC-AUDIT: Record timestamp of | ||
185 | * dev-state change to NEED-RESET | ||
186 | * or NEED-QUIESCENT | ||
187 | */ | ||
188 | #define IDC_AUDIT_COMPLETION 0x1 /* IDC-AUDIT: Record duration of | ||
189 | * reset-recovery completion is | ||
190 | * second | ||
191 | */ | ||
192 | |||
193 | /* | ||
194 | * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an | 117 | * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an |
195 | * 133Mhz slot. | 118 | * 133Mhz slot. |
196 | */ | 119 | */ |
@@ -201,18 +124,17 @@ | |||
201 | * Fibre Channel device definitions. | 124 | * Fibre Channel device definitions. |
202 | */ | 125 | */ |
203 | #define WWN_SIZE 8 /* Size of WWPN, WWN & WWNN */ | 126 | #define WWN_SIZE 8 /* Size of WWPN, WWN & WWNN */ |
204 | #define MAX_FIBRE_DEVICES_2100 512 | 127 | #define MAX_FIBRE_DEVICES 512 |
205 | #define MAX_FIBRE_DEVICES_2400 2048 | ||
206 | #define MAX_FIBRE_DEVICES_LOOP 128 | ||
207 | #define MAX_FIBRE_DEVICES_MAX MAX_FIBRE_DEVICES_2400 | ||
208 | #define LOOPID_MAP_SIZE (ha->max_fibre_devices) | ||
209 | #define MAX_FIBRE_LUNS 0xFFFF | 128 | #define MAX_FIBRE_LUNS 0xFFFF |
129 | #define MAX_RSCN_COUNT 32 | ||
210 | #define MAX_HOST_COUNT 16 | 130 | #define MAX_HOST_COUNT 16 |
211 | 131 | ||
212 | /* | 132 | /* |
213 | * Host adapter default definitions. | 133 | * Host adapter default definitions. |
214 | */ | 134 | */ |
215 | #define MAX_BUSES 1 /* We only have one bus today */ | 135 | #define MAX_BUSES 1 /* We only have one bus today */ |
136 | #define MAX_TARGETS_2100 MAX_FIBRE_DEVICES | ||
137 | #define MAX_TARGETS_2200 MAX_FIBRE_DEVICES | ||
216 | #define MIN_LUNS 8 | 138 | #define MIN_LUNS 8 |
217 | #define MAX_LUNS MAX_FIBRE_LUNS | 139 | #define MAX_LUNS MAX_FIBRE_LUNS |
218 | #define MAX_CMDS_PER_LUN 255 | 140 | #define MAX_CMDS_PER_LUN 255 |
@@ -263,7 +185,6 @@ | |||
263 | #define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ | 185 | #define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ |
264 | #define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ | 186 | #define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ |
265 | #define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/ | 187 | #define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/ |
266 | #define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */ | ||
267 | 188 | ||
268 | struct req_que; | 189 | struct req_que; |
269 | 190 | ||
@@ -280,12 +201,20 @@ struct sd_dif_tuple { | |||
280 | /* | 201 | /* |
281 | * SCSI Request Block | 202 | * SCSI Request Block |
282 | */ | 203 | */ |
283 | struct srb_cmd { | 204 | typedef struct srb { |
205 | atomic_t ref_count; | ||
206 | struct fc_port *fcport; | ||
207 | uint32_t handle; | ||
208 | |||
284 | struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ | 209 | struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ |
210 | |||
211 | uint16_t flags; | ||
212 | |||
285 | uint32_t request_sense_length; | 213 | uint32_t request_sense_length; |
286 | uint8_t *request_sense_ptr; | 214 | uint8_t *request_sense_ptr; |
215 | |||
287 | void *ctx; | 216 | void *ctx; |
288 | }; | 217 | } srb_t; |
289 | 218 | ||
290 | /* | 219 | /* |
291 | * SRB flag definitions | 220 | * SRB flag definitions |
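This hunk reverts srb_t from the unified request block (embedded srb_cmd/srb_iocb/bsg union plus done/free callbacks) back to a SCSI-only block with a separate srb_ctx for extended commands. A short sketch of how a SCSI command is attached to the reverted srb_t follows; it is illustrative only, and qla2x00_get_new_sp() is a stand-in allocator name rather than something introduced by this patch.

    srb_t *sp = qla2x00_get_new_sp(vha, fcport, cmd);   /* stand-in */
    if (!sp)
            return SCSI_MLQUEUE_HOST_BUSY;

    atomic_set(&sp->ref_count, 1);  /* dropped at command completion */
    sp->fcport = fcport;
    sp->cmd    = cmd;               /* Linux SCSI command packet */
    sp->flags  = 0;
    sp->ctx    = NULL;              /* no ELS/CT/login context attached */

    rval = ha->isp_ops->start_scsi(sp);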
@@ -324,7 +253,10 @@ struct srb_iocb { | |||
324 | } u; | 253 | } u; |
325 | 254 | ||
326 | struct timer_list timer; | 255 | struct timer_list timer; |
327 | void (*timeout)(void *); | 256 | |
257 | void (*done)(srb_t *); | ||
258 | void (*free)(srb_t *); | ||
259 | void (*timeout)(srb_t *); | ||
328 | }; | 260 | }; |
329 | 261 | ||
330 | /* Values for srb_ctx type */ | 262 | /* Values for srb_ctx type */ |
@@ -335,38 +267,15 @@ struct srb_iocb { | |||
335 | #define SRB_CT_CMD 5 | 267 | #define SRB_CT_CMD 5 |
336 | #define SRB_ADISC_CMD 6 | 268 | #define SRB_ADISC_CMD 6 |
337 | #define SRB_TM_CMD 7 | 269 | #define SRB_TM_CMD 7 |
338 | #define SRB_SCSI_CMD 8 | ||
339 | #define SRB_BIDI_CMD 9 | ||
340 | 270 | ||
341 | typedef struct srb { | 271 | struct srb_ctx { |
342 | atomic_t ref_count; | ||
343 | struct fc_port *fcport; | ||
344 | uint32_t handle; | ||
345 | uint16_t flags; | ||
346 | uint16_t type; | 272 | uint16_t type; |
347 | char *name; | 273 | char *name; |
348 | int iocbs; | ||
349 | union { | 274 | union { |
350 | struct srb_iocb iocb_cmd; | 275 | struct srb_iocb *iocb_cmd; |
351 | struct fc_bsg_job *bsg_job; | 276 | struct fc_bsg_job *bsg_job; |
352 | struct srb_cmd scmd; | ||
353 | } u; | 277 | } u; |
354 | void (*done)(void *, void *, int); | 278 | }; |
355 | void (*free)(void *, void *); | ||
356 | } srb_t; | ||
357 | |||
358 | #define GET_CMD_SP(sp) (sp->u.scmd.cmd) | ||
359 | #define SET_CMD_SP(sp, cmd) (sp->u.scmd.cmd = cmd) | ||
360 | #define GET_CMD_CTX_SP(sp) (sp->u.scmd.ctx) | ||
361 | |||
362 | #define GET_CMD_SENSE_LEN(sp) \ | ||
363 | (sp->u.scmd.request_sense_length) | ||
364 | #define SET_CMD_SENSE_LEN(sp, len) \ | ||
365 | (sp->u.scmd.request_sense_length = len) | ||
366 | #define GET_CMD_SENSE_PTR(sp) \ | ||
367 | (sp->u.scmd.request_sense_ptr) | ||
368 | #define SET_CMD_SENSE_PTR(sp, ptr) \ | ||
369 | (sp->u.scmd.request_sense_ptr = ptr) | ||
370 | 279 | ||
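The accessor macros deleted above existed only because the unified srb kept the SCSI command inside a union; with the reverted layout the fields are plain members again. The contrast, as a sketch (the sense-length variable name is an assumption):

    /* Unified srb (removed here): go through the accessors. */
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    SET_CMD_SENSE_LEN(sp, sense_len);
    SET_CMD_SENSE_PTR(sp, cmd->sense_buffer);

    /* Reverted srb_t: the same fields are reached directly. */
    cmd = sp->cmd;
    sp->request_sense_length = sense_len;
    sp->request_sense_ptr    = cmd->sense_buffer;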
371 | struct msg_echo_lb { | 280 | struct msg_echo_lb { |
372 | dma_addr_t send_dma; | 281 | dma_addr_t send_dma; |
@@ -672,20 +581,6 @@ typedef struct { | |||
672 | #define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */ | 581 | #define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */ |
673 | #define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */ | 582 | #define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */ |
674 | 583 | ||
675 | /* 83XX FCoE specific */ | ||
676 | #define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */ | ||
677 | |||
678 | /* Interrupt type codes */ | ||
679 | #define INTR_ROM_MB_SUCCESS 0x1 | ||
680 | #define INTR_ROM_MB_FAILED 0x2 | ||
681 | #define INTR_MB_SUCCESS 0x10 | ||
682 | #define INTR_MB_FAILED 0x11 | ||
683 | #define INTR_ASYNC_EVENT 0x12 | ||
684 | #define INTR_RSP_QUE_UPDATE 0x13 | ||
685 | #define INTR_RSP_QUE_UPDATE_83XX 0x14 | ||
686 | #define INTR_ATIO_QUE_UPDATE 0x1C | ||
687 | #define INTR_ATIO_RSP_QUE_UPDATE 0x1D | ||
688 | |||
689 | /* ISP mailbox loopback echo diagnostic error code */ | 584 | /* ISP mailbox loopback echo diagnostic error code */ |
690 | #define MBS_LB_RESET 0x17 | 585 | #define MBS_LB_RESET 0x17 |
691 | /* | 586 | /* |
@@ -756,10 +651,8 @@ typedef struct { | |||
756 | #define MBC_DIAGNOSTIC_LOOP_BACK 0x45 /* Diagnostic loop back. */ | 651 | #define MBC_DIAGNOSTIC_LOOP_BACK 0x45 /* Diagnostic loop back. */ |
757 | #define MBC_ONLINE_SELF_TEST 0x46 /* Online self-test. */ | 652 | #define MBC_ONLINE_SELF_TEST 0x46 /* Online self-test. */ |
758 | #define MBC_ENHANCED_GET_PORT_DATABASE 0x47 /* Get port database + login */ | 653 | #define MBC_ENHANCED_GET_PORT_DATABASE 0x47 /* Get port database + login */ |
759 | #define MBC_CONFIGURE_VF 0x4b /* Configure VFs */ | ||
760 | #define MBC_RESET_LINK_STATUS 0x52 /* Reset Link Error Status */ | 654 | #define MBC_RESET_LINK_STATUS 0x52 /* Reset Link Error Status */ |
761 | #define MBC_IOCB_COMMAND_A64 0x54 /* Execute IOCB command (64) */ | 655 | #define MBC_IOCB_COMMAND_A64 0x54 /* Execute IOCB command (64) */ |
762 | #define MBC_PORT_LOGOUT 0x56 /* Port Logout request */ | ||
763 | #define MBC_SEND_RNID_ELS 0x57 /* Send RNID ELS request */ | 656 | #define MBC_SEND_RNID_ELS 0x57 /* Send RNID ELS request */ |
764 | #define MBC_SET_RNID_PARAMS 0x59 /* Set RNID parameters */ | 657 | #define MBC_SET_RNID_PARAMS 0x59 /* Set RNID parameters */ |
765 | #define MBC_GET_RNID_PARAMS 0x5a /* Data Rate */ | 658 | #define MBC_GET_RNID_PARAMS 0x5a /* Data Rate */ |
@@ -810,7 +703,6 @@ typedef struct { | |||
810 | #define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */ | 703 | #define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */ |
811 | #define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */ | 704 | #define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */ |
812 | #define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */ | 705 | #define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */ |
813 | #define MBC_PORT_RESET 0x120 /* Port Reset */ | ||
814 | #define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */ | 706 | #define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */ |
815 | #define MBC_GET_PORT_CONFIG 0x123 /* Get port configuration */ | 707 | #define MBC_GET_PORT_CONFIG 0x123 /* Get port configuration */ |
816 | 708 | ||
@@ -1328,27 +1220,11 @@ typedef struct { | |||
1328 | * ISP queue - response queue entry definition. | 1220 | * ISP queue - response queue entry definition. |
1329 | */ | 1221 | */ |
1330 | typedef struct { | 1222 | typedef struct { |
1331 | uint8_t entry_type; /* Entry type. */ | 1223 | uint8_t data[60]; |
1332 | uint8_t entry_count; /* Entry count. */ | ||
1333 | uint8_t sys_define; /* System defined. */ | ||
1334 | uint8_t entry_status; /* Entry Status. */ | ||
1335 | uint32_t handle; /* System defined handle */ | ||
1336 | uint8_t data[52]; | ||
1337 | uint32_t signature; | 1224 | uint32_t signature; |
1338 | #define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */ | 1225 | #define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */ |
1339 | } response_t; | 1226 | } response_t; |
1340 | 1227 | ||
1341 | /* | ||
1342 | * ISP queue - ATIO queue entry definition. | ||
1343 | */ | ||
1344 | struct atio { | ||
1345 | uint8_t entry_type; /* Entry type. */ | ||
1346 | uint8_t entry_count; /* Entry count. */ | ||
1347 | uint8_t data[58]; | ||
1348 | uint32_t signature; | ||
1349 | #define ATIO_PROCESSED 0xDEADDEAD /* Signature */ | ||
1350 | }; | ||
1351 | |||
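Both queue-entry layouts end in a 32-bit signature that the driver stamps once an entry has been consumed, so ring processing can stop at the first already-processed slot. A hedged sketch of that polling pattern (the rsp_que field names and the entry handler are assumptions, not part of this patch):

    while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
            response_t *pkt = rsp->ring_ptr;

            handle_response_entry(vha, rsp, pkt);       /* stand-in */

            pkt->signature = RESPONSE_PROCESSED;        /* mark consumed */
            wmb();

            if (++rsp->ring_index == rsp->length) {
                    rsp->ring_index = 0;
                    rsp->ring_ptr = rsp->ring;
            } else {
                    rsp->ring_ptr++;
            }
    }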
1352 | typedef union { | 1228 | typedef union { |
1353 | uint16_t extended; | 1229 | uint16_t extended; |
1354 | struct { | 1230 | struct { |
@@ -1468,10 +1344,9 @@ typedef struct { | |||
1468 | } cont_a64_entry_t; | 1344 | } cont_a64_entry_t; |
1469 | 1345 | ||
1470 | #define PO_MODE_DIF_INSERT 0 | 1346 | #define PO_MODE_DIF_INSERT 0 |
1471 | #define PO_MODE_DIF_REMOVE 1 | 1347 | #define PO_MODE_DIF_REMOVE BIT_0 |
1472 | #define PO_MODE_DIF_PASS 2 | 1348 | #define PO_MODE_DIF_PASS BIT_1 |
1473 | #define PO_MODE_DIF_REPLACE 3 | 1349 | #define PO_MODE_DIF_REPLACE (BIT_0 + BIT_1) |
1474 | #define PO_MODE_DIF_TCP_CKSUM 6 | ||
1475 | #define PO_ENABLE_DIF_BUNDLING BIT_8 | 1350 | #define PO_ENABLE_DIF_BUNDLING BIT_8 |
1476 | #define PO_ENABLE_INCR_GUARD_SEED BIT_3 | 1351 | #define PO_ENABLE_INCR_GUARD_SEED BIT_3 |
1477 | #define PO_DISABLE_INCR_REF_TAG BIT_5 | 1352 | #define PO_DISABLE_INCR_REF_TAG BIT_5 |
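The PO_MODE_DIF_* values select how firmware treats T10 protection information (PI) for a command. A minimal sketch of mapping the block layer's protection operation onto these modes (illustrative only, not part of this patch):

    uint16_t fw_prot_opts;

    switch (scsi_get_prot_op(cmd)) {
    case SCSI_PROT_READ_STRIP:
    case SCSI_PROT_WRITE_STRIP:
            fw_prot_opts = PO_MODE_DIF_REMOVE;  /* HBA removes PI */
            break;
    case SCSI_PROT_READ_INSERT:
    case SCSI_PROT_WRITE_INSERT:
            fw_prot_opts = PO_MODE_DIF_INSERT;  /* HBA generates PI */
            break;
    case SCSI_PROT_READ_PASS:
    case SCSI_PROT_WRITE_PASS:
    default:
            fw_prot_opts = PO_MODE_DIF_PASS;    /* PI passed end to end */
            break;
    }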
@@ -1603,13 +1478,6 @@ typedef struct { | |||
1603 | #define CS_RETRY 0x82 /* Driver defined */ | 1478 | #define CS_RETRY 0x82 /* Driver defined */ |
1604 | #define CS_LOOP_DOWN_ABORT 0x83 /* Driver defined */ | 1479 | #define CS_LOOP_DOWN_ABORT 0x83 /* Driver defined */ |
1605 | 1480 | ||
1606 | #define CS_BIDIR_RD_OVERRUN 0x700 | ||
1607 | #define CS_BIDIR_RD_WR_OVERRUN 0x707 | ||
1608 | #define CS_BIDIR_RD_OVERRUN_WR_UNDERRUN 0x715 | ||
1609 | #define CS_BIDIR_RD_UNDERRUN 0x1500 | ||
1610 | #define CS_BIDIR_RD_UNDERRUN_WR_OVERRUN 0x1507 | ||
1611 | #define CS_BIDIR_RD_WR_UNDERRUN 0x1515 | ||
1612 | #define CS_BIDIR_DMA 0x200 | ||
1613 | /* | 1481 | /* |
1614 | * Status entry status flags | 1482 | * Status entry status flags |
1615 | */ | 1483 | */ |
@@ -1837,13 +1705,10 @@ typedef struct fc_port { | |||
1837 | struct fc_rport *rport, *drport; | 1705 | struct fc_rport *rport, *drport; |
1838 | u32 supported_classes; | 1706 | u32 supported_classes; |
1839 | 1707 | ||
1708 | uint16_t vp_idx; | ||
1840 | uint8_t fc4_type; | 1709 | uint8_t fc4_type; |
1841 | uint8_t scan_state; | ||
1842 | } fc_port_t; | 1710 | } fc_port_t; |
1843 | 1711 | ||
1844 | #define QLA_FCPORT_SCAN_NONE 0 | ||
1845 | #define QLA_FCPORT_SCAN_FOUND 1 | ||
1846 | |||
1847 | /* | 1712 | /* |
1848 | * Fibre channel port/lun states. | 1713 | * Fibre channel port/lun states. |
1849 | */ | 1714 | */ |
@@ -1867,7 +1732,6 @@ static const char * const port_state_str[] = { | |||
1867 | #define FCF_LOGIN_NEEDED BIT_1 | 1732 | #define FCF_LOGIN_NEEDED BIT_1 |
1868 | #define FCF_FCP2_DEVICE BIT_2 | 1733 | #define FCF_FCP2_DEVICE BIT_2 |
1869 | #define FCF_ASYNC_SENT BIT_3 | 1734 | #define FCF_ASYNC_SENT BIT_3 |
1870 | #define FCF_CONF_COMP_SUPPORTED BIT_4 | ||
1871 | 1735 | ||
1872 | /* No loop ID flag. */ | 1736 | /* No loop ID flag. */ |
1873 | #define FC_NO_LOOP_ID 0x1000 | 1737 | #define FC_NO_LOOP_ID 0x1000 |
@@ -1895,6 +1759,7 @@ static const char * const port_state_str[] = { | |||
1895 | 1759 | ||
1896 | #define GID_PT_CMD 0x1A1 | 1760 | #define GID_PT_CMD 0x1A1 |
1897 | #define GID_PT_REQ_SIZE (16 + 4) | 1761 | #define GID_PT_REQ_SIZE (16 + 4) |
1762 | #define GID_PT_RSP_SIZE (16 + (MAX_FIBRE_DEVICES * 4)) | ||
1898 | 1763 | ||
1899 | #define GPN_ID_CMD 0x112 | 1764 | #define GPN_ID_CMD 0x112 |
1900 | #define GPN_ID_REQ_SIZE (16 + 4) | 1765 | #define GPN_ID_REQ_SIZE (16 + 4) |
@@ -2184,9 +2049,7 @@ struct ct_sns_rsp { | |||
2184 | } ga_nxt; | 2049 | } ga_nxt; |
2185 | 2050 | ||
2186 | struct { | 2051 | struct { |
2187 | /* Assume the largest number of targets for the union */ | 2052 | struct ct_sns_gid_pt_data entries[MAX_FIBRE_DEVICES]; |
2188 | struct ct_sns_gid_pt_data | ||
2189 | entries[MAX_FIBRE_DEVICES_MAX]; | ||
2190 | } gid_pt; | 2053 | } gid_pt; |
2191 | 2054 | ||
2192 | struct { | 2055 | struct { |
@@ -2247,11 +2110,7 @@ struct ct_sns_pkt { | |||
2247 | 2110 | ||
2248 | #define GID_PT_SNS_SCMD_LEN 6 | 2111 | #define GID_PT_SNS_SCMD_LEN 6 |
2249 | #define GID_PT_SNS_CMD_SIZE 28 | 2112 | #define GID_PT_SNS_CMD_SIZE 28 |
2250 | /* | 2113 | #define GID_PT_SNS_DATA_SIZE (MAX_FIBRE_DEVICES * 4 + 16) |
2251 | * Assume MAX_FIBRE_DEVICES_2100 as these defines are only used with older | ||
2252 | * adapters. | ||
2253 | */ | ||
2254 | #define GID_PT_SNS_DATA_SIZE (MAX_FIBRE_DEVICES_2100 * 4 + 16) | ||
2255 | 2114 | ||
2256 | #define GPN_ID_SNS_SCMD_LEN 6 | 2115 | #define GPN_ID_SNS_SCMD_LEN 6 |
2257 | #define GPN_ID_SNS_CMD_SIZE 28 | 2116 | #define GPN_ID_SNS_CMD_SIZE 28 |
@@ -2299,6 +2158,7 @@ struct gid_list_info { | |||
2299 | uint16_t loop_id; /* ISP23XX -- 6 bytes. */ | 2158 | uint16_t loop_id; /* ISP23XX -- 6 bytes. */ |
2300 | uint16_t reserved_1; /* ISP24XX -- 8 bytes. */ | 2159 | uint16_t reserved_1; /* ISP24XX -- 8 bytes. */ |
2301 | }; | 2160 | }; |
2161 | #define GID_LIST_SIZE (sizeof(struct gid_list_info) * MAX_FIBRE_DEVICES) | ||
2302 | 2162 | ||
2303 | /* NPIV */ | 2163 | /* NPIV */ |
2304 | typedef struct vport_info { | 2164 | typedef struct vport_info { |
@@ -2384,7 +2244,6 @@ struct isp_operations { | |||
2384 | int (*get_flash_version) (struct scsi_qla_host *, void *); | 2244 | int (*get_flash_version) (struct scsi_qla_host *, void *); |
2385 | int (*start_scsi) (srb_t *); | 2245 | int (*start_scsi) (srb_t *); |
2386 | int (*abort_isp) (struct scsi_qla_host *); | 2246 | int (*abort_isp) (struct scsi_qla_host *); |
2387 | int (*iospace_config)(struct qla_hw_data*); | ||
2388 | }; | 2247 | }; |
2389 | 2248 | ||
2390 | /* MSI-X Support *************************************************************/ | 2249 | /* MSI-X Support *************************************************************/ |
@@ -2399,7 +2258,6 @@ struct isp_operations { | |||
2399 | #define QLA_MIDX_DEFAULT 0 | 2258 | #define QLA_MIDX_DEFAULT 0 |
2400 | #define QLA_MIDX_RSP_Q 1 | 2259 | #define QLA_MIDX_RSP_Q 1 |
2401 | #define QLA_PCI_MSIX_CONTROL 0xa2 | 2260 | #define QLA_PCI_MSIX_CONTROL 0xa2 |
2402 | #define QLA_83XX_PCI_MSIX_CONTROL 0x92 | ||
2403 | 2261 | ||
2404 | struct scsi_qla_host; | 2262 | struct scsi_qla_host; |
2405 | 2263 | ||
@@ -2474,21 +2332,16 @@ struct qla_statistics { | |||
2474 | uint64_t output_bytes; | 2332 | uint64_t output_bytes; |
2475 | }; | 2333 | }; |
2476 | 2334 | ||
2477 | struct bidi_statistics { | ||
2478 | unsigned long long io_count; | ||
2479 | unsigned long long transfer_bytes; | ||
2480 | }; | ||
2481 | |||
2482 | /* Multi queue support */ | 2335 | /* Multi queue support */ |
2483 | #define MBC_INITIALIZE_MULTIQ 0x1f | 2336 | #define MBC_INITIALIZE_MULTIQ 0x1f |
2484 | #define QLA_QUE_PAGE 0X1000 | 2337 | #define QLA_QUE_PAGE 0X1000 |
2485 | #define QLA_MQ_SIZE 32 | 2338 | #define QLA_MQ_SIZE 32 |
2486 | #define QLA_MAX_QUEUES 256 | 2339 | #define QLA_MAX_QUEUES 256 |
2487 | #define ISP_QUE_REG(ha, id) \ | 2340 | #define ISP_QUE_REG(ha, id) \ |
2488 | ((ha->mqenable || IS_QLA83XX(ha)) ? \ | 2341 | ((ha->mqenable) ? \ |
2489 | ((device_reg_t __iomem *)(ha->mqiobase) +\ | 2342 | ((void *)(ha->mqiobase) +\ |
2490 | (QLA_QUE_PAGE * id)) :\ | 2343 | (QLA_QUE_PAGE * id)) :\ |
2491 | ((device_reg_t __iomem *)(ha->iobase))) | 2344 | ((void *)(ha->iobase))) |
2492 | #define QLA_REQ_QUE_ID(tag) \ | 2345 | #define QLA_REQ_QUE_ID(tag) \ |
2493 | ((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0) | 2346 | ((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0) |
2494 | #define QLA_DEFAULT_QUE_QOS 5 | 2347 | #define QLA_DEFAULT_QUE_QOS 5 |
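ISP_QUE_REG() resolves the MMIO window for a queue: with multiqueue enabled each queue gets its own QLA_QUE_PAGE-sized window off mqiobase, otherwise everything falls back to the primary iobase. A short usage sketch (the doorbell field name is an assumption, not part of this patch):

    device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

    /* Ring the request-queue doorbell in this queue's window. */
    WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);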
@@ -2545,40 +2398,6 @@ struct qlfc_fw { | |||
2545 | uint32_t len; | 2398 | uint32_t len; |
2546 | }; | 2399 | }; |
2547 | 2400 | ||
2548 | struct qlt_hw_data { | ||
2549 | /* Protected by hw lock */ | ||
2550 | uint32_t enable_class_2:1; | ||
2551 | uint32_t enable_explicit_conf:1; | ||
2552 | uint32_t ini_mode_force_reverse:1; | ||
2553 | uint32_t node_name_set:1; | ||
2554 | |||
2555 | dma_addr_t atio_dma; /* Physical address. */ | ||
2556 | struct atio *atio_ring; /* Base virtual address */ | ||
2557 | struct atio *atio_ring_ptr; /* Current address. */ | ||
2558 | uint16_t atio_ring_index; /* Current index. */ | ||
2559 | uint16_t atio_q_length; | ||
2560 | |||
2561 | void *target_lport_ptr; | ||
2562 | struct qla_tgt_func_tmpl *tgt_ops; | ||
2563 | struct qla_tgt *qla_tgt; | ||
2564 | struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS]; | ||
2565 | uint16_t current_handle; | ||
2566 | |||
2567 | struct qla_tgt_vp_map *tgt_vp_map; | ||
2568 | struct mutex tgt_mutex; | ||
2569 | struct mutex tgt_host_action_mutex; | ||
2570 | |||
2571 | int saved_set; | ||
2572 | uint16_t saved_exchange_count; | ||
2573 | uint32_t saved_firmware_options_1; | ||
2574 | uint32_t saved_firmware_options_2; | ||
2575 | uint32_t saved_firmware_options_3; | ||
2576 | uint8_t saved_firmware_options[2]; | ||
2577 | uint8_t saved_add_firmware_options[2]; | ||
2578 | |||
2579 | uint8_t tgt_node_name[WWN_SIZE]; | ||
2580 | }; | ||
2581 | |||
2582 | /* | 2401 | /* |
2583 | * Qlogic host adapter specific data structure. | 2402 | * Qlogic host adapter specific data structure. |
2584 | */ | 2403 | */ |
@@ -2615,16 +2434,11 @@ struct qla_hw_data { | |||
2615 | uint32_t disable_msix_handshake :1; | 2434 | uint32_t disable_msix_handshake :1; |
2616 | uint32_t fcp_prio_enabled :1; | 2435 | uint32_t fcp_prio_enabled :1; |
2617 | uint32_t isp82xx_fw_hung:1; | 2436 | uint32_t isp82xx_fw_hung:1; |
2618 | uint32_t nic_core_hung:1; | ||
2619 | 2437 | ||
2620 | uint32_t quiesce_owner:1; | 2438 | uint32_t quiesce_owner:1; |
2621 | uint32_t thermal_supported:1; | 2439 | uint32_t thermal_supported:1; |
2622 | uint32_t nic_core_reset_hdlr_active:1; | 2440 | uint32_t isp82xx_reset_hdlr_active:1; |
2623 | uint32_t nic_core_reset_owner:1; | 2441 | /* 26 bits */ |
2624 | uint32_t isp82xx_no_md_cap:1; | ||
2625 | uint32_t host_shutting_down:1; | ||
2626 | uint32_t idc_compl_status:1; | ||
2627 | /* 32 bits */ | ||
2628 | } flags; | 2442 | } flags; |
2629 | 2443 | ||
2630 | /* This spinlock is used to protect "io transactions", you must | 2444 | /* This spinlock is used to protect "io transactions", you must |
@@ -2643,7 +2457,6 @@ struct qla_hw_data { | |||
2643 | #define MIN_IOBASE_LEN 0x100 | 2457 | #define MIN_IOBASE_LEN 0x100 |
2644 | /* Multi queue data structs */ | 2458 | /* Multi queue data structs */ |
2645 | device_reg_t __iomem *mqiobase; | 2459 | device_reg_t __iomem *mqiobase; |
2646 | device_reg_t __iomem *msixbase; | ||
2647 | uint16_t msix_count; | 2460 | uint16_t msix_count; |
2648 | uint8_t mqenable; | 2461 | uint8_t mqenable; |
2649 | struct req_que **req_q_map; | 2462 | struct req_que **req_q_map; |
@@ -2668,7 +2481,6 @@ struct qla_hw_data { | |||
2668 | atomic_t loop_down_timer; /* loop down timer */ | 2481 | atomic_t loop_down_timer; /* loop down timer */ |
2669 | uint8_t link_down_timeout; /* link down timeout */ | 2482 | uint8_t link_down_timeout; /* link down timeout */ |
2670 | uint16_t max_loop_id; | 2483 | uint16_t max_loop_id; |
2671 | uint16_t max_fibre_devices; /* Maximum number of targets */ | ||
2672 | 2484 | ||
2673 | uint16_t fb_rev; | 2485 | uint16_t fb_rev; |
2674 | uint16_t min_external_loopid; /* First external loop Id */ | 2486 | uint16_t min_external_loopid; /* First external loop Id */ |
@@ -2678,7 +2490,6 @@ struct qla_hw_data { | |||
2678 | #define PORT_SPEED_2GB 0x01 | 2490 | #define PORT_SPEED_2GB 0x01 |
2679 | #define PORT_SPEED_4GB 0x03 | 2491 | #define PORT_SPEED_4GB 0x03 |
2680 | #define PORT_SPEED_8GB 0x04 | 2492 | #define PORT_SPEED_8GB 0x04 |
2681 | #define PORT_SPEED_16GB 0x05 | ||
2682 | #define PORT_SPEED_10GB 0x13 | 2493 | #define PORT_SPEED_10GB 0x13 |
2683 | uint16_t link_data_rate; /* F/W operating speed */ | 2494 | uint16_t link_data_rate; /* F/W operating speed */ |
2684 | 2495 | ||
@@ -2700,8 +2511,6 @@ struct qla_hw_data { | |||
2700 | #define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532 | 2511 | #define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532 |
2701 | #define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432 | 2512 | #define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432 |
2702 | #define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001 | 2513 | #define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001 |
2703 | #define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031 | ||
2704 | #define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031 | ||
2705 | uint32_t device_type; | 2514 | uint32_t device_type; |
2706 | #define DT_ISP2100 BIT_0 | 2515 | #define DT_ISP2100 BIT_0 |
2707 | #define DT_ISP2200 BIT_1 | 2516 | #define DT_ISP2200 BIT_1 |
@@ -2718,9 +2527,7 @@ struct qla_hw_data { | |||
2718 | #define DT_ISP8432 BIT_12 | 2527 | #define DT_ISP8432 BIT_12 |
2719 | #define DT_ISP8001 BIT_13 | 2528 | #define DT_ISP8001 BIT_13 |
2720 | #define DT_ISP8021 BIT_14 | 2529 | #define DT_ISP8021 BIT_14 |
2721 | #define DT_ISP2031 BIT_15 | 2530 | #define DT_ISP_LAST (DT_ISP8021 << 1) |
2722 | #define DT_ISP8031 BIT_16 | ||
2723 | #define DT_ISP_LAST (DT_ISP8031 << 1) | ||
2724 | 2531 | ||
2725 | #define DT_T10_PI BIT_25 | 2532 | #define DT_T10_PI BIT_25 |
2726 | #define DT_IIDMA BIT_26 | 2533 | #define DT_IIDMA BIT_26 |
@@ -2744,30 +2551,26 @@ struct qla_hw_data { | |||
2744 | #define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532) | 2551 | #define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532) |
2745 | #define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432) | 2552 | #define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432) |
2746 | #define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001) | 2553 | #define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001) |
2747 | #define IS_QLA81XX(ha) (IS_QLA8001(ha)) | ||
2748 | #define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021) | 2554 | #define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021) |
2749 | #define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031) | ||
2750 | #define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031) | ||
2751 | 2555 | ||
2752 | #define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \ | 2556 | #define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \ |
2753 | IS_QLA6312(ha) || IS_QLA6322(ha)) | 2557 | IS_QLA6312(ha) || IS_QLA6322(ha)) |
2754 | #define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha)) | 2558 | #define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha)) |
2755 | #define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha)) | 2559 | #define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha)) |
2756 | #define IS_QLA25XX(ha) (IS_QLA2532(ha)) | 2560 | #define IS_QLA25XX(ha) (IS_QLA2532(ha)) |
2757 | #define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha)) | ||
2758 | #define IS_QLA84XX(ha) (IS_QLA8432(ha)) | 2561 | #define IS_QLA84XX(ha) (IS_QLA8432(ha)) |
2759 | #define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \ | 2562 | #define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \ |
2760 | IS_QLA84XX(ha)) | 2563 | IS_QLA84XX(ha)) |
2761 | #define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \ | 2564 | #define IS_QLA81XX(ha) (IS_QLA8001(ha)) |
2762 | IS_QLA8031(ha)) | 2565 | #define IS_QLA8XXX_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha)) |
2763 | #define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \ | 2566 | #define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \ |
2764 | IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ | 2567 | IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ |
2765 | IS_QLA82XX(ha) || IS_QLA83XX(ha)) | 2568 | IS_QLA82XX(ha)) |
2766 | #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) | 2569 | #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha)) |
2767 | #define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ | 2570 | #define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \ |
2768 | IS_QLA83XX(ha)) && (ha)->flags.msix_enabled) | 2571 | (ha)->flags.msix_enabled) |
2769 | #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) | 2572 | #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha)) |
2770 | #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) | 2573 | #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha)) |
2771 | #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) | 2574 | #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) |
2772 | 2575 | ||
2773 | #define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI) | 2576 | #define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI) |
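These IS_* helpers simply test device_type bits latched at probe time from the PCI device ID, which keeps feature checks readable at the call site. A brief sketch (the ql2xenabledif module-parameter check and surrounding context are assumptions):

    if (IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung)
            qla82xx_chip_reset_cleanup(vha);    /* CNA-specific recovery */

    if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
            scsi_host_set_prot(vha->host, SHOST_DIF_TYPE1_PROTECTION);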
@@ -2776,18 +2579,6 @@ struct qla_hw_data { | |||
2776 | #define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED) | 2579 | #define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED) |
2777 | #define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001) | 2580 | #define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001) |
2778 | #define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS) | 2581 | #define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS) |
2779 | #define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED) | ||
2780 | #define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha)) | ||
2781 | #define IS_BIDI_CAPABLE(ha) ((IS_QLA25XX(ha) || IS_QLA2031(ha))) | ||
2782 | /* Bit 21 of fw_attributes decides the MCTP capabilities */ | ||
2783 | #define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \ | ||
2784 | ((ha)->fw_attributes_ext[0] & BIT_0)) | ||
2785 | #define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha)) | ||
2786 | #define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha)) | ||
2787 | #define IS_PI_DIFB_DIX0_CAPABLE(ha) (0) | ||
2788 | #define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha)) | ||
2789 | #define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \ | ||
2790 | (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22)) | ||
2791 | 2582 | ||
2792 | /* HBA serial number */ | 2583 | /* HBA serial number */ |
2793 | uint8_t serial0; | 2584 | uint8_t serial0; |
@@ -2826,6 +2617,10 @@ struct qla_hw_data { | |||
2826 | void *sfp_data; | 2617 | void *sfp_data; |
2827 | dma_addr_t sfp_data_dma; | 2618 | dma_addr_t sfp_data_dma; |
2828 | 2619 | ||
2620 | uint8_t *edc_data; | ||
2621 | dma_addr_t edc_data_dma; | ||
2622 | uint16_t edc_data_len; | ||
2623 | |||
2829 | #define XGMAC_DATA_SIZE 4096 | 2624 | #define XGMAC_DATA_SIZE 4096 |
2830 | void *xgmac_data; | 2625 | void *xgmac_data; |
2831 | dma_addr_t xgmac_data_dma; | 2626 | dma_addr_t xgmac_data_dma; |
@@ -2854,8 +2649,6 @@ struct qla_hw_data { | |||
2854 | void *async_pd; | 2649 | void *async_pd; |
2855 | dma_addr_t async_pd_dma; | 2650 | dma_addr_t async_pd_dma; |
2856 | 2651 | ||
2857 | void *swl; | ||
2858 | |||
2859 | /* These are used by mailbox operations. */ | 2652 | /* These are used by mailbox operations. */ |
2860 | volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT]; | 2653 | volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT]; |
2861 | 2654 | ||
@@ -2871,15 +2664,12 @@ struct qla_hw_data { | |||
2871 | struct completion mbx_intr_comp; /* Used for completion notification */ | 2664 | struct completion mbx_intr_comp; /* Used for completion notification */ |
2872 | struct completion dcbx_comp; /* For set port config notification */ | 2665 | struct completion dcbx_comp; /* For set port config notification */ |
2873 | int notify_dcbx_comp; | 2666 | int notify_dcbx_comp; |
2874 | struct mutex selflogin_lock; | ||
2875 | 2667 | ||
2876 | /* Basic firmware related information. */ | 2668 | /* Basic firmware related information. */ |
2877 | uint16_t fw_major_version; | 2669 | uint16_t fw_major_version; |
2878 | uint16_t fw_minor_version; | 2670 | uint16_t fw_minor_version; |
2879 | uint16_t fw_subminor_version; | 2671 | uint16_t fw_subminor_version; |
2880 | uint16_t fw_attributes; | 2672 | uint16_t fw_attributes; |
2881 | uint16_t fw_attributes_h; | ||
2882 | uint16_t fw_attributes_ext[2]; | ||
2883 | uint32_t fw_memory_size; | 2673 | uint32_t fw_memory_size; |
2884 | uint32_t fw_transfer_size; | 2674 | uint32_t fw_transfer_size; |
2885 | uint32_t fw_srisc_address; | 2675 | uint32_t fw_srisc_address; |
@@ -2903,12 +2693,7 @@ struct qla_hw_data { | |||
2903 | int fw_dump_reading; | 2693 | int fw_dump_reading; |
2904 | dma_addr_t eft_dma; | 2694 | dma_addr_t eft_dma; |
2905 | void *eft; | 2695 | void *eft; |
2906 | /* Current size of mctp dump is 0x086064 bytes */ | 2696 | |
2907 | #define MCTP_DUMP_SIZE 0x086064 | ||
2908 | dma_addr_t mctp_dump_dma; | ||
2909 | void *mctp_dump; | ||
2910 | int mctp_dumped; | ||
2911 | int mctp_dump_reading; | ||
2912 | uint32_t chain_offset; | 2697 | uint32_t chain_offset; |
2913 | struct dentry *dfs_dir; | 2698 | struct dentry *dfs_dir; |
2914 | struct dentry *dfs_fce; | 2699 | struct dentry *dfs_fce; |
@@ -2984,6 +2769,7 @@ struct qla_hw_data { | |||
2984 | /* ISP2322: red, green, amber. */ | 2769 | /* ISP2322: red, green, amber. */ |
2985 | uint16_t zio_mode; | 2770 | uint16_t zio_mode; |
2986 | uint16_t zio_timer; | 2771 | uint16_t zio_timer; |
2772 | struct fc_host_statistics fc_host_stat; | ||
2987 | 2773 | ||
2988 | struct qla_msix_entry *msix_entries; | 2774 | struct qla_msix_entry *msix_entries; |
2989 | 2775 | ||
@@ -2996,6 +2782,7 @@ struct qla_hw_data { | |||
2996 | int cur_vport_count; | 2782 | int cur_vport_count; |
2997 | 2783 | ||
2998 | struct qla_chip_state_84xx *cs84xx; | 2784 | struct qla_chip_state_84xx *cs84xx; |
2785 | struct qla_statistics qla_stats; | ||
2999 | struct isp_operations *isp_ops; | 2786 | struct isp_operations *isp_ops; |
3000 | struct workqueue_struct *wq; | 2787 | struct workqueue_struct *wq; |
3001 | struct qlfc_fw fw_buf; | 2788 | struct qlfc_fw fw_buf; |
@@ -3020,8 +2807,8 @@ struct qla_hw_data { | |||
3020 | unsigned long mn_win_crb; | 2807 | unsigned long mn_win_crb; |
3021 | unsigned long ms_win_crb; | 2808 | unsigned long ms_win_crb; |
3022 | int qdr_sn_window; | 2809 | int qdr_sn_window; |
3023 | uint32_t fcoe_dev_init_timeout; | 2810 | uint32_t nx_dev_init_timeout; |
3024 | uint32_t fcoe_reset_timeout; | 2811 | uint32_t nx_reset_timeout; |
3025 | rwlock_t hw_lock; | 2812 | rwlock_t hw_lock; |
3026 | uint16_t portnum; /* port number */ | 2813 | uint16_t portnum; /* port number */ |
3027 | int link_width; | 2814 | int link_width; |
@@ -3035,28 +2822,6 @@ struct qla_hw_data { | |||
3035 | 2822 | ||
3036 | uint8_t fw_type; | 2823 | uint8_t fw_type; |
3037 | __le32 file_prd_off; /* File firmware product offset */ | 2824 | __le32 file_prd_off; /* File firmware product offset */ |
3038 | |||
3039 | uint32_t md_template_size; | ||
3040 | void *md_tmplt_hdr; | ||
3041 | dma_addr_t md_tmplt_hdr_dma; | ||
3042 | void *md_dump; | ||
3043 | uint32_t md_dump_size; | ||
3044 | |||
3045 | void *loop_id_map; | ||
3046 | |||
3047 | /* QLA83XX IDC specific fields */ | ||
3048 | uint32_t idc_audit_ts; | ||
3049 | |||
3050 | /* DPC low-priority workqueue */ | ||
3051 | struct workqueue_struct *dpc_lp_wq; | ||
3052 | struct work_struct idc_aen; | ||
3053 | /* DPC high-priority workqueue */ | ||
3054 | struct workqueue_struct *dpc_hp_wq; | ||
3055 | struct work_struct nic_core_reset; | ||
3056 | struct work_struct idc_state_handler; | ||
3057 | struct work_struct nic_core_unrecoverable; | ||
3058 | |||
3059 | struct qlt_hw_data tgt; | ||
3060 | }; | 2825 | }; |
3061 | 2826 | ||
3062 | /* | 2827 | /* |
@@ -3076,6 +2841,7 @@ typedef struct scsi_qla_host { | |||
3076 | volatile struct { | 2841 | volatile struct { |
3077 | uint32_t init_done :1; | 2842 | uint32_t init_done :1; |
3078 | uint32_t online :1; | 2843 | uint32_t online :1; |
2844 | uint32_t rscn_queue_overflow :1; | ||
3079 | uint32_t reset_active :1; | 2845 | uint32_t reset_active :1; |
3080 | 2846 | ||
3081 | uint32_t management_server_logged_in :1; | 2847 | uint32_t management_server_logged_in :1; |
@@ -3114,7 +2880,6 @@ typedef struct scsi_qla_host { | |||
3114 | #define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ | 2880 | #define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ |
3115 | #define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ | 2881 | #define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ |
3116 | #define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */ | 2882 | #define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */ |
3117 | #define SCR_PENDING 21 /* SCR in target mode */ | ||
3118 | 2883 | ||
3119 | uint32_t device_flags; | 2884 | uint32_t device_flags; |
3120 | #define SWITCH_FOUND BIT_0 | 2885 | #define SWITCH_FOUND BIT_0 |
@@ -3123,13 +2888,6 @@ typedef struct scsi_qla_host { | |||
3123 | 2888 | ||
3124 | /* ISP configuration data. */ | 2889 | /* ISP configuration data. */ |
3125 | uint16_t loop_id; /* Host adapter loop id */ | 2890 | uint16_t loop_id; /* Host adapter loop id */ |
3126 | uint16_t self_login_loop_id; /* host adapter loop id | ||
3127 | * get it on self login | ||
3128 | */ | ||
3129 | fc_port_t bidir_fcport; /* fcport used for bidir cmnds | ||
3130 | * no need of allocating it for | ||
3131 | * each command | ||
3132 | */ | ||
3133 | 2891 | ||
3134 | port_id_t d_id; /* Host adapter port id */ | 2892 | port_id_t d_id; /* Host adapter port id */ |
3135 | uint8_t marker_needed; | 2893 | uint8_t marker_needed; |
@@ -3137,6 +2895,11 @@ typedef struct scsi_qla_host { | |||
3137 | 2895 | ||
3138 | 2896 | ||
3139 | 2897 | ||
2898 | /* RSCN queue. */ | ||
2899 | uint32_t rscn_queue[MAX_RSCN_COUNT]; | ||
2900 | uint8_t rscn_in_ptr; | ||
2901 | uint8_t rscn_out_ptr; | ||
2902 | |||
3140 | /* Timeout timers. */ | 2903 | /* Timeout timers. */ |
3141 | uint8_t loop_down_abort_time; /* port down timer */ | 2904 | uint8_t loop_down_abort_time; /* port down timer */ |
3142 | atomic_t loop_down_timer; /* loop down timer */ | 2905 | atomic_t loop_down_timer; /* loop down timer */ |
@@ -3181,22 +2944,10 @@ typedef struct scsi_qla_host { | |||
3181 | struct req_que *req; | 2944 | struct req_que *req; |
3182 | int fw_heartbeat_counter; | 2945 | int fw_heartbeat_counter; |
3183 | int seconds_since_last_heartbeat; | 2946 | int seconds_since_last_heartbeat; |
3184 | struct fc_host_statistics fc_host_stat; | ||
3185 | struct qla_statistics qla_stats; | ||
3186 | struct bidi_statistics bidi_stats; | ||
3187 | 2947 | ||
3188 | atomic_t vref_count; | 2948 | atomic_t vref_count; |
3189 | } scsi_qla_host_t; | 2949 | } scsi_qla_host_t; |
3190 | 2950 | ||
3191 | #define SET_VP_IDX 1 | ||
3192 | #define SET_AL_PA 2 | ||
3193 | #define RESET_VP_IDX 3 | ||
3194 | #define RESET_AL_PA 4 | ||
3195 | struct qla_tgt_vp_map { | ||
3196 | uint8_t idx; | ||
3197 | scsi_qla_host_t *vha; | ||
3198 | }; | ||
3199 | |||
3200 | /* | 2951 | /* |
3201 | * Macros to help code, maintain, etc. | 2952 | * Macros to help code, maintain, etc. |
3202 | */ | 2953 | */ |
@@ -3220,6 +2971,10 @@ struct qla_tgt_vp_map { | |||
3220 | atomic_dec(&__vha->vref_count); \ | 2971 | atomic_dec(&__vha->vref_count); \ |
3221 | } while (0) | 2972 | } while (0) |
3222 | 2973 | ||
2974 | |||
2975 | #define qla_printk(level, ha, format, arg...) \ | ||
2976 | dev_printk(level , &((ha)->pdev->dev) , format , ## arg) | ||
2977 | |||
3223 | /* | 2978 | /* |
3224 | * qla2x00 local function return status codes | 2979 | * qla2x00 local function return status codes |
3225 | */ | 2980 | */ |
@@ -3244,6 +2999,7 @@ struct qla_tgt_vp_map { | |||
3244 | #define QLA_ABORTED 0x105 | 2999 | #define QLA_ABORTED 0x105 |
3245 | #define QLA_SUSPENDED 0x106 | 3000 | #define QLA_SUSPENDED 0x106 |
3246 | #define QLA_BUSY 0x107 | 3001 | #define QLA_BUSY 0x107 |
3002 | #define QLA_RSCNS_HANDLED 0x108 | ||
3247 | #define QLA_ALREADY_REGISTERED 0x109 | 3003 | #define QLA_ALREADY_REGISTERED 0x109 |
3248 | 3004 | ||
3249 | #define NVRAM_DELAY() udelay(10) | 3005 | #define NVRAM_DELAY() udelay(10) |
@@ -3259,7 +3015,6 @@ struct qla_tgt_vp_map { | |||
3259 | #define OPTROM_SIZE_25XX 0x200000 | 3015 | #define OPTROM_SIZE_25XX 0x200000 |
3260 | #define OPTROM_SIZE_81XX 0x400000 | 3016 | #define OPTROM_SIZE_81XX 0x400000 |
3261 | #define OPTROM_SIZE_82XX 0x800000 | 3017 | #define OPTROM_SIZE_82XX 0x800000 |
3262 | #define OPTROM_SIZE_83XX 0x1000000 | ||
3263 | 3018 | ||
3264 | #define OPTROM_BURST_SIZE 0x1000 | 3019 | #define OPTROM_BURST_SIZE 0x1000 |
3265 | #define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4) | 3020 | #define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4) |
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 706c4f7bc7c..0b4c2b794c6 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -114,7 +114,7 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha) | |||
114 | { | 114 | { |
115 | struct qla_hw_data *ha = vha->hw; | 115 | struct qla_hw_data *ha = vha->hw; |
116 | 116 | ||
117 | if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) | 117 | if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) |
118 | goto out; | 118 | goto out; |
119 | if (!ha->fce) | 119 | if (!ha->fce) |
120 | goto out; | 120 | goto out; |
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index be6d61a89ed..aa69486dc06 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -381,44 +381,6 @@ struct init_cb_24xx { | |||
381 | /* | 381 | /* |
382 | * ISP queue - command entry structure definition. | 382 | * ISP queue - command entry structure definition. |
383 | */ | 383 | */ |
384 | #define COMMAND_BIDIRECTIONAL 0x75 | ||
385 | struct cmd_bidir { | ||
386 | uint8_t entry_type; /* Entry type. */ | ||
387 | uint8_t entry_count; /* Entry count. */ | ||
388 | uint8_t sys_define; /* System defined */ | ||
389 | uint8_t entry_status; /* Entry status. */ | ||
390 | |||
391 | uint32_t handle; /* System handle. */ | ||
392 | |||
393 | uint16_t nport_handle; /* N_PORT handle. */ | ||
394 | |||
395 | uint16_t timeout; /* Command timeout. */ | ||
396 | |||
397 | uint16_t wr_dseg_count; /* Write Data segment count. */ | ||
398 | uint16_t rd_dseg_count; /* Read Data segment count. */ | ||
399 | |||
400 | struct scsi_lun lun; /* FCP LUN (BE). */ | ||
401 | |||
402 | uint16_t control_flags; /* Control flags. */ | ||
403 | #define BD_WRAP_BACK BIT_3 | ||
404 | #define BD_READ_DATA BIT_1 | ||
405 | #define BD_WRITE_DATA BIT_0 | ||
406 | |||
407 | uint16_t fcp_cmnd_dseg_len; /* Data segment length. */ | ||
408 | uint32_t fcp_cmnd_dseg_address[2]; /* Data segment address. */ | ||
409 | |||
410 | uint16_t reserved[2]; /* Reserved */ | ||
411 | |||
412 | uint32_t rd_byte_count; /* Total Byte count Read. */ | ||
413 | uint32_t wr_byte_count; /* Total Byte count write. */ | ||
414 | |||
415 | uint8_t port_id[3]; /* PortID of destination port.*/ | ||
416 | uint8_t vp_index; | ||
417 | |||
418 | uint32_t fcp_data_dseg_address[2]; /* Data segment address. */ | ||
419 | uint16_t fcp_data_dseg_len; /* Data segment length. */ | ||
420 | }; | ||
421 | |||
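The removed COMMAND_BIDIRECTIONAL IOCB carries separate read and write segment/byte counts plus direction flags. A hedged sketch of populating those fields (the local variable names are stand-ins, not part of this patch):

    struct cmd_bidir *cmd_pkt = pkt;            /* pkt: slot in request ring */

    cmd_pkt->wr_dseg_count = cpu_to_le16(tot_dsds_out);
    cmd_pkt->rd_dseg_count = cpu_to_le16(tot_dsds_in);
    cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
    cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);

    /* Data moves both ways for a bidirectional command. */
    cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA);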
422 | #define COMMAND_TYPE_6 0x48 /* Command Type 6 entry */ | 384 | #define COMMAND_TYPE_6 0x48 /* Command Type 6 entry */ |
423 | struct cmd_type_6 { | 385 | struct cmd_type_6 { |
424 | uint8_t entry_type; /* Entry type. */ | 386 | uint8_t entry_type; /* Entry type. */ |
@@ -1092,27 +1054,6 @@ struct device_reg_24xx { | |||
1092 | uint32_t unused_6[2]; /* Gap. */ | 1054 | uint32_t unused_6[2]; /* Gap. */ |
1093 | uint32_t iobase_sdata; | 1055 | uint32_t iobase_sdata; |
1094 | }; | 1056 | }; |
1095 | /* RISC-RISC semaphore register PCI offset */ | ||
1096 | #define RISC_REGISTER_BASE_OFFSET 0x7010 | ||
1097 | #define RISC_REGISTER_WINDOW_OFFET 0x6 | ||
1098 | |||
1099 | /* RISC-RISC semaphore/flag register (risc address 0x7016) */ | ||
1100 | |||
1101 | #define RISC_SEMAPHORE 0x1UL | ||
1102 | #define RISC_SEMAPHORE_WE (RISC_SEMAPHORE << 16) | ||
1103 | #define RISC_SEMAPHORE_CLR (RISC_SEMAPHORE_WE | 0x0UL) | ||
1104 | #define RISC_SEMAPHORE_SET (RISC_SEMAPHORE_WE | RISC_SEMAPHORE) | ||
1105 | |||
1106 | #define RISC_SEMAPHORE_FORCE 0x8000UL | ||
1107 | #define RISC_SEMAPHORE_FORCE_WE (RISC_SEMAPHORE_FORCE << 16) | ||
1108 | #define RISC_SEMAPHORE_FORCE_CLR (RISC_SEMAPHORE_FORCE_WE | 0x0UL) | ||
1109 | #define RISC_SEMAPHORE_FORCE_SET \ | ||
1110 | (RISC_SEMAPHORE_FORCE_WE | RISC_SEMAPHORE_FORCE) | ||
1111 | |||
1112 | /* RISC semaphore timeouts (ms) */ | ||
1113 | #define TIMEOUT_SEMAPHORE 2500 | ||
1114 | #define TIMEOUT_SEMAPHORE_FORCE 2000 | ||
1115 | #define TIMEOUT_TOTAL_ELAPSED 4500 | ||
1116 | 1057 | ||
1117 | /* Trace Control *************************************************************/ | 1058 | /* Trace Control *************************************************************/ |
1118 | 1059 | ||
@@ -1189,7 +1130,7 @@ struct mid_db_entry_24xx { | |||
1189 | /* | 1130 | /* |
1190 | * Virtual Port Control IOCB | 1131 | * Virtual Port Control IOCB |
1191 | */ | 1132 | */ |
1192 | #define VP_CTRL_IOCB_TYPE 0x30 /* Virtual Port Control entry. */ | 1133 | #define VP_CTRL_IOCB_TYPE 0x30 /* Virtual Port Control entry. */ |
1193 | struct vp_ctrl_entry_24xx { | 1134 | struct vp_ctrl_entry_24xx { |
1194 | uint8_t entry_type; /* Entry type. */ | 1135 | uint8_t entry_type; /* Entry type. */ |
1195 | uint8_t entry_count; /* Entry count. */ | 1136 | uint8_t entry_count; /* Entry count. */ |
@@ -1225,7 +1166,7 @@ struct vp_ctrl_entry_24xx { | |||
1225 | /* | 1166 | /* |
1226 | * Modify Virtual Port Configuration IOCB | 1167 | * Modify Virtual Port Configuration IOCB |
1227 | */ | 1168 | */ |
1228 | #define VP_CONFIG_IOCB_TYPE 0x31 /* Virtual Port Config entry. */ | 1169 | #define VP_CONFIG_IOCB_TYPE 0x31 /* Virtual Port Config entry. */ |
1229 | struct vp_config_entry_24xx { | 1170 | struct vp_config_entry_24xx { |
1230 | uint8_t entry_type; /* Entry type. */ | 1171 | uint8_t entry_type; /* Entry type. */ |
1231 | uint8_t entry_count; /* Entry count. */ | 1172 | uint8_t entry_count; /* Entry count. */ |
@@ -1386,11 +1327,6 @@ struct qla_flt_header { | |||
1386 | #define FLT_REG_GOLD_FW 0x2f | 1327 | #define FLT_REG_GOLD_FW 0x2f |
1387 | #define FLT_REG_FCP_PRIO_0 0x87 | 1328 | #define FLT_REG_FCP_PRIO_0 0x87 |
1388 | #define FLT_REG_FCP_PRIO_1 0x88 | 1329 | #define FLT_REG_FCP_PRIO_1 0x88 |
1389 | #define FLT_REG_FCOE_FW 0xA4 | ||
1390 | #define FLT_REG_FCOE_VPD_0 0xA9 | ||
1391 | #define FLT_REG_FCOE_NVRAM_0 0xAA | ||
1392 | #define FLT_REG_FCOE_VPD_1 0xAB | ||
1393 | #define FLT_REG_FCOE_NVRAM_1 0xAC | ||
1394 | 1330 | ||
1395 | struct qla_flt_region { | 1331 | struct qla_flt_region { |
1396 | uint32_t code; | 1332 | uint32_t code; |
@@ -1558,14 +1494,6 @@ struct access_chip_rsp_84xx { | |||
1558 | #define MBC_GET_XGMAC_STATS 0x7a | 1494 | #define MBC_GET_XGMAC_STATS 0x7a |
1559 | #define MBC_GET_DCBX_PARAMS 0x51 | 1495 | #define MBC_GET_DCBX_PARAMS 0x51 |
1560 | 1496 | ||
1561 | /* | ||
1562 | * ISP83xx mailbox commands | ||
1563 | */ | ||
1564 | #define MBC_WRITE_REMOTE_REG 0x0001 /* Write remote register */ | ||
1565 | #define MBC_READ_REMOTE_REG 0x0009 /* Read remote register */ | ||
1566 | #define MBC_RESTART_NIC_FIRMWARE 0x003d /* Restart NIC firmware */ | ||
1567 | #define MBC_SET_ACCESS_CONTROL 0x003e /* Access control command */ | ||
1568 | |||
1569 | /* Flash access control option field bit definitions */ | 1497 | /* Flash access control option field bit definitions */ |
1570 | #define FAC_OPT_FORCE_SEMAPHORE BIT_15 | 1498 | #define FAC_OPT_FORCE_SEMAPHORE BIT_15 |
1571 | #define FAC_OPT_REQUESTOR_ID BIT_14 | 1499 | #define FAC_OPT_REQUESTOR_ID BIT_14 |
@@ -1947,7 +1875,4 @@ struct qla_fcp_prio_cfg { | |||
1947 | #define FA_NPIV_CONF0_ADDR_81 0xD1000 | 1875 | #define FA_NPIV_CONF0_ADDR_81 0xD1000 |
1948 | #define FA_NPIV_CONF1_ADDR_81 0xD2000 | 1876 | #define FA_NPIV_CONF1_ADDR_81 0xD2000 |
1949 | 1877 | ||
1950 | /* 83XX Flash locations -- occupies second 8MB region. */ | ||
1951 | #define FA_FLASH_LAYOUT_ADDR_83 0xFC400 | ||
1952 | |||
1953 | #endif | 1878 | #endif |
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 2411d1a12b2..29b1a3e2823 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -48,7 +48,7 @@ extern void qla2x00_update_fcports(scsi_qla_host_t *); | |||
48 | 48 | ||
49 | extern int qla2x00_abort_isp(scsi_qla_host_t *); | 49 | extern int qla2x00_abort_isp(scsi_qla_host_t *); |
50 | extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *); | 50 | extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *); |
51 | extern void qla2x00_quiesce_io(scsi_qla_host_t *); | 51 | extern void qla82xx_quiescent_state_cleanup(scsi_qla_host_t *); |
52 | 52 | ||
53 | extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *); | 53 | extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *); |
54 | 54 | ||
@@ -71,19 +71,13 @@ extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *, | |||
71 | uint16_t *); | 71 | uint16_t *); |
72 | extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *, | 72 | extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *, |
73 | uint16_t *); | 73 | uint16_t *); |
74 | extern void qla2x00_async_tm_cmd_done(struct scsi_qla_host *, fc_port_t *, | ||
75 | struct srb_iocb *); | ||
74 | extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *); | 76 | extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *); |
75 | extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *); | 77 | extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *); |
76 | 78 | ||
77 | extern fc_port_t * | 79 | extern fc_port_t * |
78 | qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t ); | 80 | qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t ); |
79 | |||
80 | extern int __qla83xx_set_idc_control(scsi_qla_host_t *, uint32_t); | ||
81 | extern int __qla83xx_get_idc_control(scsi_qla_host_t *, uint32_t *); | ||
82 | extern void qla83xx_idc_audit(scsi_qla_host_t *, int); | ||
83 | extern int qla83xx_nic_core_reset(scsi_qla_host_t *); | ||
84 | extern void qla83xx_reset_ownership(scsi_qla_host_t *); | ||
85 | extern int qla2xxx_mctp_dump(scsi_qla_host_t *); | ||
86 | |||
87 | /* | 81 | /* |
88 | * Global Data in qla_os.c source file. | 82 | * Global Data in qla_os.c source file. |
89 | */ | 83 | */ |
@@ -110,8 +104,6 @@ extern int ql2xenablehba_err_chk; | |||
110 | extern int ql2xtargetreset; | 104 | extern int ql2xtargetreset; |
111 | extern int ql2xdontresethba; | 105 | extern int ql2xdontresethba; |
112 | extern unsigned int ql2xmaxlun; | 106 | extern unsigned int ql2xmaxlun; |
113 | extern int ql2xmdcapmask; | ||
114 | extern int ql2xmdenable; | ||
115 | 107 | ||
116 | extern int qla2x00_loop_reset(scsi_qla_host_t *); | 108 | extern int qla2x00_loop_reset(scsi_qla_host_t *); |
117 | extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); | 109 | extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); |
@@ -141,20 +133,6 @@ extern void qla2x00_relogin(struct scsi_qla_host *); | |||
141 | extern void qla2x00_do_work(struct scsi_qla_host *); | 133 | extern void qla2x00_do_work(struct scsi_qla_host *); |
142 | extern void qla2x00_free_fcports(struct scsi_qla_host *); | 134 | extern void qla2x00_free_fcports(struct scsi_qla_host *); |
143 | 135 | ||
144 | extern void qla83xx_schedule_work(scsi_qla_host_t *, int); | ||
145 | extern void qla83xx_service_idc_aen(struct work_struct *); | ||
146 | extern void qla83xx_nic_core_unrecoverable_work(struct work_struct *); | ||
147 | extern void qla83xx_idc_state_handler_work(struct work_struct *); | ||
148 | extern void qla83xx_nic_core_reset_work(struct work_struct *); | ||
149 | |||
150 | extern void qla83xx_idc_lock(scsi_qla_host_t *, uint16_t); | ||
151 | extern void qla83xx_idc_unlock(scsi_qla_host_t *, uint16_t); | ||
152 | extern int qla83xx_idc_state_handler(scsi_qla_host_t *); | ||
153 | extern int qla83xx_set_drv_presence(scsi_qla_host_t *vha); | ||
154 | extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha); | ||
155 | extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha); | ||
156 | extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha); | ||
157 | |||
158 | /* | 136 | /* |
159 | * Global Functions in qla_mid.c source file. | 137 | * Global Functions in qla_mid.c source file. |
160 | */ | 138 | */ |
@@ -176,7 +154,8 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *); | |||
176 | extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *); | 154 | extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *); |
177 | extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *); | 155 | extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *); |
178 | 156 | ||
179 | extern void qla2x00_sp_free_dma(void *, void *); | 157 | extern void qla2x00_sp_compl(struct qla_hw_data *, srb_t *); |
158 | |||
180 | extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *); | 159 | extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *); |
181 | 160 | ||
182 | extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int); | 161 | extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int); |
@@ -197,7 +176,6 @@ extern int qla2x00_vp_abort_isp(scsi_qla_host_t *); | |||
197 | /* | 176 | /* |
198 | * Global Function Prototypes in qla_iocb.c source file. | 177 | * Global Function Prototypes in qla_iocb.c source file. |
199 | */ | 178 | */ |
200 | |||
201 | extern uint16_t qla2x00_calc_iocbs_32(uint16_t); | 179 | extern uint16_t qla2x00_calc_iocbs_32(uint16_t); |
202 | extern uint16_t qla2x00_calc_iocbs_64(uint16_t); | 180 | extern uint16_t qla2x00_calc_iocbs_64(uint16_t); |
203 | extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t); | 181 | extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t); |
@@ -210,11 +188,7 @@ extern int qla2x00_start_sp(srb_t *); | |||
210 | extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t); | 188 | extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t); |
211 | extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t); | 189 | extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t); |
212 | extern int qla24xx_dif_start_scsi(srb_t *); | 190 | extern int qla24xx_dif_start_scsi(srb_t *); |
213 | extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t); | ||
214 | extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *); | ||
215 | 191 | ||
216 | extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); | ||
217 | extern int qla2x00_issue_marker(scsi_qla_host_t *, int); | ||
218 | 192 | ||
219 | /* | 193 | /* |
220 | * Global Function Prototypes in qla_mbx.c source file. | 194 | * Global Function Prototypes in qla_mbx.c source file. |
@@ -229,7 +203,8 @@ extern int | |||
229 | qla2x00_execute_fw(scsi_qla_host_t *, uint32_t); | 203 | qla2x00_execute_fw(scsi_qla_host_t *, uint32_t); |
230 | 204 | ||
231 | extern int | 205 | extern int |
232 | qla2x00_get_fw_version(scsi_qla_host_t *); | 206 | qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *, |
207 | uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *); | ||
233 | 208 | ||
234 | extern int | 209 | extern int |
235 | qla2x00_get_fw_options(scsi_qla_host_t *, uint16_t *); | 210 | qla2x00_get_fw_options(scsi_qla_host_t *, uint16_t *); |
@@ -266,9 +241,6 @@ extern int | |||
266 | qla2x00_init_firmware(scsi_qla_host_t *, uint16_t); | 241 | qla2x00_init_firmware(scsi_qla_host_t *, uint16_t); |
267 | 242 | ||
268 | extern int | 243 | extern int |
269 | qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *); | ||
270 | |||
271 | extern int | ||
272 | qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t); | 244 | qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t); |
273 | 245 | ||
274 | extern int | 246 | extern int |
@@ -397,12 +369,6 @@ qla81xx_get_port_config(scsi_qla_host_t *, uint16_t *); | |||
397 | extern int | 369 | extern int |
398 | qla81xx_set_port_config(scsi_qla_host_t *, uint16_t *); | 370 | qla81xx_set_port_config(scsi_qla_host_t *, uint16_t *); |
399 | 371 | ||
400 | extern int | ||
401 | qla2x00_port_logout(scsi_qla_host_t *, struct fc_port *); | ||
402 | |||
403 | extern int | ||
404 | qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t); | ||
405 | |||
406 | /* | 372 | /* |
407 | * Global Function Prototypes in qla_isr.c source file. | 373 | * Global Function Prototypes in qla_isr.c source file. |
408 | */ | 374 | */ |
@@ -416,8 +382,6 @@ extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *); | |||
416 | extern void qla2x00_free_irqs(scsi_qla_host_t *); | 382 | extern void qla2x00_free_irqs(scsi_qla_host_t *); |
417 | 383 | ||
418 | extern int qla2x00_get_data_rate(scsi_qla_host_t *); | 384 | extern int qla2x00_get_data_rate(scsi_qla_host_t *); |
419 | extern const char *qla2x00_get_link_speed_str(struct qla_hw_data *, uint16_t); | ||
420 | |||
421 | /* | 385 | /* |
422 | * Global Function Prototypes in qla_sup.c source file. | 386 | * Global Function Prototypes in qla_sup.c source file. |
423 | */ | 387 | */ |
@@ -443,14 +407,6 @@ extern void qla2x00_beacon_blink(struct scsi_qla_host *); | |||
443 | extern int qla24xx_beacon_on(struct scsi_qla_host *); | 407 | extern int qla24xx_beacon_on(struct scsi_qla_host *); |
444 | extern int qla24xx_beacon_off(struct scsi_qla_host *); | 408 | extern int qla24xx_beacon_off(struct scsi_qla_host *); |
445 | extern void qla24xx_beacon_blink(struct scsi_qla_host *); | 409 | extern void qla24xx_beacon_blink(struct scsi_qla_host *); |
446 | extern void qla83xx_beacon_blink(struct scsi_qla_host *); | ||
447 | extern int qla82xx_beacon_on(struct scsi_qla_host *); | ||
448 | extern int qla82xx_beacon_off(struct scsi_qla_host *); | ||
449 | extern int qla83xx_wr_reg(scsi_qla_host_t *, uint32_t, uint32_t); | ||
450 | extern int qla83xx_rd_reg(scsi_qla_host_t *, uint32_t, uint32_t *); | ||
451 | extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *); | ||
452 | extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t, | ||
453 | uint32_t, uint16_t *); | ||
454 | 410 | ||
455 | extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *, | 411 | extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *, |
456 | uint32_t, uint32_t); | 412 | uint32_t, uint32_t); |
@@ -486,7 +442,6 @@ extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t); | |||
486 | extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t); | 442 | extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t); |
487 | extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t, | 443 | extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t, |
488 | uint8_t *, uint32_t); | 444 | uint8_t *, uint32_t); |
489 | extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int); | ||
490 | 445 | ||
491 | /* | 446 | /* |
492 | * Global Function Prototypes in qla_gs.c source file. | 447 | * Global Function Prototypes in qla_gs.c source file. |
@@ -558,6 +513,7 @@ extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); | |||
558 | /* PCI related functions */ | 513 | /* PCI related functions */ |
559 | extern int qla82xx_pci_config(struct scsi_qla_host *); | 514 | extern int qla82xx_pci_config(struct scsi_qla_host *); |
560 | extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int); | 515 | extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int); |
516 | extern char *qla82xx_pci_info_str(struct scsi_qla_host *, char *); | ||
561 | extern int qla82xx_pci_region_offset(struct pci_dev *, int); | 517 | extern int qla82xx_pci_region_offset(struct pci_dev *, int); |
562 | extern int qla82xx_iospace_config(struct qla_hw_data *); | 518 | extern int qla82xx_iospace_config(struct qla_hw_data *); |
563 | 519 | ||
@@ -580,11 +536,6 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *); | |||
580 | 536 | ||
581 | /* IOCB related functions */ | 537 | /* IOCB related functions */ |
582 | extern int qla82xx_start_scsi(srb_t *); | 538 | extern int qla82xx_start_scsi(srb_t *); |
583 | extern void qla2x00_sp_free(void *, void *); | ||
584 | extern void qla2x00_sp_timeout(unsigned long); | ||
585 | extern void qla2x00_bsg_job_done(void *, void *, int); | ||
586 | extern void qla2x00_bsg_sp_free(void *, void *); | ||
587 | extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *); | ||
588 | 539 | ||
589 | /* Interrupt related */ | 540 | /* Interrupt related */ |
590 | extern irqreturn_t qla82xx_intr_handler(int, void *); | 541 | extern irqreturn_t qla82xx_intr_handler(int, void *); |
@@ -598,6 +549,7 @@ extern void qla82xx_init_flags(struct qla_hw_data *); | |||
598 | 549 | ||
599 | /* ISP 8021 hardware related */ | 550 | /* ISP 8021 hardware related */ |
600 | extern void qla82xx_set_drv_active(scsi_qla_host_t *); | 551 | extern void qla82xx_set_drv_active(scsi_qla_host_t *); |
552 | extern void qla82xx_crb_win_unlock(struct qla_hw_data *); | ||
601 | extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32); | 553 | extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32); |
602 | extern int qla82xx_rd_32(struct qla_hw_data *, ulong); | 554 | extern int qla82xx_rd_32(struct qla_hw_data *, ulong); |
603 | extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int); | 555 | extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int); |
@@ -609,22 +561,15 @@ extern uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *, uint32_t); | |||
609 | extern int qla82xx_idc_lock(struct qla_hw_data *); | 561 | extern int qla82xx_idc_lock(struct qla_hw_data *); |
610 | extern void qla82xx_idc_unlock(struct qla_hw_data *); | 562 | extern void qla82xx_idc_unlock(struct qla_hw_data *); |
611 | extern int qla82xx_device_state_handler(scsi_qla_host_t *); | 563 | extern int qla82xx_device_state_handler(scsi_qla_host_t *); |
612 | extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *); | ||
613 | extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *); | 564 | extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *); |
614 | 565 | ||
615 | extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *, | 566 | extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *, |
616 | size_t, char *); | 567 | size_t, char *); |
617 | extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *); | 568 | extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *); |
618 | extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *); | 569 | extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *); |
619 | extern void qla82xx_start_iocbs(scsi_qla_host_t *); | 570 | extern void qla82xx_start_iocbs(srb_t *); |
620 | extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *); | 571 | extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *); |
621 | extern int qla82xx_check_md_needed(scsi_qla_host_t *); | ||
622 | extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *); | 572 | extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *); |
623 | extern int qla81xx_set_led_config(scsi_qla_host_t *, uint16_t *); | ||
624 | extern int qla81xx_get_led_config(scsi_qla_host_t *, uint16_t *); | ||
625 | extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int); | ||
626 | extern char *qdev_state(uint32_t); | ||
627 | extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *); | ||
628 | 573 | ||
629 | /* BSG related functions */ | 574 | /* BSG related functions */ |
630 | extern int qla24xx_bsg_request(struct fc_bsg_job *); | 575 | extern int qla24xx_bsg_request(struct fc_bsg_job *); |
@@ -634,17 +579,4 @@ extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *, | |||
634 | dma_addr_t, size_t, uint32_t); | 579 | dma_addr_t, size_t, uint32_t); |
635 | extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t, | 580 | extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t, |
636 | uint16_t *, uint16_t *); | 581 | uint16_t *, uint16_t *); |
637 | |||
638 | /* 83xx related functions */ | ||
639 | extern void qla83xx_fw_dump(scsi_qla_host_t *, int); | ||
640 | |||
641 | /* Minidump related functions */ | ||
642 | extern int qla82xx_md_get_template_size(scsi_qla_host_t *); | ||
643 | extern int qla82xx_md_get_template(scsi_qla_host_t *); | ||
644 | extern int qla82xx_md_alloc(scsi_qla_host_t *); | ||
645 | extern void qla82xx_md_free(scsi_qla_host_t *); | ||
646 | extern int qla82xx_md_collect(scsi_qla_host_t *); | ||
647 | extern void qla82xx_md_prep(scsi_qla_host_t *); | ||
648 | extern void qla82xx_set_reset_owner(scsi_qla_host_t *); | ||
649 | |||
650 | #endif /* _QLA_GBL_H */ | 582 | #endif /* _QLA_GBL_H */ |
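Note on the prototypes removed above: besides the 83xx register/beacon helpers, the revert drops the ISP82xx minidump entry points (qla82xx_md_get_template_size/get_template/alloc/collect/free/prep). The sketch below shows one plausible way those calls could be strung together; the ordering is inferred only from the names and return types in the removed declarations, not from the driver sources, so treat it as illustrative.

    /* Illustrative only: ordering inferred from the removed prototypes,
     * not taken from the driver.  All helpers take the scsi_qla_host. */
    if (qla82xx_md_get_template_size(vha) == QLA_SUCCESS &&
        qla82xx_md_get_template(vha) == QLA_SUCCESS &&
        qla82xx_md_alloc(vha) == QLA_SUCCESS)
            qla82xx_md_collect(vha);        /* capture the minidump */
    qla82xx_md_free(vha);                   /* release buffers on teardown */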
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 01efc0e9cc3..37937aa3c3b 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c | |||
@@ -1,11 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | #include "qla_def.h" | 7 | #include "qla_def.h" |
8 | #include "qla_target.h" | ||
9 | 8 | ||
10 | static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *); | 9 | static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *); |
11 | static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *); | 10 | static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *); |
@@ -218,9 +217,6 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
218 | memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name, | 217 | memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name, |
219 | WWN_SIZE); | 218 | WWN_SIZE); |
220 | 219 | ||
221 | fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ? | ||
222 | FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER; | ||
223 | |||
224 | if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE && | 220 | if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE && |
225 | ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE) | 221 | ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE) |
226 | fcport->d_id.b.domain = 0xf0; | 222 | fcport->d_id.b.domain = 0xf0; |
@@ -244,12 +240,6 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
244 | return (rval); | 240 | return (rval); |
245 | } | 241 | } |
246 | 242 | ||
247 | static inline int | ||
248 | qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha) | ||
249 | { | ||
250 | return vha->hw->max_fibre_devices * 4 + 16; | ||
251 | } | ||
252 | |||
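The static helper removed above sized the GID_PT response dynamically: four bytes per port-ID entry times ha->max_fibre_devices, plus sixteen bytes of header. The reverted code in the next hunk goes back to the fixed GID_PT_RSP_SIZE and MAX_FIBRE_DEVICES macros. A minimal restatement of that arithmetic, using a purely hypothetical limit of 512 devices:

    /* Hypothetical example of the removed sizing rule (512 is not a
     * constant from this driver): 4 bytes per entry plus a 16-byte header. */
    uint16_t gid_pt_rsp_size = 512 * 4 + 16;    /* = 2064 bytes */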
253 | /** | 243 | /** |
254 | * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command. | 244 | * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command. |
255 | * @ha: HA context | 245 | * @ha: HA context |
@@ -271,21 +261,20 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) | |||
271 | 261 | ||
272 | struct ct_sns_gid_pt_data *gid_data; | 262 | struct ct_sns_gid_pt_data *gid_data; |
273 | struct qla_hw_data *ha = vha->hw; | 263 | struct qla_hw_data *ha = vha->hw; |
274 | uint16_t gid_pt_rsp_size; | ||
275 | 264 | ||
276 | if (IS_QLA2100(ha) || IS_QLA2200(ha)) | 265 | if (IS_QLA2100(ha) || IS_QLA2200(ha)) |
277 | return qla2x00_sns_gid_pt(vha, list); | 266 | return qla2x00_sns_gid_pt(vha, list); |
278 | 267 | ||
279 | gid_data = NULL; | 268 | gid_data = NULL; |
280 | gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha); | 269 | |
281 | /* Issue GID_PT */ | 270 | /* Issue GID_PT */ |
282 | /* Prepare common MS IOCB */ | 271 | /* Prepare common MS IOCB */ |
283 | ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE, | 272 | ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE, |
284 | gid_pt_rsp_size); | 273 | GID_PT_RSP_SIZE); |
285 | 274 | ||
286 | /* Prepare CT request */ | 275 | /* Prepare CT request */ |
287 | ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GID_PT_CMD, | 276 | ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GID_PT_CMD, |
288 | gid_pt_rsp_size); | 277 | GID_PT_RSP_SIZE); |
289 | ct_rsp = &ha->ct_sns->p.rsp; | 278 | ct_rsp = &ha->ct_sns->p.rsp; |
290 | 279 | ||
291 | /* Prepare CT arguments -- port_type */ | 280 | /* Prepare CT arguments -- port_type */ |
@@ -303,7 +292,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) | |||
303 | rval = QLA_FUNCTION_FAILED; | 292 | rval = QLA_FUNCTION_FAILED; |
304 | } else { | 293 | } else { |
305 | /* Set port IDs in switch info list. */ | 294 | /* Set port IDs in switch info list. */ |
306 | for (i = 0; i < ha->max_fibre_devices; i++) { | 295 | for (i = 0; i < MAX_FIBRE_DEVICES; i++) { |
307 | gid_data = &ct_rsp->rsp.gid_pt.entries[i]; | 296 | gid_data = &ct_rsp->rsp.gid_pt.entries[i]; |
308 | list[i].d_id.b.domain = gid_data->port_id[0]; | 297 | list[i].d_id.b.domain = gid_data->port_id[0]; |
309 | list[i].d_id.b.area = gid_data->port_id[1]; | 298 | list[i].d_id.b.area = gid_data->port_id[1]; |
@@ -324,7 +313,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) | |||
324 | * single call. Return a failed status, and let GA_NXT handle | 313 | * single call. Return a failed status, and let GA_NXT handle |
325 | * the overload. | 314 | * the overload. |
326 | */ | 315 | */ |
327 | if (i == ha->max_fibre_devices) | 316 | if (i == MAX_FIBRE_DEVICES) |
328 | rval = QLA_FUNCTION_FAILED; | 317 | rval = QLA_FUNCTION_FAILED; |
329 | } | 318 | } |
330 | 319 | ||
@@ -341,7 +330,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) | |||
341 | int | 330 | int |
342 | qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) | 331 | qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) |
343 | { | 332 | { |
344 | int rval = QLA_SUCCESS; | 333 | int rval; |
345 | uint16_t i; | 334 | uint16_t i; |
346 | 335 | ||
347 | ms_iocb_entry_t *ms_pkt; | 336 | ms_iocb_entry_t *ms_pkt; |
@@ -352,7 +341,7 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) | |||
352 | if (IS_QLA2100(ha) || IS_QLA2200(ha)) | 341 | if (IS_QLA2100(ha) || IS_QLA2200(ha)) |
353 | return qla2x00_sns_gpn_id(vha, list); | 342 | return qla2x00_sns_gpn_id(vha, list); |
354 | 343 | ||
355 | for (i = 0; i < ha->max_fibre_devices; i++) { | 344 | for (i = 0; i < MAX_FIBRE_DEVICES; i++) { |
356 | /* Issue GPN_ID */ | 345 | /* Issue GPN_ID */ |
357 | /* Prepare common MS IOCB */ | 346 | /* Prepare common MS IOCB */ |
358 | ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE, | 347 | ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE, |
@@ -375,11 +364,9 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) | |||
375 | /*EMPTY*/ | 364 | /*EMPTY*/ |
376 | ql_dbg(ql_dbg_disc, vha, 0x2056, | 365 | ql_dbg(ql_dbg_disc, vha, 0x2056, |
377 | "GPN_ID issue IOCB failed (%d).\n", rval); | 366 | "GPN_ID issue IOCB failed (%d).\n", rval); |
378 | break; | ||
379 | } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, | 367 | } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, |
380 | "GPN_ID") != QLA_SUCCESS) { | 368 | "GPN_ID") != QLA_SUCCESS) { |
381 | rval = QLA_FUNCTION_FAILED; | 369 | rval = QLA_FUNCTION_FAILED; |
382 | break; | ||
383 | } else { | 370 | } else { |
384 | /* Save portname */ | 371 | /* Save portname */ |
385 | memcpy(list[i].port_name, | 372 | memcpy(list[i].port_name, |
@@ -404,7 +391,7 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) | |||
404 | int | 391 | int |
405 | qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) | 392 | qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) |
406 | { | 393 | { |
407 | int rval = QLA_SUCCESS; | 394 | int rval; |
408 | uint16_t i; | 395 | uint16_t i; |
409 | struct qla_hw_data *ha = vha->hw; | 396 | struct qla_hw_data *ha = vha->hw; |
410 | ms_iocb_entry_t *ms_pkt; | 397 | ms_iocb_entry_t *ms_pkt; |
@@ -414,7 +401,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) | |||
414 | if (IS_QLA2100(ha) || IS_QLA2200(ha)) | 401 | if (IS_QLA2100(ha) || IS_QLA2200(ha)) |
415 | return qla2x00_sns_gnn_id(vha, list); | 402 | return qla2x00_sns_gnn_id(vha, list); |
416 | 403 | ||
417 | for (i = 0; i < ha->max_fibre_devices; i++) { | 404 | for (i = 0; i < MAX_FIBRE_DEVICES; i++) { |
418 | /* Issue GNN_ID */ | 405 | /* Issue GNN_ID */ |
419 | /* Prepare common MS IOCB */ | 406 | /* Prepare common MS IOCB */ |
420 | ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE, | 407 | ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE, |
@@ -437,11 +424,9 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) | |||
437 | /*EMPTY*/ | 424 | /*EMPTY*/ |
438 | ql_dbg(ql_dbg_disc, vha, 0x2057, | 425 | ql_dbg(ql_dbg_disc, vha, 0x2057, |
439 | "GNN_ID issue IOCB failed (%d).\n", rval); | 426 | "GNN_ID issue IOCB failed (%d).\n", rval); |
440 | break; | ||
441 | } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, | 427 | } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, |
442 | "GNN_ID") != QLA_SUCCESS) { | 428 | "GNN_ID") != QLA_SUCCESS) { |
443 | rval = QLA_FUNCTION_FAILED; | 429 | rval = QLA_FUNCTION_FAILED; |
444 | break; | ||
445 | } else { | 430 | } else { |
446 | /* Save nodename */ | 431 | /* Save nodename */ |
447 | memcpy(list[i].node_name, | 432 | memcpy(list[i].node_name, |
@@ -560,8 +545,7 @@ qla2x00_rff_id(scsi_qla_host_t *vha) | |||
560 | ct_req->req.rff_id.port_id[1] = vha->d_id.b.area; | 545 | ct_req->req.rff_id.port_id[1] = vha->d_id.b.area; |
561 | ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa; | 546 | ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa; |
562 | 547 | ||
563 | qlt_rff_id(vha, ct_req); | 548 | ct_req->req.rff_id.fc4_feature = BIT_1; |
564 | |||
565 | ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */ | 549 | ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */ |
566 | 550 | ||
567 | /* Execute MS IOCB */ | 551 | /* Execute MS IOCB */ |
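In the RFF_ID hunk above, the reverted code writes the FC-4 Features field directly instead of letting qlt_rff_id() fill it in for target mode. In that field, bit 1 conventionally advertises initiator capability and bit 0 target capability, so the revert registers the port as an FCP initiator only. A minimal sketch of the reverted assignment, with the target bit shown purely as illustration:

    /* Sketch of the reverted behaviour; the commented line only marks
     * where a target-capable registration would differ. */
    ct_req->req.rff_id.fc4_feature = BIT_1;         /* FCP initiator function */
    /* ct_req->req.rff_id.fc4_feature |= BIT_0;        target function */
    ct_req->req.rff_id.fc4_type = 0x08;             /* SCSI - FCP */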
@@ -751,7 +735,7 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len, | |||
751 | static int | 735 | static int |
752 | qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) | 736 | qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) |
753 | { | 737 | { |
754 | int rval = QLA_SUCCESS; | 738 | int rval; |
755 | struct qla_hw_data *ha = vha->hw; | 739 | struct qla_hw_data *ha = vha->hw; |
756 | struct sns_cmd_pkt *sns_cmd; | 740 | struct sns_cmd_pkt *sns_cmd; |
757 | 741 | ||
@@ -774,7 +758,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
774 | "GA_NXT Send SNS failed (%d).\n", rval); | 758 | "GA_NXT Send SNS failed (%d).\n", rval); |
775 | } else if (sns_cmd->p.gan_data[8] != 0x80 || | 759 | } else if (sns_cmd->p.gan_data[8] != 0x80 || |
776 | sns_cmd->p.gan_data[9] != 0x02) { | 760 | sns_cmd->p.gan_data[9] != 0x02) { |
777 | ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084, | 761 | ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207d, |
778 | "GA_NXT failed, rejected request ga_nxt_rsp:\n"); | 762 | "GA_NXT failed, rejected request ga_nxt_rsp:\n"); |
779 | ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074, | 763 | ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074, |
780 | sns_cmd->p.gan_data, 16); | 764 | sns_cmd->p.gan_data, 16); |
@@ -830,14 +814,11 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) | |||
830 | uint16_t i; | 814 | uint16_t i; |
831 | uint8_t *entry; | 815 | uint8_t *entry; |
832 | struct sns_cmd_pkt *sns_cmd; | 816 | struct sns_cmd_pkt *sns_cmd; |
833 | uint16_t gid_pt_sns_data_size; | ||
834 | |||
835 | gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha); | ||
836 | 817 | ||
837 | /* Issue GID_PT. */ | 818 | /* Issue GID_PT. */ |
838 | /* Prepare SNS command request. */ | 819 | /* Prepare SNS command request. */ |
839 | sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN, | 820 | sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN, |
840 | gid_pt_sns_data_size); | 821 | GID_PT_SNS_DATA_SIZE); |
841 | 822 | ||
842 | /* Prepare SNS command arguments -- port_type. */ | 823 | /* Prepare SNS command arguments -- port_type. */ |
843 | sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE; | 824 | sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE; |
@@ -858,7 +839,7 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) | |||
858 | rval = QLA_FUNCTION_FAILED; | 839 | rval = QLA_FUNCTION_FAILED; |
859 | } else { | 840 | } else { |
860 | /* Set port IDs in switch info list. */ | 841 | /* Set port IDs in switch info list. */ |
861 | for (i = 0; i < ha->max_fibre_devices; i++) { | 842 | for (i = 0; i < MAX_FIBRE_DEVICES; i++) { |
862 | entry = &sns_cmd->p.gid_data[(i * 4) + 16]; | 843 | entry = &sns_cmd->p.gid_data[(i * 4) + 16]; |
863 | list[i].d_id.b.domain = entry[1]; | 844 | list[i].d_id.b.domain = entry[1]; |
864 | list[i].d_id.b.area = entry[2]; | 845 | list[i].d_id.b.area = entry[2]; |
@@ -877,7 +858,7 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) | |||
877 | * single call. Return a failed status, and let GA_NXT handle | 858 | * single call. Return a failed status, and let GA_NXT handle |
878 | * the overload. | 859 | * the overload. |
879 | */ | 860 | */ |
880 | if (i == ha->max_fibre_devices) | 861 | if (i == MAX_FIBRE_DEVICES) |
881 | rval = QLA_FUNCTION_FAILED; | 862 | rval = QLA_FUNCTION_FAILED; |
882 | } | 863 | } |
883 | 864 | ||
@@ -896,12 +877,12 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) | |||
896 | static int | 877 | static int |
897 | qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) | 878 | qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) |
898 | { | 879 | { |
899 | int rval = QLA_SUCCESS; | 880 | int rval; |
900 | struct qla_hw_data *ha = vha->hw; | 881 | struct qla_hw_data *ha = vha->hw; |
901 | uint16_t i; | 882 | uint16_t i; |
902 | struct sns_cmd_pkt *sns_cmd; | 883 | struct sns_cmd_pkt *sns_cmd; |
903 | 884 | ||
904 | for (i = 0; i < ha->max_fibre_devices; i++) { | 885 | for (i = 0; i < MAX_FIBRE_DEVICES; i++) { |
905 | /* Issue GPN_ID */ | 886 | /* Issue GPN_ID */ |
906 | /* Prepare SNS command request. */ | 887 | /* Prepare SNS command request. */ |
907 | sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD, | 888 | sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD, |
@@ -952,12 +933,12 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) | |||
952 | static int | 933 | static int |
953 | qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) | 934 | qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) |
954 | { | 935 | { |
955 | int rval = QLA_SUCCESS; | 936 | int rval; |
956 | struct qla_hw_data *ha = vha->hw; | 937 | struct qla_hw_data *ha = vha->hw; |
957 | uint16_t i; | 938 | uint16_t i; |
958 | struct sns_cmd_pkt *sns_cmd; | 939 | struct sns_cmd_pkt *sns_cmd; |
959 | 940 | ||
960 | for (i = 0; i < ha->max_fibre_devices; i++) { | 941 | for (i = 0; i < MAX_FIBRE_DEVICES; i++) { |
961 | /* Issue GNN_ID */ | 942 | /* Issue GNN_ID */ |
962 | /* Prepare SNS command request. */ | 943 | /* Prepare SNS command request. */ |
963 | sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD, | 944 | sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD, |
@@ -1126,26 +1107,20 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha) | |||
1126 | static int | 1107 | static int |
1127 | qla2x00_mgmt_svr_login(scsi_qla_host_t *vha) | 1108 | qla2x00_mgmt_svr_login(scsi_qla_host_t *vha) |
1128 | { | 1109 | { |
1129 | int ret, rval; | 1110 | int ret; |
1130 | uint16_t mb[MAILBOX_REGISTER_COUNT]; | 1111 | uint16_t mb[MAILBOX_REGISTER_COUNT]; |
1131 | struct qla_hw_data *ha = vha->hw; | 1112 | struct qla_hw_data *ha = vha->hw; |
1132 | ret = QLA_SUCCESS; | 1113 | ret = QLA_SUCCESS; |
1133 | if (vha->flags.management_server_logged_in) | 1114 | if (vha->flags.management_server_logged_in) |
1134 | return ret; | 1115 | return ret; |
1135 | 1116 | ||
1136 | rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, | 1117 | ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, |
1137 | 0xfa, mb, BIT_1); | 1118 | mb, BIT_1|BIT_0); |
1138 | if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) { | 1119 | if (mb[0] != MBS_COMMAND_COMPLETE) { |
1139 | if (rval == QLA_MEMORY_ALLOC_FAILED) | 1120 | ql_dbg(ql_dbg_disc, vha, 0x2024, |
1140 | ql_dbg(ql_dbg_disc, vha, 0x2085, | 1121 | "Failed management_server login: loopid=%x mb[0]=%x " |
1141 | "Failed management_server login: loopid=%x " | 1122 | "mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n", |
1142 | "rval=%d\n", vha->mgmt_svr_loop_id, rval); | 1123 | vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], mb[7]); |
1143 | else | ||
1144 | ql_dbg(ql_dbg_disc, vha, 0x2024, | ||
1145 | "Failed management_server login: loopid=%x " | ||
1146 | "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n", | ||
1147 | vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], | ||
1148 | mb[7]); | ||
1149 | ret = QLA_FUNCTION_FAILED; | 1124 | ret = QLA_FUNCTION_FAILED; |
1150 | } else | 1125 | } else |
1151 | vha->flags.management_server_logged_in = 1; | 1126 | vha->flags.management_server_logged_in = 1; |
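The pre-revert column above also checks the return value of ha->isp_ops->fabric_login() (reporting a memory-allocation failure separately) and passes BIT_1 alone as the login options, whereas the reverted column passes BIT_1|BIT_0 and inspects only mb[0]. A condensed sketch of the stricter variant, using the names already in scope in this function:

    /* Condensed from the pre-revert column: fail on either a transport
     * error or a mailbox status other than MBS_COMMAND_COMPLETE. */
    rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
        0xfa, mb, BIT_1);
    if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE)
            ret = QLA_FUNCTION_FAILED;
    else
            vha->flags.management_server_logged_in = 1;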
@@ -1572,7 +1547,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) | |||
1572 | eiter = (struct ct_fdmi_port_attr *) (entries + size); | 1547 | eiter = (struct ct_fdmi_port_attr *) (entries + size); |
1573 | eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED); | 1548 | eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED); |
1574 | eiter->len = __constant_cpu_to_be16(4 + 4); | 1549 | eiter->len = __constant_cpu_to_be16(4 + 4); |
1575 | if (IS_CNA_CAPABLE(ha)) | 1550 | if (IS_QLA8XXX_TYPE(ha)) |
1576 | eiter->a.sup_speed = __constant_cpu_to_be32( | 1551 | eiter->a.sup_speed = __constant_cpu_to_be32( |
1577 | FDMI_PORT_SPEED_10GB); | 1552 | FDMI_PORT_SPEED_10GB); |
1578 | else if (IS_QLA25XX(ha)) | 1553 | else if (IS_QLA25XX(ha)) |
@@ -1619,10 +1594,6 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) | |||
1619 | eiter->a.cur_speed = | 1594 | eiter->a.cur_speed = |
1620 | __constant_cpu_to_be32(FDMI_PORT_SPEED_10GB); | 1595 | __constant_cpu_to_be32(FDMI_PORT_SPEED_10GB); |
1621 | break; | 1596 | break; |
1622 | case PORT_SPEED_16GB: | ||
1623 | eiter->a.cur_speed = | ||
1624 | __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB); | ||
1625 | break; | ||
1626 | default: | 1597 | default: |
1627 | eiter->a.cur_speed = | 1598 | eiter->a.cur_speed = |
1628 | __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN); | 1599 | __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN); |
@@ -1753,7 +1724,7 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha) | |||
1753 | int | 1724 | int |
1754 | qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) | 1725 | qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) |
1755 | { | 1726 | { |
1756 | int rval = QLA_SUCCESS; | 1727 | int rval; |
1757 | uint16_t i; | 1728 | uint16_t i; |
1758 | struct qla_hw_data *ha = vha->hw; | 1729 | struct qla_hw_data *ha = vha->hw; |
1759 | ms_iocb_entry_t *ms_pkt; | 1730 | ms_iocb_entry_t *ms_pkt; |
@@ -1763,7 +1734,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) | |||
1763 | if (!IS_IIDMA_CAPABLE(ha)) | 1734 | if (!IS_IIDMA_CAPABLE(ha)) |
1764 | return QLA_FUNCTION_FAILED; | 1735 | return QLA_FUNCTION_FAILED; |
1765 | 1736 | ||
1766 | for (i = 0; i < ha->max_fibre_devices; i++) { | 1737 | for (i = 0; i < MAX_FIBRE_DEVICES; i++) { |
1767 | /* Issue GFPN_ID */ | 1738 | /* Issue GFPN_ID */ |
1768 | /* Prepare common MS IOCB */ | 1739 | /* Prepare common MS IOCB */ |
1769 | ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE, | 1740 | ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE, |
@@ -1786,11 +1757,9 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) | |||
1786 | /*EMPTY*/ | 1757 | /*EMPTY*/ |
1787 | ql_dbg(ql_dbg_disc, vha, 0x2023, | 1758 | ql_dbg(ql_dbg_disc, vha, 0x2023, |
1788 | "GFPN_ID issue IOCB failed (%d).\n", rval); | 1759 | "GFPN_ID issue IOCB failed (%d).\n", rval); |
1789 | break; | ||
1790 | } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, | 1760 | } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, |
1791 | "GFPN_ID") != QLA_SUCCESS) { | 1761 | "GFPN_ID") != QLA_SUCCESS) { |
1792 | rval = QLA_FUNCTION_FAILED; | 1762 | rval = QLA_FUNCTION_FAILED; |
1793 | break; | ||
1794 | } else { | 1763 | } else { |
1795 | /* Save fabric portname */ | 1764 | /* Save fabric portname */ |
1796 | memcpy(list[i].fabric_port_name, | 1765 | memcpy(list[i].fabric_port_name, |
@@ -1877,7 +1846,7 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) | |||
1877 | if (rval) | 1846 | if (rval) |
1878 | return rval; | 1847 | return rval; |
1879 | 1848 | ||
1880 | for (i = 0; i < ha->max_fibre_devices; i++) { | 1849 | for (i = 0; i < MAX_FIBRE_DEVICES; i++) { |
1881 | /* Issue GFPN_ID */ | 1850 | /* Issue GFPN_ID */ |
1882 | /* Prepare common MS IOCB */ | 1851 | /* Prepare common MS IOCB */ |
1883 | ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE, | 1852 | ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE, |
@@ -1933,9 +1902,6 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) | |||
1933 | case BIT_11: | 1902 | case BIT_11: |
1934 | list[i].fp_speed = PORT_SPEED_8GB; | 1903 | list[i].fp_speed = PORT_SPEED_8GB; |
1935 | break; | 1904 | break; |
1936 | case BIT_10: | ||
1937 | list[i].fp_speed = PORT_SPEED_16GB; | ||
1938 | break; | ||
1939 | } | 1905 | } |
1940 | 1906 | ||
1941 | ql_dbg(ql_dbg_disc, vha, 0x205b, | 1907 | ql_dbg(ql_dbg_disc, vha, 0x205b, |
@@ -1981,7 +1947,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) | |||
1981 | struct qla_hw_data *ha = vha->hw; | 1947 | struct qla_hw_data *ha = vha->hw; |
1982 | uint8_t fcp_scsi_features = 0; | 1948 | uint8_t fcp_scsi_features = 0; |
1983 | 1949 | ||
1984 | for (i = 0; i < ha->max_fibre_devices; i++) { | 1950 | for (i = 0; i < MAX_FIBRE_DEVICES; i++) { |
1985 | /* Set default FC4 Type as UNKNOWN so the default is to | 1951 | /* Set default FC4 Type as UNKNOWN so the default is to |
1986 | * Process this port */ | 1952 | * Process this port */ |
1987 | list[i].fc4_type = FC4_TYPE_UNKNOWN; | 1953 | list[i].fc4_type = FC4_TYPE_UNKNOWN; |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 563eee3fa92..37da04d3db2 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -17,9 +17,6 @@ | |||
17 | #include <asm/prom.h> | 17 | #include <asm/prom.h> |
18 | #endif | 18 | #endif |
19 | 19 | ||
20 | #include <target/target_core_base.h> | ||
21 | #include "qla_target.h" | ||
22 | |||
23 | /* | 20 | /* |
24 | * QLogic ISP2x00 Hardware Support Function Prototypes. | 21 | * QLogic ISP2x00 Hardware Support Function Prototypes. |
25 | */ | 22 | */ |
@@ -32,6 +29,7 @@ static int qla2x00_configure_loop(scsi_qla_host_t *); | |||
32 | static int qla2x00_configure_local_loop(scsi_qla_host_t *); | 29 | static int qla2x00_configure_local_loop(scsi_qla_host_t *); |
33 | static int qla2x00_configure_fabric(scsi_qla_host_t *); | 30 | static int qla2x00_configure_fabric(scsi_qla_host_t *); |
34 | static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *); | 31 | static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *); |
32 | static int qla2x00_device_resync(scsi_qla_host_t *); | ||
35 | static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *, | 33 | static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *, |
36 | uint16_t *); | 34 | uint16_t *); |
37 | 35 | ||
@@ -43,10 +41,11 @@ static int qla25xx_init_queues(struct qla_hw_data *); | |||
43 | 41 | ||
44 | /* SRB Extensions ---------------------------------------------------------- */ | 42 | /* SRB Extensions ---------------------------------------------------------- */ |
45 | 43 | ||
46 | void | 44 | static void |
47 | qla2x00_sp_timeout(unsigned long __data) | 45 | qla2x00_ctx_sp_timeout(unsigned long __data) |
48 | { | 46 | { |
49 | srb_t *sp = (srb_t *)__data; | 47 | srb_t *sp = (srb_t *)__data; |
48 | struct srb_ctx *ctx; | ||
50 | struct srb_iocb *iocb; | 49 | struct srb_iocb *iocb; |
51 | fc_port_t *fcport = sp->fcport; | 50 | fc_port_t *fcport = sp->fcport; |
52 | struct qla_hw_data *ha = fcport->vha->hw; | 51 | struct qla_hw_data *ha = fcport->vha->hw; |
@@ -56,28 +55,81 @@ qla2x00_sp_timeout(unsigned long __data) | |||
56 | spin_lock_irqsave(&ha->hardware_lock, flags); | 55 | spin_lock_irqsave(&ha->hardware_lock, flags); |
57 | req = ha->req_q_map[0]; | 56 | req = ha->req_q_map[0]; |
58 | req->outstanding_cmds[sp->handle] = NULL; | 57 | req->outstanding_cmds[sp->handle] = NULL; |
59 | iocb = &sp->u.iocb_cmd; | 58 | ctx = sp->ctx; |
59 | iocb = ctx->u.iocb_cmd; | ||
60 | iocb->timeout(sp); | 60 | iocb->timeout(sp); |
61 | sp->free(fcport->vha, sp); | 61 | iocb->free(sp); |
62 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 62 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
63 | } | 63 | } |
64 | 64 | ||
65 | void | 65 | static void |
66 | qla2x00_sp_free(void *data, void *ptr) | 66 | qla2x00_ctx_sp_free(srb_t *sp) |
67 | { | 67 | { |
68 | srb_t *sp = (srb_t *)ptr; | 68 | struct srb_ctx *ctx = sp->ctx; |
69 | struct srb_iocb *iocb = &sp->u.iocb_cmd; | 69 | struct srb_iocb *iocb = ctx->u.iocb_cmd; |
70 | struct scsi_qla_host *vha = (scsi_qla_host_t *)data; | 70 | struct scsi_qla_host *vha = sp->fcport->vha; |
71 | 71 | ||
72 | del_timer(&iocb->timer); | 72 | del_timer(&iocb->timer); |
73 | mempool_free(sp, vha->hw->srb_mempool); | 73 | kfree(iocb); |
74 | kfree(ctx); | ||
75 | mempool_free(sp, sp->fcport->vha->hw->srb_mempool); | ||
74 | 76 | ||
75 | QLA_VHA_MARK_NOT_BUSY(vha); | 77 | QLA_VHA_MARK_NOT_BUSY(vha); |
76 | } | 78 | } |
77 | 79 | ||
80 | inline srb_t * | ||
81 | qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size, | ||
82 | unsigned long tmo) | ||
83 | { | ||
84 | srb_t *sp = NULL; | ||
85 | struct qla_hw_data *ha = vha->hw; | ||
86 | struct srb_ctx *ctx; | ||
87 | struct srb_iocb *iocb; | ||
88 | uint8_t bail; | ||
89 | |||
90 | QLA_VHA_MARK_BUSY(vha, bail); | ||
91 | if (bail) | ||
92 | return NULL; | ||
93 | |||
94 | sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL); | ||
95 | if (!sp) | ||
96 | goto done; | ||
97 | ctx = kzalloc(size, GFP_KERNEL); | ||
98 | if (!ctx) { | ||
99 | mempool_free(sp, ha->srb_mempool); | ||
100 | sp = NULL; | ||
101 | goto done; | ||
102 | } | ||
103 | iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL); | ||
104 | if (!iocb) { | ||
105 | mempool_free(sp, ha->srb_mempool); | ||
106 | sp = NULL; | ||
107 | kfree(ctx); | ||
108 | goto done; | ||
109 | } | ||
110 | |||
111 | memset(sp, 0, sizeof(*sp)); | ||
112 | sp->fcport = fcport; | ||
113 | sp->ctx = ctx; | ||
114 | ctx->u.iocb_cmd = iocb; | ||
115 | iocb->free = qla2x00_ctx_sp_free; | ||
116 | |||
117 | init_timer(&iocb->timer); | ||
118 | if (!tmo) | ||
119 | goto done; | ||
120 | iocb->timer.expires = jiffies + tmo * HZ; | ||
121 | iocb->timer.data = (unsigned long)sp; | ||
122 | iocb->timer.function = qla2x00_ctx_sp_timeout; | ||
123 | add_timer(&iocb->timer); | ||
124 | done: | ||
125 | if (!sp) | ||
126 | QLA_VHA_MARK_NOT_BUSY(vha); | ||
127 | return sp; | ||
128 | } | ||
129 | |||
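qla2x00_get_ctx_sp() above (the reverted allocation path) takes the srb from the mempool, kzallocs the srb_ctx and srb_iocb separately, points iocb->free at qla2x00_ctx_sp_free() and arms a timer when tmo is non-zero. A usage sketch, condensed from the async-login path that follows; it assumes a valid vha/fcport pair and shows the expected lifetime, with everything released through lio->free() on failure:

    /* Usage sketch condensed from qla2x00_async_login() below. */
    srb_t *sp;
    struct srb_ctx *ctx;
    struct srb_iocb *lio;

    sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
        qla2x00_get_async_timeout(vha) + 2);
    if (!sp)
            return QLA_FUNCTION_FAILED;

    ctx = sp->ctx;
    ctx->type = SRB_LOGIN_CMD;
    ctx->name = "login";
    lio = ctx->u.iocb_cmd;
    lio->timeout = qla2x00_async_iocb_timeout;
    lio->done = qla2x00_async_login_ctx_done;

    if (qla2x00_start_sp(sp) != QLA_SUCCESS) {
            lio->free(sp);      /* frees the iocb, the ctx and the srb */
            return QLA_FUNCTION_FAILED;
    }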
78 | /* Asynchronous Login/Logout Routines -------------------------------------- */ | 130 | /* Asynchronous Login/Logout Routines -------------------------------------- */ |
79 | 131 | ||
80 | unsigned long | 132 | static inline unsigned long |
81 | qla2x00_get_async_timeout(struct scsi_qla_host *vha) | 133 | qla2x00_get_async_timeout(struct scsi_qla_host *vha) |
82 | { | 134 | { |
83 | unsigned long tmo; | 135 | unsigned long tmo; |
@@ -96,19 +148,19 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha) | |||
96 | } | 148 | } |
97 | 149 | ||
98 | static void | 150 | static void |
99 | qla2x00_async_iocb_timeout(void *data) | 151 | qla2x00_async_iocb_timeout(srb_t *sp) |
100 | { | 152 | { |
101 | srb_t *sp = (srb_t *)data; | ||
102 | fc_port_t *fcport = sp->fcport; | 153 | fc_port_t *fcport = sp->fcport; |
154 | struct srb_ctx *ctx = sp->ctx; | ||
103 | 155 | ||
104 | ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, | 156 | ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, |
105 | "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n", | 157 | "Async-%s timeout - portid=%02x%02x%02x.\n", |
106 | sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, | 158 | ctx->name, fcport->d_id.b.domain, fcport->d_id.b.area, |
107 | fcport->d_id.b.al_pa); | 159 | fcport->d_id.b.al_pa); |
108 | 160 | ||
109 | fcport->flags &= ~FCF_ASYNC_SENT; | 161 | fcport->flags &= ~FCF_ASYNC_SENT; |
110 | if (sp->type == SRB_LOGIN_CMD) { | 162 | if (ctx->type == SRB_LOGIN_CMD) { |
111 | struct srb_iocb *lio = &sp->u.iocb_cmd; | 163 | struct srb_iocb *lio = ctx->u.iocb_cmd; |
112 | qla2x00_post_async_logout_work(fcport->vha, fcport, NULL); | 164 | qla2x00_post_async_logout_work(fcport->vha, fcport, NULL); |
113 | /* Retry as needed. */ | 165 | /* Retry as needed. */ |
114 | lio->u.logio.data[0] = MBS_COMMAND_ERROR; | 166 | lio->u.logio.data[0] = MBS_COMMAND_ERROR; |
@@ -120,16 +172,14 @@ qla2x00_async_iocb_timeout(void *data) | |||
120 | } | 172 | } |
121 | 173 | ||
122 | static void | 174 | static void |
123 | qla2x00_async_login_sp_done(void *data, void *ptr, int res) | 175 | qla2x00_async_login_ctx_done(srb_t *sp) |
124 | { | 176 | { |
125 | srb_t *sp = (srb_t *)ptr; | 177 | struct srb_ctx *ctx = sp->ctx; |
126 | struct srb_iocb *lio = &sp->u.iocb_cmd; | 178 | struct srb_iocb *lio = ctx->u.iocb_cmd; |
127 | struct scsi_qla_host *vha = (scsi_qla_host_t *)data; | 179 | |
128 | 180 | qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport, | |
129 | if (!test_bit(UNLOADING, &vha->dpc_flags)) | 181 | lio->u.logio.data); |
130 | qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport, | 182 | lio->free(sp); |
131 | lio->u.logio.data); | ||
132 | sp->free(sp->fcport->vha, sp); | ||
133 | } | 183 | } |
134 | 184 | ||
135 | int | 185 | int |
@@ -137,21 +187,22 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
137 | uint16_t *data) | 187 | uint16_t *data) |
138 | { | 188 | { |
139 | srb_t *sp; | 189 | srb_t *sp; |
190 | struct srb_ctx *ctx; | ||
140 | struct srb_iocb *lio; | 191 | struct srb_iocb *lio; |
141 | int rval; | 192 | int rval; |
142 | 193 | ||
143 | rval = QLA_FUNCTION_FAILED; | 194 | rval = QLA_FUNCTION_FAILED; |
144 | sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); | 195 | sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), |
196 | qla2x00_get_async_timeout(vha) + 2); | ||
145 | if (!sp) | 197 | if (!sp) |
146 | goto done; | 198 | goto done; |
147 | 199 | ||
148 | sp->type = SRB_LOGIN_CMD; | 200 | ctx = sp->ctx; |
149 | sp->name = "login"; | 201 | ctx->type = SRB_LOGIN_CMD; |
150 | qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); | 202 | ctx->name = "login"; |
151 | 203 | lio = ctx->u.iocb_cmd; | |
152 | lio = &sp->u.iocb_cmd; | ||
153 | lio->timeout = qla2x00_async_iocb_timeout; | 204 | lio->timeout = qla2x00_async_iocb_timeout; |
154 | sp->done = qla2x00_async_login_sp_done; | 205 | lio->done = qla2x00_async_login_ctx_done; |
155 | lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; | 206 | lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; |
156 | if (data[1] & QLA_LOGIO_LOGIN_RETRIED) | 207 | if (data[1] & QLA_LOGIO_LOGIN_RETRIED) |
157 | lio->u.logio.flags |= SRB_LOGIN_RETRIED; | 208 | lio->u.logio.flags |= SRB_LOGIN_RETRIED; |
@@ -160,77 +211,73 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
160 | goto done_free_sp; | 211 | goto done_free_sp; |
161 | 212 | ||
162 | ql_dbg(ql_dbg_disc, vha, 0x2072, | 213 | ql_dbg(ql_dbg_disc, vha, 0x2072, |
163 | "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x " | 214 | "Async-login - loopid=%x portid=%02x%02x%02x retries=%d.\n", |
164 | "retries=%d.\n", sp->handle, fcport->loop_id, | 215 | fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, |
165 | fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, | 216 | fcport->d_id.b.al_pa, fcport->login_retry); |
166 | fcport->login_retry); | ||
167 | return rval; | 217 | return rval; |
168 | 218 | ||
169 | done_free_sp: | 219 | done_free_sp: |
170 | sp->free(fcport->vha, sp); | 220 | lio->free(sp); |
171 | done: | 221 | done: |
172 | return rval; | 222 | return rval; |
173 | } | 223 | } |
174 | 224 | ||
175 | static void | 225 | static void |
176 | qla2x00_async_logout_sp_done(void *data, void *ptr, int res) | 226 | qla2x00_async_logout_ctx_done(srb_t *sp) |
177 | { | 227 | { |
178 | srb_t *sp = (srb_t *)ptr; | 228 | struct srb_ctx *ctx = sp->ctx; |
179 | struct srb_iocb *lio = &sp->u.iocb_cmd; | 229 | struct srb_iocb *lio = ctx->u.iocb_cmd; |
180 | struct scsi_qla_host *vha = (scsi_qla_host_t *)data; | 230 | |
181 | 231 | qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport, | |
182 | if (!test_bit(UNLOADING, &vha->dpc_flags)) | 232 | lio->u.logio.data); |
183 | qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport, | 233 | lio->free(sp); |
184 | lio->u.logio.data); | ||
185 | sp->free(sp->fcport->vha, sp); | ||
186 | } | 234 | } |
187 | 235 | ||
188 | int | 236 | int |
189 | qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) | 237 | qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) |
190 | { | 238 | { |
191 | srb_t *sp; | 239 | srb_t *sp; |
240 | struct srb_ctx *ctx; | ||
192 | struct srb_iocb *lio; | 241 | struct srb_iocb *lio; |
193 | int rval; | 242 | int rval; |
194 | 243 | ||
195 | rval = QLA_FUNCTION_FAILED; | 244 | rval = QLA_FUNCTION_FAILED; |
196 | sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); | 245 | sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), |
246 | qla2x00_get_async_timeout(vha) + 2); | ||
197 | if (!sp) | 247 | if (!sp) |
198 | goto done; | 248 | goto done; |
199 | 249 | ||
200 | sp->type = SRB_LOGOUT_CMD; | 250 | ctx = sp->ctx; |
201 | sp->name = "logout"; | 251 | ctx->type = SRB_LOGOUT_CMD; |
202 | qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); | 252 | ctx->name = "logout"; |
203 | 253 | lio = ctx->u.iocb_cmd; | |
204 | lio = &sp->u.iocb_cmd; | ||
205 | lio->timeout = qla2x00_async_iocb_timeout; | 254 | lio->timeout = qla2x00_async_iocb_timeout; |
206 | sp->done = qla2x00_async_logout_sp_done; | 255 | lio->done = qla2x00_async_logout_ctx_done; |
207 | rval = qla2x00_start_sp(sp); | 256 | rval = qla2x00_start_sp(sp); |
208 | if (rval != QLA_SUCCESS) | 257 | if (rval != QLA_SUCCESS) |
209 | goto done_free_sp; | 258 | goto done_free_sp; |
210 | 259 | ||
211 | ql_dbg(ql_dbg_disc, vha, 0x2070, | 260 | ql_dbg(ql_dbg_disc, vha, 0x2070, |
212 | "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n", | 261 | "Async-logout - loop-id=%x portid=%02x%02x%02x.\n", |
213 | sp->handle, fcport->loop_id, fcport->d_id.b.domain, | 262 | fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, |
214 | fcport->d_id.b.area, fcport->d_id.b.al_pa); | 263 | fcport->d_id.b.al_pa); |
215 | return rval; | 264 | return rval; |
216 | 265 | ||
217 | done_free_sp: | 266 | done_free_sp: |
218 | sp->free(fcport->vha, sp); | 267 | lio->free(sp); |
219 | done: | 268 | done: |
220 | return rval; | 269 | return rval; |
221 | } | 270 | } |
222 | 271 | ||
223 | static void | 272 | static void |
224 | qla2x00_async_adisc_sp_done(void *data, void *ptr, int res) | 273 | qla2x00_async_adisc_ctx_done(srb_t *sp) |
225 | { | 274 | { |
226 | srb_t *sp = (srb_t *)ptr; | 275 | struct srb_ctx *ctx = sp->ctx; |
227 | struct srb_iocb *lio = &sp->u.iocb_cmd; | 276 | struct srb_iocb *lio = ctx->u.iocb_cmd; |
228 | struct scsi_qla_host *vha = (scsi_qla_host_t *)data; | 277 | |
229 | 278 | qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport, | |
230 | if (!test_bit(UNLOADING, &vha->dpc_flags)) | 279 | lio->u.logio.data); |
231 | qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport, | 280 | lio->free(sp); |
232 | lio->u.logio.data); | ||
233 | sp->free(sp->fcport->vha, sp); | ||
234 | } | 281 | } |
235 | 282 | ||
236 | int | 283 | int |
@@ -238,21 +285,22 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
238 | uint16_t *data) | 285 | uint16_t *data) |
239 | { | 286 | { |
240 | srb_t *sp; | 287 | srb_t *sp; |
288 | struct srb_ctx *ctx; | ||
241 | struct srb_iocb *lio; | 289 | struct srb_iocb *lio; |
242 | int rval; | 290 | int rval; |
243 | 291 | ||
244 | rval = QLA_FUNCTION_FAILED; | 292 | rval = QLA_FUNCTION_FAILED; |
245 | sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); | 293 | sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), |
294 | qla2x00_get_async_timeout(vha) + 2); | ||
246 | if (!sp) | 295 | if (!sp) |
247 | goto done; | 296 | goto done; |
248 | 297 | ||
249 | sp->type = SRB_ADISC_CMD; | 298 | ctx = sp->ctx; |
250 | sp->name = "adisc"; | 299 | ctx->type = SRB_ADISC_CMD; |
251 | qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); | 300 | ctx->name = "adisc"; |
252 | 301 | lio = ctx->u.iocb_cmd; | |
253 | lio = &sp->u.iocb_cmd; | ||
254 | lio->timeout = qla2x00_async_iocb_timeout; | 302 | lio->timeout = qla2x00_async_iocb_timeout; |
255 | sp->done = qla2x00_async_adisc_sp_done; | 303 | lio->done = qla2x00_async_adisc_ctx_done; |
256 | if (data[1] & QLA_LOGIO_LOGIN_RETRIED) | 304 | if (data[1] & QLA_LOGIO_LOGIN_RETRIED) |
257 | lio->u.logio.flags |= SRB_LOGIN_RETRIED; | 305 | lio->u.logio.flags |= SRB_LOGIN_RETRIED; |
258 | rval = qla2x00_start_sp(sp); | 306 | rval = qla2x00_start_sp(sp); |
@@ -260,81 +308,65 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
260 | goto done_free_sp; | 308 | goto done_free_sp; |
261 | 309 | ||
262 | ql_dbg(ql_dbg_disc, vha, 0x206f, | 310 | ql_dbg(ql_dbg_disc, vha, 0x206f, |
263 | "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n", | 311 | "Async-adisc - loopid=%x portid=%02x%02x%02x.\n", |
264 | sp->handle, fcport->loop_id, fcport->d_id.b.domain, | 312 | fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, |
265 | fcport->d_id.b.area, fcport->d_id.b.al_pa); | 313 | fcport->d_id.b.al_pa); |
266 | return rval; | 314 | return rval; |
267 | 315 | ||
268 | done_free_sp: | 316 | done_free_sp: |
269 | sp->free(fcport->vha, sp); | 317 | lio->free(sp); |
270 | done: | 318 | done: |
271 | return rval; | 319 | return rval; |
272 | } | 320 | } |
273 | 321 | ||
274 | static void | 322 | static void |
275 | qla2x00_async_tm_cmd_done(void *data, void *ptr, int res) | 323 | qla2x00_async_tm_cmd_ctx_done(srb_t *sp) |
276 | { | 324 | { |
277 | srb_t *sp = (srb_t *)ptr; | 325 | struct srb_ctx *ctx = sp->ctx; |
278 | struct srb_iocb *iocb = &sp->u.iocb_cmd; | 326 | struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd; |
279 | struct scsi_qla_host *vha = (scsi_qla_host_t *)data; | ||
280 | uint32_t flags; | ||
281 | uint16_t lun; | ||
282 | int rval; | ||
283 | 327 | ||
284 | if (!test_bit(UNLOADING, &vha->dpc_flags)) { | 328 | qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb); |
285 | flags = iocb->u.tmf.flags; | 329 | iocb->free(sp); |
286 | lun = (uint16_t)iocb->u.tmf.lun; | ||
287 | |||
288 | /* Issue Marker IOCB */ | ||
289 | rval = qla2x00_marker(vha, vha->hw->req_q_map[0], | ||
290 | vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun, | ||
291 | flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); | ||
292 | |||
293 | if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) { | ||
294 | ql_dbg(ql_dbg_taskm, vha, 0x8030, | ||
295 | "TM IOCB failed (%x).\n", rval); | ||
296 | } | ||
297 | } | ||
298 | sp->free(sp->fcport->vha, sp); | ||
299 | } | 330 | } |
300 | 331 | ||
301 | int | 332 | int |
302 | qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun, | 333 | qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, |
303 | uint32_t tag) | 334 | uint32_t tag) |
304 | { | 335 | { |
305 | struct scsi_qla_host *vha = fcport->vha; | 336 | struct scsi_qla_host *vha = fcport->vha; |
306 | srb_t *sp; | 337 | srb_t *sp; |
338 | struct srb_ctx *ctx; | ||
307 | struct srb_iocb *tcf; | 339 | struct srb_iocb *tcf; |
308 | int rval; | 340 | int rval; |
309 | 341 | ||
310 | rval = QLA_FUNCTION_FAILED; | 342 | rval = QLA_FUNCTION_FAILED; |
311 | sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); | 343 | sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), |
344 | qla2x00_get_async_timeout(vha) + 2); | ||
312 | if (!sp) | 345 | if (!sp) |
313 | goto done; | 346 | goto done; |
314 | 347 | ||
315 | sp->type = SRB_TM_CMD; | 348 | ctx = sp->ctx; |
316 | sp->name = "tmf"; | 349 | ctx->type = SRB_TM_CMD; |
317 | qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); | 350 | ctx->name = "tmf"; |
318 | 351 | tcf = ctx->u.iocb_cmd; | |
319 | tcf = &sp->u.iocb_cmd; | 352 | tcf->u.tmf.flags = flags; |
320 | tcf->u.tmf.flags = tm_flags; | ||
321 | tcf->u.tmf.lun = lun; | 353 | tcf->u.tmf.lun = lun; |
322 | tcf->u.tmf.data = tag; | 354 | tcf->u.tmf.data = tag; |
323 | tcf->timeout = qla2x00_async_iocb_timeout; | 355 | tcf->timeout = qla2x00_async_iocb_timeout; |
324 | sp->done = qla2x00_async_tm_cmd_done; | 356 | tcf->done = qla2x00_async_tm_cmd_ctx_done; |
325 | 357 | ||
326 | rval = qla2x00_start_sp(sp); | 358 | rval = qla2x00_start_sp(sp); |
327 | if (rval != QLA_SUCCESS) | 359 | if (rval != QLA_SUCCESS) |
328 | goto done_free_sp; | 360 | goto done_free_sp; |
329 | 361 | ||
330 | ql_dbg(ql_dbg_taskm, vha, 0x802f, | 362 | ql_dbg(ql_dbg_taskm, vha, 0x802f, |
331 | "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n", | 363 | "Async-tmf loop-id=%x portid=%02x%02x%02x.\n", |
332 | sp->handle, fcport->loop_id, fcport->d_id.b.domain, | 364 | fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, |
333 | fcport->d_id.b.area, fcport->d_id.b.al_pa); | 365 | fcport->d_id.b.al_pa); |
334 | return rval; | 366 | return rval; |
335 | 367 | ||
336 | done_free_sp: | 368 | done_free_sp: |
337 | sp->free(fcport->vha, sp); | 369 | tcf->free(sp); |
338 | done: | 370 | done: |
339 | return rval; | 371 | return rval; |
340 | } | 372 | } |
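For the task-management variant above, the flags, LUN and tag are stored in tcf->u.tmf and the marker IOCB is issued afterwards by qla2x00_async_tm_cmd_done(), added further down in this patch. An illustrative invocation for a LUN reset; lun and tag are caller-supplied values, and the error handling is only a placeholder:

    /* Illustrative call only; lun and tag come from the caller. */
    rval = qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, lun, tag);
    if (rval != QLA_SUCCESS)
            return rval;    /* could not queue the async TM IOCB */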
@@ -353,13 +385,6 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
353 | * requests. | 385 | * requests. |
354 | */ | 386 | */ |
355 | rval = qla2x00_get_port_database(vha, fcport, 0); | 387 | rval = qla2x00_get_port_database(vha, fcport, 0); |
356 | if (rval == QLA_NOT_LOGGED_IN) { | ||
357 | fcport->flags &= ~FCF_ASYNC_SENT; | ||
358 | fcport->flags |= FCF_LOGIN_NEEDED; | ||
359 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); | ||
360 | break; | ||
361 | } | ||
362 | |||
363 | if (rval != QLA_SUCCESS) { | 388 | if (rval != QLA_SUCCESS) { |
364 | qla2x00_post_async_logout_work(vha, fcport, NULL); | 389 | qla2x00_post_async_logout_work(vha, fcport, NULL); |
365 | qla2x00_post_async_login_work(vha, fcport, NULL); | 390 | qla2x00_post_async_login_work(vha, fcport, NULL); |
@@ -425,83 +450,34 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
425 | return; | 450 | return; |
426 | } | 451 | } |
427 | 452 | ||
428 | /****************************************************************************/ | 453 | void |
429 | /* QLogic ISP2x00 Hardware Support Functions. */ | 454 | qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport, |
430 | /****************************************************************************/ | 455 | struct srb_iocb *iocb) |
431 | |||
432 | static int | ||
433 | qla83xx_nic_core_fw_load(scsi_qla_host_t *vha) | ||
434 | { | 456 | { |
435 | int rval = QLA_SUCCESS; | 457 | int rval; |
436 | struct qla_hw_data *ha = vha->hw; | 458 | uint32_t flags; |
437 | uint32_t idc_major_ver, idc_minor_ver; | 459 | uint16_t lun; |
438 | uint16_t config[4]; | ||
439 | |||
440 | qla83xx_idc_lock(vha, 0); | ||
441 | |||
442 | /* SV: TODO: Assign initialization timeout from | ||
443 | * flash-info / other param | ||
444 | */ | ||
445 | ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT; | ||
446 | ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT; | ||
447 | |||
448 | /* Set our fcoe function presence */ | ||
449 | if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) { | ||
450 | ql_dbg(ql_dbg_p3p, vha, 0xb077, | ||
451 | "Error while setting DRV-Presence.\n"); | ||
452 | rval = QLA_FUNCTION_FAILED; | ||
453 | goto exit; | ||
454 | } | ||
455 | 460 | ||
456 | /* Decide the reset ownership */ | 461 | flags = iocb->u.tmf.flags; |
457 | qla83xx_reset_ownership(vha); | 462 | lun = (uint16_t)iocb->u.tmf.lun; |
458 | 463 | ||
459 | /* | 464 | /* Issue Marker IOCB */ |
460 | * On first protocol driver load: | 465 | rval = qla2x00_marker(vha, vha->hw->req_q_map[0], |
461 | * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery | 466 | vha->hw->rsp_q_map[0], fcport->loop_id, lun, |
462 | * register. | 467 | flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); |
463 | * Others: Check compatibility with current IDC Major version. | ||
464 | */ | ||
465 | qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver); | ||
466 | if (ha->flags.nic_core_reset_owner) { | ||
467 | /* Set IDC Major version */ | ||
468 | idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION; | ||
469 | qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver); | ||
470 | |||
471 | /* Clearing IDC-Lock-Recovery register */ | ||
472 | qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0); | ||
473 | } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) { | ||
474 | /* | ||
475 | * Clear further IDC participation if we are not compatible with | ||
476 | * the current IDC Major Version. | ||
477 | */ | ||
478 | ql_log(ql_log_warn, vha, 0xb07d, | ||
479 | "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n", | ||
480 | idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION); | ||
481 | __qla83xx_clear_drv_presence(vha); | ||
482 | rval = QLA_FUNCTION_FAILED; | ||
483 | goto exit; | ||
484 | } | ||
485 | /* Each function sets its supported Minor version. */ | ||
486 | qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver); | ||
487 | idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2)); | ||
488 | qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver); | ||
489 | 468 | ||
490 | if (ha->flags.nic_core_reset_owner) { | 469 | if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) { |
491 | memset(config, 0, sizeof(config)); | 470 | ql_dbg(ql_dbg_taskm, vha, 0x8030, |
492 | if (!qla81xx_get_port_config(vha, config)) | 471 | "TM IOCB failed (%x).\n", rval); |
493 | qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, | ||
494 | QLA8XXX_DEV_READY); | ||
495 | } | 472 | } |
496 | 473 | ||
497 | rval = qla83xx_idc_state_handler(vha); | 474 | return; |
498 | |||
499 | exit: | ||
500 | qla83xx_idc_unlock(vha, 0); | ||
501 | |||
502 | return rval; | ||
503 | } | 475 | } |
504 | 476 | ||
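The removed qla83xx_nic_core_fw_load() above negotiates the inter-driver-communication (IDC) version between the functions sharing an ISP8031: the reset owner publishes the supported major version and clears the lock-recovery register, other functions bail out on a major-version mismatch, and every function then ORs its supported minor version into a per-port two-bit field. (On the reverted side, the same hunk adds qla2x00_async_tm_cmd_done(), which posts the marker IOCB once a task-management request completes.) The minor-version arithmetic, restated from the removed code:

    /* Restated from the removed code: each PCI function owns a 2-bit slot
     * in the IDC minor-version register, selected by its port number
     * (portnum 0 -> bits 1:0, portnum 1 -> bits 3:2, portnum 2 -> bits 5:4). */
    qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
    idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
    qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);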
477 | /****************************************************************************/ | ||
478 | /* QLogic ISP2x00 Hardware Support Functions. */ | ||
479 | /****************************************************************************/ | ||
480 | |||
505 | /* | 481 | /* |
506 | * qla2x00_initialize_adapter | 482 | * qla2x00_initialize_adapter |
507 | * Initialize board. | 483 | * Initialize board. |
@@ -538,7 +514,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) | |||
538 | set_bit(0, ha->req_qid_map); | 514 | set_bit(0, ha->req_qid_map); |
539 | set_bit(0, ha->rsp_qid_map); | 515 | set_bit(0, ha->rsp_qid_map); |
540 | 516 | ||
541 | ql_dbg(ql_dbg_init, vha, 0x0040, | 517 | ql_log(ql_log_info, vha, 0x0040, |
542 | "Configuring PCI space...\n"); | 518 | "Configuring PCI space...\n"); |
543 | rval = ha->isp_ops->pci_config(vha); | 519 | rval = ha->isp_ops->pci_config(vha); |
544 | if (rval) { | 520 | if (rval) { |
@@ -557,7 +533,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) | |||
557 | } | 533 | } |
558 | 534 | ||
559 | ha->isp_ops->get_flash_version(vha, req->ring); | 535 | ha->isp_ops->get_flash_version(vha, req->ring); |
560 | ql_dbg(ql_dbg_init, vha, 0x0061, | 536 | ql_log(ql_log_info, vha, 0x0061, |
561 | "Configure NVRAM parameters...\n"); | 537 | "Configure NVRAM parameters...\n"); |
562 | 538 | ||
563 | ha->isp_ops->nvram_config(vha); | 539 | ha->isp_ops->nvram_config(vha); |
@@ -574,7 +550,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) | |||
574 | return QLA_FUNCTION_FAILED; | 550 | return QLA_FUNCTION_FAILED; |
575 | } | 551 | } |
576 | 552 | ||
577 | ql_dbg(ql_dbg_init, vha, 0x0078, | 553 | ql_log(ql_log_info, vha, 0x0078, |
578 | "Verifying loaded RISC code...\n"); | 554 | "Verifying loaded RISC code...\n"); |
579 | 555 | ||
580 | if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) { | 556 | if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) { |
@@ -594,10 +570,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) | |||
594 | return QLA_FUNCTION_FAILED; | 570 | return QLA_FUNCTION_FAILED; |
595 | } | 571 | } |
596 | } | 572 | } |
597 | 573 | rval = qla2x00_init_rings(vha); | |
598 | if (qla_ini_mode_enabled(vha)) | ||
599 | rval = qla2x00_init_rings(vha); | ||
600 | |||
601 | ha->flags.chip_reset_done = 1; | 574 | ha->flags.chip_reset_done = 1; |
602 | 575 | ||
603 | if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { | 576 | if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { |
@@ -610,14 +583,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) | |||
610 | } | 583 | } |
611 | } | 584 | } |
612 | 585 | ||
613 | /* Load the NIC Core f/w if we are the first protocol driver. */ | ||
614 | if (IS_QLA8031(ha)) { | ||
615 | rval = qla83xx_nic_core_fw_load(vha); | ||
616 | if (rval) | ||
617 | ql_log(ql_log_warn, vha, 0x0124, | ||
618 | "Error in initializing NIC Core f/w.\n"); | ||
619 | } | ||
620 | |||
621 | if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) | 586 | if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) |
622 | qla24xx_read_fcp_prio_cfg(vha); | 587 | qla24xx_read_fcp_prio_cfg(vha); |
623 | 588 | ||
@@ -766,8 +731,8 @@ qla24xx_pci_config(scsi_qla_host_t *vha) | |||
766 | pcix_set_mmrbc(ha->pdev, 2048); | 731 | pcix_set_mmrbc(ha->pdev, 2048); |
767 | 732 | ||
768 | /* PCIe -- adjust Maximum Read Request Size (2048). */ | 733 | /* PCIe -- adjust Maximum Read Request Size (2048). */ |
769 | if (pci_is_pcie(ha->pdev)) | 734 | if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP)) |
770 | pcie_set_readrq(ha->pdev, 4096); | 735 | pcie_set_readrq(ha->pdev, 2048); |
771 | 736 | ||
772 | pci_disable_rom(ha->pdev); | 737 | pci_disable_rom(ha->pdev); |
773 | 738 | ||
@@ -802,8 +767,8 @@ qla25xx_pci_config(scsi_qla_host_t *vha) | |||
802 | pci_write_config_word(ha->pdev, PCI_COMMAND, w); | 767 | pci_write_config_word(ha->pdev, PCI_COMMAND, w); |
803 | 768 | ||
804 | /* PCIe -- adjust Maximum Read Request Size (2048). */ | 769 | /* PCIe -- adjust Maximum Read Request Size (2048). */ |
805 | if (pci_is_pcie(ha->pdev)) | 770 | if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP)) |
806 | pcie_set_readrq(ha->pdev, 4096); | 771 | pcie_set_readrq(ha->pdev, 2048); |
807 | 772 | ||
808 | pci_disable_rom(ha->pdev); | 773 | pci_disable_rom(ha->pdev); |
809 | 774 | ||
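Both PCI-config hunks above revert the PCIe detection from pci_is_pcie() back to probing the Express capability with pci_find_capability(), and drop the Maximum Read Request Size from 4096 back to 2048 bytes, which also matches the in-code comment. A minimal sketch of the reverted sequence, with pdev standing in for ha->pdev:

    /* Minimal sketch of the reverted behaviour; pdev stands for ha->pdev. */
    if (pci_find_capability(pdev, PCI_CAP_ID_EXP))  /* function is PCI Express */
            pcie_set_readrq(pdev, 2048);            /* read request size, bytes */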
@@ -997,14 +962,11 @@ qla2x00_reset_chip(scsi_qla_host_t *vha) | |||
997 | * | 962 | * |
998 | * Returns 0 on success. | 963 | * Returns 0 on success. |
999 | */ | 964 | */ |
1000 | static int | 965 | int |
1001 | qla81xx_reset_mpi(scsi_qla_host_t *vha) | 966 | qla81xx_reset_mpi(scsi_qla_host_t *vha) |
1002 | { | 967 | { |
1003 | uint16_t mb[4] = {0x1010, 0, 1, 0}; | 968 | uint16_t mb[4] = {0x1010, 0, 1, 0}; |
1004 | 969 | ||
1005 | if (!IS_QLA81XX(vha->hw)) | ||
1006 | return QLA_SUCCESS; | ||
1007 | |||
1008 | return qla81xx_write_mpi_register(vha, mb); | 970 | return qla81xx_write_mpi_register(vha, mb); |
1009 | } | 971 | } |
1010 | 972 | ||
@@ -1095,83 +1057,6 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) | |||
1095 | ha->isp_ops->enable_intrs(ha); | 1057 | ha->isp_ops->enable_intrs(ha); |
1096 | } | 1058 | } |
1097 | 1059 | ||
1098 | static void | ||
1099 | qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data) | ||
1100 | { | ||
1101 | struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24; | ||
1102 | |||
1103 | WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET); | ||
1104 | *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET); | ||
1105 | |||
1106 | } | ||
1107 | |||
1108 | static void | ||
1109 | qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data) | ||
1110 | { | ||
1111 | struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24; | ||
1112 | |||
1113 | WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET); | ||
1114 | WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data); | ||
1115 | } | ||
1116 | |||
1117 | static void | ||
1118 | qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha) | ||
1119 | { | ||
1120 | struct qla_hw_data *ha = vha->hw; | ||
1121 | uint32_t wd32 = 0; | ||
1122 | uint delta_msec = 100; | ||
1123 | uint elapsed_msec = 0; | ||
1124 | uint timeout_msec; | ||
1125 | ulong n; | ||
1126 | |||
1127 | if (!IS_QLA25XX(ha) && !IS_QLA2031(ha)) | ||
1128 | return; | ||
1129 | |||
1130 | attempt: | ||
1131 | timeout_msec = TIMEOUT_SEMAPHORE; | ||
1132 | n = timeout_msec / delta_msec; | ||
1133 | while (n--) { | ||
1134 | qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET); | ||
1135 | qla25xx_read_risc_sema_reg(vha, &wd32); | ||
1136 | if (wd32 & RISC_SEMAPHORE) | ||
1137 | break; | ||
1138 | msleep(delta_msec); | ||
1139 | elapsed_msec += delta_msec; | ||
1140 | if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED) | ||
1141 | goto force; | ||
1142 | } | ||
1143 | |||
1144 | if (!(wd32 & RISC_SEMAPHORE)) | ||
1145 | goto force; | ||
1146 | |||
1147 | if (!(wd32 & RISC_SEMAPHORE_FORCE)) | ||
1148 | goto acquired; | ||
1149 | |||
1150 | qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR); | ||
1151 | timeout_msec = TIMEOUT_SEMAPHORE_FORCE; | ||
1152 | n = timeout_msec / delta_msec; | ||
1153 | while (n--) { | ||
1154 | qla25xx_read_risc_sema_reg(vha, &wd32); | ||
1155 | if (!(wd32 & RISC_SEMAPHORE_FORCE)) | ||
1156 | break; | ||
1157 | msleep(delta_msec); | ||
1158 | elapsed_msec += delta_msec; | ||
1159 | if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED) | ||
1160 | goto force; | ||
1161 | } | ||
1162 | |||
1163 | if (wd32 & RISC_SEMAPHORE_FORCE) | ||
1164 | qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR); | ||
1165 | |||
1166 | goto attempt; | ||
1167 | |||
1168 | force: | ||
1169 | qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET); | ||
1170 | |||
1171 | acquired: | ||
1172 | return; | ||
1173 | } | ||
1174 | |||
1175 | /** | 1060 | /** |
1176 | * qla24xx_reset_chip() - Reset ISP24xx chip. | 1061 | * qla24xx_reset_chip() - Reset ISP24xx chip. |
1177 | * @ha: HA context | 1062 | * @ha: HA context |
@@ -1190,8 +1075,6 @@ qla24xx_reset_chip(scsi_qla_host_t *vha) | |||
1190 | 1075 | ||
1191 | ha->isp_ops->disable_intrs(ha); | 1076 | ha->isp_ops->disable_intrs(ha); |
1192 | 1077 | ||
1193 | qla25xx_manipulate_risc_semaphore(vha); | ||
1194 | |||
1195 | /* Perform RISC reset. */ | 1078 | /* Perform RISC reset. */ |
1196 | qla24xx_reset_risc(vha); | 1079 | qla24xx_reset_risc(vha); |
1197 | } | 1080 | } |
@@ -1377,9 +1260,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) | |||
1377 | mem_size = (ha->fw_memory_size - 0x11000 + 1) * | 1260 | mem_size = (ha->fw_memory_size - 0x11000 + 1) * |
1378 | sizeof(uint16_t); | 1261 | sizeof(uint16_t); |
1379 | } else if (IS_FWI2_CAPABLE(ha)) { | 1262 | } else if (IS_FWI2_CAPABLE(ha)) { |
1380 | if (IS_QLA83XX(ha)) | 1263 | if (IS_QLA81XX(ha)) |
1381 | fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem); | ||
1382 | else if (IS_QLA81XX(ha)) | ||
1383 | fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); | 1264 | fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); |
1384 | else if (IS_QLA25XX(ha)) | 1265 | else if (IS_QLA25XX(ha)) |
1385 | fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem); | 1266 | fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem); |
@@ -1387,22 +1268,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) | |||
1387 | fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem); | 1268 | fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem); |
1388 | mem_size = (ha->fw_memory_size - 0x100000 + 1) * | 1269 | mem_size = (ha->fw_memory_size - 0x100000 + 1) * |
1389 | sizeof(uint32_t); | 1270 | sizeof(uint32_t); |
1390 | if (ha->mqenable) { | 1271 | if (ha->mqenable) |
1391 | if (!IS_QLA83XX(ha)) | 1272 | mq_size = sizeof(struct qla2xxx_mq_chain); |
1392 | mq_size = sizeof(struct qla2xxx_mq_chain); | ||
1393 | /* | ||
1394 | * Allocate maximum buffer size for all queues. | ||
1395 | * Resizing must be done at end-of-dump processing. | ||
1396 | */ | ||
1397 | mq_size += ha->max_req_queues * | ||
1398 | (req->length * sizeof(request_t)); | ||
1399 | mq_size += ha->max_rsp_queues * | ||
1400 | (rsp->length * sizeof(response_t)); | ||
1401 | } | ||
1402 | if (ha->tgt.atio_q_length) | ||
1403 | mq_size += ha->tgt.atio_q_length * sizeof(request_t); | ||
1404 | /* Allocate memory for Fibre Channel Event Buffer. */ | 1273 | /* Allocate memory for Fibre Channel Event Buffer. */ |
1405 | if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) | 1274 | if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) |
1406 | goto try_eft; | 1275 | goto try_eft; |
1407 | 1276 | ||
1408 | tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, | 1277 | tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, |
@@ -1425,7 +1294,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) | |||
1425 | ha->flags.fce_enabled = 0; | 1294 | ha->flags.fce_enabled = 0; |
1426 | goto try_eft; | 1295 | goto try_eft; |
1427 | } | 1296 | } |
1428 | ql_dbg(ql_dbg_init, vha, 0x00c0, | 1297 | ql_log(ql_log_info, vha, 0x00c0, |
1429 | "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024); | 1298 | "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024); |
1430 | 1299 | ||
1431 | fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; | 1300 | fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; |
@@ -1452,7 +1321,7 @@ try_eft: | |||
1452 | tc_dma); | 1321 | tc_dma); |
1453 | goto cont_alloc; | 1322 | goto cont_alloc; |
1454 | } | 1323 | } |
1455 | ql_dbg(ql_dbg_init, vha, 0x00c3, | 1324 | ql_log(ql_log_info, vha, 0x00c3, |
1456 | "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024); | 1325 | "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024); |
1457 | 1326 | ||
1458 | eft_size = EFT_SIZE; | 1327 | eft_size = EFT_SIZE; |
@@ -1489,7 +1358,7 @@ cont_alloc: | |||
1489 | } | 1358 | } |
1490 | return; | 1359 | return; |
1491 | } | 1360 | } |
1492 | ql_dbg(ql_dbg_init, vha, 0x00c5, | 1361 | ql_log(ql_log_info, vha, 0x00c5, |
1493 | "Allocated (%d KB) for firmware dump.\n", dump_size / 1024); | 1362 | "Allocated (%d KB) for firmware dump.\n", dump_size / 1024); |
1494 | 1363 | ||
1495 | ha->fw_dump_len = dump_size; | 1364 | ha->fw_dump_len = dump_size; |
@@ -1611,10 +1480,13 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) | |||
1611 | if (rval == QLA_SUCCESS) { | 1480 | if (rval == QLA_SUCCESS) { |
1612 | enable_82xx_npiv: | 1481 | enable_82xx_npiv: |
1613 | fw_major_version = ha->fw_major_version; | 1482 | fw_major_version = ha->fw_major_version; |
1614 | if (IS_QLA82XX(ha)) | 1483 | rval = qla2x00_get_fw_version(vha, |
1615 | qla82xx_check_md_needed(vha); | 1484 | &ha->fw_major_version, |
1616 | else | 1485 | &ha->fw_minor_version, |
1617 | rval = qla2x00_get_fw_version(vha); | 1486 | &ha->fw_subminor_version, |
1487 | &ha->fw_attributes, &ha->fw_memory_size, | ||
1488 | ha->mpi_version, &ha->mpi_capabilities, | ||
1489 | ha->phy_version); | ||
1618 | if (rval != QLA_SUCCESS) | 1490 | if (rval != QLA_SUCCESS) |
1619 | goto failed; | 1491 | goto failed; |
1620 | ha->flags.npiv_supported = 0; | 1492 | ha->flags.npiv_supported = 0; |
@@ -1631,17 +1503,17 @@ enable_82xx_npiv: | |||
1631 | &ha->fw_xcb_count, NULL, NULL, | 1503 | &ha->fw_xcb_count, NULL, NULL, |
1632 | &ha->max_npiv_vports, NULL); | 1504 | &ha->max_npiv_vports, NULL); |
1633 | 1505 | ||
1634 | if (!fw_major_version && ql2xallocfwdump | 1506 | if (!fw_major_version && ql2xallocfwdump) { |
1635 | && !IS_QLA82XX(ha)) | 1507 | if (!IS_QLA82XX(ha)) |
1636 | qla2x00_alloc_fw_dump(vha); | 1508 | qla2x00_alloc_fw_dump(vha); |
1509 | } | ||
1637 | } | 1510 | } |
1638 | } else { | 1511 | } else { |
1639 | ql_log(ql_log_fatal, vha, 0x00cd, | 1512 | ql_log(ql_log_fatal, vha, 0x00cd, |
1640 | "ISP Firmware failed checksum.\n"); | 1513 | "ISP Firmware failed checksum.\n"); |
1641 | goto failed; | 1514 | goto failed; |
1642 | } | 1515 | } |
1643 | } else | 1516 | } |
1644 | goto failed; | ||
1645 | 1517 | ||
1646 | if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { | 1518 | if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { |
1647 | /* Enable proper parity. */ | 1519 | /* Enable proper parity. */ |
@@ -1656,9 +1528,6 @@ enable_82xx_npiv: | |||
1656 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1528 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
1657 | } | 1529 | } |
1658 | 1530 | ||
1659 | if (IS_QLA83XX(ha)) | ||
1660 | goto skip_fac_check; | ||
1661 | |||
1662 | if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) { | 1531 | if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) { |
1663 | uint32_t size; | 1532 | uint32_t size; |
1664 | 1533 | ||
@@ -1671,11 +1540,6 @@ enable_82xx_npiv: | |||
1671 | "Unsupported FAC firmware (%d.%02d.%02d).\n", | 1540 | "Unsupported FAC firmware (%d.%02d.%02d).\n", |
1672 | ha->fw_major_version, ha->fw_minor_version, | 1541 | ha->fw_major_version, ha->fw_minor_version, |
1673 | ha->fw_subminor_version); | 1542 | ha->fw_subminor_version); |
1674 | skip_fac_check: | ||
1675 | if (IS_QLA83XX(ha)) { | ||
1676 | ha->flags.fac_supported = 0; | ||
1677 | rval = QLA_SUCCESS; | ||
1678 | } | ||
1679 | } | 1543 | } |
1680 | } | 1544 | } |
1681 | failed: | 1545 | failed: |
@@ -1854,7 +1718,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha) | |||
1854 | struct req_que *req = ha->req_q_map[0]; | 1718 | struct req_que *req = ha->req_q_map[0]; |
1855 | struct rsp_que *rsp = ha->rsp_q_map[0]; | 1719 | struct rsp_que *rsp = ha->rsp_q_map[0]; |
1856 | 1720 | ||
1857 | /* Setup ring parameters in initialization control block. */ | 1721 | /* Setup ring parameters in initialization control block. */ |
1858 | icb = (struct init_cb_24xx *)ha->init_cb; | 1722 | icb = (struct init_cb_24xx *)ha->init_cb; |
1859 | icb->request_q_outpointer = __constant_cpu_to_le16(0); | 1723 | icb->request_q_outpointer = __constant_cpu_to_le16(0); |
1860 | icb->response_q_inpointer = __constant_cpu_to_le16(0); | 1724 | icb->response_q_inpointer = __constant_cpu_to_le16(0); |
@@ -1865,13 +1729,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha) | |||
1865 | icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); | 1729 | icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); |
1866 | icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); | 1730 | icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); |
1867 | 1731 | ||
1868 | /* Setup ATIO queue dma pointers for target mode */ | 1732 | if (ha->mqenable) { |
1869 | icb->atio_q_inpointer = __constant_cpu_to_le16(0); | ||
1870 | icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length); | ||
1871 | icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma)); | ||
1872 | icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma)); | ||
1873 | |||
1874 | if (ha->mqenable || IS_QLA83XX(ha)) { | ||
1875 | icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS); | 1733 | icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS); |
1876 | icb->rid = __constant_cpu_to_le16(rid); | 1734 | icb->rid = __constant_cpu_to_le16(rid); |
1877 | if (ha->flags.msix_enabled) { | 1735 | if (ha->flags.msix_enabled) { |
@@ -1891,8 +1749,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha) | |||
1891 | __constant_cpu_to_le32(BIT_18); | 1749 | __constant_cpu_to_le32(BIT_18); |
1892 | 1750 | ||
1893 | /* Use Disable MSIX Handshake mode for capable adapters */ | 1751 | /* Use Disable MSIX Handshake mode for capable adapters */ |
1894 | if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) && | 1752 | if (IS_MSIX_NACK_CAPABLE(ha)) { |
1895 | (ha->flags.msix_enabled)) { | ||
1896 | icb->firmware_options_2 &= | 1753 | icb->firmware_options_2 &= |
1897 | __constant_cpu_to_le32(~BIT_22); | 1754 | __constant_cpu_to_le32(~BIT_22); |
1898 | ha->flags.disable_msix_handshake = 1; | 1755 | ha->flags.disable_msix_handshake = 1; |
@@ -1914,8 +1771,6 @@ qla24xx_config_rings(struct scsi_qla_host *vha) | |||
1914 | WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0); | 1771 | WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0); |
1915 | WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0); | 1772 | WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0); |
1916 | } | 1773 | } |
1917 | qlt_24xx_config_rings(vha, reg); | ||
1918 | |||
1919 | /* PCI posting */ | 1774 | /* PCI posting */ |
1920 | RD_REG_DWORD(&ioreg->hccr); | 1775 | RD_REG_DWORD(&ioreg->hccr); |
1921 | } | 1776 | } |
@@ -1938,6 +1793,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) | |||
1938 | struct qla_hw_data *ha = vha->hw; | 1793 | struct qla_hw_data *ha = vha->hw; |
1939 | struct req_que *req; | 1794 | struct req_que *req; |
1940 | struct rsp_que *rsp; | 1795 | struct rsp_que *rsp; |
1796 | struct scsi_qla_host *vp; | ||
1941 | struct mid_init_cb_24xx *mid_init_cb = | 1797 | struct mid_init_cb_24xx *mid_init_cb = |
1942 | (struct mid_init_cb_24xx *) ha->init_cb; | 1798 | (struct mid_init_cb_24xx *) ha->init_cb; |
1943 | 1799 | ||
@@ -1967,10 +1823,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha) | |||
1967 | qla2x00_init_response_q_entries(rsp); | 1823 | qla2x00_init_response_q_entries(rsp); |
1968 | } | 1824 | } |
1969 | 1825 | ||
1970 | ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; | 1826 | spin_lock(&ha->vport_slock); |
1971 | ha->tgt.atio_ring_index = 0; | 1827 | /* Clear RSCN queue. */ |
1972 | /* Initialize ATIO queue entries */ | 1828 | list_for_each_entry(vp, &ha->vp_list, list) { |
1973 | qlt_init_atio_q_entries(vha); | 1829 | vp->rscn_in_ptr = 0; |
1830 | vp->rscn_out_ptr = 0; | ||
1831 | } | ||
1832 | |||
1833 | spin_unlock(&ha->vport_slock); | ||
1974 | 1834 | ||
1975 | ha->isp_ops->config_rings(vha); | 1835 | ha->isp_ops->config_rings(vha); |
1976 | 1836 | ||
@@ -1982,7 +1842,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) | |||
1982 | ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n"); | 1842 | ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n"); |
1983 | 1843 | ||
1984 | if (ha->flags.npiv_supported) { | 1844 | if (ha->flags.npiv_supported) { |
1985 | if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha)) | 1845 | if (ha->operating_mode == LOOP) |
1986 | ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; | 1846 | ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; |
1987 | mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); | 1847 | mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); |
1988 | } | 1848 | } |
@@ -2046,7 +1906,6 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) | |||
2046 | "Waiting for LIP to complete.\n"); | 1906 | "Waiting for LIP to complete.\n"); |
2047 | 1907 | ||
2048 | do { | 1908 | do { |
2049 | memset(state, -1, sizeof(state)); | ||
2050 | rval = qla2x00_get_firmware_state(vha, state); | 1909 | rval = qla2x00_get_firmware_state(vha, state); |
2051 | if (rval == QLA_SUCCESS) { | 1910 | if (rval == QLA_SUCCESS) { |
2052 | if (state[0] < FSTATE_LOSS_OF_SYNC) { | 1911 | if (state[0] < FSTATE_LOSS_OF_SYNC) { |
@@ -2065,7 +1924,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) | |||
2065 | rval = qla84xx_init_chip(vha); | 1924 | rval = qla84xx_init_chip(vha); |
2066 | if (rval != QLA_SUCCESS) { | 1925 | if (rval != QLA_SUCCESS) { |
2067 | ql_log(ql_log_warn, | 1926 | ql_log(ql_log_warn, |
2068 | vha, 0x8007, | 1927 | vha, 0x8043, |
2069 | "Init chip failed.\n"); | 1928 | "Init chip failed.\n"); |
2070 | break; | 1929 | break; |
2071 | } | 1930 | } |
@@ -2074,7 +1933,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) | |||
2074 | cs84xx_time = jiffies - cs84xx_time; | 1933 | cs84xx_time = jiffies - cs84xx_time; |
2075 | wtime += cs84xx_time; | 1934 | wtime += cs84xx_time; |
2076 | mtime += cs84xx_time; | 1935 | mtime += cs84xx_time; |
2077 | ql_dbg(ql_dbg_taskm, vha, 0x8008, | 1936 | ql_dbg(ql_dbg_taskm, vha, 0x8042, |
2078 | "Increasing wait time by %ld. " | 1937 | "Increasing wait time by %ld. " |
2079 | "New time %ld.\n", cs84xx_time, | 1938 | "New time %ld.\n", cs84xx_time, |
2080 | wtime); | 1939 | wtime); |
@@ -2117,13 +1976,16 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) | |||
2117 | 1976 | ||
2118 | /* Delay for a while */ | 1977 | /* Delay for a while */ |
2119 | msleep(500); | 1978 | msleep(500); |
1979 | |||
1980 | ql_dbg(ql_dbg_taskm, vha, 0x8039, | ||
1981 | "fw_state=%x curr time=%lx.\n", state[0], jiffies); | ||
2120 | } while (1); | 1982 | } while (1); |
2121 | 1983 | ||
2122 | ql_dbg(ql_dbg_taskm, vha, 0x803a, | 1984 | ql_dbg(ql_dbg_taskm, vha, 0x803a, |
2123 | "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0], | 1985 | "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0], |
2124 | state[1], state[2], state[3], state[4], jiffies); | 1986 | state[1], state[2], state[3], state[4], jiffies); |
2125 | 1987 | ||
2126 | if (rval && !(vha->device_flags & DFLG_NO_CABLE)) { | 1988 | if (rval) { |
2127 | ql_log(ql_log_warn, vha, 0x803b, | 1989 | ql_log(ql_log_warn, vha, 0x803b, |
2128 | "Firmware ready **** FAILED ****.\n"); | 1990 | "Firmware ready **** FAILED ****.\n"); |
2129 | } | 1991 | } |
@@ -2156,14 +2018,13 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) | |||
2156 | uint8_t domain; | 2018 | uint8_t domain; |
2157 | char connect_type[22]; | 2019 | char connect_type[22]; |
2158 | struct qla_hw_data *ha = vha->hw; | 2020 | struct qla_hw_data *ha = vha->hw; |
2159 | unsigned long flags; | ||
2160 | 2021 | ||
2161 | /* Get host addresses. */ | 2022 | /* Get host addresses. */ |
2162 | rval = qla2x00_get_adapter_id(vha, | 2023 | rval = qla2x00_get_adapter_id(vha, |
2163 | &loop_id, &al_pa, &area, &domain, &topo, &sw_cap); | 2024 | &loop_id, &al_pa, &area, &domain, &topo, &sw_cap); |
2164 | if (rval != QLA_SUCCESS) { | 2025 | if (rval != QLA_SUCCESS) { |
2165 | if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || | 2026 | if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || |
2166 | IS_CNA_CAPABLE(ha) || | 2027 | IS_QLA8XXX_TYPE(ha) || |
2167 | (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { | 2028 | (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { |
2168 | ql_dbg(ql_dbg_disc, vha, 0x2008, | 2029 | ql_dbg(ql_dbg_disc, vha, 0x2008, |
2169 | "Loop is in a transition state.\n"); | 2030 | "Loop is in a transition state.\n"); |
@@ -2231,10 +2092,6 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) | |||
2231 | vha->d_id.b.area = area; | 2092 | vha->d_id.b.area = area; |
2232 | vha->d_id.b.al_pa = al_pa; | 2093 | vha->d_id.b.al_pa = al_pa; |
2233 | 2094 | ||
2234 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
2235 | qlt_update_vp_map(vha, SET_AL_PA); | ||
2236 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
2237 | |||
2238 | if (!vha->flags.init_done) | 2095 | if (!vha->flags.init_done) |
2239 | ql_log(ql_log_info, vha, 0x2010, | 2096 | ql_log(ql_log_info, vha, 0x2010, |
2240 | "Topology - %s, Host Loop address 0x%x.\n", | 2097 | "Topology - %s, Host Loop address 0x%x.\n", |
@@ -2259,7 +2116,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, | |||
2259 | uint16_t index; | 2116 | uint16_t index; |
2260 | struct qla_hw_data *ha = vha->hw; | 2117 | struct qla_hw_data *ha = vha->hw; |
2261 | int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && | 2118 | int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && |
2262 | !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha); | 2119 | !IS_QLA8XXX_TYPE(ha); |
2263 | 2120 | ||
2264 | if (memcmp(model, BINZERO, len) != 0) { | 2121 | if (memcmp(model, BINZERO, len) != 0) { |
2265 | strncpy(ha->model_number, model, len); | 2122 | strncpy(ha->model_number, model, len); |
@@ -2369,7 +2226,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) | |||
2369 | nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) { | 2226 | nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) { |
2370 | /* Reset NVRAM data. */ | 2227 | /* Reset NVRAM data. */ |
2371 | ql_log(ql_log_warn, vha, 0x0064, | 2228 | ql_log(ql_log_warn, vha, 0x0064, |
2372 | "Inconsistent NVRAM " | 2229 | "Inconisistent NVRAM " |
2373 | "detected: checksum=0x%x id=%c version=0x%x.\n", | 2230 | "detected: checksum=0x%x id=%c version=0x%x.\n", |
2374 | chksum, nv->id[0], nv->nvram_version); | 2231 | chksum, nv->id[0], nv->nvram_version); |
2375 | ql_log(ql_log_warn, vha, 0x0065, | 2232 | ql_log(ql_log_warn, vha, 0x0065, |
@@ -2454,7 +2311,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) | |||
2454 | if (IS_QLA23XX(ha)) { | 2311 | if (IS_QLA23XX(ha)) { |
2455 | nv->firmware_options[0] |= BIT_2; | 2312 | nv->firmware_options[0] |= BIT_2; |
2456 | nv->firmware_options[0] &= ~BIT_3; | 2313 | nv->firmware_options[0] &= ~BIT_3; |
2457 | nv->special_options[0] &= ~BIT_6; | 2314 | nv->firmware_options[0] &= ~BIT_6; |
2458 | nv->add_firmware_options[1] |= BIT_5 | BIT_4; | 2315 | nv->add_firmware_options[1] |= BIT_5 | BIT_4; |
2459 | 2316 | ||
2460 | if (IS_QLA2300(ha)) { | 2317 | if (IS_QLA2300(ha)) { |
@@ -2524,7 +2381,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) | |||
2524 | * internal driver logging. | 2381 | * internal driver logging. |
2525 | */ | 2382 | */ |
2526 | if (nv->host_p[0] & BIT_7) | 2383 | if (nv->host_p[0] & BIT_7) |
2527 | ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; | 2384 | ql2xextended_error_logging = 0x7fffffff; |
2528 | ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0); | 2385 | ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0); |
2529 | /* Always load RISC code on non ISP2[12]00 chips. */ | 2386 | /* Always load RISC code on non ISP2[12]00 chips. */ |
2530 | if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) | 2387 | if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) |
@@ -2651,21 +2508,14 @@ qla2x00_rport_del(void *data) | |||
2651 | { | 2508 | { |
2652 | fc_port_t *fcport = data; | 2509 | fc_port_t *fcport = data; |
2653 | struct fc_rport *rport; | 2510 | struct fc_rport *rport; |
2654 | scsi_qla_host_t *vha = fcport->vha; | ||
2655 | unsigned long flags; | 2511 | unsigned long flags; |
2656 | 2512 | ||
2657 | spin_lock_irqsave(fcport->vha->host->host_lock, flags); | 2513 | spin_lock_irqsave(fcport->vha->host->host_lock, flags); |
2658 | rport = fcport->drport ? fcport->drport: fcport->rport; | 2514 | rport = fcport->drport ? fcport->drport: fcport->rport; |
2659 | fcport->drport = NULL; | 2515 | fcport->drport = NULL; |
2660 | spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); | 2516 | spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); |
2661 | if (rport) { | 2517 | if (rport) |
2662 | fc_remote_port_delete(rport); | 2518 | fc_remote_port_delete(rport); |
2663 | /* | ||
2664 | * Release the target mode FC NEXUS in qla_target.c code | ||
2665 | * if target mod is enabled. | ||
2666 | */ | ||
2667 | qlt_fc_port_deleted(vha, fcport); | ||
2668 | } | ||
2669 | } | 2519 | } |
2670 | 2520 | ||
2671 | /** | 2521 | /** |
@@ -2686,11 +2536,11 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) | |||
2686 | 2536 | ||
2687 | /* Setup fcport template structure. */ | 2537 | /* Setup fcport template structure. */ |
2688 | fcport->vha = vha; | 2538 | fcport->vha = vha; |
2539 | fcport->vp_idx = vha->vp_idx; | ||
2689 | fcport->port_type = FCT_UNKNOWN; | 2540 | fcport->port_type = FCT_UNKNOWN; |
2690 | fcport->loop_id = FC_NO_LOOP_ID; | 2541 | fcport->loop_id = FC_NO_LOOP_ID; |
2691 | qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); | 2542 | qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); |
2692 | fcport->supported_classes = FC_COS_UNSPECIFIED; | 2543 | fcport->supported_classes = FC_COS_UNSPECIFIED; |
2693 | fcport->scan_state = QLA_FCPORT_SCAN_NONE; | ||
2694 | 2544 | ||
2695 | return fcport; | 2545 | return fcport; |
2696 | } | 2546 | } |
@@ -2742,11 +2592,13 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) | |||
2742 | if (ha->current_topology == ISP_CFG_FL && | 2592 | if (ha->current_topology == ISP_CFG_FL && |
2743 | (test_bit(LOCAL_LOOP_UPDATE, &flags))) { | 2593 | (test_bit(LOCAL_LOOP_UPDATE, &flags))) { |
2744 | 2594 | ||
2595 | vha->flags.rscn_queue_overflow = 1; | ||
2745 | set_bit(RSCN_UPDATE, &flags); | 2596 | set_bit(RSCN_UPDATE, &flags); |
2746 | 2597 | ||
2747 | } else if (ha->current_topology == ISP_CFG_F && | 2598 | } else if (ha->current_topology == ISP_CFG_F && |
2748 | (test_bit(LOCAL_LOOP_UPDATE, &flags))) { | 2599 | (test_bit(LOCAL_LOOP_UPDATE, &flags))) { |
2749 | 2600 | ||
2601 | vha->flags.rscn_queue_overflow = 1; | ||
2750 | set_bit(RSCN_UPDATE, &flags); | 2602 | set_bit(RSCN_UPDATE, &flags); |
2751 | clear_bit(LOCAL_LOOP_UPDATE, &flags); | 2603 | clear_bit(LOCAL_LOOP_UPDATE, &flags); |
2752 | 2604 | ||
@@ -2756,6 +2608,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) | |||
2756 | } else if (!vha->flags.online || | 2608 | } else if (!vha->flags.online || |
2757 | (test_bit(ABORT_ISP_ACTIVE, &flags))) { | 2609 | (test_bit(ABORT_ISP_ACTIVE, &flags))) { |
2758 | 2610 | ||
2611 | vha->flags.rscn_queue_overflow = 1; | ||
2759 | set_bit(RSCN_UPDATE, &flags); | 2612 | set_bit(RSCN_UPDATE, &flags); |
2760 | set_bit(LOCAL_LOOP_UPDATE, &flags); | 2613 | set_bit(LOCAL_LOOP_UPDATE, &flags); |
2761 | } | 2614 | } |
@@ -2765,7 +2618,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) | |||
2765 | ql_dbg(ql_dbg_disc, vha, 0x2015, | 2618 | ql_dbg(ql_dbg_disc, vha, 0x2015, |
2766 | "Loop resync needed, failing.\n"); | 2619 | "Loop resync needed, failing.\n"); |
2767 | rval = QLA_FUNCTION_FAILED; | 2620 | rval = QLA_FUNCTION_FAILED; |
2768 | } else | 2621 | } |
2622 | else | ||
2769 | rval = qla2x00_configure_local_loop(vha); | 2623 | rval = qla2x00_configure_local_loop(vha); |
2770 | } | 2624 | } |
2771 | 2625 | ||
@@ -2804,6 +2658,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) | |||
2804 | set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); | 2658 | set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); |
2805 | if (test_bit(RSCN_UPDATE, &save_flags)) { | 2659 | if (test_bit(RSCN_UPDATE, &save_flags)) { |
2806 | set_bit(RSCN_UPDATE, &vha->dpc_flags); | 2660 | set_bit(RSCN_UPDATE, &vha->dpc_flags); |
2661 | if (!IS_ALOGIO_CAPABLE(ha)) | ||
2662 | vha->flags.rscn_queue_overflow = 1; | ||
2807 | } | 2663 | } |
2808 | } | 2664 | } |
2809 | 2665 | ||
@@ -2839,10 +2695,15 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) | |||
2839 | 2695 | ||
2840 | found_devs = 0; | 2696 | found_devs = 0; |
2841 | new_fcport = NULL; | 2697 | new_fcport = NULL; |
2842 | entries = MAX_FIBRE_DEVICES_LOOP; | 2698 | entries = MAX_FIBRE_DEVICES; |
2699 | |||
2700 | ql_dbg(ql_dbg_disc, vha, 0x2016, | ||
2701 | "Getting FCAL position map.\n"); | ||
2702 | if (ql2xextended_error_logging & ql_dbg_disc) | ||
2703 | qla2x00_get_fcal_position_map(vha, NULL); | ||
2843 | 2704 | ||
2844 | /* Get list of logged in devices. */ | 2705 | /* Get list of logged in devices. */ |
2845 | memset(ha->gid_list, 0, qla2x00_gid_list_size(ha)); | 2706 | memset(ha->gid_list, 0, GID_LIST_SIZE); |
2846 | rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma, | 2707 | rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma, |
2847 | &entries); | 2708 | &entries); |
2848 | if (rval != QLA_SUCCESS) | 2709 | if (rval != QLA_SUCCESS) |
@@ -2907,13 +2768,12 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) | |||
2907 | if (loop_id > LAST_LOCAL_LOOP_ID) | 2768 | if (loop_id > LAST_LOCAL_LOOP_ID) |
2908 | continue; | 2769 | continue; |
2909 | 2770 | ||
2910 | memset(new_fcport, 0, sizeof(fc_port_t)); | ||
2911 | |||
2912 | /* Fill in member data. */ | 2771 | /* Fill in member data. */ |
2913 | new_fcport->d_id.b.domain = domain; | 2772 | new_fcport->d_id.b.domain = domain; |
2914 | new_fcport->d_id.b.area = area; | 2773 | new_fcport->d_id.b.area = area; |
2915 | new_fcport->d_id.b.al_pa = al_pa; | 2774 | new_fcport->d_id.b.al_pa = al_pa; |
2916 | new_fcport->loop_id = loop_id; | 2775 | new_fcport->loop_id = loop_id; |
2776 | new_fcport->vp_idx = vha->vp_idx; | ||
2917 | rval2 = qla2x00_get_port_database(vha, new_fcport, 0); | 2777 | rval2 = qla2x00_get_port_database(vha, new_fcport, 0); |
2918 | if (rval2 != QLA_SUCCESS) { | 2778 | if (rval2 != QLA_SUCCESS) { |
2919 | ql_dbg(ql_dbg_disc, vha, 0x201a, | 2779 | ql_dbg(ql_dbg_disc, vha, 0x201a, |
@@ -2947,6 +2807,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) | |||
2947 | 2807 | ||
2948 | if (!found) { | 2808 | if (!found) { |
2949 | /* New device, add to fcports list. */ | 2809 | /* New device, add to fcports list. */ |
2810 | if (vha->vp_idx) { | ||
2811 | new_fcport->vha = vha; | ||
2812 | new_fcport->vp_idx = vha->vp_idx; | ||
2813 | } | ||
2950 | list_add_tail(&new_fcport->list, &vha->vp_fcports); | 2814 | list_add_tail(&new_fcport->list, &vha->vp_fcports); |
2951 | 2815 | ||
2952 | /* Allocate a new replacement fcport. */ | 2816 | /* Allocate a new replacement fcport. */ |
@@ -2983,6 +2847,9 @@ cleanup_allocation: | |||
2983 | static void | 2847 | static void |
2984 | qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) | 2848 | qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) |
2985 | { | 2849 | { |
2850 | #define LS_UNKNOWN 2 | ||
2851 | static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; | ||
2852 | char *link_speed; | ||
2986 | int rval; | 2853 | int rval; |
2987 | uint16_t mb[4]; | 2854 | uint16_t mb[4]; |
2988 | struct qla_hw_data *ha = vha->hw; | 2855 | struct qla_hw_data *ha = vha->hw; |
@@ -3009,10 +2876,14 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
3009 | fcport->port_name[6], fcport->port_name[7], rval, | 2876 | fcport->port_name[6], fcport->port_name[7], rval, |
3010 | fcport->fp_speed, mb[0], mb[1]); | 2877 | fcport->fp_speed, mb[0], mb[1]); |
3011 | } else { | 2878 | } else { |
2879 | link_speed = link_speeds[LS_UNKNOWN]; | ||
2880 | if (fcport->fp_speed < 5) | ||
2881 | link_speed = link_speeds[fcport->fp_speed]; | ||
2882 | else if (fcport->fp_speed == 0x13) | ||
2883 | link_speed = link_speeds[5]; | ||
3012 | ql_dbg(ql_dbg_disc, vha, 0x2005, | 2884 | ql_dbg(ql_dbg_disc, vha, 0x2005, |
3013 | "iIDMA adjusted to %s GB/s " | 2885 | "iIDMA adjusted to %s GB/s " |
3014 | "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", | 2886 | "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed, |
3015 | qla2x00_get_link_speed_str(ha, fcport->fp_speed), | ||
3016 | fcport->port_name[0], fcport->port_name[1], | 2887 | fcport->port_name[0], fcport->port_name[1], |
3017 | fcport->port_name[2], fcport->port_name[3], | 2888 | fcport->port_name[2], fcport->port_name[3], |
3018 | fcport->port_name[4], fcport->port_name[5], | 2889 | fcport->port_name[4], fcport->port_name[5], |
@@ -3040,12 +2911,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
3040 | "Unable to allocate fc remote port.\n"); | 2911 | "Unable to allocate fc remote port.\n"); |
3041 | return; | 2912 | return; |
3042 | } | 2913 | } |
3043 | /* | ||
3044 | * Create target mode FC NEXUS in qla_target.c if target mode is | ||
3045 | * enabled.. | ||
3046 | */ | ||
3047 | qlt_fc_port_added(vha, fcport); | ||
3048 | |||
3049 | spin_lock_irqsave(fcport->vha->host->host_lock, flags); | 2914 | spin_lock_irqsave(fcport->vha->host->host_lock, flags); |
3050 | *((fc_port_t **)rport->dd_data) = fcport; | 2915 | *((fc_port_t **)rport->dd_data) = fcport; |
3051 | spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); | 2916 | spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); |
@@ -3082,10 +2947,10 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
3082 | fcport->login_retry = 0; | 2947 | fcport->login_retry = 0; |
3083 | fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); | 2948 | fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); |
3084 | 2949 | ||
3085 | qla2x00_set_fcport_state(fcport, FCS_ONLINE); | ||
3086 | qla2x00_iidma_fcport(vha, fcport); | 2950 | qla2x00_iidma_fcport(vha, fcport); |
3087 | qla24xx_update_fcport_fcp_prio(vha, fcport); | 2951 | qla24xx_update_fcport_fcp_prio(vha, fcport); |
3088 | qla2x00_reg_remote_port(vha, fcport); | 2952 | qla2x00_reg_remote_port(vha, fcport); |
2953 | qla2x00_set_fcport_state(fcport, FCS_ONLINE); | ||
3089 | } | 2954 | } |
3090 | 2955 | ||
3091 | /* | 2956 | /* |
@@ -3102,8 +2967,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
3102 | static int | 2967 | static int |
3103 | qla2x00_configure_fabric(scsi_qla_host_t *vha) | 2968 | qla2x00_configure_fabric(scsi_qla_host_t *vha) |
3104 | { | 2969 | { |
3105 | int rval; | 2970 | int rval, rval2; |
3106 | fc_port_t *fcport; | 2971 | fc_port_t *fcport, *fcptemp; |
3107 | uint16_t next_loopid; | 2972 | uint16_t next_loopid; |
3108 | uint16_t mb[MAILBOX_REGISTER_COUNT]; | 2973 | uint16_t mb[MAILBOX_REGISTER_COUNT]; |
3109 | uint16_t loop_id; | 2974 | uint16_t loop_id; |
@@ -3126,6 +2991,12 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) | |||
3126 | } | 2991 | } |
3127 | vha->device_flags |= SWITCH_FOUND; | 2992 | vha->device_flags |= SWITCH_FOUND; |
3128 | 2993 | ||
2994 | /* Mark devices that need re-synchronization. */ | ||
2995 | rval2 = qla2x00_device_resync(vha); | ||
2996 | if (rval2 == QLA_RSCNS_HANDLED) { | ||
2997 | /* No point doing the scan, just continue. */ | ||
2998 | return (QLA_SUCCESS); | ||
2999 | } | ||
3129 | do { | 3000 | do { |
3130 | /* FDMI support. */ | 3001 | /* FDMI support. */ |
3131 | if (ql2xfdmienable && | 3002 | if (ql2xfdmienable && |
@@ -3137,12 +3008,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) | |||
3137 | loop_id = NPH_SNS; | 3008 | loop_id = NPH_SNS; |
3138 | else | 3009 | else |
3139 | loop_id = SIMPLE_NAME_SERVER; | 3010 | loop_id = SIMPLE_NAME_SERVER; |
3140 | rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, | 3011 | ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, |
3141 | 0xfc, mb, BIT_1|BIT_0); | 3012 | 0xfc, mb, BIT_1 | BIT_0); |
3142 | if (rval != QLA_SUCCESS) { | ||
3143 | set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); | ||
3144 | break; | ||
3145 | } | ||
3146 | if (mb[0] != MBS_COMMAND_COMPLETE) { | 3013 | if (mb[0] != MBS_COMMAND_COMPLETE) { |
3147 | ql_dbg(ql_dbg_disc, vha, 0x2042, | 3014 | ql_dbg(ql_dbg_disc, vha, 0x2042, |
3148 | "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x " | 3015 | "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x " |
@@ -3177,12 +3044,10 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) | |||
3177 | if (rval != QLA_SUCCESS) | 3044 | if (rval != QLA_SUCCESS) |
3178 | break; | 3045 | break; |
3179 | 3046 | ||
3180 | /* Add new ports to existing port list */ | 3047 | /* |
3181 | list_splice_tail_init(&new_fcports, &vha->vp_fcports); | 3048 | * Logout all previous fabric devices marked lost, except |
3182 | 3049 | * FCP2 devices. | |
3183 | /* Starting free loop ID. */ | 3050 | */ |
3184 | next_loopid = ha->min_external_loopid; | ||
3185 | |||
3186 | list_for_each_entry(fcport, &vha->vp_fcports, list) { | 3051 | list_for_each_entry(fcport, &vha->vp_fcports, list) { |
3187 | if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) | 3052 | if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) |
3188 | break; | 3053 | break; |
@@ -3190,9 +3055,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) | |||
3190 | if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) | 3055 | if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) |
3191 | continue; | 3056 | continue; |
3192 | 3057 | ||
3193 | /* Logout lost/gone fabric devices (non-FCP2) */ | 3058 | if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { |
3194 | if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND && | ||
3195 | atomic_read(&fcport->state) == FCS_ONLINE) { | ||
3196 | qla2x00_mark_device_lost(vha, fcport, | 3059 | qla2x00_mark_device_lost(vha, fcport, |
3197 | ql2xplogiabsentdevice, 0); | 3060 | ql2xplogiabsentdevice, 0); |
3198 | if (fcport->loop_id != FC_NO_LOOP_ID && | 3061 | if (fcport->loop_id != FC_NO_LOOP_ID && |
@@ -3204,30 +3067,78 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) | |||
3204 | fcport->d_id.b.domain, | 3067 | fcport->d_id.b.domain, |
3205 | fcport->d_id.b.area, | 3068 | fcport->d_id.b.area, |
3206 | fcport->d_id.b.al_pa); | 3069 | fcport->d_id.b.al_pa); |
3070 | fcport->loop_id = FC_NO_LOOP_ID; | ||
3207 | } | 3071 | } |
3208 | continue; | ||
3209 | } | 3072 | } |
3210 | fcport->scan_state = QLA_FCPORT_SCAN_NONE; | 3073 | } |
3211 | 3074 | ||
3212 | /* Login fabric devices that need a login */ | 3075 | /* Starting free loop ID. */ |
3213 | if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 && | 3076 | next_loopid = ha->min_external_loopid; |
3214 | atomic_read(&vha->loop_down_timer) == 0) { | 3077 | |
3215 | if (fcport->loop_id == FC_NO_LOOP_ID) { | 3078 | /* |
3216 | fcport->loop_id = next_loopid; | 3079 | * Scan through our port list and login entries that need to be |
3217 | rval = qla2x00_find_new_loop_id( | 3080 | * logged in. |
3218 | base_vha, fcport); | 3081 | */ |
3219 | if (rval != QLA_SUCCESS) { | 3082 | list_for_each_entry(fcport, &vha->vp_fcports, list) { |
3220 | /* Ran out of IDs to use */ | 3083 | if (atomic_read(&vha->loop_down_timer) || |
3221 | continue; | 3084 | test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) |
3222 | } | 3085 | break; |
3086 | |||
3087 | if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || | ||
3088 | (fcport->flags & FCF_LOGIN_NEEDED) == 0) | ||
3089 | continue; | ||
3090 | |||
3091 | if (fcport->loop_id == FC_NO_LOOP_ID) { | ||
3092 | fcport->loop_id = next_loopid; | ||
3093 | rval = qla2x00_find_new_loop_id( | ||
3094 | base_vha, fcport); | ||
3095 | if (rval != QLA_SUCCESS) { | ||
3096 | /* Ran out of IDs to use */ | ||
3097 | break; | ||
3223 | } | 3098 | } |
3224 | } | 3099 | } |
3100 | /* Login and update database */ | ||
3101 | qla2x00_fabric_dev_login(vha, fcport, &next_loopid); | ||
3102 | } | ||
3103 | |||
3104 | /* Exit if out of loop IDs. */ | ||
3105 | if (rval != QLA_SUCCESS) { | ||
3106 | break; | ||
3107 | } | ||
3108 | |||
3109 | /* | ||
3110 | * Login and add the new devices to our port list. | ||
3111 | */ | ||
3112 | list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { | ||
3113 | if (atomic_read(&vha->loop_down_timer) || | ||
3114 | test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) | ||
3115 | break; | ||
3116 | |||
3117 | /* Find a new loop ID to use. */ | ||
3118 | fcport->loop_id = next_loopid; | ||
3119 | rval = qla2x00_find_new_loop_id(base_vha, fcport); | ||
3120 | if (rval != QLA_SUCCESS) { | ||
3121 | /* Ran out of IDs to use */ | ||
3122 | break; | ||
3123 | } | ||
3225 | 3124 | ||
3226 | /* Login and update database */ | 3125 | /* Login and update database */ |
3227 | qla2x00_fabric_dev_login(vha, fcport, &next_loopid); | 3126 | qla2x00_fabric_dev_login(vha, fcport, &next_loopid); |
3127 | |||
3128 | if (vha->vp_idx) { | ||
3129 | fcport->vha = vha; | ||
3130 | fcport->vp_idx = vha->vp_idx; | ||
3131 | } | ||
3132 | list_move_tail(&fcport->list, &vha->vp_fcports); | ||
3228 | } | 3133 | } |
3229 | } while (0); | 3134 | } while (0); |
3230 | 3135 | ||
3136 | /* Free all new device structures not processed. */ | ||
3137 | list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { | ||
3138 | list_del(&fcport->list); | ||
3139 | kfree(fcport); | ||
3140 | } | ||
3141 | |||
3231 | if (rval) { | 3142 | if (rval) { |
3232 | ql_dbg(ql_dbg_disc, vha, 0x2068, | 3143 | ql_dbg(ql_dbg_disc, vha, 0x2068, |
3233 | "Configure fabric error exit rval=%d.\n", rval); | 3144 | "Configure fabric error exit rval=%d.\n", rval); |
@@ -3269,21 +3180,20 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, | |||
3269 | rval = QLA_SUCCESS; | 3180 | rval = QLA_SUCCESS; |
3270 | 3181 | ||
3271 | /* Try GID_PT to get device list, else GAN. */ | 3182 | /* Try GID_PT to get device list, else GAN. */ |
3272 | if (!ha->swl) | 3183 | swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL); |
3273 | ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t), | ||
3274 | GFP_KERNEL); | ||
3275 | swl = ha->swl; | ||
3276 | if (!swl) { | 3184 | if (!swl) { |
3277 | /*EMPTY*/ | 3185 | /*EMPTY*/ |
3278 | ql_dbg(ql_dbg_disc, vha, 0x2054, | 3186 | ql_dbg(ql_dbg_disc, vha, 0x2054, |
3279 | "GID_PT allocations failed, fallback on GA_NXT.\n"); | 3187 | "GID_PT allocations failed, fallback on GA_NXT.\n"); |
3280 | } else { | 3188 | } else { |
3281 | memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t)); | ||
3282 | if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) { | 3189 | if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) { |
3190 | kfree(swl); | ||
3283 | swl = NULL; | 3191 | swl = NULL; |
3284 | } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) { | 3192 | } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) { |
3193 | kfree(swl); | ||
3285 | swl = NULL; | 3194 | swl = NULL; |
3286 | } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) { | 3195 | } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) { |
3196 | kfree(swl); | ||
3287 | swl = NULL; | 3197 | swl = NULL; |
3288 | } else if (ql2xiidmaenable && | 3198 | } else if (ql2xiidmaenable && |
3289 | qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) { | 3199 | qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) { |
@@ -3301,6 +3211,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, | |||
3301 | if (new_fcport == NULL) { | 3211 | if (new_fcport == NULL) { |
3302 | ql_log(ql_log_warn, vha, 0x205e, | 3212 | ql_log(ql_log_warn, vha, 0x205e, |
3303 | "Failed to allocate memory for fcport.\n"); | 3213 | "Failed to allocate memory for fcport.\n"); |
3214 | kfree(swl); | ||
3304 | return (QLA_MEMORY_ALLOC_FAILED); | 3215 | return (QLA_MEMORY_ALLOC_FAILED); |
3305 | } | 3216 | } |
3306 | new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); | 3217 | new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); |
@@ -3417,8 +3328,6 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, | |||
3417 | WWN_SIZE)) | 3328 | WWN_SIZE)) |
3418 | continue; | 3329 | continue; |
3419 | 3330 | ||
3420 | fcport->scan_state = QLA_FCPORT_SCAN_FOUND; | ||
3421 | |||
3422 | found++; | 3331 | found++; |
3423 | 3332 | ||
3424 | /* Update port state. */ | 3333 | /* Update port state. */ |
@@ -3440,7 +3349,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, | |||
3440 | */ | 3349 | */ |
3441 | if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { | 3350 | if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { |
3442 | fcport->d_id.b24 = new_fcport->d_id.b24; | 3351 | fcport->d_id.b24 = new_fcport->d_id.b24; |
3443 | qla2x00_clear_loop_id(fcport); | 3352 | fcport->loop_id = FC_NO_LOOP_ID; |
3444 | fcport->flags |= (FCF_FABRIC_DEVICE | | 3353 | fcport->flags |= (FCF_FABRIC_DEVICE | |
3445 | FCF_LOGIN_NEEDED); | 3354 | FCF_LOGIN_NEEDED); |
3446 | break; | 3355 | break; |
@@ -3455,13 +3364,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, | |||
3455 | fcport->flags |= FCF_LOGIN_NEEDED; | 3364 | fcport->flags |= FCF_LOGIN_NEEDED; |
3456 | if (fcport->loop_id != FC_NO_LOOP_ID && | 3365 | if (fcport->loop_id != FC_NO_LOOP_ID && |
3457 | (fcport->flags & FCF_FCP2_DEVICE) == 0 && | 3366 | (fcport->flags & FCF_FCP2_DEVICE) == 0 && |
3458 | (fcport->flags & FCF_ASYNC_SENT) == 0 && | ||
3459 | fcport->port_type != FCT_INITIATOR && | 3367 | fcport->port_type != FCT_INITIATOR && |
3460 | fcport->port_type != FCT_BROADCAST) { | 3368 | fcport->port_type != FCT_BROADCAST) { |
3461 | ha->isp_ops->fabric_logout(vha, fcport->loop_id, | 3369 | ha->isp_ops->fabric_logout(vha, fcport->loop_id, |
3462 | fcport->d_id.b.domain, fcport->d_id.b.area, | 3370 | fcport->d_id.b.domain, fcport->d_id.b.area, |
3463 | fcport->d_id.b.al_pa); | 3371 | fcport->d_id.b.al_pa); |
3464 | qla2x00_clear_loop_id(fcport); | 3372 | fcport->loop_id = FC_NO_LOOP_ID; |
3465 | } | 3373 | } |
3466 | 3374 | ||
3467 | break; | 3375 | break; |
@@ -3478,12 +3386,14 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, | |||
3478 | if (new_fcport == NULL) { | 3386 | if (new_fcport == NULL) { |
3479 | ql_log(ql_log_warn, vha, 0x2066, | 3387 | ql_log(ql_log_warn, vha, 0x2066, |
3480 | "Memory allocation failed for fcport.\n"); | 3388 | "Memory allocation failed for fcport.\n"); |
3389 | kfree(swl); | ||
3481 | return (QLA_MEMORY_ALLOC_FAILED); | 3390 | return (QLA_MEMORY_ALLOC_FAILED); |
3482 | } | 3391 | } |
3483 | new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); | 3392 | new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); |
3484 | new_fcport->d_id.b24 = nxt_d_id.b24; | 3393 | new_fcport->d_id.b24 = nxt_d_id.b24; |
3485 | } | 3394 | } |
3486 | 3395 | ||
3396 | kfree(swl); | ||
3487 | kfree(new_fcport); | 3397 | kfree(new_fcport); |
3488 | 3398 | ||
3489 | return (rval); | 3399 | return (rval); |
@@ -3507,33 +3417,173 @@ int | |||
3507 | qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) | 3417 | qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) |
3508 | { | 3418 | { |
3509 | int rval; | 3419 | int rval; |
3420 | int found; | ||
3421 | fc_port_t *fcport; | ||
3422 | uint16_t first_loop_id; | ||
3510 | struct qla_hw_data *ha = vha->hw; | 3423 | struct qla_hw_data *ha = vha->hw; |
3424 | struct scsi_qla_host *vp; | ||
3425 | struct scsi_qla_host *tvp; | ||
3511 | unsigned long flags = 0; | 3426 | unsigned long flags = 0; |
3512 | 3427 | ||
3513 | rval = QLA_SUCCESS; | 3428 | rval = QLA_SUCCESS; |
3514 | 3429 | ||
3515 | spin_lock_irqsave(&ha->vport_slock, flags); | 3430 | /* Save starting loop ID. */ |
3431 | first_loop_id = dev->loop_id; | ||
3516 | 3432 | ||
3517 | dev->loop_id = find_first_zero_bit(ha->loop_id_map, | 3433 | for (;;) { |
3518 | LOOPID_MAP_SIZE); | 3434 | /* Skip loop ID if already used by adapter. */ |
3519 | if (dev->loop_id >= LOOPID_MAP_SIZE || | 3435 | if (dev->loop_id == vha->loop_id) |
3520 | qla2x00_is_reserved_id(vha, dev->loop_id)) { | 3436 | dev->loop_id++; |
3521 | dev->loop_id = FC_NO_LOOP_ID; | 3437 | |
3522 | rval = QLA_FUNCTION_FAILED; | 3438 | /* Skip reserved loop IDs. */ |
3523 | } else | 3439 | while (qla2x00_is_reserved_id(vha, dev->loop_id)) |
3524 | set_bit(dev->loop_id, ha->loop_id_map); | 3440 | dev->loop_id++; |
3441 | |||
3442 | /* Reset loop ID if passed the end. */ | ||
3443 | if (dev->loop_id > ha->max_loop_id) { | ||
3444 | /* first loop ID. */ | ||
3445 | dev->loop_id = ha->min_external_loopid; | ||
3446 | } | ||
3525 | 3447 | ||
3526 | spin_unlock_irqrestore(&ha->vport_slock, flags); | 3448 | /* Check for loop ID being already in use. */ |
3449 | found = 0; | ||
3450 | fcport = NULL; | ||
3527 | 3451 | ||
3528 | if (rval == QLA_SUCCESS) | 3452 | spin_lock_irqsave(&ha->vport_slock, flags); |
3529 | ql_dbg(ql_dbg_disc, dev->vha, 0x2086, | 3453 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { |
3530 | "Assigning new loopid=%x, portid=%x.\n", | 3454 | list_for_each_entry(fcport, &vp->vp_fcports, list) { |
3531 | dev->loop_id, dev->d_id.b24); | 3455 | if (fcport->loop_id == dev->loop_id && |
3532 | else | 3456 | fcport != dev) { |
3533 | ql_log(ql_log_warn, dev->vha, 0x2087, | 3457 | /* ID possibly in use */ |
3534 | "No loop_id's available, portid=%x.\n", | 3458 | found++; |
3535 | dev->d_id.b24); | 3459 | break; |
3460 | } | ||
3461 | } | ||
3462 | if (found) | ||
3463 | break; | ||
3464 | } | ||
3465 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
3466 | |||
3467 | /* If not in use then it is free to use. */ | ||
3468 | if (!found) { | ||
3469 | break; | ||
3470 | } | ||
3471 | |||
3472 | /* ID in use. Try next value. */ | ||
3473 | dev->loop_id++; | ||
3474 | |||
3475 | /* If wrap around. No free ID to use. */ | ||
3476 | if (dev->loop_id == first_loop_id) { | ||
3477 | dev->loop_id = FC_NO_LOOP_ID; | ||
3478 | rval = QLA_FUNCTION_FAILED; | ||
3479 | break; | ||
3480 | } | ||
3481 | } | ||
3482 | |||
3483 | return (rval); | ||
3484 | } | ||
3485 | |||
3486 | /* | ||
3487 | * qla2x00_device_resync | ||
3488 | * Marks devices in the database that needs resynchronization. | ||
3489 | * | ||
3490 | * Input: | ||
3491 | * ha = adapter block pointer. | ||
3492 | * | ||
3493 | * Context: | ||
3494 | * Kernel context. | ||
3495 | */ | ||
3496 | static int | ||
3497 | qla2x00_device_resync(scsi_qla_host_t *vha) | ||
3498 | { | ||
3499 | int rval; | ||
3500 | uint32_t mask; | ||
3501 | fc_port_t *fcport; | ||
3502 | uint32_t rscn_entry; | ||
3503 | uint8_t rscn_out_iter; | ||
3504 | uint8_t format; | ||
3505 | port_id_t d_id = {}; | ||
3506 | |||
3507 | rval = QLA_RSCNS_HANDLED; | ||
3508 | |||
3509 | while (vha->rscn_out_ptr != vha->rscn_in_ptr || | ||
3510 | vha->flags.rscn_queue_overflow) { | ||
3511 | |||
3512 | rscn_entry = vha->rscn_queue[vha->rscn_out_ptr]; | ||
3513 | format = MSB(MSW(rscn_entry)); | ||
3514 | d_id.b.domain = LSB(MSW(rscn_entry)); | ||
3515 | d_id.b.area = MSB(LSW(rscn_entry)); | ||
3516 | d_id.b.al_pa = LSB(LSW(rscn_entry)); | ||
3517 | |||
3518 | ql_dbg(ql_dbg_disc, vha, 0x2020, | ||
3519 | "RSCN queue entry[%d] = [%02x/%02x%02x%02x].\n", | ||
3520 | vha->rscn_out_ptr, format, d_id.b.domain, d_id.b.area, | ||
3521 | d_id.b.al_pa); | ||
3522 | |||
3523 | vha->rscn_out_ptr++; | ||
3524 | if (vha->rscn_out_ptr == MAX_RSCN_COUNT) | ||
3525 | vha->rscn_out_ptr = 0; | ||
3526 | |||
3527 | /* Skip duplicate entries. */ | ||
3528 | for (rscn_out_iter = vha->rscn_out_ptr; | ||
3529 | !vha->flags.rscn_queue_overflow && | ||
3530 | rscn_out_iter != vha->rscn_in_ptr; | ||
3531 | rscn_out_iter = (rscn_out_iter == | ||
3532 | (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) { | ||
3533 | |||
3534 | if (rscn_entry != vha->rscn_queue[rscn_out_iter]) | ||
3535 | break; | ||
3536 | |||
3537 | ql_dbg(ql_dbg_disc, vha, 0x2021, | ||
3538 | "Skipping duplicate RSCN queue entry found at " | ||
3539 | "[%d].\n", rscn_out_iter); | ||
3540 | |||
3541 | vha->rscn_out_ptr = rscn_out_iter; | ||
3542 | } | ||
3543 | |||
3544 | /* Queue overflow, set switch default case. */ | ||
3545 | if (vha->flags.rscn_queue_overflow) { | ||
3546 | ql_dbg(ql_dbg_disc, vha, 0x2022, | ||
3547 | "device_resync: rscn overflow.\n"); | ||
3548 | |||
3549 | format = 3; | ||
3550 | vha->flags.rscn_queue_overflow = 0; | ||
3551 | } | ||
3552 | |||
3553 | switch (format) { | ||
3554 | case 0: | ||
3555 | mask = 0xffffff; | ||
3556 | break; | ||
3557 | case 1: | ||
3558 | mask = 0xffff00; | ||
3559 | break; | ||
3560 | case 2: | ||
3561 | mask = 0xff0000; | ||
3562 | break; | ||
3563 | default: | ||
3564 | mask = 0x0; | ||
3565 | d_id.b24 = 0; | ||
3566 | vha->rscn_out_ptr = vha->rscn_in_ptr; | ||
3567 | break; | ||
3568 | } | ||
3536 | 3569 | ||
3570 | rval = QLA_SUCCESS; | ||
3571 | |||
3572 | list_for_each_entry(fcport, &vha->vp_fcports, list) { | ||
3573 | if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || | ||
3574 | (fcport->d_id.b24 & mask) != d_id.b24 || | ||
3575 | fcport->port_type == FCT_BROADCAST) | ||
3576 | continue; | ||
3577 | |||
3578 | if (atomic_read(&fcport->state) == FCS_ONLINE) { | ||
3579 | if (format != 3 || | ||
3580 | fcport->port_type != FCT_INITIATOR) { | ||
3581 | qla2x00_mark_device_lost(vha, fcport, | ||
3582 | 0, 0); | ||
3583 | } | ||
3584 | } | ||
3585 | } | ||
3586 | } | ||
3537 | return (rval); | 3587 | return (rval); |
3538 | } | 3588 | } |
3539 | 3589 | ||
@@ -3590,9 +3640,6 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport, | |||
3590 | } else { | 3640 | } else { |
3591 | qla2x00_update_fcport(vha, fcport); | 3641 | qla2x00_update_fcport(vha, fcport); |
3592 | } | 3642 | } |
3593 | } else { | ||
3594 | /* Retry Login. */ | ||
3595 | qla2x00_mark_device_lost(vha, fcport, 1, 0); | ||
3596 | } | 3643 | } |
3597 | 3644 | ||
3598 | return (rval); | 3645 | return (rval); |
@@ -3633,12 +3680,9 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport, | |||
3633 | fcport->d_id.b.area, fcport->d_id.b.al_pa); | 3680 | fcport->d_id.b.area, fcport->d_id.b.al_pa); |
3634 | 3681 | ||
3635 | /* Login fcport on switch. */ | 3682 | /* Login fcport on switch. */ |
3636 | rval = ha->isp_ops->fabric_login(vha, fcport->loop_id, | 3683 | ha->isp_ops->fabric_login(vha, fcport->loop_id, |
3637 | fcport->d_id.b.domain, fcport->d_id.b.area, | 3684 | fcport->d_id.b.domain, fcport->d_id.b.area, |
3638 | fcport->d_id.b.al_pa, mb, BIT_0); | 3685 | fcport->d_id.b.al_pa, mb, BIT_0); |
3639 | if (rval != QLA_SUCCESS) { | ||
3640 | return rval; | ||
3641 | } | ||
3642 | if (mb[0] == MBS_PORT_ID_USED) { | 3686 | if (mb[0] == MBS_PORT_ID_USED) { |
3643 | /* | 3687 | /* |
3644 | * Device has another loop ID. The firmware team | 3688 | * Device has another loop ID. The firmware team |
@@ -3686,12 +3730,6 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport, | |||
3686 | if (mb[10] & BIT_1) | 3730 | if (mb[10] & BIT_1) |
3687 | fcport->supported_classes |= FC_COS_CLASS3; | 3731 | fcport->supported_classes |= FC_COS_CLASS3; |
3688 | 3732 | ||
3689 | if (IS_FWI2_CAPABLE(ha)) { | ||
3690 | if (mb[10] & BIT_7) | ||
3691 | fcport->flags |= | ||
3692 | FCF_CONF_COMP_SUPPORTED; | ||
3693 | } | ||
3694 | |||
3695 | rval = QLA_SUCCESS; | 3733 | rval = QLA_SUCCESS; |
3696 | break; | 3734 | break; |
3697 | } else if (mb[0] == MBS_LOOP_ID_USED) { | 3735 | } else if (mb[0] == MBS_LOOP_ID_USED) { |
@@ -3732,7 +3770,7 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport, | |||
3732 | ha->isp_ops->fabric_logout(vha, fcport->loop_id, | 3770 | ha->isp_ops->fabric_logout(vha, fcport->loop_id, |
3733 | fcport->d_id.b.domain, fcport->d_id.b.area, | 3771 | fcport->d_id.b.domain, fcport->d_id.b.area, |
3734 | fcport->d_id.b.al_pa); | 3772 | fcport->d_id.b.al_pa); |
3735 | qla2x00_clear_loop_id(fcport); | 3773 | fcport->loop_id = FC_NO_LOOP_ID; |
3736 | fcport->login_retry = 0; | 3774 | fcport->login_retry = 0; |
3737 | 3775 | ||
3738 | rval = 3; | 3776 | rval = 3; |
@@ -3891,315 +3929,8 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha) | |||
3891 | spin_unlock_irqrestore(&ha->vport_slock, flags); | 3929 | spin_unlock_irqrestore(&ha->vport_slock, flags); |
3892 | } | 3930 | } |
3893 | 3931 | ||
3894 | /* Assumes idc_lock always held on entry */ | ||
3895 | void | ||
3896 | qla83xx_reset_ownership(scsi_qla_host_t *vha) | ||
3897 | { | ||
3898 | struct qla_hw_data *ha = vha->hw; | ||
3899 | uint32_t drv_presence, drv_presence_mask; | ||
3900 | uint32_t dev_part_info1, dev_part_info2, class_type; | ||
3901 | uint32_t class_type_mask = 0x3; | ||
3902 | uint16_t fcoe_other_function = 0xffff, i; | ||
3903 | |||
3904 | qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); | ||
3905 | |||
3906 | qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1); | ||
3907 | qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2); | ||
3908 | for (i = 0; i < 8; i++) { | ||
3909 | class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask); | ||
3910 | if ((class_type == QLA83XX_CLASS_TYPE_FCOE) && | ||
3911 | (i != ha->portnum)) { | ||
3912 | fcoe_other_function = i; | ||
3913 | break; | ||
3914 | } | ||
3915 | } | ||
3916 | if (fcoe_other_function == 0xffff) { | ||
3917 | for (i = 0; i < 8; i++) { | ||
3918 | class_type = ((dev_part_info2 >> (i * 4)) & | ||
3919 | class_type_mask); | ||
3920 | if ((class_type == QLA83XX_CLASS_TYPE_FCOE) && | ||
3921 | ((i + 8) != ha->portnum)) { | ||
3922 | fcoe_other_function = i + 8; | ||
3923 | break; | ||
3924 | } | ||
3925 | } | ||
3926 | } | ||
3927 | /* | ||
3928 | * Prepare drv-presence mask based on fcoe functions present. | ||
3929 | * However consider only valid physical fcoe function numbers (0-15). | ||
3930 | */ | ||
3931 | drv_presence_mask = ~((1 << (ha->portnum)) | | ||
3932 | ((fcoe_other_function == 0xffff) ? | ||
3933 | 0 : (1 << (fcoe_other_function)))); | ||
3934 | |||
3935 | /* We are the reset owner iff: | ||
3936 | * - No other protocol drivers present. | ||
3937 | * - This is the lowest among fcoe functions. */ | ||
3938 | if (!(drv_presence & drv_presence_mask) && | ||
3939 | (ha->portnum < fcoe_other_function)) { | ||
3940 | ql_dbg(ql_dbg_p3p, vha, 0xb07f, | ||
3941 | "This host is Reset owner.\n"); | ||
3942 | ha->flags.nic_core_reset_owner = 1; | ||
3943 | } | ||
3944 | } | ||
3945 | |||
3946 | static int | ||
3947 | __qla83xx_set_drv_ack(scsi_qla_host_t *vha) | ||
3948 | { | ||
3949 | int rval = QLA_SUCCESS; | ||
3950 | struct qla_hw_data *ha = vha->hw; | ||
3951 | uint32_t drv_ack; | ||
3952 | |||
3953 | rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); | ||
3954 | if (rval == QLA_SUCCESS) { | ||
3955 | drv_ack |= (1 << ha->portnum); | ||
3956 | rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack); | ||
3957 | } | ||
3958 | |||
3959 | return rval; | ||
3960 | } | ||
3961 | |||
3962 | static int | ||
3963 | __qla83xx_clear_drv_ack(scsi_qla_host_t *vha) | ||
3964 | { | ||
3965 | int rval = QLA_SUCCESS; | ||
3966 | struct qla_hw_data *ha = vha->hw; | ||
3967 | uint32_t drv_ack; | ||
3968 | |||
3969 | rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); | ||
3970 | if (rval == QLA_SUCCESS) { | ||
3971 | drv_ack &= ~(1 << ha->portnum); | ||
3972 | rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack); | ||
3973 | } | ||
3974 | |||
3975 | return rval; | ||
3976 | } | ||
3977 | |||
3978 | static const char * | ||
3979 | qla83xx_dev_state_to_string(uint32_t dev_state) | ||
3980 | { | ||
3981 | switch (dev_state) { | ||
3982 | case QLA8XXX_DEV_COLD: | ||
3983 | return "COLD/RE-INIT"; | ||
3984 | case QLA8XXX_DEV_INITIALIZING: | ||
3985 | return "INITIALIZING"; | ||
3986 | case QLA8XXX_DEV_READY: | ||
3987 | return "READY"; | ||
3988 | case QLA8XXX_DEV_NEED_RESET: | ||
3989 | return "NEED RESET"; | ||
3990 | case QLA8XXX_DEV_NEED_QUIESCENT: | ||
3991 | return "NEED QUIESCENT"; | ||
3992 | case QLA8XXX_DEV_FAILED: | ||
3993 | return "FAILED"; | ||
3994 | case QLA8XXX_DEV_QUIESCENT: | ||
3995 | return "QUIESCENT"; | ||
3996 | default: | ||
3997 | return "Unknown"; | ||
3998 | } | ||
3999 | } | ||
4000 | |||
4001 | /* Assumes idc-lock always held on entry */ | ||
4002 | void | ||
4003 | qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type) | ||
4004 | { | ||
4005 | struct qla_hw_data *ha = vha->hw; | ||
4006 | uint32_t idc_audit_reg = 0, duration_secs = 0; | ||
4007 | |||
4008 | switch (audit_type) { | ||
4009 | case IDC_AUDIT_TIMESTAMP: | ||
4010 | ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000); | ||
4011 | idc_audit_reg = (ha->portnum) | | ||
4012 | (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8); | ||
4013 | qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg); | ||
4014 | break; | ||
4015 | |||
4016 | case IDC_AUDIT_COMPLETION: | ||
4017 | duration_secs = ((jiffies_to_msecs(jiffies) - | ||
4018 | jiffies_to_msecs(ha->idc_audit_ts)) / 1000); | ||
4019 | idc_audit_reg = (ha->portnum) | | ||
4020 | (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8); | ||
4021 | qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg); | ||
4022 | break; | ||
4023 | |||
4024 | default: | ||
4025 | ql_log(ql_log_warn, vha, 0xb078, | ||
4026 | "Invalid audit type specified.\n"); | ||
4027 | break; | ||
4028 | } | ||
4029 | } | ||
4030 | |||
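qla83xx_idc_audit() above packs three fields into the single IDC_AUDIT register: the port number in the low bits, the audit type at bit 7, and a seconds value (a start timestamp for IDC_AUDIT_TIMESTAMP, a duration for IDC_AUDIT_COMPLETION) starting at bit 8. A small sketch of the encoding plus a matching decoder that could be used when reading the register back from a dump; the audit-type constants and the 7-bit port field assumed by the decoder are inferred from the shift positions in the hunk above, not taken from the driver headers:

#include <stdint.h>
#include <stdio.h>

#define IDC_AUDIT_TIMESTAMP  0   /* assumed values */
#define IDC_AUDIT_COMPLETION 1

static uint32_t idc_audit_encode(uint16_t portnum, int type, uint32_t seconds)
{
        return (uint32_t)portnum | ((uint32_t)type << 7) | (seconds << 8);
}

static void idc_audit_decode(uint32_t reg)
{
        printf("port=%u type=%s value=%u s\n",
               reg & 0x7f,
               (reg >> 7) & 1 ? "completion/duration" : "timestamp",
               reg >> 8);
}

int main(void)
{
        uint32_t reg = idc_audit_encode(2, IDC_AUDIT_COMPLETION, 42);

        idc_audit_decode(reg);   /* port=2 type=completion/duration value=42 s */
        return 0;
}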
4031 | /* Assumes idc_lock always held on entry */ | ||
4032 | static int | ||
4033 | qla83xx_initiating_reset(scsi_qla_host_t *vha) | ||
4034 | { | ||
4035 | struct qla_hw_data *ha = vha->hw; | ||
4036 | uint32_t idc_control, dev_state; | ||
4037 | |||
4038 | __qla83xx_get_idc_control(vha, &idc_control); | ||
4039 | if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) { | ||
4040 | ql_log(ql_log_info, vha, 0xb080, | ||
4041 | "NIC Core reset has been disabled. idc-control=0x%x\n", | ||
4042 | idc_control); | ||
4043 | return QLA_FUNCTION_FAILED; | ||
4044 | } | ||
4045 | |||
4046 | /* Set NEED-RESET iff in READY state and we are the reset-owner */ | ||
4047 | qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state); | ||
4048 | if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) { | ||
4049 | qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, | ||
4050 | QLA8XXX_DEV_NEED_RESET); | ||
4051 | ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n"); | ||
4052 | qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP); | ||
4053 | } else { | ||
4054 | const char *state = qla83xx_dev_state_to_string(dev_state); | ||
4055 | ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state); | ||
4056 | |||
4057 | /* SV: XXX: Is timeout required here? */ | ||
4058 | /* Wait for IDC state change READY -> NEED_RESET */ | ||
4059 | while (dev_state == QLA8XXX_DEV_READY) { | ||
4060 | qla83xx_idc_unlock(vha, 0); | ||
4061 | msleep(200); | ||
4062 | qla83xx_idc_lock(vha, 0); | ||
4063 | qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state); | ||
4064 | } | ||
4065 | } | ||
4066 | |||
4067 | /* Send IDC ack by writing to drv-ack register */ | ||
4068 | __qla83xx_set_drv_ack(vha); | ||
4069 | |||
4070 | return QLA_SUCCESS; | ||
4071 | } | ||
4072 | |||
4073 | int | ||
4074 | __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control) | ||
4075 | { | ||
4076 | return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control); | ||
4077 | } | ||
4078 | |||
4079 | int | ||
4080 | __qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control) | ||
4081 | { | ||
4082 | return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control); | ||
4083 | } | ||
4084 | |||
4085 | static int | ||
4086 | qla83xx_check_driver_presence(scsi_qla_host_t *vha) | ||
4087 | { | ||
4088 | uint32_t drv_presence = 0; | ||
4089 | struct qla_hw_data *ha = vha->hw; | ||
4090 | |||
4091 | qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); | ||
4092 | if (drv_presence & (1 << ha->portnum)) | ||
4093 | return QLA_SUCCESS; | ||
4094 | else | ||
4095 | return QLA_TEST_FAILED; | ||
4096 | } | ||
4097 | |||
4098 | int | ||
4099 | qla83xx_nic_core_reset(scsi_qla_host_t *vha) | ||
4100 | { | ||
4101 | int rval = QLA_SUCCESS; | ||
4102 | struct qla_hw_data *ha = vha->hw; | ||
4103 | |||
4104 | ql_dbg(ql_dbg_p3p, vha, 0xb058, | ||
4105 | "Entered %s().\n", __func__); | ||
4106 | |||
4107 | if (vha->device_flags & DFLG_DEV_FAILED) { | ||
4108 | ql_log(ql_log_warn, vha, 0xb059, | ||
4109 | "Device in unrecoverable FAILED state.\n"); | ||
4110 | return QLA_FUNCTION_FAILED; | ||
4111 | } | ||
4112 | |||
4113 | qla83xx_idc_lock(vha, 0); | ||
4114 | |||
4115 | if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) { | ||
4116 | ql_log(ql_log_warn, vha, 0xb05a, | ||
4117 | "Function=0x%x has been removed from IDC participation.\n", | ||
4118 | ha->portnum); | ||
4119 | rval = QLA_FUNCTION_FAILED; | ||
4120 | goto exit; | ||
4121 | } | ||
4122 | |||
4123 | qla83xx_reset_ownership(vha); | ||
4124 | |||
4125 | rval = qla83xx_initiating_reset(vha); | ||
4126 | |||
4127 | /* | ||
4128 | * Perform reset if we are the reset-owner, | ||
4129 | * else wait till IDC state changes to READY/FAILED. | ||
4130 | */ | ||
4131 | if (rval == QLA_SUCCESS) { | ||
4132 | rval = qla83xx_idc_state_handler(vha); | ||
4133 | |||
4134 | if (rval == QLA_SUCCESS) | ||
4135 | ha->flags.nic_core_hung = 0; | ||
4136 | __qla83xx_clear_drv_ack(vha); | ||
4137 | } | ||
4138 | |||
4139 | exit: | ||
4140 | qla83xx_idc_unlock(vha, 0); | ||
4141 | |||
4142 | ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__); | ||
4143 | |||
4144 | return rval; | ||
4145 | } | ||
4146 | |||
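The removed qla83xx_nic_core_reset() above fixes the ordering of the recovery steps: take the IDC lock, verify this function is still an IDC participant, re-evaluate reset ownership, move the device state to NEED RESET (or wait for another function to), run the IDC state handler, and only then clear the driver ack and drop the lock. A compressed control-flow sketch with every hardware call stubbed out; the stub bodies are obviously not the driver's implementations, only the ordering mirrors the hunk above:

#include <stdio.h>

enum { OK = 0, FAIL = 1 };

/* Stubs standing in for the IDC helpers shown in the hunks above. */
static void idc_lock(void)              { }
static void idc_unlock(void)            { }
static int  check_driver_presence(void) { return OK; }
static void reset_ownership(void)       { }
static int  initiating_reset(void)      { return OK; }
static int  idc_state_handler(void)     { return OK; }
static void clear_drv_ack(void)         { }

static int nic_core_reset(void)
{
        int rval;

        idc_lock();

        if (check_driver_presence() != OK) {
                rval = FAIL;                 /* removed from IDC participation */
                goto out;
        }

        reset_ownership();
        rval = initiating_reset();           /* NEED RESET + drv-ack */

        if (rval == OK) {
                rval = idc_state_handler();  /* wait for READY/FAILED */
                clear_drv_ack();
        }
out:
        idc_unlock();
        return rval;
}

int main(void)
{
        printf("nic_core_reset -> %d\n", nic_core_reset());
        return 0;
}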
4147 | int | ||
4148 | qla2xxx_mctp_dump(scsi_qla_host_t *vha) | ||
4149 | { | ||
4150 | struct qla_hw_data *ha = vha->hw; | ||
4151 | int rval = QLA_FUNCTION_FAILED; | ||
4152 | |||
4153 | if (!IS_MCTP_CAPABLE(ha)) { | ||
4154 | /* This message can be removed from the final version */ | ||
4155 | ql_log(ql_log_info, vha, 0x506d, | ||
4156 | "This board is not MCTP capable\n"); | ||
4157 | return rval; | ||
4158 | } | ||
4159 | |||
4160 | if (!ha->mctp_dump) { | ||
4161 | ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev, | ||
4162 | MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL); | ||
4163 | |||
4164 | if (!ha->mctp_dump) { | ||
4165 | ql_log(ql_log_warn, vha, 0x506e, | ||
4166 | "Failed to allocate memory for mctp dump\n"); | ||
4167 | return rval; | ||
4168 | } | ||
4169 | } | ||
4170 | |||
4171 | #define MCTP_DUMP_STR_ADDR 0x00000000 | ||
4172 | rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma, | ||
4173 | MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4); | ||
4174 | if (rval != QLA_SUCCESS) { | ||
4175 | ql_log(ql_log_warn, vha, 0x506f, | ||
4176 | "Failed to capture mctp dump\n"); | ||
4177 | } else { | ||
4178 | ql_log(ql_log_info, vha, 0x5070, | ||
4179 | "Mctp dump capture for host (%ld/%p).\n", | ||
4180 | vha->host_no, ha->mctp_dump); | ||
4181 | ha->mctp_dumped = 1; | ||
4182 | } | ||
4183 | |||
4184 | if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) { | ||
4185 | ha->flags.nic_core_reset_hdlr_active = 1; | ||
4186 | rval = qla83xx_restart_nic_firmware(vha); | ||
4187 | if (rval) | ||
4188 | /* NIC Core reset failed. */ | ||
4189 | ql_log(ql_log_warn, vha, 0x5071, | ||
4190 | "Failed to restart nic firmware\n"); | ||
4191 | else | ||
4192 | ql_dbg(ql_dbg_p3p, vha, 0xb084, | ||
4193 | "Restarted NIC firmware successfully.\n"); | ||
4194 | ha->flags.nic_core_reset_hdlr_active = 0; | ||
4195 | } | ||
4196 | |||
4197 | return rval; | ||
4198 | |||
4199 | } | ||
4200 | |||
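qla2xxx_mctp_dump() above allocates the MCTP dump buffer lazily with dma_alloc_coherent() on first use and keeps it around, so a later capture reuses the same buffer instead of paying the allocation cost again or failing late. The same allocate-once pattern in plain C, with the coherent DMA allocation replaced by malloc; the buffer size and struct layout here are illustrative, only the reuse-on-second-call behaviour follows the hunk above:

#include <stdio.h>
#include <stdlib.h>

#define MCTP_DUMP_SIZE (64 * 1024)   /* illustrative size, not the driver's constant */

struct hw_data {
        void *mctp_dump;                 /* stands in for ha->mctp_dump */
};

static void *mctp_dump_buffer(struct hw_data *ha)
{
        if (!ha->mctp_dump) {
                /* dma_alloc_coherent() in the driver; malloc here. */
                ha->mctp_dump = malloc(MCTP_DUMP_SIZE);
                if (!ha->mctp_dump)
                        fprintf(stderr, "Failed to allocate memory for mctp dump\n");
        }
        return ha->mctp_dump;
}

int main(void)
{
        struct hw_data ha = { 0 };

        void *a = mctp_dump_buffer(&ha);
        void *b = mctp_dump_buffer(&ha);   /* second call reuses the buffer */

        printf("same buffer: %s\n", a == b ? "yes" : "no");
        free(ha.mctp_dump);
        return 0;
}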
4201 | /* | 3932 | /* |
4202 | * qla2x00_quiesce_io | 3933 | * qla82xx_quiescent_state_cleanup |
4203 | * Description: This function will block the new I/Os | 3934 | * Description: This function will block the new I/Os |
4204 | * It's not aborting any I/Os as context | 3935 | * It's not aborting any I/Os as context |
4205 | * is not destroyed during quiescence | 3936 | * is not destroyed during quiescence |
@@ -4207,20 +3938,20 @@ qla2xxx_mctp_dump(scsi_qla_host_t *vha) | |||
4207 | * return : void | 3938 | * return : void |
4208 | */ | 3939 | */ |
4209 | void | 3940 | void |
4210 | qla2x00_quiesce_io(scsi_qla_host_t *vha) | 3941 | qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha) |
4211 | { | 3942 | { |
4212 | struct qla_hw_data *ha = vha->hw; | 3943 | struct qla_hw_data *ha = vha->hw; |
4213 | struct scsi_qla_host *vp; | 3944 | struct scsi_qla_host *vp; |
4214 | 3945 | ||
4215 | ql_dbg(ql_dbg_dpc, vha, 0x401d, | 3946 | ql_dbg(ql_dbg_p3p, vha, 0xb002, |
4216 | "Quiescing I/O - ha=%p.\n", ha); | 3947 | "Performing ISP error recovery - ha=%p.\n", ha); |
4217 | 3948 | ||
4218 | atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); | 3949 | atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); |
4219 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { | 3950 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { |
4220 | atomic_set(&vha->loop_state, LOOP_DOWN); | 3951 | atomic_set(&vha->loop_state, LOOP_DOWN); |
4221 | qla2x00_mark_all_devices_lost(vha, 0); | 3952 | qla2x00_mark_all_devices_lost(vha, 0); |
4222 | list_for_each_entry(vp, &ha->vp_list, list) | 3953 | list_for_each_entry(vp, &ha->vp_list, list) |
4223 | qla2x00_mark_all_devices_lost(vp, 0); | 3954 | qla2x00_mark_all_devices_lost(vha, 0); |
4224 | } else { | 3955 | } else { |
4225 | if (!atomic_read(&vha->loop_down_timer)) | 3956 | if (!atomic_read(&vha->loop_down_timer)) |
4226 | atomic_set(&vha->loop_down_timer, | 3957 | atomic_set(&vha->loop_down_timer, |
@@ -4245,7 +3976,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) | |||
4245 | vha->flags.online = 0; | 3976 | vha->flags.online = 0; |
4246 | ha->flags.chip_reset_done = 0; | 3977 | ha->flags.chip_reset_done = 0; |
4247 | clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | 3978 | clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
4248 | vha->qla_stats.total_isp_aborts++; | 3979 | ha->qla_stats.total_isp_aborts++; |
4249 | 3980 | ||
4250 | ql_log(ql_log_info, vha, 0x00af, | 3981 | ql_log(ql_log_info, vha, 0x00af, |
4251 | "Performing ISP error recovery - ha=%p.\n", ha); | 3982 | "Performing ISP error recovery - ha=%p.\n", ha); |
@@ -4336,14 +4067,6 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) | |||
4336 | if (vha->flags.online) { | 4067 | if (vha->flags.online) { |
4337 | qla2x00_abort_isp_cleanup(vha); | 4068 | qla2x00_abort_isp_cleanup(vha); |
4338 | 4069 | ||
4339 | if (IS_QLA8031(ha)) { | ||
4340 | ql_dbg(ql_dbg_p3p, vha, 0xb05c, | ||
4341 | "Clearing fcoe driver presence.\n"); | ||
4342 | if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS) | ||
4343 | ql_dbg(ql_dbg_p3p, vha, 0xb073, | ||
4344 | "Error while clearing DRV-Presence.\n"); | ||
4345 | } | ||
4346 | |||
4347 | if (unlikely(pci_channel_offline(ha->pdev) && | 4070 | if (unlikely(pci_channel_offline(ha->pdev) && |
4348 | ha->flags.pci_channel_io_perm_failure)) { | 4071 | ha->flags.pci_channel_io_perm_failure)) { |
4349 | clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); | 4072 | clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); |
@@ -4373,8 +4096,15 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) | |||
4373 | ha->isp_abort_cnt = 0; | 4096 | ha->isp_abort_cnt = 0; |
4374 | clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); | 4097 | clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); |
4375 | 4098 | ||
4376 | if (IS_QLA81XX(ha) || IS_QLA8031(ha)) | 4099 | if (IS_QLA81XX(ha)) |
4377 | qla2x00_get_fw_version(vha); | 4100 | qla2x00_get_fw_version(vha, |
4101 | &ha->fw_major_version, | ||
4102 | &ha->fw_minor_version, | ||
4103 | &ha->fw_subminor_version, | ||
4104 | &ha->fw_attributes, &ha->fw_memory_size, | ||
4105 | ha->mpi_version, &ha->mpi_capabilities, | ||
4106 | ha->phy_version); | ||
4107 | |||
4378 | if (ha->fce) { | 4108 | if (ha->fce) { |
4379 | ha->flags.fce_enabled = 1; | 4109 | ha->flags.fce_enabled = 1; |
4380 | memset(ha->fce, 0, | 4110 | memset(ha->fce, 0, |
@@ -4452,16 +4182,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) | |||
4452 | } | 4182 | } |
4453 | spin_unlock_irqrestore(&ha->vport_slock, flags); | 4183 | spin_unlock_irqrestore(&ha->vport_slock, flags); |
4454 | 4184 | ||
4455 | if (IS_QLA8031(ha)) { | ||
4456 | ql_dbg(ql_dbg_p3p, vha, 0xb05d, | ||
4457 | "Setting back fcoe driver presence.\n"); | ||
4458 | if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS) | ||
4459 | ql_dbg(ql_dbg_p3p, vha, 0xb074, | ||
4460 | "Error while setting DRV-Presence.\n"); | ||
4461 | } | ||
4462 | } else { | 4185 | } else { |
4463 | ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n", | 4186 | ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n"); |
4464 | __func__); | ||
4465 | } | 4187 | } |
4466 | 4188 | ||
4467 | return(status); | 4189 | return(status); |
@@ -4485,7 +4207,6 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) | |||
4485 | struct qla_hw_data *ha = vha->hw; | 4207 | struct qla_hw_data *ha = vha->hw; |
4486 | struct req_que *req = ha->req_q_map[0]; | 4208 | struct req_que *req = ha->req_q_map[0]; |
4487 | struct rsp_que *rsp = ha->rsp_q_map[0]; | 4209 | struct rsp_que *rsp = ha->rsp_q_map[0]; |
4488 | unsigned long flags; | ||
4489 | 4210 | ||
4490 | /* If firmware needs to be loaded */ | 4211 | /* If firmware needs to be loaded */ |
4491 | if (qla2x00_isp_firmware(vha)) { | 4212 | if (qla2x00_isp_firmware(vha)) { |
@@ -4510,16 +4231,6 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) | |||
4510 | qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); | 4231 | qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); |
4511 | 4232 | ||
4512 | vha->flags.online = 1; | 4233 | vha->flags.online = 1; |
4513 | |||
4514 | /* | ||
4515 | * Process any ATIO queue entries that came in | ||
4516 | * while we weren't online. | ||
4517 | */ | ||
4518 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
4519 | if (qla_tgt_mode_enabled(vha)) | ||
4520 | qlt_24xx_process_atio_queue(vha); | ||
4521 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
4522 | |||
4523 | /* Wait at most MAX_TARGET RSCNs for a stable link. */ | 4234 | /* Wait at most MAX_TARGET RSCNs for a stable link. */ |
4524 | wait_time = 256; | 4235 | wait_time = 256; |
4525 | do { | 4236 | do { |
@@ -4709,7 +4420,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) | |||
4709 | nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { | 4420 | nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { |
4710 | /* Reset NVRAM data. */ | 4421 | /* Reset NVRAM data. */ |
4711 | ql_log(ql_log_warn, vha, 0x006b, | 4422 | ql_log(ql_log_warn, vha, 0x006b, |
4712 | "Inconsistent NVRAM detected: checksum=0x%x id=%c " | 4423 | "Inconisistent NVRAM detected: checksum=0x%x id=%c " |
4713 | "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version); | 4424 | "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version); |
4714 | ql_log(ql_log_warn, vha, 0x006c, | 4425 | ql_log(ql_log_warn, vha, 0x006c, |
4715 | "Falling back to functioning (yet invalid -- WWPN) " | 4426 | "Falling back to functioning (yet invalid -- WWPN) " |
@@ -4760,15 +4471,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) | |||
4760 | rval = 1; | 4471 | rval = 1; |
4761 | } | 4472 | } |
4762 | 4473 | ||
4763 | if (!qla_ini_mode_enabled(vha)) { | ||
4764 | /* Don't enable full login after initial LIP */ | ||
4765 | nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); | ||
4766 | /* Don't enable LIP full login for initiator */ | ||
4767 | nv->host_p &= __constant_cpu_to_le32(~BIT_10); | ||
4768 | } | ||
4769 | |||
4770 | qlt_24xx_config_nvram_stage1(vha, nv); | ||
4771 | |||
4772 | /* Reset Initialization control block */ | 4474 | /* Reset Initialization control block */ |
4773 | memset(icb, 0, ha->init_cb_size); | 4475 | memset(icb, 0, ha->init_cb_size); |
4774 | 4476 | ||
@@ -4796,10 +4498,8 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) | |||
4796 | qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), | 4498 | qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), |
4797 | "QLA2462"); | 4499 | "QLA2462"); |
4798 | 4500 | ||
4799 | qlt_24xx_config_nvram_stage2(vha, icb); | 4501 | /* Use alternate WWN? */ |
4800 | |||
4801 | if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { | 4502 | if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { |
4802 | /* Use alternate WWN? */ | ||
4803 | memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); | 4503 | memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); |
4804 | memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); | 4504 | memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); |
4805 | } | 4505 | } |
@@ -4933,7 +4633,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, | |||
4933 | struct req_que *req = ha->req_q_map[0]; | 4633 | struct req_que *req = ha->req_q_map[0]; |
4934 | 4634 | ||
4935 | ql_dbg(ql_dbg_init, vha, 0x008b, | 4635 | ql_dbg(ql_dbg_init, vha, 0x008b, |
4936 | "FW: Loading firmware from flash (%x).\n", faddr); | 4636 | "Loading firmware from flash (%x).\n", faddr); |
4937 | 4637 | ||
4938 | rval = QLA_SUCCESS; | 4638 | rval = QLA_SUCCESS; |
4939 | 4639 | ||
@@ -5131,8 +4831,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) | |||
5131 | return QLA_FUNCTION_FAILED; | 4831 | return QLA_FUNCTION_FAILED; |
5132 | } | 4832 | } |
5133 | 4833 | ||
5134 | ql_dbg(ql_dbg_init, vha, 0x0092, | 4834 | ql_log(ql_log_info, vha, 0x0092, |
5135 | "FW: Loading via request-firmware.\n"); | 4835 | "Loading via request-firmware.\n"); |
5136 | 4836 | ||
5137 | rval = QLA_SUCCESS; | 4837 | rval = QLA_SUCCESS; |
5138 | 4838 | ||
@@ -5269,6 +4969,7 @@ try_blob_fw: | |||
5269 | 4969 | ||
5270 | ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n"); | 4970 | ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n"); |
5271 | ha->flags.running_gold_fw = 1; | 4971 | ha->flags.running_gold_fw = 1; |
4972 | |||
5272 | return rval; | 4973 | return rval; |
5273 | } | 4974 | } |
5274 | 4975 | ||
@@ -5303,7 +5004,6 @@ int | |||
5303 | qla24xx_configure_vhba(scsi_qla_host_t *vha) | 5004 | qla24xx_configure_vhba(scsi_qla_host_t *vha) |
5304 | { | 5005 | { |
5305 | int rval = QLA_SUCCESS; | 5006 | int rval = QLA_SUCCESS; |
5306 | int rval2; | ||
5307 | uint16_t mb[MAILBOX_REGISTER_COUNT]; | 5007 | uint16_t mb[MAILBOX_REGISTER_COUNT]; |
5308 | struct qla_hw_data *ha = vha->hw; | 5008 | struct qla_hw_data *ha = vha->hw; |
5309 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); | 5009 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); |
@@ -5328,18 +5028,12 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha) | |||
5328 | vha->flags.management_server_logged_in = 0; | 5028 | vha->flags.management_server_logged_in = 0; |
5329 | 5029 | ||
5330 | /* Login to SNS first */ | 5030 | /* Login to SNS first */ |
5331 | rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, | 5031 | ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1); |
5332 | BIT_1); | 5032 | if (mb[0] != MBS_COMMAND_COMPLETE) { |
5333 | if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) { | 5033 | ql_dbg(ql_dbg_init, vha, 0x0103, |
5334 | if (rval2 == QLA_MEMORY_ALLOC_FAILED) | 5034 | "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x " |
5335 | ql_dbg(ql_dbg_init, vha, 0x0120, | 5035 | "mb[6]=%x mb[7]=%x.\n", |
5336 | "Failed SNS login: loop_id=%x, rval2=%d\n", | 5036 | NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]); |
5337 | NPH_SNS, rval2); | ||
5338 | else | ||
5339 | ql_dbg(ql_dbg_init, vha, 0x0103, | ||
5340 | "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " | ||
5341 | "mb[2]=%x mb[6]=%x mb[7]=%x.\n", | ||
5342 | NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]); | ||
5343 | return (QLA_FUNCTION_FAILED); | 5037 | return (QLA_FUNCTION_FAILED); |
5344 | } | 5038 | } |
5345 | 5039 | ||
@@ -5470,7 +5164,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) | |||
5470 | nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { | 5164 | nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { |
5471 | /* Reset NVRAM data. */ | 5165 | /* Reset NVRAM data. */ |
5472 | ql_log(ql_log_info, vha, 0x0073, | 5166 | ql_log(ql_log_info, vha, 0x0073, |
5473 | "Inconsistent NVRAM detected: checksum=0x%x id=%c " | 5167 | "Inconisistent NVRAM detected: checksum=0x%x id=%c " |
5474 | "version=0x%x.\n", chksum, nv->id[0], | 5168 | "version=0x%x.\n", chksum, nv->id[0], |
5475 | le16_to_cpu(nv->nvram_version)); | 5169 | le16_to_cpu(nv->nvram_version)); |
5476 | ql_log(ql_log_info, vha, 0x0074, | 5170 | ql_log(ql_log_info, vha, 0x0074, |
@@ -5515,10 +5209,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) | |||
5515 | nv->reset_delay = 5; | 5209 | nv->reset_delay = 5; |
5516 | nv->max_luns_per_target = __constant_cpu_to_le16(128); | 5210 | nv->max_luns_per_target = __constant_cpu_to_le16(128); |
5517 | nv->port_down_retry_count = __constant_cpu_to_le16(30); | 5211 | nv->port_down_retry_count = __constant_cpu_to_le16(30); |
5518 | nv->link_down_timeout = __constant_cpu_to_le16(180); | 5212 | nv->link_down_timeout = __constant_cpu_to_le16(30); |
5519 | nv->enode_mac[0] = 0x00; | 5213 | nv->enode_mac[0] = 0x00; |
5520 | nv->enode_mac[1] = 0xC0; | 5214 | nv->enode_mac[1] = 0x02; |
5521 | nv->enode_mac[2] = 0xDD; | 5215 | nv->enode_mac[2] = 0x03; |
5522 | nv->enode_mac[3] = 0x04; | 5216 | nv->enode_mac[3] = 0x04; |
5523 | nv->enode_mac[4] = 0x05; | 5217 | nv->enode_mac[4] = 0x05; |
5524 | nv->enode_mac[5] = 0x06 + ha->port_no; | 5218 | nv->enode_mac[5] = 0x06 + ha->port_no; |
@@ -5526,9 +5220,6 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) | |||
5526 | rval = 1; | 5220 | rval = 1; |
5527 | } | 5221 | } |
5528 | 5222 | ||
5529 | if (IS_T10_PI_CAPABLE(ha)) | ||
5530 | nv->frame_payload_size &= ~7; | ||
5531 | |||
5532 | /* Reset Initialization control block */ | 5223 | /* Reset Initialization control block */ |
5533 | memset(icb, 0, ha->init_cb_size); | 5224 | memset(icb, 0, ha->init_cb_size); |
5534 | 5225 | ||
@@ -5552,9 +5243,9 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) | |||
5552 | memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac)); | 5243 | memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac)); |
5553 | /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */ | 5244 | /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */ |
5554 | if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) { | 5245 | if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) { |
5555 | icb->enode_mac[0] = 0x00; | 5246 | icb->enode_mac[0] = 0x01; |
5556 | icb->enode_mac[1] = 0xC0; | 5247 | icb->enode_mac[1] = 0x02; |
5557 | icb->enode_mac[2] = 0xDD; | 5248 | icb->enode_mac[2] = 0x03; |
5558 | icb->enode_mac[3] = 0x04; | 5249 | icb->enode_mac[3] = 0x04; |
5559 | icb->enode_mac[4] = 0x05; | 5250 | icb->enode_mac[4] = 0x05; |
5560 | icb->enode_mac[5] = 0x06 + ha->port_no; | 5251 | icb->enode_mac[5] = 0x06 + ha->port_no; |
@@ -5657,10 +5348,6 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) | |||
5657 | if (ql2xloginretrycount) | 5348 | if (ql2xloginretrycount) |
5658 | ha->login_retry_count = ql2xloginretrycount; | 5349 | ha->login_retry_count = ql2xloginretrycount; |
5659 | 5350 | ||
5660 | /* if not running MSI-X we need handshaking on interrupts */ | ||
5661 | if (!vha->hw->flags.msix_enabled && IS_QLA83XX(ha)) | ||
5662 | icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22); | ||
5663 | |||
5664 | /* Enable ZIO. */ | 5351 | /* Enable ZIO. */ |
5665 | if (!vha->flags.init_done) { | 5352 | if (!vha->flags.init_done) { |
5666 | ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & | 5353 | ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & |
@@ -5733,7 +5420,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) | |||
5733 | if ((vha->device_flags & DFLG_NO_CABLE)) | 5420 | if ((vha->device_flags & DFLG_NO_CABLE)) |
5734 | status = 0; | 5421 | status = 0; |
5735 | 5422 | ||
5736 | ql_log(ql_log_info, vha, 0x8000, | 5423 | ql_log(ql_log_info, vha, 0x803d, |
5737 | "Configure loop done, status = 0x%x.\n", status); | 5424 | "Configure loop done, status = 0x%x.\n", status); |
5738 | } | 5425 | } |
5739 | 5426 | ||
@@ -5756,7 +5443,11 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) | |||
5756 | clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); | 5443 | clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); |
5757 | 5444 | ||
5758 | /* Update the firmware version */ | 5445 | /* Update the firmware version */ |
5759 | status = qla82xx_check_md_needed(vha); | 5446 | qla2x00_get_fw_version(vha, &ha->fw_major_version, |
5447 | &ha->fw_minor_version, &ha->fw_subminor_version, | ||
5448 | &ha->fw_attributes, &ha->fw_memory_size, | ||
5449 | ha->mpi_version, &ha->mpi_capabilities, | ||
5450 | ha->phy_version); | ||
5760 | 5451 | ||
5761 | if (ha->fce) { | 5452 | if (ha->fce) { |
5762 | ha->flags.fce_enabled = 1; | 5453 | ha->flags.fce_enabled = 1; |
@@ -5766,7 +5457,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) | |||
5766 | ha->fce_dma, ha->fce_bufs, ha->fce_mb, | 5457 | ha->fce_dma, ha->fce_bufs, ha->fce_mb, |
5767 | &ha->fce_bufs); | 5458 | &ha->fce_bufs); |
5768 | if (rval) { | 5459 | if (rval) { |
5769 | ql_log(ql_log_warn, vha, 0x8001, | 5460 | ql_log(ql_log_warn, vha, 0x803e, |
5770 | "Unable to reinitialize FCE (%d).\n", | 5461 | "Unable to reinitialize FCE (%d).\n", |
5771 | rval); | 5462 | rval); |
5772 | ha->flags.fce_enabled = 0; | 5463 | ha->flags.fce_enabled = 0; |
@@ -5778,7 +5469,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) | |||
5778 | rval = qla2x00_enable_eft_trace(vha, | 5469 | rval = qla2x00_enable_eft_trace(vha, |
5779 | ha->eft_dma, EFT_NUM_BUFFERS); | 5470 | ha->eft_dma, EFT_NUM_BUFFERS); |
5780 | if (rval) { | 5471 | if (rval) { |
5781 | ql_log(ql_log_warn, vha, 0x8010, | 5472 | ql_log(ql_log_warn, vha, 0x803f, |
5782 | "Unable to reinitialize EFT (%d).\n", | 5473 | "Unable to reinitialize EFT (%d).\n", |
5783 | rval); | 5474 | rval); |
5784 | } | 5475 | } |
@@ -5786,7 +5477,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) | |||
5786 | } | 5477 | } |
5787 | 5478 | ||
5788 | if (!status) { | 5479 | if (!status) { |
5789 | ql_dbg(ql_dbg_taskm, vha, 0x8011, | 5480 | ql_dbg(ql_dbg_taskm, vha, 0x8040, |
5790 | "qla82xx_restart_isp succeeded.\n"); | 5481 | "qla82xx_restart_isp succeeded.\n"); |
5791 | 5482 | ||
5792 | spin_lock_irqsave(&ha->vport_slock, flags); | 5483 | spin_lock_irqsave(&ha->vport_slock, flags); |
@@ -5804,7 +5495,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) | |||
5804 | spin_unlock_irqrestore(&ha->vport_slock, flags); | 5495 | spin_unlock_irqrestore(&ha->vport_slock, flags); |
5805 | 5496 | ||
5806 | } else { | 5497 | } else { |
5807 | ql_log(ql_log_warn, vha, 0x8016, | 5498 | ql_log(ql_log_warn, vha, 0x8041, |
5808 | "qla82xx_restart_isp **** FAILED ****.\n"); | 5499 | "qla82xx_restart_isp **** FAILED ****.\n"); |
5809 | } | 5500 | } |
5810 | 5501 | ||
@@ -5951,26 +5642,13 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
5951 | if (priority < 0) | 5642 | if (priority < 0) |
5952 | return QLA_FUNCTION_FAILED; | 5643 | return QLA_FUNCTION_FAILED; |
5953 | 5644 | ||
5954 | if (IS_QLA82XX(vha->hw)) { | ||
5955 | fcport->fcp_prio = priority & 0xf; | ||
5956 | return QLA_SUCCESS; | ||
5957 | } | ||
5958 | |||
5959 | ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb); | 5645 | ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb); |
5960 | if (ret == QLA_SUCCESS) { | 5646 | if (ret == QLA_SUCCESS) |
5961 | if (fcport->fcp_prio != priority) | 5647 | fcport->fcp_prio = priority; |
5962 | ql_dbg(ql_dbg_user, vha, 0x709e, | 5648 | else |
5963 | "Updated FCP_CMND priority - value=%d loop_id=%d " | ||
5964 | "port_id=%02x%02x%02x.\n", priority, | ||
5965 | fcport->loop_id, fcport->d_id.b.domain, | ||
5966 | fcport->d_id.b.area, fcport->d_id.b.al_pa); | ||
5967 | fcport->fcp_prio = priority & 0xf; | ||
5968 | } else | ||
5969 | ql_dbg(ql_dbg_user, vha, 0x704f, | 5649 | ql_dbg(ql_dbg_user, vha, 0x704f, |
5970 | "Unable to update FCP_CMND priority - ret=0x%x for " | 5650 | "Unable to activate fcp priority, ret=0x%x.\n", ret); |
5971 | "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id, | 5651 | |
5972 | fcport->d_id.b.domain, fcport->d_id.b.area, | ||
5973 | fcport->d_id.b.al_pa); | ||
5974 | return ret; | 5652 | return ret; |
5975 | } | 5653 | } |
5976 | 5654 | ||
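In the removed (left-hand) version of qla24xx_update_fcport_fcp_prio() above, the looked-up priority is masked to the low four bits before it is cached in fcport->fcp_prio, a debug line is emitted only when the cached value actually changes, and 82xx parts skip the mailbox call entirely. A small sketch of that update rule; the mailbox call is a stub and the port structure is trimmed to the one field the logic needs:

#include <stdint.h>
#include <stdio.h>

struct fcport {
        uint8_t fcp_prio;
};

/* Stub for qla24xx_set_fcp_prio(); pretend the mailbox command succeeds. */
static int set_fcp_prio_mbx(int priority) { (void)priority; return 0; }

static int update_fcp_prio(struct fcport *f, int priority, int is_82xx)
{
        if (priority < 0)
                return -1;

        if (is_82xx) {                       /* no mailbox support: cache only */
                f->fcp_prio = priority & 0xf;
                return 0;
        }

        if (set_fcp_prio_mbx(priority) != 0) {
                printf("Unable to update FCP_CMND priority\n");
                return -1;
        }

        if (f->fcp_prio != priority)
                printf("Updated FCP_CMND priority - value=%d\n", priority);
        f->fcp_prio = priority & 0xf;
        return 0;
}

int main(void)
{
        struct fcport f = { 0 };

        update_fcp_prio(&f, 3, 0);
        update_fcp_prio(&f, 3, 0);           /* unchanged: no log line */
        return 0;
}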
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index c0462c04c88..9902834e0b7 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -57,20 +57,6 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize) | |||
57 | return fcp; | 57 | return fcp; |
58 | } | 58 | } |
59 | 59 | ||
60 | static inline void | ||
61 | qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha) | ||
62 | { | ||
63 | int i; | ||
64 | |||
65 | if (IS_FWI2_CAPABLE(ha)) | ||
66 | return; | ||
67 | |||
68 | for (i = 0; i < SNS_FIRST_LOOP_ID; i++) | ||
69 | set_bit(i, ha->loop_id_map); | ||
70 | set_bit(MANAGEMENT_SERVER, ha->loop_id_map); | ||
71 | set_bit(BROADCAST, ha->loop_id_map); | ||
72 | } | ||
73 | |||
74 | static inline int | 60 | static inline int |
75 | qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id) | 61 | qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id) |
76 | { | 62 | { |
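The removed qla2x00_set_reserved_loop_ids() in the hunk above pre-marks every loop ID below SNS_FIRST_LOOP_ID, plus the management-server and broadcast IDs, in a bitmap so the allocator and qla2x00_is_reserved_id() never hand them out on non-FWI2 adapters. A user-space sketch of the same bitmap bookkeeping; the specific ID values and map size are illustrative placeholders, not the driver's constants:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LOOP_ID_MAP_SIZE   256
#define SNS_FIRST_LOOP_ID  0x81   /* illustrative value */
#define MANAGEMENT_SERVER  0xfe   /* illustrative value */
#define BROADCAST          0xff   /* illustrative value */

static uint8_t loop_id_map[LOOP_ID_MAP_SIZE / 8];

static void set_id(unsigned id)  { loop_id_map[id / 8] |= 1u << (id % 8); }
static int  test_id(unsigned id) { return loop_id_map[id / 8] >> (id % 8) & 1; }

/* Mark the IDs that must never be handed to a regular remote port. */
static void set_reserved_loop_ids(void)
{
        for (unsigned i = 0; i < SNS_FIRST_LOOP_ID; i++)
                set_id(i);
        set_id(MANAGEMENT_SERVER);
        set_id(BROADCAST);
}

int main(void)
{
        memset(loop_id_map, 0, sizeof(loop_id_map));
        set_reserved_loop_ids();
        printf("id 0x10 reserved: %d, id 0x90 reserved: %d\n",
               test_id(0x10), test_id(0x90));
        return 0;
}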
@@ -83,34 +69,19 @@ qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id) | |||
83 | } | 69 | } |
84 | 70 | ||
85 | static inline void | 71 | static inline void |
86 | qla2x00_clear_loop_id(fc_port_t *fcport) { | ||
87 | struct qla_hw_data *ha = fcport->vha->hw; | ||
88 | |||
89 | if (fcport->loop_id == FC_NO_LOOP_ID || | ||
90 | qla2x00_is_reserved_id(fcport->vha, fcport->loop_id)) | ||
91 | return; | ||
92 | |||
93 | clear_bit(fcport->loop_id, ha->loop_id_map); | ||
94 | fcport->loop_id = FC_NO_LOOP_ID; | ||
95 | } | ||
96 | |||
97 | static inline void | ||
98 | qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp) | 72 | qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp) |
99 | { | 73 | { |
100 | struct dsd_dma *dsd_ptr, *tdsd_ptr; | 74 | struct dsd_dma *dsd_ptr, *tdsd_ptr; |
101 | struct crc_context *ctx; | ||
102 | |||
103 | ctx = (struct crc_context *)GET_CMD_CTX_SP(sp); | ||
104 | 75 | ||
105 | /* clean up allocated prev pool */ | 76 | /* clean up allocated prev pool */ |
106 | list_for_each_entry_safe(dsd_ptr, tdsd_ptr, | 77 | list_for_each_entry_safe(dsd_ptr, tdsd_ptr, |
107 | &ctx->dsd_list, list) { | 78 | &((struct crc_context *)sp->ctx)->dsd_list, list) { |
108 | dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, | 79 | dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, |
109 | dsd_ptr->dsd_list_dma); | 80 | dsd_ptr->dsd_list_dma); |
110 | list_del(&dsd_ptr->list); | 81 | list_del(&dsd_ptr->list); |
111 | kfree(dsd_ptr); | 82 | kfree(dsd_ptr); |
112 | } | 83 | } |
113 | INIT_LIST_HEAD(&ctx->dsd_list); | 84 | INIT_LIST_HEAD(&((struct crc_context *)sp->ctx)->dsd_list); |
114 | } | 85 | } |
115 | 86 | ||
116 | static inline void | 87 | static inline void |
@@ -142,7 +113,8 @@ qla2x00_hba_err_chk_enabled(srb_t *sp) | |||
142 | return 0; | 113 | return 0; |
143 | * | 114 | * |
144 | */ | 115 | */ |
145 | switch (scsi_get_prot_op(GET_CMD_SP(sp))) { | 116 | |
117 | switch (scsi_get_prot_op(sp->cmd)) { | ||
146 | case SCSI_PROT_READ_STRIP: | 118 | case SCSI_PROT_READ_STRIP: |
147 | case SCSI_PROT_WRITE_INSERT: | 119 | case SCSI_PROT_WRITE_INSERT: |
148 | if (ql2xenablehba_err_chk >= 1) | 120 | if (ql2xenablehba_err_chk >= 1) |
@@ -159,57 +131,3 @@ qla2x00_hba_err_chk_enabled(srb_t *sp) | |||
159 | } | 131 | } |
160 | return 0; | 132 | return 0; |
161 | } | 133 | } |
162 | |||
163 | static inline int | ||
164 | qla2x00_reset_active(scsi_qla_host_t *vha) | ||
165 | { | ||
166 | scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev); | ||
167 | |||
168 | /* Test appropriate base-vha and vha flags. */ | ||
169 | return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) || | ||
170 | test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || | ||
171 | test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || | ||
172 | test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || | ||
173 | test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); | ||
174 | } | ||
175 | |||
176 | static inline srb_t * | ||
177 | qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag) | ||
178 | { | ||
179 | srb_t *sp = NULL; | ||
180 | struct qla_hw_data *ha = vha->hw; | ||
181 | uint8_t bail; | ||
182 | |||
183 | QLA_VHA_MARK_BUSY(vha, bail); | ||
184 | if (unlikely(bail)) | ||
185 | return NULL; | ||
186 | |||
187 | sp = mempool_alloc(ha->srb_mempool, flag); | ||
188 | if (!sp) | ||
189 | goto done; | ||
190 | |||
191 | memset(sp, 0, sizeof(*sp)); | ||
192 | sp->fcport = fcport; | ||
193 | sp->iocbs = 1; | ||
194 | done: | ||
195 | if (!sp) | ||
196 | QLA_VHA_MARK_NOT_BUSY(vha); | ||
197 | return sp; | ||
198 | } | ||
199 | |||
200 | static inline void | ||
201 | qla2x00_init_timer(srb_t *sp, unsigned long tmo) | ||
202 | { | ||
203 | init_timer(&sp->u.iocb_cmd.timer); | ||
204 | sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ; | ||
205 | sp->u.iocb_cmd.timer.data = (unsigned long)sp; | ||
206 | sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout; | ||
207 | add_timer(&sp->u.iocb_cmd.timer); | ||
208 | sp->free = qla2x00_sp_free; | ||
209 | } | ||
210 | |||
211 | static inline int | ||
212 | qla2x00_gid_list_size(struct qla_hw_data *ha) | ||
213 | { | ||
214 | return sizeof(struct gid_list_info) * ha->max_fibre_devices; | ||
215 | } | ||
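The removed qla2x00_get_sp() above marks the host busy before touching the mempool, zeroes the srb, and undoes the busy mark if the allocation fails, so the busy count cannot leak on the error path; qla2x00_init_timer() then arms a per-command timer. A sketch of that mark-allocate-unwind sequence with the mempool and vha bookkeeping replaced by a counter and calloc; QLA_VHA_MARK_BUSY/NOT_BUSY and the timer setup are only modeled here, not reproduced:

#include <stdio.h>
#include <stdlib.h>

struct srb {
        int iocbs;
        /* timer, fcport, completion callbacks: omitted in this model */
};

static int vha_busy;                         /* stands in for the vha busy count */

static struct srb *get_sp(void)
{
        vha_busy++;                          /* QLA_VHA_MARK_BUSY() */

        struct srb *sp = calloc(1, sizeof(*sp));   /* mempool_alloc() + memset */
        if (!sp) {
                vha_busy--;                  /* QLA_VHA_MARK_NOT_BUSY() on failure */
                return NULL;
        }

        sp->iocbs = 1;
        return sp;
}

int main(void)
{
        struct srb *sp = get_sp();

        printf("sp=%p busy=%d\n", (void *)sp, vha_busy);
        free(sp);
        return 0;
}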
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index a481684479c..dbec89622a0 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c | |||
@@ -1,17 +1,18 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | #include "qla_def.h" | 7 | #include "qla_def.h" |
8 | #include "qla_target.h" | ||
9 | 8 | ||
10 | #include <linux/blkdev.h> | 9 | #include <linux/blkdev.h> |
11 | #include <linux/delay.h> | 10 | #include <linux/delay.h> |
12 | 11 | ||
13 | #include <scsi/scsi_tcq.h> | 12 | #include <scsi/scsi_tcq.h> |
14 | 13 | ||
14 | static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *); | ||
15 | |||
15 | static void qla25xx_set_que(srb_t *, struct rsp_que **); | 16 | static void qla25xx_set_que(srb_t *, struct rsp_que **); |
16 | /** | 17 | /** |
17 | * qla2x00_get_cmd_direction() - Determine control_flag data direction. | 18 | * qla2x00_get_cmd_direction() - Determine control_flag data direction. |
@@ -23,18 +24,18 @@ static inline uint16_t | |||
23 | qla2x00_get_cmd_direction(srb_t *sp) | 24 | qla2x00_get_cmd_direction(srb_t *sp) |
24 | { | 25 | { |
25 | uint16_t cflags; | 26 | uint16_t cflags; |
26 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); | ||
27 | struct scsi_qla_host *vha = sp->fcport->vha; | ||
28 | 27 | ||
29 | cflags = 0; | 28 | cflags = 0; |
30 | 29 | ||
31 | /* Set transfer direction */ | 30 | /* Set transfer direction */ |
32 | if (cmd->sc_data_direction == DMA_TO_DEVICE) { | 31 | if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) { |
33 | cflags = CF_WRITE; | 32 | cflags = CF_WRITE; |
34 | vha->qla_stats.output_bytes += scsi_bufflen(cmd); | 33 | sp->fcport->vha->hw->qla_stats.output_bytes += |
35 | } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { | 34 | scsi_bufflen(sp->cmd); |
35 | } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) { | ||
36 | cflags = CF_READ; | 36 | cflags = CF_READ; |
37 | vha->qla_stats.input_bytes += scsi_bufflen(cmd); | 37 | sp->fcport->vha->hw->qla_stats.input_bytes += |
38 | scsi_bufflen(sp->cmd); | ||
38 | } | 39 | } |
39 | return (cflags); | 40 | return (cflags); |
40 | } | 41 | } |
@@ -119,10 +120,11 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha) | |||
119 | * Returns a pointer to the continuation type 1 IOCB packet. | 120 | * Returns a pointer to the continuation type 1 IOCB packet. |
120 | */ | 121 | */ |
121 | static inline cont_a64_entry_t * | 122 | static inline cont_a64_entry_t * |
122 | qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req) | 123 | qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha) |
123 | { | 124 | { |
124 | cont_a64_entry_t *cont_pkt; | 125 | cont_a64_entry_t *cont_pkt; |
125 | 126 | ||
127 | struct req_que *req = vha->req; | ||
126 | /* Adjust ring index. */ | 128 | /* Adjust ring index. */ |
127 | req->ring_index++; | 129 | req->ring_index++; |
128 | if (req->ring_index == req->length) { | 130 | if (req->ring_index == req->length) { |
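qla2x00_prep_cont_type1_iocb() above steps the request-queue ring index and, when the index reaches the ring length, wraps it to zero and rewinds the ring pointer to the base of the ring: the standard producer-side step for these circular IOCB queues. A stand-alone sketch of that wrap logic on an array-backed ring; entry contents are irrelevant here, so plain integers stand in for IOCB packets:

#include <stdio.h>

struct req_que {
        int ring[4];         /* tiny ring for illustration */
        int length;
        int ring_index;
        int *ring_ptr;
};

/* Advance to the next request entry, wrapping at the end of the ring. */
static int *next_ring_entry(struct req_que *req)
{
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }
        return req->ring_ptr;
}

int main(void)
{
        struct req_que req = { {0, 1, 2, 3}, 4, 0, NULL };

        req.ring_ptr = req.ring;
        for (int i = 0; i < 6; i++)
                printf("index=%d\n", (int)(next_ring_entry(&req) - req.ring));
        return 0;
}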
@@ -144,14 +146,20 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req) | |||
144 | static inline int | 146 | static inline int |
145 | qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts) | 147 | qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts) |
146 | { | 148 | { |
147 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); | 149 | uint8_t guard = scsi_host_get_guard(sp->cmd->device->host); |
148 | uint8_t guard = scsi_host_get_guard(cmd->device->host); | 150 | |
151 | /* We only support T10 DIF right now */ | ||
152 | if (guard != SHOST_DIX_GUARD_CRC) { | ||
153 | ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007, | ||
154 | "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd); | ||
155 | return 0; | ||
156 | } | ||
149 | 157 | ||
150 | /* We always use DIFF Bundling for best performance */ | 158 | /* We always use DIFF Bundling for best performance */ |
151 | *fw_prot_opts = 0; | 159 | *fw_prot_opts = 0; |
152 | 160 | ||
153 | /* Translate SCSI opcode to a protection opcode */ | 161 | /* Translate SCSI opcode to a protection opcode */ |
154 | switch (scsi_get_prot_op(cmd)) { | 162 | switch (scsi_get_prot_op(sp->cmd)) { |
155 | case SCSI_PROT_READ_STRIP: | 163 | case SCSI_PROT_READ_STRIP: |
156 | *fw_prot_opts |= PO_MODE_DIF_REMOVE; | 164 | *fw_prot_opts |= PO_MODE_DIF_REMOVE; |
157 | break; | 165 | break; |
@@ -165,18 +173,17 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts) | |||
165 | *fw_prot_opts |= PO_MODE_DIF_REMOVE; | 173 | *fw_prot_opts |= PO_MODE_DIF_REMOVE; |
166 | break; | 174 | break; |
167 | case SCSI_PROT_READ_PASS: | 175 | case SCSI_PROT_READ_PASS: |
176 | *fw_prot_opts |= PO_MODE_DIF_PASS; | ||
177 | break; | ||
168 | case SCSI_PROT_WRITE_PASS: | 178 | case SCSI_PROT_WRITE_PASS: |
169 | if (guard & SHOST_DIX_GUARD_IP) | 179 | *fw_prot_opts |= PO_MODE_DIF_PASS; |
170 | *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM; | ||
171 | else | ||
172 | *fw_prot_opts |= PO_MODE_DIF_PASS; | ||
173 | break; | 180 | break; |
174 | default: /* Normal Request */ | 181 | default: /* Normal Request */ |
175 | *fw_prot_opts |= PO_MODE_DIF_PASS; | 182 | *fw_prot_opts |= PO_MODE_DIF_PASS; |
176 | break; | 183 | break; |
177 | } | 184 | } |
178 | 185 | ||
179 | return scsi_prot_sg_count(cmd); | 186 | return scsi_prot_sg_count(sp->cmd); |
180 | } | 187 | } |
181 | 188 | ||
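qla24xx_configure_prot_mode() above maps the SCSI midlayer protection operation to a firmware DIF mode and returns the protection scatter-gather count: strip operations select a remove mode, pass-through (and the default case) a pass mode, with the left-hand version also choosing a TCP-checksum variant when the host uses IP guard; the insert cases, not visible in this hunk, presumably select an insert mode. A table-style sketch of just that mapping; the enum values are placeholders, not the firmware encoding:

#include <stdio.h>

enum prot_op  { PROT_NORMAL, PROT_READ_STRIP, PROT_WRITE_INSERT,
                PROT_READ_INSERT, PROT_WRITE_STRIP,
                PROT_READ_PASS, PROT_WRITE_PASS };
enum dif_mode { DIF_PASS, DIF_REMOVE, DIF_INSERT };   /* placeholder values */

static enum dif_mode prot_to_dif(enum prot_op op)
{
        switch (op) {
        case PROT_READ_STRIP:
        case PROT_WRITE_STRIP:
                return DIF_REMOVE;       /* PI present on the wire, stripped off */
        case PROT_READ_INSERT:
        case PROT_WRITE_INSERT:
                return DIF_INSERT;       /* PI generated on the way through */
        case PROT_READ_PASS:
        case PROT_WRITE_PASS:
        default:
                return DIF_PASS;         /* PI passed through unchanged */
        }
}

int main(void)
{
        printf("READ_STRIP -> %d\n", prot_to_dif(PROT_READ_STRIP));
        printf("WRITE_PASS -> %d\n", prot_to_dif(PROT_WRITE_PASS));
        return 0;
}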
182 | /* | 189 | /* |
@@ -197,7 +204,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, | |||
197 | struct scatterlist *sg; | 204 | struct scatterlist *sg; |
198 | int i; | 205 | int i; |
199 | 206 | ||
200 | cmd = GET_CMD_SP(sp); | 207 | cmd = sp->cmd; |
201 | 208 | ||
202 | /* Update entry type to indicate Command Type 2 IOCB */ | 209 | /* Update entry type to indicate Command Type 2 IOCB */ |
203 | *((uint32_t *)(&cmd_pkt->entry_type)) = | 210 | *((uint32_t *)(&cmd_pkt->entry_type)) = |
@@ -255,7 +262,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, | |||
255 | struct scatterlist *sg; | 262 | struct scatterlist *sg; |
256 | int i; | 263 | int i; |
257 | 264 | ||
258 | cmd = GET_CMD_SP(sp); | 265 | cmd = sp->cmd; |
259 | 266 | ||
260 | /* Update entry type to indicate Command Type 3 IOCB */ | 267 | /* Update entry type to indicate Command Type 3 IOCB */ |
261 | *((uint32_t *)(&cmd_pkt->entry_type)) = | 268 | *((uint32_t *)(&cmd_pkt->entry_type)) = |
@@ -285,7 +292,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, | |||
285 | * Five DSDs are available in the Continuation | 292 | * Five DSDs are available in the Continuation |
286 | * Type 1 IOCB. | 293 | * Type 1 IOCB. |
287 | */ | 294 | */ |
288 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); | 295 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha); |
289 | cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; | 296 | cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; |
290 | avail_dsds = 5; | 297 | avail_dsds = 5; |
291 | } | 298 | } |
@@ -329,7 +336,7 @@ qla2x00_start_scsi(srb_t *sp) | |||
329 | vha = sp->fcport->vha; | 336 | vha = sp->fcport->vha; |
330 | ha = vha->hw; | 337 | ha = vha->hw; |
331 | reg = &ha->iobase->isp; | 338 | reg = &ha->iobase->isp; |
332 | cmd = GET_CMD_SP(sp); | 339 | cmd = sp->cmd; |
333 | req = ha->req_q_map[0]; | 340 | req = ha->req_q_map[0]; |
334 | rsp = ha->rsp_q_map[0]; | 341 | rsp = ha->rsp_q_map[0]; |
335 | /* So we know we haven't pci_map'ed anything yet */ | 342 | /* So we know we haven't pci_map'ed anything yet */ |
@@ -379,16 +386,15 @@ qla2x00_start_scsi(srb_t *sp) | |||
379 | else | 386 | else |
380 | req->cnt = req->length - | 387 | req->cnt = req->length - |
381 | (req->ring_index - cnt); | 388 | (req->ring_index - cnt); |
382 | /* If still no head room then bail out */ | ||
383 | if (req->cnt < (req_cnt + 2)) | ||
384 | goto queuing_error; | ||
385 | } | 389 | } |
390 | if (req->cnt < (req_cnt + 2)) | ||
391 | goto queuing_error; | ||
386 | 392 | ||
387 | /* Build command packet */ | 393 | /* Build command packet */ |
388 | req->current_outstanding_cmd = handle; | 394 | req->current_outstanding_cmd = handle; |
389 | req->outstanding_cmds[handle] = sp; | 395 | req->outstanding_cmds[handle] = sp; |
390 | sp->handle = handle; | 396 | sp->handle = handle; |
391 | cmd->host_scribble = (unsigned char *)(unsigned long)handle; | 397 | sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; |
392 | req->cnt -= req_cnt; | 398 | req->cnt -= req_cnt; |
393 | 399 | ||
394 | cmd_pkt = (cmd_entry_t *)req->ring_ptr; | 400 | cmd_pkt = (cmd_entry_t *)req->ring_ptr; |
@@ -400,7 +406,7 @@ qla2x00_start_scsi(srb_t *sp) | |||
400 | 406 | ||
401 | /* Set target ID and LUN number*/ | 407 | /* Set target ID and LUN number*/ |
402 | SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id); | 408 | SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id); |
403 | cmd_pkt->lun = cpu_to_le16(cmd->device->lun); | 409 | cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun); |
404 | 410 | ||
405 | /* Update tagged queuing modifier */ | 411 | /* Update tagged queuing modifier */ |
406 | if (scsi_populate_tag_msg(cmd, tag)) { | 412 | if (scsi_populate_tag_msg(cmd, tag)) { |
@@ -463,41 +469,6 @@ queuing_error: | |||
463 | } | 469 | } |
464 | 470 | ||
465 | /** | 471 | /** |
466 | * qla2x00_start_iocbs() - Execute the IOCB command | ||
467 | */ | ||
468 | void | ||
469 | qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) | ||
470 | { | ||
471 | struct qla_hw_data *ha = vha->hw; | ||
472 | device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); | ||
473 | |||
474 | if (IS_QLA82XX(ha)) { | ||
475 | qla82xx_start_iocbs(vha); | ||
476 | } else { | ||
477 | /* Adjust ring index. */ | ||
478 | req->ring_index++; | ||
479 | if (req->ring_index == req->length) { | ||
480 | req->ring_index = 0; | ||
481 | req->ring_ptr = req->ring; | ||
482 | } else | ||
483 | req->ring_ptr++; | ||
484 | |||
485 | /* Set chip new ring index. */ | ||
486 | if (ha->mqenable || IS_QLA83XX(ha)) { | ||
487 | WRT_REG_DWORD(req->req_q_in, req->ring_index); | ||
488 | RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); | ||
489 | } else if (IS_FWI2_CAPABLE(ha)) { | ||
490 | WRT_REG_DWORD(®->isp24.req_q_in, req->ring_index); | ||
491 | RD_REG_DWORD_RELAXED(®->isp24.req_q_in); | ||
492 | } else { | ||
493 | WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), | ||
494 | req->ring_index); | ||
495 | RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp)); | ||
496 | } | ||
497 | } | ||
498 | } | ||
499 | |||
500 | /** | ||
501 | * qla2x00_marker() - Send a marker IOCB to the firmware. | 472 | * qla2x00_marker() - Send a marker IOCB to the firmware. |
502 | * @ha: HA context | 473 | * @ha: HA context |
503 | * @loop_id: loop ID | 474 | * @loop_id: loop ID |
@@ -519,8 +490,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, | |||
519 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); | 490 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); |
520 | 491 | ||
521 | mrk24 = NULL; | 492 | mrk24 = NULL; |
522 | req = ha->req_q_map[0]; | 493 | mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0); |
523 | mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL); | ||
524 | if (mrk == NULL) { | 494 | if (mrk == NULL) { |
525 | ql_log(ql_log_warn, base_vha, 0x3026, | 495 | ql_log(ql_log_warn, base_vha, 0x3026, |
526 | "Failed to allocate Marker IOCB.\n"); | 496 | "Failed to allocate Marker IOCB.\n"); |
@@ -546,7 +516,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, | |||
546 | } | 516 | } |
547 | wmb(); | 517 | wmb(); |
548 | 518 | ||
549 | qla2x00_start_iocbs(vha, req); | 519 | qla2x00_isp_cmd(vha, req); |
550 | 520 | ||
551 | return (QLA_SUCCESS); | 521 | return (QLA_SUCCESS); |
552 | } | 522 | } |
@@ -566,27 +536,66 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, | |||
566 | return (ret); | 536 | return (ret); |
567 | } | 537 | } |
568 | 538 | ||
569 | /* | 539 | /** |
570 | * qla2x00_issue_marker | 540 | * qla2x00_isp_cmd() - Modify the request ring pointer. |
541 | * @ha: HA context | ||
571 | * | 542 | * |
572 | * Issue marker | 543 | * Note: The caller must hold the hardware lock before calling this routine. |
573 | * Caller CAN have hardware lock held as specified by ha_locked parameter. | ||
574 | * Might release it, then reacquire. | ||
575 | */ | 544 | */ |
576 | int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked) | 545 | static void |
546 | qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req) | ||
577 | { | 547 | { |
578 | if (ha_locked) { | 548 | struct qla_hw_data *ha = vha->hw; |
579 | if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0, | 549 | device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); |
580 | MK_SYNC_ALL) != QLA_SUCCESS) | 550 | struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; |
581 | return QLA_FUNCTION_FAILED; | 551 | |
552 | ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d, | ||
553 | "IOCB data:\n"); | ||
554 | ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e, | ||
555 | (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE); | ||
556 | |||
557 | /* Adjust ring index. */ | ||
558 | req->ring_index++; | ||
559 | if (req->ring_index == req->length) { | ||
560 | req->ring_index = 0; | ||
561 | req->ring_ptr = req->ring; | ||
562 | } else | ||
563 | req->ring_ptr++; | ||
564 | |||
565 | /* Set chip new ring index. */ | ||
566 | if (IS_QLA82XX(ha)) { | ||
567 | uint32_t dbval = 0x04 | (ha->portnum << 5); | ||
568 | |||
569 | /* write, read and verify logic */ | ||
570 | dbval = dbval | (req->id << 8) | (req->ring_index << 16); | ||
571 | if (ql2xdbwr) | ||
572 | qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval); | ||
573 | else { | ||
574 | WRT_REG_DWORD( | ||
575 | (unsigned long __iomem *)ha->nxdb_wr_ptr, | ||
576 | dbval); | ||
577 | wmb(); | ||
578 | while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { | ||
579 | WRT_REG_DWORD((unsigned long __iomem *) | ||
580 | ha->nxdb_wr_ptr, dbval); | ||
581 | wmb(); | ||
582 | } | ||
583 | } | ||
584 | } else if (ha->mqenable) { | ||
585 | /* Set chip new ring index. */ | ||
586 | WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index); | ||
587 | RD_REG_DWORD(&ioreg->hccr); | ||
582 | } else { | 588 | } else { |
583 | if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0, | 589 | if (IS_FWI2_CAPABLE(ha)) { |
584 | MK_SYNC_ALL) != QLA_SUCCESS) | 590 | WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index); |
585 | return QLA_FUNCTION_FAILED; | 591 | RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in); |
592 | } else { | ||
593 | WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), | ||
594 | req->ring_index); | ||
595 | RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp)); | ||
596 | } | ||
586 | } | 597 | } |
587 | vha->marker_needed = 0; | ||
588 | 598 | ||
589 | return QLA_SUCCESS; | ||
590 | } | 599 | } |
591 | 600 | ||
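The old qla2x00_isp_cmd() shown above rings the request-queue doorbell differently per chip family; on ISP82xx it composes a doorbell value from the port number, queue id and ring index, writes it, and then re-reads the doorbell until the value sticks. A sketch of that compose-write-verify loop against an in-memory "register"; the bit positions 5/8/16 are those in the hunk above, while the volatile variables and the fake read-back path merely stand in for the PCI doorbell window:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t nxdb_wr;       /* models ha->nxdb_wr_ptr */
static volatile uint32_t nxdb_rd;       /* models ha->nxdb_rd_ptr */

/* Pretend hardware: reading back simply reflects the last write. */
static void     wrt_reg(volatile uint32_t *r, uint32_t v) { *r = v; nxdb_rd = v; }
static uint32_t rd_reg(volatile uint32_t *r)              { return *r; }

static void ring_82xx_doorbell(uint16_t portnum, uint16_t qid, uint16_t ring_index)
{
        uint32_t dbval = 0x04 | (portnum << 5) | (qid << 8) | (ring_index << 16);

        wrt_reg(&nxdb_wr, dbval);
        /* Re-write until the read-back path reports the value we posted. */
        while (rd_reg(&nxdb_rd) != dbval)
                wrt_reg(&nxdb_wr, dbval);
}

int main(void)
{
        ring_82xx_doorbell(1, 0, 7);
        printf("doorbell=0x%x\n", nxdb_rd);
        return 0;
}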
592 | /** | 601 | /** |
@@ -611,119 +620,6 @@ qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds) | |||
611 | return iocbs; | 620 | return iocbs; |
612 | } | 621 | } |
613 | 622 | ||
614 | static inline int | ||
615 | qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, | ||
616 | uint16_t tot_dsds) | ||
617 | { | ||
618 | uint32_t *cur_dsd = NULL; | ||
619 | scsi_qla_host_t *vha; | ||
620 | struct qla_hw_data *ha; | ||
621 | struct scsi_cmnd *cmd; | ||
622 | struct scatterlist *cur_seg; | ||
623 | uint32_t *dsd_seg; | ||
624 | void *next_dsd; | ||
625 | uint8_t avail_dsds; | ||
626 | uint8_t first_iocb = 1; | ||
627 | uint32_t dsd_list_len; | ||
628 | struct dsd_dma *dsd_ptr; | ||
629 | struct ct6_dsd *ctx; | ||
630 | |||
631 | cmd = GET_CMD_SP(sp); | ||
632 | |||
633 | /* Update entry type to indicate Command Type 3 IOCB */ | ||
634 | *((uint32_t *)(&cmd_pkt->entry_type)) = | ||
635 | __constant_cpu_to_le32(COMMAND_TYPE_6); | ||
636 | |||
637 | /* No data transfer */ | ||
638 | if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { | ||
639 | cmd_pkt->byte_count = __constant_cpu_to_le32(0); | ||
640 | return 0; | ||
641 | } | ||
642 | |||
643 | vha = sp->fcport->vha; | ||
644 | ha = vha->hw; | ||
645 | |||
646 | /* Set transfer direction */ | ||
647 | if (cmd->sc_data_direction == DMA_TO_DEVICE) { | ||
648 | cmd_pkt->control_flags = | ||
649 | __constant_cpu_to_le16(CF_WRITE_DATA); | ||
650 | vha->qla_stats.output_bytes += scsi_bufflen(cmd); | ||
651 | } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { | ||
652 | cmd_pkt->control_flags = | ||
653 | __constant_cpu_to_le16(CF_READ_DATA); | ||
654 | vha->qla_stats.input_bytes += scsi_bufflen(cmd); | ||
655 | } | ||
656 | |||
657 | cur_seg = scsi_sglist(cmd); | ||
658 | ctx = GET_CMD_CTX_SP(sp); | ||
659 | |||
660 | while (tot_dsds) { | ||
661 | avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ? | ||
662 | QLA_DSDS_PER_IOCB : tot_dsds; | ||
663 | tot_dsds -= avail_dsds; | ||
664 | dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE; | ||
665 | |||
666 | dsd_ptr = list_first_entry(&ha->gbl_dsd_list, | ||
667 | struct dsd_dma, list); | ||
668 | next_dsd = dsd_ptr->dsd_addr; | ||
669 | list_del(&dsd_ptr->list); | ||
670 | ha->gbl_dsd_avail--; | ||
671 | list_add_tail(&dsd_ptr->list, &ctx->dsd_list); | ||
672 | ctx->dsd_use_cnt++; | ||
673 | ha->gbl_dsd_inuse++; | ||
674 | |||
675 | if (first_iocb) { | ||
676 | first_iocb = 0; | ||
677 | dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; | ||
678 | *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); | ||
679 | *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); | ||
680 | cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len); | ||
681 | } else { | ||
682 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); | ||
683 | *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); | ||
684 | *cur_dsd++ = cpu_to_le32(dsd_list_len); | ||
685 | } | ||
686 | cur_dsd = (uint32_t *)next_dsd; | ||
687 | while (avail_dsds) { | ||
688 | dma_addr_t sle_dma; | ||
689 | |||
690 | sle_dma = sg_dma_address(cur_seg); | ||
691 | *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); | ||
692 | *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); | ||
693 | *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg)); | ||
694 | cur_seg = sg_next(cur_seg); | ||
695 | avail_dsds--; | ||
696 | } | ||
697 | } | ||
698 | |||
699 | /* Null termination */ | ||
700 | *cur_dsd++ = 0; | ||
701 | *cur_dsd++ = 0; | ||
702 | *cur_dsd++ = 0; | ||
703 | cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE; | ||
704 | return 0; | ||
705 | } | ||
706 | |||
707 | /* | ||
708 | * qla24xx_calc_dsd_lists() - Determine number of DSD list required | ||
709 | * for Command Type 6. | ||
710 | * | ||
711 | * @dsds: number of data segment descriptors needed | ||
712 | * | ||
713 | * Returns the number of dsd list needed to store @dsds. | ||
714 | */ | ||
715 | inline uint16_t | ||
716 | qla24xx_calc_dsd_lists(uint16_t dsds) | ||
717 | { | ||
718 | uint16_t dsd_lists = 0; | ||
719 | |||
720 | dsd_lists = (dsds/QLA_DSDS_PER_IOCB); | ||
721 | if (dsds % QLA_DSDS_PER_IOCB) | ||
722 | dsd_lists++; | ||
723 | return dsd_lists; | ||
724 | } | ||
725 | |||
726 | |||
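The removed qla24xx_calc_dsd_lists() above is just a ceiling division: the number of DSD lists needed to hold a given number of data segment descriptors at QLA_DSDS_PER_IOCB descriptors per list. The same calculation written as a one-liner with a couple of checks; the per-IOCB count used here is a placeholder, not the driver's constant:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DSDS_PER_IOCB 37u   /* placeholder for QLA_DSDS_PER_IOCB */

static uint16_t calc_dsd_lists(uint16_t dsds)
{
        return (dsds + DSDS_PER_IOCB - 1) / DSDS_PER_IOCB;   /* ceil(dsds / per-IOCB) */
}

int main(void)
{
        assert(calc_dsd_lists(1) == 1);
        assert(calc_dsd_lists(DSDS_PER_IOCB) == 1);
        assert(calc_dsd_lists(DSDS_PER_IOCB + 1) == 2);
        printf("ok\n");
        return 0;
}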
727 | /** | 623 | /** |
728 | * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7 | 624 | * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7 |
729 | * IOCB types. | 625 | * IOCB types. |
@@ -744,7 +640,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, | |||
744 | int i; | 640 | int i; |
745 | struct req_que *req; | 641 | struct req_que *req; |
746 | 642 | ||
747 | cmd = GET_CMD_SP(sp); | 643 | cmd = sp->cmd; |
748 | 644 | ||
749 | /* Update entry type to indicate Command Type 3 IOCB */ | 645 | /* Update entry type to indicate Command Type 3 IOCB */ |
750 | *((uint32_t *)(&cmd_pkt->entry_type)) = | 646 | *((uint32_t *)(&cmd_pkt->entry_type)) = |
@@ -763,11 +659,13 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, | |||
763 | if (cmd->sc_data_direction == DMA_TO_DEVICE) { | 659 | if (cmd->sc_data_direction == DMA_TO_DEVICE) { |
764 | cmd_pkt->task_mgmt_flags = | 660 | cmd_pkt->task_mgmt_flags = |
765 | __constant_cpu_to_le16(TMF_WRITE_DATA); | 661 | __constant_cpu_to_le16(TMF_WRITE_DATA); |
766 | vha->qla_stats.output_bytes += scsi_bufflen(cmd); | 662 | sp->fcport->vha->hw->qla_stats.output_bytes += |
663 | scsi_bufflen(sp->cmd); | ||
767 | } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { | 664 | } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { |
768 | cmd_pkt->task_mgmt_flags = | 665 | cmd_pkt->task_mgmt_flags = |
769 | __constant_cpu_to_le16(TMF_READ_DATA); | 666 | __constant_cpu_to_le16(TMF_READ_DATA); |
770 | vha->qla_stats.input_bytes += scsi_bufflen(cmd); | 667 | sp->fcport->vha->hw->qla_stats.input_bytes += |
668 | scsi_bufflen(sp->cmd); | ||
771 | } | 669 | } |
772 | 670 | ||
773 | /* One DSD is available in the Command Type 3 IOCB */ | 671 | /* One DSD is available in the Command Type 3 IOCB */ |
@@ -786,7 +684,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, | |||
786 | * Five DSDs are available in the Continuation | 684 | * Five DSDs are available in the Continuation |
787 | * Type 1 IOCB. | 685 | * Type 1 IOCB. |
788 | */ | 686 | */ |
789 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); | 687 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha); |
790 | cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; | 688 | cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; |
791 | avail_dsds = 5; | 689 | avail_dsds = 5; |
792 | } | 690 | } |
@@ -814,7 +712,8 @@ static inline void | |||
814 | qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, | 712 | qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, |
815 | unsigned int protcnt) | 713 | unsigned int protcnt) |
816 | { | 714 | { |
817 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); | 715 | struct scsi_cmnd *cmd = sp->cmd; |
716 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); | ||
818 | 717 | ||
819 | switch (scsi_get_prot_type(cmd)) { | 718 | switch (scsi_get_prot_type(cmd)) { |
820 | case SCSI_PROT_DIF_TYPE0: | 719 | case SCSI_PROT_DIF_TYPE0: |
@@ -884,6 +783,12 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, | |||
884 | pkt->ref_tag_mask[3] = 0xff; | 783 | pkt->ref_tag_mask[3] = 0xff; |
885 | break; | 784 | break; |
886 | } | 785 | } |
786 | |||
787 | ql_dbg(ql_dbg_io, vha, 0x3009, | ||
788 | "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, " | ||
789 | "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n", | ||
790 | pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd), | ||
791 | scsi_get_prot_type(cmd), cmd); | ||
887 | } | 792 | } |
888 | 793 | ||
889 | struct qla2_sgx { | 794 | struct qla2_sgx { |
@@ -962,16 +867,16 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, | |||
962 | struct qla2_sgx sgx; | 867 | struct qla2_sgx sgx; |
963 | dma_addr_t sle_dma; | 868 | dma_addr_t sle_dma; |
964 | uint32_t sle_dma_len, tot_prot_dma_len = 0; | 869 | uint32_t sle_dma_len, tot_prot_dma_len = 0; |
965 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); | 870 | struct scsi_cmnd *cmd = sp->cmd; |
966 | 871 | ||
967 | prot_int = cmd->device->sector_size; | 872 | prot_int = cmd->device->sector_size; |
968 | 873 | ||
969 | memset(&sgx, 0, sizeof(struct qla2_sgx)); | 874 | memset(&sgx, 0, sizeof(struct qla2_sgx)); |
970 | sgx.tot_bytes = scsi_bufflen(cmd); | 875 | sgx.tot_bytes = scsi_bufflen(sp->cmd); |
971 | sgx.cur_sg = scsi_sglist(cmd); | 876 | sgx.cur_sg = scsi_sglist(sp->cmd); |
972 | sgx.sp = sp; | 877 | sgx.sp = sp; |
973 | 878 | ||
974 | sg_prot = scsi_prot_sglist(cmd); | 879 | sg_prot = scsi_prot_sglist(sp->cmd); |
975 | 880 | ||
976 | while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { | 881 | while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { |
977 | 882 | ||
@@ -1005,7 +910,7 @@ alloc_and_fill: | |||
1005 | } | 910 | } |
1006 | 911 | ||
1007 | list_add_tail(&dsd_ptr->list, | 912 | list_add_tail(&dsd_ptr->list, |
1008 | &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); | 913 | &((struct crc_context *)sp->ctx)->dsd_list); |
1009 | 914 | ||
1010 | sp->flags |= SRB_CRC_CTX_DSD_VALID; | 915 | sp->flags |= SRB_CRC_CTX_DSD_VALID; |
1011 | 916 | ||
@@ -1041,7 +946,6 @@ alloc_and_fill: | |||
1041 | *cur_dsd++ = 0; | 946 | *cur_dsd++ = 0; |
1042 | return 0; | 947 | return 0; |
1043 | } | 948 | } |
1044 | |||
1045 | static int | 949 | static int |
1046 | qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, | 950 | qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, |
1047 | uint16_t tot_dsds) | 951 | uint16_t tot_dsds) |
@@ -1054,9 +958,11 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, | |||
1054 | uint32_t *cur_dsd = dsd; | 958 | uint32_t *cur_dsd = dsd; |
1055 | int i; | 959 | int i; |
1056 | uint16_t used_dsds = tot_dsds; | 960 | uint16_t used_dsds = tot_dsds; |
1057 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); | 961 | scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host); |
1058 | 962 | ||
1059 | scsi_for_each_sg(cmd, sg, tot_dsds, i) { | 963 | uint8_t *cp; |
964 | |||
965 | scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) { | ||
1060 | dma_addr_t sle_dma; | 966 | dma_addr_t sle_dma; |
1061 | 967 | ||
1062 | /* Allocate additional continuation packets? */ | 968 | /* Allocate additional continuation packets? */ |
@@ -1086,7 +992,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, | |||
1086 | } | 992 | } |
1087 | 993 | ||
1088 | list_add_tail(&dsd_ptr->list, | 994 | list_add_tail(&dsd_ptr->list, |
1089 | &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); | 995 | &((struct crc_context *)sp->ctx)->dsd_list); |
1090 | 996 | ||
1091 | sp->flags |= SRB_CRC_CTX_DSD_VALID; | 997 | sp->flags |= SRB_CRC_CTX_DSD_VALID; |
1092 | 998 | ||
@@ -1097,12 +1003,20 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, | |||
1097 | cur_dsd = (uint32_t *)next_dsd; | 1003 | cur_dsd = (uint32_t *)next_dsd; |
1098 | } | 1004 | } |
1099 | sle_dma = sg_dma_address(sg); | 1005 | sle_dma = sg_dma_address(sg); |
1100 | 1006 | ql_dbg(ql_dbg_io, vha, 0x300a, | |
1007 | "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n", | ||
1008 | cur_dsd, i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), | ||
1009 | sp->cmd); | ||
1101 | *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); | 1010 | *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); |
1102 | *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); | 1011 | *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); |
1103 | *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); | 1012 | *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); |
1104 | avail_dsds--; | 1013 | avail_dsds--; |
1105 | 1014 | ||
1015 | if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { | ||
1016 | cp = page_address(sg_page(sg)) + sg->offset; | ||
1017 | ql_dbg(ql_dbg_io, vha, 0x300b, | ||
1018 | "User data buffer=%p for cmd=%p.\n", cp, sp->cmd); | ||
1019 | } | ||
1106 | } | 1020 | } |
1107 | /* Null termination */ | 1021 | /* Null termination */ |
1108 | *cur_dsd++ = 0; | 1022 | *cur_dsd++ = 0; |
@@ -1125,8 +1039,11 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, | |||
1125 | struct scsi_cmnd *cmd; | 1039 | struct scsi_cmnd *cmd; |
1126 | uint32_t *cur_dsd = dsd; | 1040 | uint32_t *cur_dsd = dsd; |
1127 | uint16_t used_dsds = tot_dsds; | 1041 | uint16_t used_dsds = tot_dsds; |
1042 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); | ||
1043 | uint8_t *cp; | ||
1044 | |||
1128 | 1045 | ||
1129 | cmd = GET_CMD_SP(sp); | 1046 | cmd = sp->cmd; |
1130 | scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) { | 1047 | scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) { |
1131 | dma_addr_t sle_dma; | 1048 | dma_addr_t sle_dma; |
1132 | 1049 | ||
@@ -1157,7 +1074,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, | |||
1157 | } | 1074 | } |
1158 | 1075 | ||
1159 | list_add_tail(&dsd_ptr->list, | 1076 | list_add_tail(&dsd_ptr->list, |
1160 | &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); | 1077 | &((struct crc_context *)sp->ctx)->dsd_list); |
1161 | 1078 | ||
1162 | sp->flags |= SRB_CRC_CTX_DSD_VALID; | 1079 | sp->flags |= SRB_CRC_CTX_DSD_VALID; |
1163 | 1080 | ||
@@ -1168,11 +1085,23 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, | |||
1168 | cur_dsd = (uint32_t *)next_dsd; | 1085 | cur_dsd = (uint32_t *)next_dsd; |
1169 | } | 1086 | } |
1170 | sle_dma = sg_dma_address(sg); | 1087 | sle_dma = sg_dma_address(sg); |
1171 | 1088 | if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { | |
1089 | ql_dbg(ql_dbg_io, vha, 0x3027, | ||
1090 | "%s(): %p, sg_entry %d - " | ||
1091 | "addr=0x%x0x%x, len=%d.\n", | ||
1092 | __func__, cur_dsd, i, | ||
1093 | LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)); | ||
1094 | } | ||
1172 | *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); | 1095 | *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); |
1173 | *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); | 1096 | *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); |
1174 | *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); | 1097 | *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); |
1175 | 1098 | ||
1099 | if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { | ||
1100 | cp = page_address(sg_page(sg)) + sg->offset; | ||
1101 | ql_dbg(ql_dbg_io, vha, 0x3028, | ||
1102 | "%s(): Protection Data buffer = %p.\n", __func__, | ||
1103 | cp); | ||
1104 | } | ||
1176 | avail_dsds--; | 1105 | avail_dsds--; |
1177 | } | 1106 | } |
1178 | /* Null termination */ | 1107 | /* Null termination */ |
@@ -1213,7 +1142,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | |||
1213 | dma_addr_t crc_ctx_dma; | 1142 | dma_addr_t crc_ctx_dma; |
1214 | char tag[2]; | 1143 | char tag[2]; |
1215 | 1144 | ||
1216 | cmd = GET_CMD_SP(sp); | 1145 | cmd = sp->cmd; |
1217 | 1146 | ||
1218 | sgc = 0; | 1147 | sgc = 0; |
1219 | /* Update entry type to indicate Command Type CRC_2 IOCB */ | 1148 | /* Update entry type to indicate Command Type CRC_2 IOCB */ |
@@ -1230,7 +1159,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | |||
1230 | return QLA_SUCCESS; | 1159 | return QLA_SUCCESS; |
1231 | } | 1160 | } |
1232 | 1161 | ||
1233 | cmd_pkt->vp_index = sp->fcport->vha->vp_idx; | 1162 | cmd_pkt->vp_index = sp->fcport->vp_idx; |
1234 | 1163 | ||
1235 | /* Set transfer direction */ | 1164 | /* Set transfer direction */ |
1236 | if (cmd->sc_data_direction == DMA_TO_DEVICE) { | 1165 | if (cmd->sc_data_direction == DMA_TO_DEVICE) { |
@@ -1241,15 +1170,15 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | |||
1241 | __constant_cpu_to_le16(CF_READ_DATA); | 1170 | __constant_cpu_to_le16(CF_READ_DATA); |
1242 | } | 1171 | } |
1243 | 1172 | ||
1244 | if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || | 1173 | if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) || |
1245 | (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) || | 1174 | (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) || |
1246 | (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) || | 1175 | (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) || |
1247 | (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT)) | 1176 | (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT)) |
1248 | bundling = 0; | 1177 | bundling = 0; |
1249 | 1178 | ||
1250 | /* Allocate CRC context from global pool */ | 1179 | /* Allocate CRC context from global pool */ |
1251 | crc_ctx_pkt = sp->u.scmd.ctx = | 1180 | crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool, |
1252 | dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); | 1181 | GFP_ATOMIC, &crc_ctx_dma); |
1253 | 1182 | ||
1254 | if (!crc_ctx_pkt) | 1183 | if (!crc_ctx_pkt) |
1255 | goto crc_queuing_error; | 1184 | goto crc_queuing_error; |
@@ -1295,7 +1224,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | |||
1295 | else if (cmd->sc_data_direction == DMA_FROM_DEVICE) | 1224 | else if (cmd->sc_data_direction == DMA_FROM_DEVICE) |
1296 | fcp_cmnd->additional_cdb_len |= 2; | 1225 | fcp_cmnd->additional_cdb_len |= 2; |
1297 | 1226 | ||
1298 | int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun); | 1227 | int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); |
1299 | memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); | 1228 | memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); |
1300 | cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); | 1229 | cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); |
1301 | cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( | 1230 | cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( |
@@ -1330,7 +1259,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | |||
1330 | blk_size = cmd->device->sector_size; | 1259 | blk_size = cmd->device->sector_size; |
1331 | dif_bytes = (data_bytes / blk_size) * 8; | 1260 | dif_bytes = (data_bytes / blk_size) * 8; |
1332 | 1261 | ||
1333 | switch (scsi_get_prot_op(GET_CMD_SP(sp))) { | 1262 | switch (scsi_get_prot_op(sp->cmd)) { |
1334 | case SCSI_PROT_READ_INSERT: | 1263 | case SCSI_PROT_READ_INSERT: |
1335 | case SCSI_PROT_WRITE_STRIP: | 1264 | case SCSI_PROT_WRITE_STRIP: |
1336 | total_bytes = data_bytes; | 1265 | total_bytes = data_bytes; |
@@ -1349,16 +1278,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | |||
1349 | 1278 | ||
1350 | if (!qla2x00_hba_err_chk_enabled(sp)) | 1279 | if (!qla2x00_hba_err_chk_enabled(sp)) |
1351 | fw_prot_opts |= 0x10; /* Disable Guard tag checking */ | 1280 | fw_prot_opts |= 0x10; /* Disable Guard tag checking */ |
1352 | /* HBA error checking enabled */ | ||
1353 | else if (IS_PI_UNINIT_CAPABLE(ha)) { | ||
1354 | if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1) | ||
1355 | || (scsi_get_prot_type(GET_CMD_SP(sp)) == | ||
1356 | SCSI_PROT_DIF_TYPE2)) | ||
1357 | fw_prot_opts |= BIT_10; | ||
1358 | else if (scsi_get_prot_type(GET_CMD_SP(sp)) == | ||
1359 | SCSI_PROT_DIF_TYPE3) | ||
1360 | fw_prot_opts |= BIT_11; | ||
1361 | } | ||
1362 | 1281 | ||
1363 | if (!bundling) { | 1282 | if (!bundling) { |
1364 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; | 1283 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; |
@@ -1440,7 +1359,7 @@ qla24xx_start_scsi(srb_t *sp) | |||
1440 | uint16_t tot_dsds; | 1359 | uint16_t tot_dsds; |
1441 | struct req_que *req = NULL; | 1360 | struct req_que *req = NULL; |
1442 | struct rsp_que *rsp = NULL; | 1361 | struct rsp_que *rsp = NULL; |
1443 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); | 1362 | struct scsi_cmnd *cmd = sp->cmd; |
1444 | struct scsi_qla_host *vha = sp->fcport->vha; | 1363 | struct scsi_qla_host *vha = sp->fcport->vha; |
1445 | struct qla_hw_data *ha = vha->hw; | 1364 | struct qla_hw_data *ha = vha->hw; |
1446 | char tag[2]; | 1365 | char tag[2]; |
@@ -1497,15 +1416,15 @@ qla24xx_start_scsi(srb_t *sp) | |||
1497 | else | 1416 | else |
1498 | req->cnt = req->length - | 1417 | req->cnt = req->length - |
1499 | (req->ring_index - cnt); | 1418 | (req->ring_index - cnt); |
1500 | if (req->cnt < (req_cnt + 2)) | ||
1501 | goto queuing_error; | ||
1502 | } | 1419 | } |
1420 | if (req->cnt < (req_cnt + 2)) | ||
1421 | goto queuing_error; | ||
1503 | 1422 | ||
1504 | /* Build command packet. */ | 1423 | /* Build command packet. */ |
1505 | req->current_outstanding_cmd = handle; | 1424 | req->current_outstanding_cmd = handle; |
1506 | req->outstanding_cmds[handle] = sp; | 1425 | req->outstanding_cmds[handle] = sp; |
1507 | sp->handle = handle; | 1426 | sp->handle = handle; |
1508 | cmd->host_scribble = (unsigned char *)(unsigned long)handle; | 1427 | sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; |
1509 | req->cnt -= req_cnt; | 1428 | req->cnt -= req_cnt; |
1510 | 1429 | ||
1511 | cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; | 1430 | cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; |
@@ -1522,9 +1441,9 @@ qla24xx_start_scsi(srb_t *sp) | |||
1522 | cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; | 1441 | cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; |
1523 | cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; | 1442 | cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; |
1524 | cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; | 1443 | cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; |
1525 | cmd_pkt->vp_index = sp->fcport->vha->vp_idx; | 1444 | cmd_pkt->vp_index = sp->fcport->vp_idx; |
1526 | 1445 | ||
1527 | int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); | 1446 | int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); |
1528 | host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); | 1447 | host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); |
1529 | 1448 | ||
1530 | /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */ | 1449 | /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */ |
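Aside (illustration, not part of the patch): the queue-space check near the top of this hunk refreshes req->cnt from the chip's out-pointer and then computes the free distance around the circular request ring, refusing to queue unless at least req_cnt + 2 entries are free. A small stand-alone sketch of that arithmetic, with invented names (ring_free_slots), follows.

#include <stdio.h>
#include <stdint.h>

/* Free entries between the driver's in-index and the firmware's out-index,
 * wrapping around a ring of 'length' entries -- same shape as the driver's
 * "if (ring_index < cnt) ... else ..." computation. */
static uint16_t ring_free_slots(uint16_t in, uint16_t out, uint16_t length)
{
	if (in < out)
		return out - in;
	return length - (in - out);
}

int main(void)
{
	uint16_t length = 2048;

	printf("in=10  out=100 -> %u free\n",
	       (unsigned)ring_free_slots(10, 100, length));
	printf("in=100 out=10  -> %u free\n",
	       (unsigned)ring_free_slots(100, 10, length));
	/* The driver only queues when free >= req_cnt + 2, keeping a small
	 * gap so the in-pointer never catches up with the out-pointer. */
	return 0;
}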
@@ -1606,7 +1525,7 @@ qla24xx_dif_start_scsi(srb_t *sp) | |||
1606 | uint16_t fw_prot_opts = 0; | 1525 | uint16_t fw_prot_opts = 0; |
1607 | struct req_que *req = NULL; | 1526 | struct req_que *req = NULL; |
1608 | struct rsp_que *rsp = NULL; | 1527 | struct rsp_que *rsp = NULL; |
1609 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); | 1528 | struct scsi_cmnd *cmd = sp->cmd; |
1610 | struct scsi_qla_host *vha = sp->fcport->vha; | 1529 | struct scsi_qla_host *vha = sp->fcport->vha; |
1611 | struct qla_hw_data *ha = vha->hw; | 1530 | struct qla_hw_data *ha = vha->hw; |
1612 | struct cmd_type_crc_2 *cmd_pkt; | 1531 | struct cmd_type_crc_2 *cmd_pkt; |
@@ -1712,17 +1631,18 @@ qla24xx_dif_start_scsi(srb_t *sp) | |||
1712 | else | 1631 | else |
1713 | req->cnt = req->length - | 1632 | req->cnt = req->length - |
1714 | (req->ring_index - cnt); | 1633 | (req->ring_index - cnt); |
1715 | if (req->cnt < (req_cnt + 2)) | ||
1716 | goto queuing_error; | ||
1717 | } | 1634 | } |
1718 | 1635 | ||
1636 | if (req->cnt < (req_cnt + 2)) | ||
1637 | goto queuing_error; | ||
1638 | |||
1719 | status |= QDSS_GOT_Q_SPACE; | 1639 | status |= QDSS_GOT_Q_SPACE; |
1720 | 1640 | ||
1721 | /* Build header part of command packet (excluding the OPCODE). */ | 1641 | /* Build header part of command packet (excluding the OPCODE). */ |
1722 | req->current_outstanding_cmd = handle; | 1642 | req->current_outstanding_cmd = handle; |
1723 | req->outstanding_cmds[handle] = sp; | 1643 | req->outstanding_cmds[handle] = sp; |
1724 | sp->handle = handle; | 1644 | sp->handle = handle; |
1725 | cmd->host_scribble = (unsigned char *)(unsigned long)handle; | 1645 | sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; |
1726 | req->cnt -= req_cnt; | 1646 | req->cnt -= req_cnt; |
1727 | 1647 | ||
1728 | /* Fill-in common area */ | 1648 | /* Fill-in common area */ |
@@ -1738,7 +1658,7 @@ qla24xx_dif_start_scsi(srb_t *sp) | |||
1738 | cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; | 1658 | cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; |
1739 | cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; | 1659 | cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; |
1740 | 1660 | ||
1741 | int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); | 1661 | int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); |
1742 | host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); | 1662 | host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); |
1743 | 1663 | ||
1744 | /* Total Data and protection segment(s) */ | 1664 | /* Total Data and protection segment(s) */ |
@@ -1791,7 +1711,7 @@ queuing_error: | |||
1791 | 1711 | ||
1792 | static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp) | 1712 | static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp) |
1793 | { | 1713 | { |
1794 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); | 1714 | struct scsi_cmnd *cmd = sp->cmd; |
1795 | struct qla_hw_data *ha = sp->fcport->vha->hw; | 1715 | struct qla_hw_data *ha = sp->fcport->vha->hw; |
1796 | int affinity = cmd->request->cpu; | 1716 | int affinity = cmd->request->cpu; |
1797 | 1717 | ||
@@ -1831,7 +1751,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp) | |||
1831 | } | 1751 | } |
1832 | if (index == MAX_OUTSTANDING_COMMANDS) { | 1752 | if (index == MAX_OUTSTANDING_COMMANDS) { |
1833 | ql_log(ql_log_warn, vha, 0x700b, | 1753 | ql_log(ql_log_warn, vha, 0x700b, |
1834 | "No room on outstanding cmd array.\n"); | 1754 | "No room on oustanding cmd array.\n"); |
1835 | goto queuing_error; | 1755 | goto queuing_error; |
1836 | } | 1756 | } |
1837 | 1757 | ||
@@ -1840,14 +1760,10 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp) | |||
1840 | req->outstanding_cmds[handle] = sp; | 1760 | req->outstanding_cmds[handle] = sp; |
1841 | sp->handle = handle; | 1761 | sp->handle = handle; |
1842 | 1762 | ||
1843 | /* Adjust entry-counts as needed. */ | ||
1844 | if (sp->type != SRB_SCSI_CMD) | ||
1845 | req_cnt = sp->iocbs; | ||
1846 | |||
1847 | skip_cmd_array: | 1763 | skip_cmd_array: |
1848 | /* Check for room on request queue. */ | 1764 | /* Check for room on request queue. */ |
1849 | if (req->cnt < req_cnt) { | 1765 | if (req->cnt < req_cnt) { |
1850 | if (ha->mqenable || IS_QLA83XX(ha)) | 1766 | if (ha->mqenable) |
1851 | cnt = RD_REG_DWORD(®->isp25mq.req_q_out); | 1767 | cnt = RD_REG_DWORD(®->isp25mq.req_q_out); |
1852 | else if (IS_QLA82XX(ha)) | 1768 | else if (IS_QLA82XX(ha)) |
1853 | cnt = RD_REG_DWORD(®->isp82.req_q_out); | 1769 | cnt = RD_REG_DWORD(®->isp82.req_q_out); |
@@ -1878,9 +1794,46 @@ queuing_error: | |||
1878 | } | 1794 | } |
1879 | 1795 | ||
1880 | static void | 1796 | static void |
1797 | qla2x00_start_iocbs(srb_t *sp) | ||
1798 | { | ||
1799 | struct qla_hw_data *ha = sp->fcport->vha->hw; | ||
1800 | struct req_que *req = ha->req_q_map[0]; | ||
1801 | device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); | ||
1802 | struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; | ||
1803 | |||
1804 | if (IS_QLA82XX(ha)) { | ||
1805 | qla82xx_start_iocbs(sp); | ||
1806 | } else { | ||
1807 | /* Adjust ring index. */ | ||
1808 | req->ring_index++; | ||
1809 | if (req->ring_index == req->length) { | ||
1810 | req->ring_index = 0; | ||
1811 | req->ring_ptr = req->ring; | ||
1812 | } else | ||
1813 | req->ring_ptr++; | ||
1814 | |||
1815 | /* Set chip new ring index. */ | ||
1816 | if (ha->mqenable) { | ||
1817 | WRT_REG_DWORD(®->isp25mq.req_q_in, req->ring_index); | ||
1818 | RD_REG_DWORD(&ioreg->hccr); | ||
1819 | } else if (IS_QLA82XX(ha)) { | ||
1820 | qla82xx_start_iocbs(sp); | ||
1821 | } else if (IS_FWI2_CAPABLE(ha)) { | ||
1822 | WRT_REG_DWORD(®->isp24.req_q_in, req->ring_index); | ||
1823 | RD_REG_DWORD_RELAXED(®->isp24.req_q_in); | ||
1824 | } else { | ||
1825 | WRT_REG_WORD(ISP_REQ_Q_IN(ha, ®->isp), | ||
1826 | req->ring_index); | ||
1827 | RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, ®->isp)); | ||
1828 | } | ||
1829 | } | ||
1830 | } | ||
1831 | |||
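Aside (illustration, not part of the patch): qla2x00_start_iocbs(), added above, bumps the request ring's producer index, wraps it back to the start of the ring when it reaches req->length, and then writes the new index to whichever in-pointer register the ISP family uses. The toy sketch below (invented struct toy_req_ring) shows just the wrap-around advance.

#include <stdio.h>
#include <stdint.h>

struct toy_req_ring {
	uint16_t ring_index;     /* next slot the driver will fill */
	uint16_t length;         /* number of entries in the ring */
	uint32_t *ring;          /* base of the ring */
	uint32_t *ring_ptr;      /* pointer to the slot at ring_index */
};

static void advance_ring(struct toy_req_ring *req)
{
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;         /* wrap to the start of the ring */
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}
	/* The real driver now writes req->ring_index to the chip's
	 * request-queue in-pointer register, picked per ISP family as the
	 * hunk above shows. */
}

int main(void)
{
	uint32_t slots[4] = { 0 };
	struct toy_req_ring req = { .ring_index = 2, .length = 4,
				    .ring = slots, .ring_ptr = &slots[2] };
	int i;

	for (i = 0; i < 3; i++) {
		advance_ring(&req);
		printf("ring_index=%u slot=%ld\n", (unsigned)req.ring_index,
		       (long)(req.ring_ptr - req.ring));
	}
	return 0;
}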
1832 | static void | ||
1881 | qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) | 1833 | qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) |
1882 | { | 1834 | { |
1883 | struct srb_iocb *lio = &sp->u.iocb_cmd; | 1835 | struct srb_ctx *ctx = sp->ctx; |
1836 | struct srb_iocb *lio = ctx->u.iocb_cmd; | ||
1884 | 1837 | ||
1885 | logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; | 1838 | logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; |
1886 | logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); | 1839 | logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); |
@@ -1892,14 +1845,15 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) | |||
1892 | logio->port_id[0] = sp->fcport->d_id.b.al_pa; | 1845 | logio->port_id[0] = sp->fcport->d_id.b.al_pa; |
1893 | logio->port_id[1] = sp->fcport->d_id.b.area; | 1846 | logio->port_id[1] = sp->fcport->d_id.b.area; |
1894 | logio->port_id[2] = sp->fcport->d_id.b.domain; | 1847 | logio->port_id[2] = sp->fcport->d_id.b.domain; |
1895 | logio->vp_index = sp->fcport->vha->vp_idx; | 1848 | logio->vp_index = sp->fcport->vp_idx; |
1896 | } | 1849 | } |
1897 | 1850 | ||
1898 | static void | 1851 | static void |
1899 | qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) | 1852 | qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) |
1900 | { | 1853 | { |
1901 | struct qla_hw_data *ha = sp->fcport->vha->hw; | 1854 | struct qla_hw_data *ha = sp->fcport->vha->hw; |
1902 | struct srb_iocb *lio = &sp->u.iocb_cmd; | 1855 | struct srb_ctx *ctx = sp->ctx; |
1856 | struct srb_iocb *lio = ctx->u.iocb_cmd; | ||
1903 | uint16_t opts; | 1857 | uint16_t opts; |
1904 | 1858 | ||
1905 | mbx->entry_type = MBX_IOCB_TYPE; | 1859 | mbx->entry_type = MBX_IOCB_TYPE; |
@@ -1916,7 +1870,7 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) | |||
1916 | mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); | 1870 | mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); |
1917 | mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | | 1871 | mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | |
1918 | sp->fcport->d_id.b.al_pa); | 1872 | sp->fcport->d_id.b.al_pa); |
1919 | mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); | 1873 | mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); |
1920 | } | 1874 | } |
1921 | 1875 | ||
1922 | static void | 1876 | static void |
@@ -1929,7 +1883,7 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) | |||
1929 | logio->port_id[0] = sp->fcport->d_id.b.al_pa; | 1883 | logio->port_id[0] = sp->fcport->d_id.b.al_pa; |
1930 | logio->port_id[1] = sp->fcport->d_id.b.area; | 1884 | logio->port_id[1] = sp->fcport->d_id.b.area; |
1931 | logio->port_id[2] = sp->fcport->d_id.b.domain; | 1885 | logio->port_id[2] = sp->fcport->d_id.b.domain; |
1932 | logio->vp_index = sp->fcport->vha->vp_idx; | 1886 | logio->vp_index = sp->fcport->vp_idx; |
1933 | } | 1887 | } |
1934 | 1888 | ||
1935 | static void | 1889 | static void |
@@ -1946,7 +1900,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) | |||
1946 | mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); | 1900 | mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); |
1947 | mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | | 1901 | mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | |
1948 | sp->fcport->d_id.b.al_pa); | 1902 | sp->fcport->d_id.b.al_pa); |
1949 | mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); | 1903 | mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); |
1950 | /* Implicit: mbx->mbx10 = 0. */ | 1904 | /* Implicit: mbx->mbx10 = 0. */ |
1951 | } | 1905 | } |
1952 | 1906 | ||
@@ -1956,7 +1910,7 @@ qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio) | |||
1956 | logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; | 1910 | logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; |
1957 | logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); | 1911 | logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); |
1958 | logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); | 1912 | logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
1959 | logio->vp_index = sp->fcport->vha->vp_idx; | 1913 | logio->vp_index = sp->fcport->vp_idx; |
1960 | } | 1914 | } |
1961 | 1915 | ||
1962 | static void | 1916 | static void |
@@ -1977,7 +1931,7 @@ qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) | |||
1977 | mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); | 1931 | mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); |
1978 | mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); | 1932 | mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); |
1979 | mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); | 1933 | mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); |
1980 | mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); | 1934 | mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); |
1981 | } | 1935 | } |
1982 | 1936 | ||
1983 | static void | 1937 | static void |
@@ -1988,7 +1942,8 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) | |||
1988 | struct fc_port *fcport = sp->fcport; | 1942 | struct fc_port *fcport = sp->fcport; |
1989 | scsi_qla_host_t *vha = fcport->vha; | 1943 | scsi_qla_host_t *vha = fcport->vha; |
1990 | struct qla_hw_data *ha = vha->hw; | 1944 | struct qla_hw_data *ha = vha->hw; |
1991 | struct srb_iocb *iocb = &sp->u.iocb_cmd; | 1945 | struct srb_ctx *ctx = sp->ctx; |
1946 | struct srb_iocb *iocb = ctx->u.iocb_cmd; | ||
1992 | struct req_que *req = vha->req; | 1947 | struct req_que *req = vha->req; |
1993 | 1948 | ||
1994 | flags = iocb->u.tmf.flags; | 1949 | flags = iocb->u.tmf.flags; |
@@ -2003,7 +1958,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) | |||
2003 | tsk->port_id[0] = fcport->d_id.b.al_pa; | 1958 | tsk->port_id[0] = fcport->d_id.b.al_pa; |
2004 | tsk->port_id[1] = fcport->d_id.b.area; | 1959 | tsk->port_id[1] = fcport->d_id.b.area; |
2005 | tsk->port_id[2] = fcport->d_id.b.domain; | 1960 | tsk->port_id[2] = fcport->d_id.b.domain; |
2006 | tsk->vp_index = fcport->vha->vp_idx; | 1961 | tsk->vp_index = fcport->vp_idx; |
2007 | 1962 | ||
2008 | if (flags == TCF_LUN_RESET) { | 1963 | if (flags == TCF_LUN_RESET) { |
2009 | int_to_scsilun(lun, &tsk->lun); | 1964 | int_to_scsilun(lun, &tsk->lun); |
@@ -2015,7 +1970,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) | |||
2015 | static void | 1970 | static void |
2016 | qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) | 1971 | qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) |
2017 | { | 1972 | { |
2018 | struct fc_bsg_job *bsg_job = sp->u.bsg_job; | 1973 | struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job; |
2019 | 1974 | ||
2020 | els_iocb->entry_type = ELS_IOCB_TYPE; | 1975 | els_iocb->entry_type = ELS_IOCB_TYPE; |
2021 | els_iocb->entry_count = 1; | 1976 | els_iocb->entry_count = 1; |
@@ -2024,12 +1979,12 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) | |||
2024 | els_iocb->handle = sp->handle; | 1979 | els_iocb->handle = sp->handle; |
2025 | els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); | 1980 | els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
2026 | els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); | 1981 | els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); |
2027 | els_iocb->vp_index = sp->fcport->vha->vp_idx; | 1982 | els_iocb->vp_index = sp->fcport->vp_idx; |
2028 | els_iocb->sof_type = EST_SOFI3; | 1983 | els_iocb->sof_type = EST_SOFI3; |
2029 | els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); | 1984 | els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); |
2030 | 1985 | ||
2031 | els_iocb->opcode = | 1986 | els_iocb->opcode = |
2032 | sp->type == SRB_ELS_CMD_RPT ? | 1987 | (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ? |
2033 | bsg_job->request->rqst_data.r_els.els_code : | 1988 | bsg_job->request->rqst_data.r_els.els_code : |
2034 | bsg_job->request->rqst_data.h_els.command_code; | 1989 | bsg_job->request->rqst_data.h_els.command_code; |
2035 | els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; | 1990 | els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; |
@@ -2066,7 +2021,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) | |||
2066 | uint16_t tot_dsds; | 2021 | uint16_t tot_dsds; |
2067 | scsi_qla_host_t *vha = sp->fcport->vha; | 2022 | scsi_qla_host_t *vha = sp->fcport->vha; |
2068 | struct qla_hw_data *ha = vha->hw; | 2023 | struct qla_hw_data *ha = vha->hw; |
2069 | struct fc_bsg_job *bsg_job = sp->u.bsg_job; | 2024 | struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job; |
2070 | int loop_iterartion = 0; | 2025 | int loop_iterartion = 0; |
2071 | int cont_iocb_prsnt = 0; | 2026 | int cont_iocb_prsnt = 0; |
2072 | int entry_count = 1; | 2027 | int entry_count = 1; |
@@ -2115,8 +2070,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) | |||
2115 | * Five DSDs are available in the Cont. | 2070 | * Five DSDs are available in the Cont. |
2116 | * Type 1 IOCB. | 2071 | * Type 1 IOCB. |
2117 | */ | 2072 | */ |
2118 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha, | 2073 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha); |
2119 | vha->hw->req_q_map[0]); | ||
2120 | cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; | 2074 | cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; |
2121 | avail_dsds = 5; | 2075 | avail_dsds = 5; |
2122 | cont_iocb_prsnt = 1; | 2076 | cont_iocb_prsnt = 1; |
@@ -2142,8 +2096,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) | |||
2142 | int index; | 2096 | int index; |
2143 | uint16_t tot_dsds; | 2097 | uint16_t tot_dsds; |
2144 | scsi_qla_host_t *vha = sp->fcport->vha; | 2098 | scsi_qla_host_t *vha = sp->fcport->vha; |
2145 | struct qla_hw_data *ha = vha->hw; | 2099 | struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job; |
2146 | struct fc_bsg_job *bsg_job = sp->u.bsg_job; | ||
2147 | int loop_iterartion = 0; | 2100 | int loop_iterartion = 0; |
2148 | int cont_iocb_prsnt = 0; | 2101 | int cont_iocb_prsnt = 0; |
2149 | int entry_count = 1; | 2102 | int entry_count = 1; |
@@ -2154,7 +2107,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) | |||
2154 | ct_iocb->handle = sp->handle; | 2107 | ct_iocb->handle = sp->handle; |
2155 | 2108 | ||
2156 | ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); | 2109 | ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
2157 | ct_iocb->vp_index = sp->fcport->vha->vp_idx; | 2110 | ct_iocb->vp_index = sp->fcport->vp_idx; |
2158 | ct_iocb->comp_status = __constant_cpu_to_le16(0); | 2111 | ct_iocb->comp_status = __constant_cpu_to_le16(0); |
2159 | 2112 | ||
2160 | ct_iocb->cmd_dsd_count = | 2113 | ct_iocb->cmd_dsd_count = |
@@ -2188,8 +2141,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) | |||
2188 | * Five DSDs are available in the Cont. | 2141 | * Five DSDs are available in the Cont. |
2189 | * Type 1 IOCB. | 2142 | * Type 1 IOCB. |
2190 | */ | 2143 | */ |
2191 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha, | 2144 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha); |
2192 | ha->req_q_map[0]); | ||
2193 | cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; | 2145 | cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; |
2194 | avail_dsds = 5; | 2146 | avail_dsds = 5; |
2195 | cont_iocb_prsnt = 1; | 2147 | cont_iocb_prsnt = 1; |
@@ -2206,388 +2158,13 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) | |||
2206 | ct_iocb->entry_count = entry_count; | 2158 | ct_iocb->entry_count = entry_count; |
2207 | } | 2159 | } |
2208 | 2160 | ||
2209 | /* | ||
2210 | * qla82xx_start_scsi() - Send a SCSI command to the ISP | ||
2211 | * @sp: command to send to the ISP | ||
2212 | * | ||
2213 | * Returns non-zero if a failure occurred, else zero. | ||
2214 | */ | ||
2215 | int | ||
2216 | qla82xx_start_scsi(srb_t *sp) | ||
2217 | { | ||
2218 | int ret, nseg; | ||
2219 | unsigned long flags; | ||
2220 | struct scsi_cmnd *cmd; | ||
2221 | uint32_t *clr_ptr; | ||
2222 | uint32_t index; | ||
2223 | uint32_t handle; | ||
2224 | uint16_t cnt; | ||
2225 | uint16_t req_cnt; | ||
2226 | uint16_t tot_dsds; | ||
2227 | struct device_reg_82xx __iomem *reg; | ||
2228 | uint32_t dbval; | ||
2229 | uint32_t *fcp_dl; | ||
2230 | uint8_t additional_cdb_len; | ||
2231 | struct ct6_dsd *ctx; | ||
2232 | struct scsi_qla_host *vha = sp->fcport->vha; | ||
2233 | struct qla_hw_data *ha = vha->hw; | ||
2234 | struct req_que *req = NULL; | ||
2235 | struct rsp_que *rsp = NULL; | ||
2236 | char tag[2]; | ||
2237 | |||
2238 | /* Setup device pointers. */ | ||
2239 | ret = 0; | ||
2240 | reg = &ha->iobase->isp82; | ||
2241 | cmd = GET_CMD_SP(sp); | ||
2242 | req = vha->req; | ||
2243 | rsp = ha->rsp_q_map[0]; | ||
2244 | |||
2245 | /* So we know we haven't pci_map'ed anything yet */ | ||
2246 | tot_dsds = 0; | ||
2247 | |||
2248 | dbval = 0x04 | (ha->portnum << 5); | ||
2249 | |||
2250 | /* Send marker if required */ | ||
2251 | if (vha->marker_needed != 0) { | ||
2252 | if (qla2x00_marker(vha, req, | ||
2253 | rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { | ||
2254 | ql_log(ql_log_warn, vha, 0x300c, | ||
2255 | "qla2x00_marker failed for cmd=%p.\n", cmd); | ||
2256 | return QLA_FUNCTION_FAILED; | ||
2257 | } | ||
2258 | vha->marker_needed = 0; | ||
2259 | } | ||
2260 | |||
2261 | /* Acquire ring specific lock */ | ||
2262 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
2263 | |||
2264 | /* Check for room in outstanding command list. */ | ||
2265 | handle = req->current_outstanding_cmd; | ||
2266 | for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { | ||
2267 | handle++; | ||
2268 | if (handle == MAX_OUTSTANDING_COMMANDS) | ||
2269 | handle = 1; | ||
2270 | if (!req->outstanding_cmds[handle]) | ||
2271 | break; | ||
2272 | } | ||
2273 | if (index == MAX_OUTSTANDING_COMMANDS) | ||
2274 | goto queuing_error; | ||
2275 | |||
2276 | /* Map the sg table so we have an accurate count of sg entries needed */ | ||
2277 | if (scsi_sg_count(cmd)) { | ||
2278 | nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), | ||
2279 | scsi_sg_count(cmd), cmd->sc_data_direction); | ||
2280 | if (unlikely(!nseg)) | ||
2281 | goto queuing_error; | ||
2282 | } else | ||
2283 | nseg = 0; | ||
2284 | |||
2285 | tot_dsds = nseg; | ||
2286 | |||
2287 | if (tot_dsds > ql2xshiftctondsd) { | ||
2288 | struct cmd_type_6 *cmd_pkt; | ||
2289 | uint16_t more_dsd_lists = 0; | ||
2290 | struct dsd_dma *dsd_ptr; | ||
2291 | uint16_t i; | ||
2292 | |||
2293 | more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds); | ||
2294 | if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) { | ||
2295 | ql_dbg(ql_dbg_io, vha, 0x300d, | ||
2296 | "Num of DSD list %d is than %d for cmd=%p.\n", | ||
2297 | more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN, | ||
2298 | cmd); | ||
2299 | goto queuing_error; | ||
2300 | } | ||
2301 | |||
2302 | if (more_dsd_lists <= ha->gbl_dsd_avail) | ||
2303 | goto sufficient_dsds; | ||
2304 | else | ||
2305 | more_dsd_lists -= ha->gbl_dsd_avail; | ||
2306 | |||
2307 | for (i = 0; i < more_dsd_lists; i++) { | ||
2308 | dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); | ||
2309 | if (!dsd_ptr) { | ||
2310 | ql_log(ql_log_fatal, vha, 0x300e, | ||
2311 | "Failed to allocate memory for dsd_dma " | ||
2312 | "for cmd=%p.\n", cmd); | ||
2313 | goto queuing_error; | ||
2314 | } | ||
2315 | |||
2316 | dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, | ||
2317 | GFP_ATOMIC, &dsd_ptr->dsd_list_dma); | ||
2318 | if (!dsd_ptr->dsd_addr) { | ||
2319 | kfree(dsd_ptr); | ||
2320 | ql_log(ql_log_fatal, vha, 0x300f, | ||
2321 | "Failed to allocate memory for dsd_addr " | ||
2322 | "for cmd=%p.\n", cmd); | ||
2323 | goto queuing_error; | ||
2324 | } | ||
2325 | list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list); | ||
2326 | ha->gbl_dsd_avail++; | ||
2327 | } | ||
2328 | |||
2329 | sufficient_dsds: | ||
2330 | req_cnt = 1; | ||
2331 | |||
2332 | if (req->cnt < (req_cnt + 2)) { | ||
2333 | cnt = (uint16_t)RD_REG_DWORD_RELAXED( | ||
2334 | ®->req_q_out[0]); | ||
2335 | if (req->ring_index < cnt) | ||
2336 | req->cnt = cnt - req->ring_index; | ||
2337 | else | ||
2338 | req->cnt = req->length - | ||
2339 | (req->ring_index - cnt); | ||
2340 | if (req->cnt < (req_cnt + 2)) | ||
2341 | goto queuing_error; | ||
2342 | } | ||
2343 | |||
2344 | ctx = sp->u.scmd.ctx = | ||
2345 | mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); | ||
2346 | if (!ctx) { | ||
2347 | ql_log(ql_log_fatal, vha, 0x3010, | ||
2348 | "Failed to allocate ctx for cmd=%p.\n", cmd); | ||
2349 | goto queuing_error; | ||
2350 | } | ||
2351 | |||
2352 | memset(ctx, 0, sizeof(struct ct6_dsd)); | ||
2353 | ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool, | ||
2354 | GFP_ATOMIC, &ctx->fcp_cmnd_dma); | ||
2355 | if (!ctx->fcp_cmnd) { | ||
2356 | ql_log(ql_log_fatal, vha, 0x3011, | ||
2357 | "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); | ||
2358 | goto queuing_error; | ||
2359 | } | ||
2360 | |||
2361 | /* Initialize the DSD list and dma handle */ | ||
2362 | INIT_LIST_HEAD(&ctx->dsd_list); | ||
2363 | ctx->dsd_use_cnt = 0; | ||
2364 | |||
2365 | if (cmd->cmd_len > 16) { | ||
2366 | additional_cdb_len = cmd->cmd_len - 16; | ||
2367 | if ((cmd->cmd_len % 4) != 0) { | ||
2368 | /* SCSI command bigger than 16 bytes must be | ||
2369 | * multiple of 4 | ||
2370 | */ | ||
2371 | ql_log(ql_log_warn, vha, 0x3012, | ||
2372 | "scsi cmd len %d not multiple of 4 " | ||
2373 | "for cmd=%p.\n", cmd->cmd_len, cmd); | ||
2374 | goto queuing_error_fcp_cmnd; | ||
2375 | } | ||
2376 | ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; | ||
2377 | } else { | ||
2378 | additional_cdb_len = 0; | ||
2379 | ctx->fcp_cmnd_len = 12 + 16 + 4; | ||
2380 | } | ||
2381 | |||
2382 | cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; | ||
2383 | cmd_pkt->handle = MAKE_HANDLE(req->id, handle); | ||
2384 | |||
2385 | /* Zero out remaining portion of packet. */ | ||
2386 | /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ | ||
2387 | clr_ptr = (uint32_t *)cmd_pkt + 2; | ||
2388 | memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); | ||
2389 | cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); | ||
2390 | |||
2391 | /* Set NPORT-ID and LUN number*/ | ||
2392 | cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); | ||
2393 | cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; | ||
2394 | cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; | ||
2395 | cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; | ||
2396 | cmd_pkt->vp_index = sp->fcport->vha->vp_idx; | ||
2397 | |||
2398 | /* Build IOCB segments */ | ||
2399 | if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) | ||
2400 | goto queuing_error_fcp_cmnd; | ||
2401 | |||
2402 | int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); | ||
2403 | host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); | ||
2404 | |||
2405 | /* build FCP_CMND IU */ | ||
2406 | memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); | ||
2407 | int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun); | ||
2408 | ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; | ||
2409 | |||
2410 | if (cmd->sc_data_direction == DMA_TO_DEVICE) | ||
2411 | ctx->fcp_cmnd->additional_cdb_len |= 1; | ||
2412 | else if (cmd->sc_data_direction == DMA_FROM_DEVICE) | ||
2413 | ctx->fcp_cmnd->additional_cdb_len |= 2; | ||
2414 | |||
2415 | /* | ||
2416 | * Update tagged queuing modifier -- default is TSK_SIMPLE (0). | ||
2417 | */ | ||
2418 | if (scsi_populate_tag_msg(cmd, tag)) { | ||
2419 | switch (tag[0]) { | ||
2420 | case HEAD_OF_QUEUE_TAG: | ||
2421 | ctx->fcp_cmnd->task_attribute = | ||
2422 | TSK_HEAD_OF_QUEUE; | ||
2423 | break; | ||
2424 | case ORDERED_QUEUE_TAG: | ||
2425 | ctx->fcp_cmnd->task_attribute = | ||
2426 | TSK_ORDERED; | ||
2427 | break; | ||
2428 | } | ||
2429 | } | ||
2430 | |||
2431 | /* Populate the FCP_PRIO. */ | ||
2432 | if (ha->flags.fcp_prio_enabled) | ||
2433 | ctx->fcp_cmnd->task_attribute |= | ||
2434 | sp->fcport->fcp_prio << 3; | ||
2435 | |||
2436 | memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); | ||
2437 | |||
2438 | fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + | ||
2439 | additional_cdb_len); | ||
2440 | *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); | ||
2441 | |||
2442 | cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); | ||
2443 | cmd_pkt->fcp_cmnd_dseg_address[0] = | ||
2444 | cpu_to_le32(LSD(ctx->fcp_cmnd_dma)); | ||
2445 | cmd_pkt->fcp_cmnd_dseg_address[1] = | ||
2446 | cpu_to_le32(MSD(ctx->fcp_cmnd_dma)); | ||
2447 | |||
2448 | sp->flags |= SRB_FCP_CMND_DMA_VALID; | ||
2449 | cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); | ||
2450 | /* Set total data segment count. */ | ||
2451 | cmd_pkt->entry_count = (uint8_t)req_cnt; | ||
2452 | /* Specify response queue number where | ||
2453 | * completion should happen | ||
2454 | */ | ||
2455 | cmd_pkt->entry_status = (uint8_t) rsp->id; | ||
2456 | } else { | ||
2457 | struct cmd_type_7 *cmd_pkt; | ||
2458 | req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); | ||
2459 | if (req->cnt < (req_cnt + 2)) { | ||
2460 | cnt = (uint16_t)RD_REG_DWORD_RELAXED( | ||
2461 | ®->req_q_out[0]); | ||
2462 | if (req->ring_index < cnt) | ||
2463 | req->cnt = cnt - req->ring_index; | ||
2464 | else | ||
2465 | req->cnt = req->length - | ||
2466 | (req->ring_index - cnt); | ||
2467 | } | ||
2468 | if (req->cnt < (req_cnt + 2)) | ||
2469 | goto queuing_error; | ||
2470 | |||
2471 | cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; | ||
2472 | cmd_pkt->handle = MAKE_HANDLE(req->id, handle); | ||
2473 | |||
2474 | /* Zero out remaining portion of packet. */ | ||
2475 | /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ | ||
2476 | clr_ptr = (uint32_t *)cmd_pkt + 2; | ||
2477 | memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); | ||
2478 | cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); | ||
2479 | |||
2480 | /* Set NPORT-ID and LUN number*/ | ||
2481 | cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); | ||
2482 | cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; | ||
2483 | cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; | ||
2484 | cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; | ||
2485 | cmd_pkt->vp_index = sp->fcport->vha->vp_idx; | ||
2486 | |||
2487 | int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); | ||
2488 | host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, | ||
2489 | sizeof(cmd_pkt->lun)); | ||
2490 | |||
2491 | /* | ||
2492 | * Update tagged queuing modifier -- default is TSK_SIMPLE (0). | ||
2493 | */ | ||
2494 | if (scsi_populate_tag_msg(cmd, tag)) { | ||
2495 | switch (tag[0]) { | ||
2496 | case HEAD_OF_QUEUE_TAG: | ||
2497 | cmd_pkt->task = TSK_HEAD_OF_QUEUE; | ||
2498 | break; | ||
2499 | case ORDERED_QUEUE_TAG: | ||
2500 | cmd_pkt->task = TSK_ORDERED; | ||
2501 | break; | ||
2502 | } | ||
2503 | } | ||
2504 | |||
2505 | /* Populate the FCP_PRIO. */ | ||
2506 | if (ha->flags.fcp_prio_enabled) | ||
2507 | cmd_pkt->task |= sp->fcport->fcp_prio << 3; | ||
2508 | |||
2509 | /* Load SCSI command packet. */ | ||
2510 | memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); | ||
2511 | host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); | ||
2512 | |||
2513 | cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); | ||
2514 | |||
2515 | /* Build IOCB segments */ | ||
2516 | qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds); | ||
2517 | |||
2518 | /* Set total data segment count. */ | ||
2519 | cmd_pkt->entry_count = (uint8_t)req_cnt; | ||
2520 | /* Specify response queue number where | ||
2521 | * completion should happen. | ||
2522 | */ | ||
2523 | cmd_pkt->entry_status = (uint8_t) rsp->id; | ||
2524 | |||
2525 | } | ||
2526 | /* Build command packet. */ | ||
2527 | req->current_outstanding_cmd = handle; | ||
2528 | req->outstanding_cmds[handle] = sp; | ||
2529 | sp->handle = handle; | ||
2530 | cmd->host_scribble = (unsigned char *)(unsigned long)handle; | ||
2531 | req->cnt -= req_cnt; | ||
2532 | wmb(); | ||
2533 | |||
2534 | /* Adjust ring index. */ | ||
2535 | req->ring_index++; | ||
2536 | if (req->ring_index == req->length) { | ||
2537 | req->ring_index = 0; | ||
2538 | req->ring_ptr = req->ring; | ||
2539 | } else | ||
2540 | req->ring_ptr++; | ||
2541 | |||
2542 | sp->flags |= SRB_DMA_VALID; | ||
2543 | |||
2544 | /* Set chip new ring index. */ | ||
2545 | /* write, read and verify logic */ | ||
2546 | dbval = dbval | (req->id << 8) | (req->ring_index << 16); | ||
2547 | if (ql2xdbwr) | ||
2548 | qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval); | ||
2549 | else { | ||
2550 | WRT_REG_DWORD( | ||
2551 | (unsigned long __iomem *)ha->nxdb_wr_ptr, | ||
2552 | dbval); | ||
2553 | wmb(); | ||
2554 | while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) { | ||
2555 | WRT_REG_DWORD( | ||
2556 | (unsigned long __iomem *)ha->nxdb_wr_ptr, | ||
2557 | dbval); | ||
2558 | wmb(); | ||
2559 | } | ||
2560 | } | ||
2561 | |||
2562 | /* Manage unprocessed RIO/ZIO commands in response queue. */ | ||
2563 | if (vha->flags.process_response_queue && | ||
2564 | rsp->ring_ptr->signature != RESPONSE_PROCESSED) | ||
2565 | qla24xx_process_response_queue(vha, rsp); | ||
2566 | |||
2567 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
2568 | return QLA_SUCCESS; | ||
2569 | |||
2570 | queuing_error_fcp_cmnd: | ||
2571 | dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); | ||
2572 | queuing_error: | ||
2573 | if (tot_dsds) | ||
2574 | scsi_dma_unmap(cmd); | ||
2575 | |||
2576 | if (sp->u.scmd.ctx) { | ||
2577 | mempool_free(sp->u.scmd.ctx, ha->ctx_mempool); | ||
2578 | sp->u.scmd.ctx = NULL; | ||
2579 | } | ||
2580 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
2581 | |||
2582 | return QLA_FUNCTION_FAILED; | ||
2583 | } | ||
2584 | |||
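Aside (illustration, not part of the patch): the removed qla82xx_start_scsi() above rings the ISP82xx doorbell by packing the request type, port number, queue id and new ring index into one 32-bit value, then (unless ql2xdbwr selects the cached write path) writing it and re-writing it until a read-back of the doorbell matches. The sketch below imitates that write/read/verify handshake with plain variables standing in for the registers; all names are invented.

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_db_wr;      /* stands in for ha->nxdb_wr_ptr */
static uint32_t fake_db_rd;      /* stands in for ha->nxdb_rd_ptr */

/* Pretend the hardware latches the doorbell on the second write. */
static void fake_hw_latch(void)
{
	static int writes;

	if (++writes >= 2)
		fake_db_rd = fake_db_wr;
}

static void ring_doorbell(uint8_t portnum, uint8_t req_id, uint16_t ring_index)
{
	/* Same packing as the removed code: request type in the low bits,
	 * then port number, queue id and the new ring index. */
	uint32_t dbval = 0x04 | (portnum << 5) | (req_id << 8) |
			 ((uint32_t)ring_index << 16);

	fake_db_wr = dbval;
	fake_hw_latch();
	/* Write, read back and verify -- repeat until the chip saw it. */
	while (fake_db_rd != dbval) {
		fake_db_wr = dbval;
		fake_hw_latch();
	}
	printf("doorbell accepted: 0x%08x\n", (unsigned)dbval);
}

int main(void)
{
	ring_doorbell(1, 0, 37);
	return 0;
}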
2585 | int | 2161 | int |
2586 | qla2x00_start_sp(srb_t *sp) | 2162 | qla2x00_start_sp(srb_t *sp) |
2587 | { | 2163 | { |
2588 | int rval; | 2164 | int rval; |
2589 | struct qla_hw_data *ha = sp->fcport->vha->hw; | 2165 | struct qla_hw_data *ha = sp->fcport->vha->hw; |
2590 | void *pkt; | 2166 | void *pkt; |
2167 | struct srb_ctx *ctx = sp->ctx; | ||
2591 | unsigned long flags; | 2168 | unsigned long flags; |
2592 | 2169 | ||
2593 | rval = QLA_FUNCTION_FAILED; | 2170 | rval = QLA_FUNCTION_FAILED; |
@@ -2600,7 +2177,7 @@ qla2x00_start_sp(srb_t *sp) | |||
2600 | } | 2177 | } |
2601 | 2178 | ||
2602 | rval = QLA_SUCCESS; | 2179 | rval = QLA_SUCCESS; |
2603 | switch (sp->type) { | 2180 | switch (ctx->type) { |
2604 | case SRB_LOGIN_CMD: | 2181 | case SRB_LOGIN_CMD: |
2605 | IS_FWI2_CAPABLE(ha) ? | 2182 | IS_FWI2_CAPABLE(ha) ? |
2606 | qla24xx_login_iocb(sp, pkt) : | 2183 | qla24xx_login_iocb(sp, pkt) : |
@@ -2617,8 +2194,8 @@ qla2x00_start_sp(srb_t *sp) | |||
2617 | break; | 2194 | break; |
2618 | case SRB_CT_CMD: | 2195 | case SRB_CT_CMD: |
2619 | IS_FWI2_CAPABLE(ha) ? | 2196 | IS_FWI2_CAPABLE(ha) ? |
2620 | qla24xx_ct_iocb(sp, pkt) : | 2197 | qla24xx_ct_iocb(sp, pkt) : |
2621 | qla2x00_ct_iocb(sp, pkt); | 2198 | qla2x00_ct_iocb(sp, pkt); |
2622 | break; | 2199 | break; |
2623 | case SRB_ADISC_CMD: | 2200 | case SRB_ADISC_CMD: |
2624 | IS_FWI2_CAPABLE(ha) ? | 2201 | IS_FWI2_CAPABLE(ha) ? |
@@ -2633,197 +2210,8 @@ qla2x00_start_sp(srb_t *sp) | |||
2633 | } | 2210 | } |
2634 | 2211 | ||
2635 | wmb(); | 2212 | wmb(); |
2636 | qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]); | 2213 | qla2x00_start_iocbs(sp); |
2637 | done: | 2214 | done: |
2638 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 2215 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
2639 | return rval; | 2216 | return rval; |
2640 | } | 2217 | } |
2641 | |||
2642 | static void | ||
2643 | qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha, | ||
2644 | struct cmd_bidir *cmd_pkt, uint32_t tot_dsds) | ||
2645 | { | ||
2646 | uint16_t avail_dsds; | ||
2647 | uint32_t *cur_dsd; | ||
2648 | uint32_t req_data_len = 0; | ||
2649 | uint32_t rsp_data_len = 0; | ||
2650 | struct scatterlist *sg; | ||
2651 | int index; | ||
2652 | int entry_count = 1; | ||
2653 | struct fc_bsg_job *bsg_job = sp->u.bsg_job; | ||
2654 | |||
2655 | /*Update entry type to indicate bidir command */ | ||
2656 | *((uint32_t *)(&cmd_pkt->entry_type)) = | ||
2657 | __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL); | ||
2658 | |||
2659 | /* Set the transfer direction, in this set both flags | ||
2660 | * Also set the BD_WRAP_BACK flag, firmware will take care | ||
2661 | * assigning DID=SID for outgoing pkts. | ||
2662 | */ | ||
2663 | cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); | ||
2664 | cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); | ||
2665 | cmd_pkt->control_flags = | ||
2666 | __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA | | ||
2667 | BD_WRAP_BACK); | ||
2668 | |||
2669 | req_data_len = rsp_data_len = bsg_job->request_payload.payload_len; | ||
2670 | cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len); | ||
2671 | cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len); | ||
2672 | cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2); | ||
2673 | |||
2674 | vha->bidi_stats.transfer_bytes += req_data_len; | ||
2675 | vha->bidi_stats.io_count++; | ||
2676 | |||
2677 | /* Only one dsd is available for bidirectional IOCB, remaining dsds | ||
2678 | * are bundled in continuation iocb | ||
2679 | */ | ||
2680 | avail_dsds = 1; | ||
2681 | cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; | ||
2682 | |||
2683 | index = 0; | ||
2684 | |||
2685 | for_each_sg(bsg_job->request_payload.sg_list, sg, | ||
2686 | bsg_job->request_payload.sg_cnt, index) { | ||
2687 | dma_addr_t sle_dma; | ||
2688 | cont_a64_entry_t *cont_pkt; | ||
2689 | |||
2690 | /* Allocate additional continuation packets */ | ||
2691 | if (avail_dsds == 0) { | ||
2692 | /* Continuation type 1 IOCB can accomodate | ||
2693 | * 5 DSDS | ||
2694 | */ | ||
2695 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); | ||
2696 | cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; | ||
2697 | avail_dsds = 5; | ||
2698 | entry_count++; | ||
2699 | } | ||
2700 | sle_dma = sg_dma_address(sg); | ||
2701 | *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); | ||
2702 | *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); | ||
2703 | *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); | ||
2704 | avail_dsds--; | ||
2705 | } | ||
2706 | /* For read request DSD will always goes to continuation IOCB | ||
2707 | * and follow the write DSD. If there is room on the current IOCB | ||
2708 | * then it is added to that IOCB else new continuation IOCB is | ||
2709 | * allocated. | ||
2710 | */ | ||
2711 | for_each_sg(bsg_job->reply_payload.sg_list, sg, | ||
2712 | bsg_job->reply_payload.sg_cnt, index) { | ||
2713 | dma_addr_t sle_dma; | ||
2714 | cont_a64_entry_t *cont_pkt; | ||
2715 | |||
2716 | /* Allocate additional continuation packets */ | ||
2717 | if (avail_dsds == 0) { | ||
2718 | /* Continuation type 1 IOCB can accomodate | ||
2719 | * 5 DSDS | ||
2720 | */ | ||
2721 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); | ||
2722 | cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; | ||
2723 | avail_dsds = 5; | ||
2724 | entry_count++; | ||
2725 | } | ||
2726 | sle_dma = sg_dma_address(sg); | ||
2727 | *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); | ||
2728 | *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); | ||
2729 | *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); | ||
2730 | avail_dsds--; | ||
2731 | } | ||
2732 | /* This value should be same as number of IOCB required for this cmd */ | ||
2733 | cmd_pkt->entry_count = entry_count; | ||
2734 | } | ||
2735 | |||
2736 | int | ||
2737 | qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) | ||
2738 | { | ||
2739 | |||
2740 | struct qla_hw_data *ha = vha->hw; | ||
2741 | unsigned long flags; | ||
2742 | uint32_t handle; | ||
2743 | uint32_t index; | ||
2744 | uint16_t req_cnt; | ||
2745 | uint16_t cnt; | ||
2746 | uint32_t *clr_ptr; | ||
2747 | struct cmd_bidir *cmd_pkt = NULL; | ||
2748 | struct rsp_que *rsp; | ||
2749 | struct req_que *req; | ||
2750 | int rval = EXT_STATUS_OK; | ||
2751 | |||
2752 | rval = QLA_SUCCESS; | ||
2753 | |||
2754 | rsp = ha->rsp_q_map[0]; | ||
2755 | req = vha->req; | ||
2756 | |||
2757 | /* Send marker if required */ | ||
2758 | if (vha->marker_needed != 0) { | ||
2759 | if (qla2x00_marker(vha, req, | ||
2760 | rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) | ||
2761 | return EXT_STATUS_MAILBOX; | ||
2762 | vha->marker_needed = 0; | ||
2763 | } | ||
2764 | |||
2765 | /* Acquire ring specific lock */ | ||
2766 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
2767 | |||
2768 | /* Check for room in outstanding command list. */ | ||
2769 | handle = req->current_outstanding_cmd; | ||
2770 | for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { | ||
2771 | handle++; | ||
2772 | if (handle == MAX_OUTSTANDING_COMMANDS) | ||
2773 | handle = 1; | ||
2774 | if (!req->outstanding_cmds[handle]) | ||
2775 | break; | ||
2776 | } | ||
2777 | |||
2778 | if (index == MAX_OUTSTANDING_COMMANDS) { | ||
2779 | rval = EXT_STATUS_BUSY; | ||
2780 | goto queuing_error; | ||
2781 | } | ||
2782 | |||
2783 | /* Calculate number of IOCB required */ | ||
2784 | req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); | ||
2785 | |||
2786 | /* Check for room on request queue. */ | ||
2787 | if (req->cnt < req_cnt + 2) { | ||
2788 | cnt = RD_REG_DWORD_RELAXED(req->req_q_out); | ||
2789 | |||
2790 | if (req->ring_index < cnt) | ||
2791 | req->cnt = cnt - req->ring_index; | ||
2792 | else | ||
2793 | req->cnt = req->length - | ||
2794 | (req->ring_index - cnt); | ||
2795 | } | ||
2796 | if (req->cnt < req_cnt + 2) { | ||
2797 | rval = EXT_STATUS_BUSY; | ||
2798 | goto queuing_error; | ||
2799 | } | ||
2800 | |||
2801 | cmd_pkt = (struct cmd_bidir *)req->ring_ptr; | ||
2802 | cmd_pkt->handle = MAKE_HANDLE(req->id, handle); | ||
2803 | |||
2804 | /* Zero out remaining portion of packet. */ | ||
2805 | /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ | ||
2806 | clr_ptr = (uint32_t *)cmd_pkt + 2; | ||
2807 | memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); | ||
2808 | |||
2809 | /* Set NPORT-ID (of vha)*/ | ||
2810 | cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id); | ||
2811 | cmd_pkt->port_id[0] = vha->d_id.b.al_pa; | ||
2812 | cmd_pkt->port_id[1] = vha->d_id.b.area; | ||
2813 | cmd_pkt->port_id[2] = vha->d_id.b.domain; | ||
2814 | |||
2815 | qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds); | ||
2816 | cmd_pkt->entry_status = (uint8_t) rsp->id; | ||
2817 | /* Build command packet. */ | ||
2818 | req->current_outstanding_cmd = handle; | ||
2819 | req->outstanding_cmds[handle] = sp; | ||
2820 | sp->handle = handle; | ||
2821 | req->cnt -= req_cnt; | ||
2822 | |||
2823 | /* Send the command to the firmware */ | ||
2824 | wmb(); | ||
2825 | qla2x00_start_iocbs(vha, req); | ||
2826 | queuing_error: | ||
2827 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
2828 | return rval; | ||
2829 | } | ||
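Aside (illustration, not part of the patch): both qla82xx_start_scsi() and qla2x00_start_bidir() in the diff above pick a command handle by scanning req->outstanding_cmds[] circularly, starting just after the last handle issued and never using slot 0; if the scan wraps all the way around, the request is rejected as busy. A stand-alone sketch with invented names (FAKE_MAX_OUTSTANDING, find_free_handle):

#include <stdio.h>

#define FAKE_MAX_OUTSTANDING 8   /* stand-in for MAX_OUTSTANDING_COMMANDS */

/* Returns a free handle in [1, FAKE_MAX_OUTSTANDING), or 0 if none is free. */
static unsigned int find_free_handle(void *cmds[], unsigned int last_handle)
{
	unsigned int handle = last_handle;
	unsigned int index;

	for (index = 1; index < FAKE_MAX_OUTSTANDING; index++) {
		handle++;
		if (handle == FAKE_MAX_OUTSTANDING)
			handle = 1;              /* wrap, skipping slot 0 */
		if (!cmds[handle])
			return handle;           /* free slot found */
	}
	return 0;                                /* every slot is busy */
}

int main(void)
{
	void *cmds[FAKE_MAX_OUTSTANDING] = { 0 };
	int busy = 1;

	cmds[3] = &busy;                         /* pretend handle 3 is in flight */
	printf("next free handle after 2: %u\n", find_free_handle(cmds, 2));
	printf("next free handle after 3: %u\n", find_free_handle(cmds, 3));
	return 0;
}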
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 873c82014b1..8a7591f035e 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -1,11 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | #include "qla_def.h" | 7 | #include "qla_def.h" |
8 | #include "qla_target.h" | ||
9 | 8 | ||
10 | #include <linux/delay.h> | 9 | #include <linux/delay.h> |
11 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
@@ -45,8 +44,8 @@ qla2100_intr_handler(int irq, void *dev_id) | |||
45 | 44 | ||
46 | rsp = (struct rsp_que *) dev_id; | 45 | rsp = (struct rsp_que *) dev_id; |
47 | if (!rsp) { | 46 | if (!rsp) { |
48 | ql_log(ql_log_info, NULL, 0x505d, | 47 | printk(KERN_INFO |
49 | "%s: NULL response queue pointer.\n", __func__); | 48 | "%s(): NULL response queue pointer.\n", __func__); |
50 | return (IRQ_NONE); | 49 | return (IRQ_NONE); |
51 | } | 50 | } |
52 | 51 | ||
@@ -142,8 +141,8 @@ qla2300_intr_handler(int irq, void *dev_id) | |||
142 | 141 | ||
143 | rsp = (struct rsp_que *) dev_id; | 142 | rsp = (struct rsp_que *) dev_id; |
144 | if (!rsp) { | 143 | if (!rsp) { |
145 | ql_log(ql_log_info, NULL, 0x5058, | 144 | printk(KERN_INFO |
146 | "%s: NULL response queue pointer.\n", __func__); | 145 | "%s(): NULL response queue pointer.\n", __func__); |
147 | return (IRQ_NONE); | 146 | return (IRQ_NONE); |
148 | } | 147 | } |
149 | 148 | ||
@@ -243,34 +242,32 @@ static void | |||
243 | qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) | 242 | qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) |
244 | { | 243 | { |
245 | uint16_t cnt; | 244 | uint16_t cnt; |
246 | uint32_t mboxes; | ||
247 | uint16_t __iomem *wptr; | 245 | uint16_t __iomem *wptr; |
248 | struct qla_hw_data *ha = vha->hw; | 246 | struct qla_hw_data *ha = vha->hw; |
249 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; | 247 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; |
250 | 248 | ||
251 | /* Read all mbox registers? */ | ||
252 | mboxes = (1 << ha->mbx_count) - 1; | ||
253 | if (!ha->mcp) | ||
254 | ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); | ||
255 | else | ||
256 | mboxes = ha->mcp->in_mb; | ||
257 | |||
258 | /* Load return mailbox registers. */ | 249 | /* Load return mailbox registers. */ |
259 | ha->flags.mbox_int = 1; | 250 | ha->flags.mbox_int = 1; |
260 | ha->mailbox_out[0] = mb0; | 251 | ha->mailbox_out[0] = mb0; |
261 | mboxes >>= 1; | ||
262 | wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1); | 252 | wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1); |
263 | 253 | ||
264 | for (cnt = 1; cnt < ha->mbx_count; cnt++) { | 254 | for (cnt = 1; cnt < ha->mbx_count; cnt++) { |
265 | if (IS_QLA2200(ha) && cnt == 8) | 255 | if (IS_QLA2200(ha) && cnt == 8) |
266 | wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8); | 256 | wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8); |
267 | if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) | 257 | if (cnt == 4 || cnt == 5) |
268 | ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); | 258 | ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); |
269 | else if (mboxes & BIT_0) | 259 | else |
270 | ha->mailbox_out[cnt] = RD_REG_WORD(wptr); | 260 | ha->mailbox_out[cnt] = RD_REG_WORD(wptr); |
271 | 261 | ||
272 | wptr++; | 262 | wptr++; |
273 | mboxes >>= 1; | 263 | } |
264 | |||
265 | if (ha->mcp) { | ||
266 | ql_dbg(ql_dbg_async, vha, 0x5000, | ||
267 | "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]); | ||
268 | } else { | ||
269 | ql_dbg(ql_dbg_async, vha, 0x5001, | ||
270 | "MBX pointer ERROR.\n"); | ||
274 | } | 271 | } |
275 | } | 272 | } |
276 | 273 | ||
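The left-hand column of the hunk above gates each mailbox register read on a per-command bitmask (ha->mcp->in_mb) instead of copying every register unconditionally. Below is a minimal user-space sketch of that bitmask walk; the register file is simulated with a plain array and the mailbox count is an assumption, not the driver's value.

#include <stdint.h>
#include <stdio.h>

#define MBX_COUNT 8             /* assumed mailbox count for the sketch */
#define BIT_0     0x1

/* Copy only the mailbox registers flagged in 'in_mb'. The real driver reads
 * device registers through a window pointer; here a plain array stands in. */
static void copy_mbx_registers(const uint16_t *regs, uint16_t *out,
                               uint32_t in_mb)
{
    uint32_t mboxes = in_mb;
    unsigned int cnt;

    out[0] = regs[0];           /* mailbox 0 (mb0) is always delivered */
    mboxes >>= 1;

    for (cnt = 1; cnt < MBX_COUNT; cnt++) {
        if (mboxes & BIT_0)
            out[cnt] = regs[cnt];   /* requested by the caller */
        else
            out[cnt] = 0;           /* not requested; the driver simply skips the read */
        mboxes >>= 1;
    }
}

int main(void)
{
    uint16_t regs[MBX_COUNT] = { 0x4000, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
    uint16_t out[MBX_COUNT] = { 0 };
    unsigned int i;

    copy_mbx_registers(regs, out, 0x0B);    /* caller asked for mb0, mb1 and mb3 */
    for (i = 0; i < MBX_COUNT; i++)
        printf("mailbox_out[%u] = 0x%04x\n", i, out[i]);
    return 0;
}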
@@ -290,15 +287,10 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) | |||
290 | mb[cnt] = RD_REG_WORD(wptr); | 287 | mb[cnt] = RD_REG_WORD(wptr); |
291 | 288 | ||
292 | ql_dbg(ql_dbg_async, vha, 0x5021, | 289 | ql_dbg(ql_dbg_async, vha, 0x5021, |
293 | "Inter-Driver Communication %s -- " | 290 | "Inter-Driver Commucation %s -- " |
294 | "%04x %04x %04x %04x %04x %04x %04x.\n", | 291 | "%04x %04x %04x %04x %04x %04x %04x.\n", |
295 | event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], | 292 | event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], |
296 | mb[4], mb[5], mb[6]); | 293 | mb[4], mb[5], mb[6]); |
297 | if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) { | ||
298 | vha->hw->flags.idc_compl_status = 1; | ||
299 | if (vha->hw->notify_dcbx_comp) | ||
300 | complete(&vha->hw->dcbx_comp); | ||
301 | } | ||
302 | 294 | ||
303 | /* Acknowledgement needed? [Notify && non-zero timeout]. */ | 295 | /* Acknowledgement needed? [Notify && non-zero timeout]. */ |
304 | timeout = (descr >> 8) & 0xf; | 296 | timeout = (descr >> 8) & 0xf; |
@@ -306,7 +298,7 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) | |||
306 | return; | 298 | return; |
307 | 299 | ||
308 | ql_dbg(ql_dbg_async, vha, 0x5022, | 300 | ql_dbg(ql_dbg_async, vha, 0x5022, |
309 | "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", | 301 | "Inter-Driver Commucation %s -- ACK timeout=%d.\n", |
310 | vha->host_no, event[aen & 0xff], timeout); | 302 | vha->host_no, event[aen & 0xff], timeout); |
311 | 303 | ||
312 | rval = qla2x00_post_idc_ack_work(vha, mb); | 304 | rval = qla2x00_post_idc_ack_work(vha, mb); |
@@ -315,184 +307,6 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) | |||
315 | "IDC failed to post ACK.\n"); | 307 | "IDC failed to post ACK.\n"); |
316 | } | 308 | } |
317 | 309 | ||
318 | #define LS_UNKNOWN 2 | ||
319 | const char * | ||
320 | qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) | ||
321 | { | ||
322 | static const char * const link_speeds[] = { | ||
323 | "1", "2", "?", "4", "8", "16", "10" | ||
324 | }; | ||
325 | |||
326 | if (IS_QLA2100(ha) || IS_QLA2200(ha)) | ||
327 | return link_speeds[0]; | ||
328 | else if (speed == 0x13) | ||
329 | return link_speeds[6]; | ||
330 | else if (speed < 6) | ||
331 | return link_speeds[speed]; | ||
332 | else | ||
333 | return link_speeds[LS_UNKNOWN]; | ||
334 | } | ||
335 | |||
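The removed qla2x00_get_link_speed_str() above maps the firmware's link-speed code to a printable string, special-casing 0x13 as 10 Gbps and reporting anything out of range as unknown. A standalone sketch of the same lookup follows; the table contents come from the hunk, the function signature is hypothetical.

#include <stdint.h>
#include <stdio.h>

#define LS_UNKNOWN 2    /* index of the "?" entry, as in the hunk above */

static const char *link_speed_str(uint16_t speed, int is_1g_only_isp)
{
    static const char *const link_speeds[] = {
        "1", "2", "?", "4", "8", "16", "10"
    };

    if (is_1g_only_isp)          /* ISP2100/2200 only ever run at 1 Gbps */
        return link_speeds[0];
    if (speed == 0x13)           /* firmware code reported for 10 Gbps links */
        return link_speeds[6];
    if (speed < 6)
        return link_speeds[speed];
    return link_speeds[LS_UNKNOWN];
}

int main(void)
{
    uint16_t codes[] = { 0, 1, 3, 4, 5, 0x13, 9 };
    unsigned int i;

    for (i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
        printf("code 0x%02x -> %s Gbps\n", codes[i], link_speed_str(codes[i], 0));
    return 0;
}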
336 | static void | ||
337 | qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) | ||
338 | { | ||
339 | struct qla_hw_data *ha = vha->hw; | ||
340 | |||
341 | /* | ||
342 | * 8200 AEN Interpretation: | ||
343 | * mb[0] = AEN code | ||
344 | * mb[1] = AEN Reason code | ||
345 | * mb[2] = LSW of Peg-Halt Status-1 Register | ||
346 | * mb[6] = MSW of Peg-Halt Status-1 Register | ||
347 | * mb[3] = LSW of Peg-Halt Status-2 register | ||
348 | * mb[7] = MSW of Peg-Halt Status-2 register | ||
349 | * mb[4] = IDC Device-State Register value | ||
350 | * mb[5] = IDC Driver-Presence Register value | ||
351 | */ | ||
352 | ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: " | ||
353 | "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n", | ||
354 | mb[0], mb[1], mb[2], mb[6]); | ||
355 | ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x " | ||
356 | "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x " | ||
357 | "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]); | ||
358 | |||
359 | if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE | | ||
360 | IDC_HEARTBEAT_FAILURE)) { | ||
361 | ha->flags.nic_core_hung = 1; | ||
362 | ql_log(ql_log_warn, vha, 0x5060, | ||
363 | "83XX: F/W Error Reported: Check if reset required.\n"); | ||
364 | |||
365 | if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) { | ||
366 | uint32_t protocol_engine_id, fw_err_code, err_level; | ||
367 | |||
368 | /* | ||
369 | * IDC_PEG_HALT_STATUS_CHANGE interpretation: | ||
370 | * - PEG-Halt Status-1 Register: | ||
371 | * (LSW = mb[2], MSW = mb[6]) | ||
372 | * Bits 0-7 = protocol-engine ID | ||
373 | * Bits 8-28 = f/w error code | ||
374 | * Bits 29-31 = Error-level | ||
375 | * Error-level 0x1 = Non-Fatal error | ||
376 | * Error-level 0x2 = Recoverable Fatal error | ||
377 | * Error-level 0x4 = UnRecoverable Fatal error | ||
378 | * - PEG-Halt Status-2 Register: | ||
379 | * (LSW = mb[3], MSW = mb[7]) | ||
380 | */ | ||
381 | protocol_engine_id = (mb[2] & 0xff); | ||
382 | fw_err_code = (((mb[2] & 0xff00) >> 8) | | ||
383 | ((mb[6] & 0x1fff) << 8)); | ||
384 | err_level = ((mb[6] & 0xe000) >> 13); | ||
385 | ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 " | ||
386 | "Register: protocol_engine_id=0x%x " | ||
387 | "fw_err_code=0x%x err_level=0x%x.\n", | ||
388 | protocol_engine_id, fw_err_code, err_level); | ||
389 | ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 " | ||
390 | "Register: 0x%x%x.\n", mb[7], mb[3]); | ||
391 | if (err_level == ERR_LEVEL_NON_FATAL) { | ||
392 | ql_log(ql_log_warn, vha, 0x5063, | ||
393 | "Not a fatal error, f/w has recovered " | ||
394 | "iteself.\n"); | ||
395 | } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) { | ||
396 | ql_log(ql_log_fatal, vha, 0x5064, | ||
397 | "Recoverable Fatal error: Chip reset " | ||
398 | "required.\n"); | ||
399 | qla83xx_schedule_work(vha, | ||
400 | QLA83XX_NIC_CORE_RESET); | ||
401 | } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { | ||
402 | ql_log(ql_log_fatal, vha, 0x5065, | ||
403 | "Unrecoverable Fatal error: Set FAILED " | ||
404 | "state, reboot required.\n"); | ||
405 | qla83xx_schedule_work(vha, | ||
406 | QLA83XX_NIC_CORE_UNRECOVERABLE); | ||
407 | } | ||
408 | } | ||
409 | |||
410 | if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { | ||
411 | uint16_t peg_fw_state, nw_interface_link_up; | ||
412 | uint16_t nw_interface_signal_detect, sfp_status; | ||
413 | uint16_t htbt_counter, htbt_monitor_enable; | ||
414 | uint16_t sfp_additonal_info, sfp_multirate; | ||
415 | uint16_t sfp_tx_fault, link_speed, dcbx_status; | ||
416 | |||
417 | /* | ||
418 | * IDC_NIC_FW_REPORTED_FAILURE interpretation: | ||
419 | * - PEG-to-FC Status Register: | ||
420 | * (LSW = mb[2], MSW = mb[6]) | ||
421 | * Bits 0-7 = Peg-Firmware state | ||
422 | * Bit 8 = N/W Interface Link-up | ||
423 | * Bit 9 = N/W Interface signal detected | ||
424 | * Bits 10-11 = SFP Status | ||
425 | * SFP Status 0x0 = SFP+ transceiver not expected | ||
426 | * SFP Status 0x1 = SFP+ transceiver not present | ||
427 | * SFP Status 0x2 = SFP+ transceiver invalid | ||
428 | * SFP Status 0x3 = SFP+ transceiver present and | ||
429 | * valid | ||
430 | * Bits 12-14 = Heartbeat Counter | ||
431 | * Bit 15 = Heartbeat Monitor Enable | ||
432 | * Bits 16-17 = SFP Additional Info | ||
433 | * SFP info 0x0 = Unrecognized transceiver for | ||
434 | * Ethernet | ||
435 | * SFP info 0x1 = SFP+ brand validation failed | ||
436 | * SFP info 0x2 = SFP+ speed validation failed | ||
437 | * SFP info 0x3 = SFP+ access error | ||
438 | * Bit 18 = SFP Multirate | ||
439 | * Bit 19 = SFP Tx Fault | ||
440 | * Bits 20-22 = Link Speed | ||
441 | * Bits 23-27 = Reserved | ||
442 | * Bits 28-30 = DCBX Status | ||
443 | * DCBX Status 0x0 = DCBX Disabled | ||
444 | * DCBX Status 0x1 = DCBX Enabled | ||
445 | * DCBX Status 0x2 = DCBX Exchange error | ||
446 | * Bit 31 = Reserved | ||
447 | */ | ||
448 | peg_fw_state = (mb[2] & 0x00ff); | ||
449 | nw_interface_link_up = ((mb[2] & 0x0100) >> 8); | ||
450 | nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); | ||
451 | sfp_status = ((mb[2] & 0x0c00) >> 10); | ||
452 | htbt_counter = ((mb[2] & 0x7000) >> 12); | ||
453 | htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); | ||
454 | sfp_additonal_info = (mb[6] & 0x0003); | ||
455 | sfp_multirate = ((mb[6] & 0x0004) >> 2); | ||
456 | sfp_tx_fault = ((mb[6] & 0x0008) >> 3); | ||
457 | link_speed = ((mb[6] & 0x0070) >> 4); | ||
458 | dcbx_status = ((mb[6] & 0x7000) >> 12); | ||
459 | |||
460 | ql_log(ql_log_warn, vha, 0x5066, | ||
461 | "Peg-to-Fc Status Register:\n" | ||
462 | "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " | ||
463 | "nw_interface_signal_detect=0x%x" | ||
464 | "\nsfp_statis=0x%x.\n ", peg_fw_state, | ||
465 | nw_interface_link_up, nw_interface_signal_detect, | ||
466 | sfp_status); | ||
467 | ql_log(ql_log_warn, vha, 0x5067, | ||
468 | "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " | ||
469 | "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ", | ||
470 | htbt_counter, htbt_monitor_enable, | ||
471 | sfp_additonal_info, sfp_multirate); | ||
472 | ql_log(ql_log_warn, vha, 0x5068, | ||
473 | "sfp_tx_fault=0x%x, link_state=0x%x, " | ||
474 | "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, | ||
475 | dcbx_status); | ||
476 | |||
477 | qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); | ||
478 | } | ||
479 | |||
480 | if (mb[1] & IDC_HEARTBEAT_FAILURE) { | ||
481 | ql_log(ql_log_warn, vha, 0x5069, | ||
482 | "Heartbeat Failure encountered, chip reset " | ||
483 | "required.\n"); | ||
484 | |||
485 | qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); | ||
486 | } | ||
487 | } | ||
488 | |||
489 | if (mb[1] & IDC_DEVICE_STATE_CHANGE) { | ||
490 | ql_log(ql_log_info, vha, 0x506a, | ||
491 | "IDC Device-State changed = 0x%x.\n", mb[4]); | ||
492 | qla83xx_schedule_work(vha, MBA_IDC_AEN); | ||
493 | } | ||
494 | } | ||
495 | |||
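The comment block inside the removed qla83xx_handle_8200_aen() documents how the Peg-Halt Status-1 register is split across mb[2] (LSW) and mb[6] (MSW): bits 0-7 hold the protocol-engine ID, bits 8-28 the firmware error code, and bits 29-31 the error level. A minimal user-space sketch of that decode, reusing the masks and shifts shown in the hunk (the register contents in main are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Error levels as documented in the removed comment block. */
#define ERR_LEVEL_NON_FATAL            0x1
#define ERR_LEVEL_RECOVERABLE_FATAL    0x2
#define ERR_LEVEL_UNRECOVERABLE_FATAL  0x4

struct peg_halt_status1 {
    unsigned int protocol_engine_id;   /* bits 0-7  */
    unsigned int fw_err_code;          /* bits 8-28 */
    unsigned int err_level;            /* bits 29-31 */
};

/* mb2 is the LSW, mb6 the MSW of the Peg-Halt Status-1 register. */
static struct peg_halt_status1 decode_peg_halt1(uint16_t mb2, uint16_t mb6)
{
    struct peg_halt_status1 s;

    s.protocol_engine_id = mb2 & 0xff;
    s.fw_err_code = ((mb2 & 0xff00) >> 8) | ((mb6 & 0x1fff) << 8);
    s.err_level = (mb6 & 0xe000) >> 13;
    return s;
}

int main(void)
{
    /* Arbitrary register contents chosen for illustration. */
    struct peg_halt_status1 s = decode_peg_halt1(0x2a07, 0x5123);

    printf("protocol_engine_id=0x%x fw_err_code=0x%x err_level=0x%x\n",
           s.protocol_engine_id, s.fw_err_code, s.err_level);
    if (s.err_level == ERR_LEVEL_RECOVERABLE_FATAL)
        printf("recoverable fatal error: chip reset required\n");
    return 0;
}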
496 | /** | 310 | /** |
497 | * qla2x00_async_event() - Process aynchronous events. | 311 | * qla2x00_async_event() - Process aynchronous events. |
498 | * @ha: SCSI driver HA context | 312 | * @ha: SCSI driver HA context |
@@ -501,6 +315,9 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) | |||
501 | void | 315 | void |
502 | qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) | 316 | qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) |
503 | { | 317 | { |
318 | #define LS_UNKNOWN 2 | ||
319 | static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; | ||
320 | char *link_speed; | ||
504 | uint16_t handle_cnt; | 321 | uint16_t handle_cnt; |
505 | uint16_t cnt, mbx; | 322 | uint16_t cnt, mbx; |
506 | uint32_t handles[5]; | 323 | uint32_t handles[5]; |
@@ -509,11 +326,12 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) | |||
509 | struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; | 326 | struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; |
510 | struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; | 327 | struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; |
511 | uint32_t rscn_entry, host_pid; | 328 | uint32_t rscn_entry, host_pid; |
329 | uint8_t rscn_queue_index; | ||
512 | unsigned long flags; | 330 | unsigned long flags; |
513 | 331 | ||
514 | /* Setup to process RIO completion. */ | 332 | /* Setup to process RIO completion. */ |
515 | handle_cnt = 0; | 333 | handle_cnt = 0; |
516 | if (IS_CNA_CAPABLE(ha)) | 334 | if (IS_QLA8XXX_TYPE(ha)) |
517 | goto skip_rio; | 335 | goto skip_rio; |
518 | switch (mb[0]) { | 336 | switch (mb[0]) { |
519 | case MBA_SCSI_COMPLETION: | 337 | case MBA_SCSI_COMPLETION: |
@@ -585,8 +403,7 @@ skip_rio: | |||
585 | break; | 403 | break; |
586 | 404 | ||
587 | case MBA_SYSTEM_ERR: /* System Error */ | 405 | case MBA_SYSTEM_ERR: /* System Error */ |
588 | mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ? | 406 | mbx = IS_QLA81XX(ha) ? RD_REG_WORD(®24->mailbox7) : 0; |
589 | RD_REG_WORD(®24->mailbox7) : 0; | ||
590 | ql_log(ql_log_warn, vha, 0x5003, | 407 | ql_log(ql_log_warn, vha, 0x5003, |
591 | "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " | 408 | "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " |
592 | "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); | 409 | "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); |
@@ -599,7 +416,6 @@ skip_rio: | |||
599 | "Unrecoverable Hardware Error: adapter " | 416 | "Unrecoverable Hardware Error: adapter " |
600 | "marked OFFLINE!\n"); | 417 | "marked OFFLINE!\n"); |
601 | vha->flags.online = 0; | 418 | vha->flags.online = 0; |
602 | vha->device_flags |= DFLG_DEV_FAILED; | ||
603 | } else { | 419 | } else { |
604 | /* Check to see if MPI timeout occurred */ | 420 | /* Check to see if MPI timeout occurred */ |
605 | if ((mbx & MBX_3) && (ha->flags.port0)) | 421 | if ((mbx & MBX_3) && (ha->flags.port0)) |
@@ -613,7 +429,6 @@ skip_rio: | |||
613 | "Unrecoverable Hardware Error: adapter marked " | 429 | "Unrecoverable Hardware Error: adapter marked " |
614 | "OFFLINE!\n"); | 430 | "OFFLINE!\n"); |
615 | vha->flags.online = 0; | 431 | vha->flags.online = 0; |
616 | vha->device_flags |= DFLG_DEV_FAILED; | ||
617 | } else | 432 | } else |
618 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | 433 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
619 | break; | 434 | break; |
@@ -635,10 +450,10 @@ skip_rio: | |||
635 | case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ | 450 | case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ |
636 | ql_dbg(ql_dbg_async, vha, 0x5008, | 451 | ql_dbg(ql_dbg_async, vha, 0x5008, |
637 | "Asynchronous WAKEUP_THRES.\n"); | 452 | "Asynchronous WAKEUP_THRES.\n"); |
638 | |||
639 | break; | 453 | break; |
454 | |||
640 | case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ | 455 | case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ |
641 | ql_dbg(ql_dbg_async, vha, 0x5009, | 456 | ql_log(ql_log_info, vha, 0x5009, |
642 | "LIP occurred (%x).\n", mb[1]); | 457 | "LIP occurred (%x).\n", mb[1]); |
643 | 458 | ||
644 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { | 459 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { |
@@ -660,24 +475,29 @@ skip_rio: | |||
660 | break; | 475 | break; |
661 | 476 | ||
662 | case MBA_LOOP_UP: /* Loop Up Event */ | 477 | case MBA_LOOP_UP: /* Loop Up Event */ |
663 | if (IS_QLA2100(ha) || IS_QLA2200(ha)) | 478 | if (IS_QLA2100(ha) || IS_QLA2200(ha)) { |
479 | link_speed = link_speeds[0]; | ||
664 | ha->link_data_rate = PORT_SPEED_1GB; | 480 | ha->link_data_rate = PORT_SPEED_1GB; |
665 | else | 481 | } else { |
482 | link_speed = link_speeds[LS_UNKNOWN]; | ||
483 | if (mb[1] < 5) | ||
484 | link_speed = link_speeds[mb[1]]; | ||
485 | else if (mb[1] == 0x13) | ||
486 | link_speed = link_speeds[5]; | ||
666 | ha->link_data_rate = mb[1]; | 487 | ha->link_data_rate = mb[1]; |
488 | } | ||
667 | 489 | ||
668 | ql_dbg(ql_dbg_async, vha, 0x500a, | 490 | ql_log(ql_log_info, vha, 0x500a, |
669 | "LOOP UP detected (%s Gbps).\n", | 491 | "LOOP UP detected (%s Gbps).\n", link_speed); |
670 | qla2x00_get_link_speed_str(ha, ha->link_data_rate)); | ||
671 | 492 | ||
672 | vha->flags.management_server_logged_in = 0; | 493 | vha->flags.management_server_logged_in = 0; |
673 | qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); | 494 | qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); |
674 | break; | 495 | break; |
675 | 496 | ||
676 | case MBA_LOOP_DOWN: /* Loop Down Event */ | 497 | case MBA_LOOP_DOWN: /* Loop Down Event */ |
677 | mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) | 498 | mbx = IS_QLA81XX(ha) ? RD_REG_WORD(®24->mailbox4) : 0; |
678 | ? RD_REG_WORD(®24->mailbox4) : 0; | ||
679 | mbx = IS_QLA82XX(ha) ? RD_REG_WORD(®82->mailbox_out[4]) : mbx; | 499 | mbx = IS_QLA82XX(ha) ? RD_REG_WORD(®82->mailbox_out[4]) : mbx; |
680 | ql_dbg(ql_dbg_async, vha, 0x500b, | 500 | ql_log(ql_log_info, vha, 0x500b, |
681 | "LOOP DOWN detected (%x %x %x %x).\n", | 501 | "LOOP DOWN detected (%x %x %x %x).\n", |
682 | mb[1], mb[2], mb[3], mbx); | 502 | mb[1], mb[2], mb[3], mbx); |
683 | 503 | ||
@@ -699,7 +519,7 @@ skip_rio: | |||
699 | break; | 519 | break; |
700 | 520 | ||
701 | case MBA_LIP_RESET: /* LIP reset occurred */ | 521 | case MBA_LIP_RESET: /* LIP reset occurred */ |
702 | ql_dbg(ql_dbg_async, vha, 0x500c, | 522 | ql_log(ql_log_info, vha, 0x500c, |
703 | "LIP reset occurred (%x).\n", mb[1]); | 523 | "LIP reset occurred (%x).\n", mb[1]); |
704 | 524 | ||
705 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { | 525 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { |
@@ -725,7 +545,7 @@ skip_rio: | |||
725 | if (IS_QLA2100(ha)) | 545 | if (IS_QLA2100(ha)) |
726 | break; | 546 | break; |
727 | 547 | ||
728 | if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) { | 548 | if (IS_QLA8XXX_TYPE(ha)) { |
729 | ql_dbg(ql_dbg_async, vha, 0x500d, | 549 | ql_dbg(ql_dbg_async, vha, 0x500d, |
730 | "DCBX Completed -- %04x %04x %04x.\n", | 550 | "DCBX Completed -- %04x %04x %04x.\n", |
731 | mb[1], mb[2], mb[3]); | 551 | mb[1], mb[2], mb[3]); |
@@ -767,7 +587,7 @@ skip_rio: | |||
767 | if (IS_QLA2100(ha)) | 587 | if (IS_QLA2100(ha)) |
768 | break; | 588 | break; |
769 | 589 | ||
770 | ql_dbg(ql_dbg_async, vha, 0x500f, | 590 | ql_log(ql_log_info, vha, 0x500f, |
771 | "Configuration change detected: value=%x.\n", mb[1]); | 591 | "Configuration change detected: value=%x.\n", mb[1]); |
772 | 592 | ||
773 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { | 593 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { |
@@ -813,8 +633,6 @@ skip_rio: | |||
813 | ql_dbg(ql_dbg_async, vha, 0x5010, | 633 | ql_dbg(ql_dbg_async, vha, 0x5010, |
814 | "Port unavailable %04x %04x %04x.\n", | 634 | "Port unavailable %04x %04x %04x.\n", |
815 | mb[1], mb[2], mb[3]); | 635 | mb[1], mb[2], mb[3]); |
816 | ql_log(ql_log_warn, vha, 0x505e, | ||
817 | "Link is offline.\n"); | ||
818 | 636 | ||
819 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { | 637 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { |
820 | atomic_set(&vha->loop_state, LOOP_DOWN); | 638 | atomic_set(&vha->loop_state, LOOP_DOWN); |
@@ -842,21 +660,17 @@ skip_rio: | |||
842 | * it. Otherwise ignore it and Wait for RSCN to come in. | 660 | * it. Otherwise ignore it and Wait for RSCN to come in. |
843 | */ | 661 | */ |
844 | atomic_set(&vha->loop_down_timer, 0); | 662 | atomic_set(&vha->loop_down_timer, 0); |
845 | if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) { | 663 | if (atomic_read(&vha->loop_state) != LOOP_DOWN && |
664 | atomic_read(&vha->loop_state) != LOOP_DEAD) { | ||
846 | ql_dbg(ql_dbg_async, vha, 0x5011, | 665 | ql_dbg(ql_dbg_async, vha, 0x5011, |
847 | "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", | 666 | "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", |
848 | mb[1], mb[2], mb[3]); | 667 | mb[1], mb[2], mb[3]); |
849 | |||
850 | qlt_async_event(mb[0], vha, mb); | ||
851 | break; | 668 | break; |
852 | } | 669 | } |
853 | 670 | ||
854 | ql_dbg(ql_dbg_async, vha, 0x5012, | 671 | ql_dbg(ql_dbg_async, vha, 0x5012, |
855 | "Port database changed %04x %04x %04x.\n", | 672 | "Port database changed %04x %04x %04x.\n", |
856 | mb[1], mb[2], mb[3]); | 673 | mb[1], mb[2], mb[3]); |
857 | ql_log(ql_log_warn, vha, 0x505f, | ||
858 | "Link is operational (%s Gbps).\n", | ||
859 | qla2x00_get_link_speed_str(ha, ha->link_data_rate)); | ||
860 | 674 | ||
861 | /* | 675 | /* |
862 | * Mark all devices as missing so we will login again. | 676 | * Mark all devices as missing so we will login again. |
@@ -865,13 +679,10 @@ skip_rio: | |||
865 | 679 | ||
866 | qla2x00_mark_all_devices_lost(vha, 1); | 680 | qla2x00_mark_all_devices_lost(vha, 1); |
867 | 681 | ||
868 | if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha)) | 682 | vha->flags.rscn_queue_overflow = 1; |
869 | set_bit(SCR_PENDING, &vha->dpc_flags); | ||
870 | 683 | ||
871 | set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); | 684 | set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); |
872 | set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); | 685 | set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); |
873 | |||
874 | qlt_async_event(mb[0], vha, mb); | ||
875 | break; | 686 | break; |
876 | 687 | ||
877 | case MBA_RSCN_UPDATE: /* State Change Registration */ | 688 | case MBA_RSCN_UPDATE: /* State Change Registration */ |
@@ -898,6 +709,15 @@ skip_rio: | |||
898 | 709 | ||
899 | /* Ignore reserved bits from RSCN-payload. */ | 710 | /* Ignore reserved bits from RSCN-payload. */ |
900 | rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; | 711 | rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; |
712 | rscn_queue_index = vha->rscn_in_ptr + 1; | ||
713 | if (rscn_queue_index == MAX_RSCN_COUNT) | ||
714 | rscn_queue_index = 0; | ||
715 | if (rscn_queue_index != vha->rscn_out_ptr) { | ||
716 | vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry; | ||
717 | vha->rscn_in_ptr = rscn_queue_index; | ||
718 | } else { | ||
719 | vha->flags.rscn_queue_overflow = 1; | ||
720 | } | ||
901 | 721 | ||
902 | atomic_set(&vha->loop_down_timer, 0); | 722 | atomic_set(&vha->loop_down_timer, 0); |
903 | vha->flags.management_server_logged_in = 0; | 723 | vha->flags.management_server_logged_in = 0; |
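The right-hand column of the RSCN hunk above pushes each RSCN entry into a fixed-size ring (rscn_queue) indexed by rscn_in_ptr and rscn_out_ptr, raising an overflow flag when the ring is full instead of overwriting unread entries. A standalone sketch of that insert path, with the ring size and type names invented for the example:

#include <stdint.h>
#include <stdio.h>

#define MAX_RSCN_COUNT 8    /* illustrative ring size */

struct rscn_ring {
    uint32_t entries[MAX_RSCN_COUNT];
    unsigned int in_ptr;    /* next free slot */
    unsigned int out_ptr;   /* next slot to consume */
    int overflow;
};

/* Mirror of the insert logic: never advance in_ptr onto out_ptr, so a full
 * ring raises the overflow flag rather than dropping unread entries. */
static void rscn_queue_insert(struct rscn_ring *r, uint32_t rscn_entry)
{
    unsigned int next = r->in_ptr + 1;

    if (next == MAX_RSCN_COUNT)
        next = 0;
    if (next != r->out_ptr) {
        r->entries[r->in_ptr] = rscn_entry;
        r->in_ptr = next;
    } else {
        r->overflow = 1;    /* consumer falls back to a full fabric resync */
    }
}

int main(void)
{
    struct rscn_ring r = { .in_ptr = 0, .out_ptr = 0, .overflow = 0 };
    uint32_t i;

    for (i = 0; i < 10; i++)
        rscn_queue_insert(&r, 0x010200 + i);    /* fake 24-bit port IDs */
    printf("in=%u out=%u overflow=%d\n", r.in_ptr, r.out_ptr, r.overflow);
    return 0;
}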
@@ -982,36 +802,13 @@ skip_rio: | |||
982 | "FCF Configuration Error -- %04x %04x %04x.\n", | 802 | "FCF Configuration Error -- %04x %04x %04x.\n", |
983 | mb[1], mb[2], mb[3]); | 803 | mb[1], mb[2], mb[3]); |
984 | break; | 804 | break; |
985 | case MBA_IDC_NOTIFY: | ||
986 | /* See if we need to quiesce any I/O */ | ||
987 | if (IS_QLA8031(vha->hw)) | ||
988 | if ((mb[2] & 0x7fff) == MBC_PORT_RESET || | ||
989 | (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) { | ||
990 | set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); | ||
991 | qla2xxx_wake_dpc(vha); | ||
992 | } | ||
993 | case MBA_IDC_COMPLETE: | 805 | case MBA_IDC_COMPLETE: |
806 | case MBA_IDC_NOTIFY: | ||
994 | case MBA_IDC_TIME_EXT: | 807 | case MBA_IDC_TIME_EXT: |
995 | if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) | 808 | qla81xx_idc_event(vha, mb[0], mb[1]); |
996 | qla81xx_idc_event(vha, mb[0], mb[1]); | ||
997 | break; | 809 | break; |
998 | |||
999 | case MBA_IDC_AEN: | ||
1000 | mb[4] = RD_REG_WORD(®24->mailbox4); | ||
1001 | mb[5] = RD_REG_WORD(®24->mailbox5); | ||
1002 | mb[6] = RD_REG_WORD(®24->mailbox6); | ||
1003 | mb[7] = RD_REG_WORD(®24->mailbox7); | ||
1004 | qla83xx_handle_8200_aen(vha, mb); | ||
1005 | break; | ||
1006 | |||
1007 | default: | ||
1008 | ql_dbg(ql_dbg_async, vha, 0x5057, | ||
1009 | "Unknown AEN:%04x %04x %04x %04x\n", | ||
1010 | mb[0], mb[1], mb[2], mb[3]); | ||
1011 | } | 810 | } |
1012 | 811 | ||
1013 | qlt_async_event(mb[0], vha, mb); | ||
1014 | |||
1015 | if (!vha->vp_idx && ha->num_vhosts) | 812 | if (!vha->vp_idx && ha->num_vhosts) |
1016 | qla2x00_alert_all_vps(rsp, mb); | 813 | qla2x00_alert_all_vps(rsp, mb); |
1017 | } | 814 | } |
@@ -1046,7 +843,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, | |||
1046 | req->outstanding_cmds[index] = NULL; | 843 | req->outstanding_cmds[index] = NULL; |
1047 | 844 | ||
1048 | /* Save ISP completion status */ | 845 | /* Save ISP completion status */ |
1049 | sp->done(ha, sp, DID_OK << 16); | 846 | sp->cmd->result = DID_OK << 16; |
847 | qla2x00_sp_compl(ha, sp); | ||
1050 | } else { | 848 | } else { |
1051 | ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); | 849 | ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); |
1052 | 850 | ||
@@ -1103,6 +901,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1103 | fc_port_t *fcport; | 901 | fc_port_t *fcport; |
1104 | srb_t *sp; | 902 | srb_t *sp; |
1105 | struct srb_iocb *lio; | 903 | struct srb_iocb *lio; |
904 | struct srb_ctx *ctx; | ||
1106 | uint16_t *data; | 905 | uint16_t *data; |
1107 | uint16_t status; | 906 | uint16_t status; |
1108 | 907 | ||
@@ -1110,8 +909,9 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1110 | if (!sp) | 909 | if (!sp) |
1111 | return; | 910 | return; |
1112 | 911 | ||
1113 | lio = &sp->u.iocb_cmd; | 912 | ctx = sp->ctx; |
1114 | type = sp->name; | 913 | lio = ctx->u.iocb_cmd; |
914 | type = ctx->name; | ||
1115 | fcport = sp->fcport; | 915 | fcport = sp->fcport; |
1116 | data = lio->u.logio.data; | 916 | data = lio->u.logio.data; |
1117 | 917 | ||
@@ -1120,33 +920,32 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1120 | QLA_LOGIO_LOGIN_RETRIED : 0; | 920 | QLA_LOGIO_LOGIN_RETRIED : 0; |
1121 | if (mbx->entry_status) { | 921 | if (mbx->entry_status) { |
1122 | ql_dbg(ql_dbg_async, vha, 0x5043, | 922 | ql_dbg(ql_dbg_async, vha, 0x5043, |
1123 | "Async-%s error entry - hdl=%x portid=%02x%02x%02x " | 923 | "Async-%s error entry - portid=%02x%02x%02x " |
1124 | "entry-status=%x status=%x state-flag=%x " | 924 | "entry-status=%x status=%x state-flag=%x " |
1125 | "status-flags=%x.\n", type, sp->handle, | 925 | "status-flags=%x.\n", |
1126 | fcport->d_id.b.domain, fcport->d_id.b.area, | 926 | type, fcport->d_id.b.domain, fcport->d_id.b.area, |
1127 | fcport->d_id.b.al_pa, mbx->entry_status, | 927 | fcport->d_id.b.al_pa, mbx->entry_status, |
1128 | le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), | 928 | le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), |
1129 | le16_to_cpu(mbx->status_flags)); | 929 | le16_to_cpu(mbx->status_flags)); |
1130 | 930 | ||
1131 | ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, | 931 | ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057, |
1132 | (uint8_t *)mbx, sizeof(*mbx)); | 932 | (uint8_t *)mbx, sizeof(*mbx)); |
1133 | 933 | ||
1134 | goto logio_done; | 934 | goto logio_done; |
1135 | } | 935 | } |
1136 | 936 | ||
1137 | status = le16_to_cpu(mbx->status); | 937 | status = le16_to_cpu(mbx->status); |
1138 | if (status == 0x30 && sp->type == SRB_LOGIN_CMD && | 938 | if (status == 0x30 && ctx->type == SRB_LOGIN_CMD && |
1139 | le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) | 939 | le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) |
1140 | status = 0; | 940 | status = 0; |
1141 | if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { | 941 | if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { |
1142 | ql_dbg(ql_dbg_async, vha, 0x5045, | 942 | ql_dbg(ql_dbg_async, vha, 0x5045, |
1143 | "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", | 943 | "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n", |
1144 | type, sp->handle, fcport->d_id.b.domain, | 944 | type, fcport->d_id.b.domain, fcport->d_id.b.area, |
1145 | fcport->d_id.b.area, fcport->d_id.b.al_pa, | 945 | fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1)); |
1146 | le16_to_cpu(mbx->mb1)); | ||
1147 | 946 | ||
1148 | data[0] = MBS_COMMAND_COMPLETE; | 947 | data[0] = MBS_COMMAND_COMPLETE; |
1149 | if (sp->type == SRB_LOGIN_CMD) { | 948 | if (ctx->type == SRB_LOGIN_CMD) { |
1150 | fcport->port_type = FCT_TARGET; | 949 | fcport->port_type = FCT_TARGET; |
1151 | if (le16_to_cpu(mbx->mb1) & BIT_0) | 950 | if (le16_to_cpu(mbx->mb1) & BIT_0) |
1152 | fcport->port_type = FCT_INITIATOR; | 951 | fcport->port_type = FCT_INITIATOR; |
@@ -1169,15 +968,16 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1169 | } | 968 | } |
1170 | 969 | ||
1171 | ql_log(ql_log_warn, vha, 0x5046, | 970 | ql_log(ql_log_warn, vha, 0x5046, |
1172 | "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x " | 971 | "Async-%s failed - portid=%02x%02x%02x status=%x " |
1173 | "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, | 972 | "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", |
1174 | fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, | 973 | type, fcport->d_id.b.domain, |
1175 | status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), | 974 | fcport->d_id.b.area, fcport->d_id.b.al_pa, status, |
975 | le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), | ||
1176 | le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), | 976 | le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), |
1177 | le16_to_cpu(mbx->mb7)); | 977 | le16_to_cpu(mbx->mb7)); |
1178 | 978 | ||
1179 | logio_done: | 979 | logio_done: |
1180 | sp->done(vha, sp, 0); | 980 | lio->done(sp); |
1181 | } | 981 | } |
1182 | 982 | ||
1183 | static void | 983 | static void |
@@ -1186,18 +986,29 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1186 | { | 986 | { |
1187 | const char func[] = "CT_IOCB"; | 987 | const char func[] = "CT_IOCB"; |
1188 | const char *type; | 988 | const char *type; |
989 | struct qla_hw_data *ha = vha->hw; | ||
1189 | srb_t *sp; | 990 | srb_t *sp; |
991 | struct srb_ctx *sp_bsg; | ||
1190 | struct fc_bsg_job *bsg_job; | 992 | struct fc_bsg_job *bsg_job; |
1191 | uint16_t comp_status; | 993 | uint16_t comp_status; |
1192 | int res; | ||
1193 | 994 | ||
1194 | sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); | 995 | sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); |
1195 | if (!sp) | 996 | if (!sp) |
1196 | return; | 997 | return; |
1197 | 998 | ||
1198 | bsg_job = sp->u.bsg_job; | 999 | sp_bsg = sp->ctx; |
1000 | bsg_job = sp_bsg->u.bsg_job; | ||
1199 | 1001 | ||
1200 | type = "ct pass-through"; | 1002 | type = NULL; |
1003 | switch (sp_bsg->type) { | ||
1004 | case SRB_CT_CMD: | ||
1005 | type = "ct pass-through"; | ||
1006 | break; | ||
1007 | default: | ||
1008 | ql_log(ql_log_warn, vha, 0x5047, | ||
1009 | "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type); | ||
1010 | return; | ||
1011 | } | ||
1201 | 1012 | ||
1202 | comp_status = le16_to_cpu(pkt->comp_status); | 1013 | comp_status = le16_to_cpu(pkt->comp_status); |
1203 | 1014 | ||
@@ -1209,7 +1020,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1209 | 1020 | ||
1210 | if (comp_status != CS_COMPLETE) { | 1021 | if (comp_status != CS_COMPLETE) { |
1211 | if (comp_status == CS_DATA_UNDERRUN) { | 1022 | if (comp_status == CS_DATA_UNDERRUN) { |
1212 | res = DID_OK << 16; | 1023 | bsg_job->reply->result = DID_OK << 16; |
1213 | bsg_job->reply->reply_payload_rcv_len = | 1024 | bsg_job->reply->reply_payload_rcv_len = |
1214 | le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); | 1025 | le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); |
1215 | 1026 | ||
@@ -1222,19 +1033,30 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1222 | ql_log(ql_log_warn, vha, 0x5049, | 1033 | ql_log(ql_log_warn, vha, 0x5049, |
1223 | "CT pass-through-%s error " | 1034 | "CT pass-through-%s error " |
1224 | "comp_status-status=0x%x.\n", type, comp_status); | 1035 | "comp_status-status=0x%x.\n", type, comp_status); |
1225 | res = DID_ERROR << 16; | 1036 | bsg_job->reply->result = DID_ERROR << 16; |
1226 | bsg_job->reply->reply_payload_rcv_len = 0; | 1037 | bsg_job->reply->reply_payload_rcv_len = 0; |
1227 | } | 1038 | } |
1228 | ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, | 1039 | ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058, |
1229 | (uint8_t *)pkt, sizeof(*pkt)); | 1040 | (uint8_t *)pkt, sizeof(*pkt)); |
1230 | } else { | 1041 | } else { |
1231 | res = DID_OK << 16; | 1042 | bsg_job->reply->result = DID_OK << 16; |
1232 | bsg_job->reply->reply_payload_rcv_len = | 1043 | bsg_job->reply->reply_payload_rcv_len = |
1233 | bsg_job->reply_payload.payload_len; | 1044 | bsg_job->reply_payload.payload_len; |
1234 | bsg_job->reply_len = 0; | 1045 | bsg_job->reply_len = 0; |
1235 | } | 1046 | } |
1236 | 1047 | ||
1237 | sp->done(vha, sp, res); | 1048 | dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, |
1049 | bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
1050 | |||
1051 | dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, | ||
1052 | bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
1053 | |||
1054 | if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD) | ||
1055 | kfree(sp->fcport); | ||
1056 | |||
1057 | kfree(sp->ctx); | ||
1058 | mempool_free(sp, ha->srb_mempool); | ||
1059 | bsg_job->job_done(bsg_job); | ||
1238 | } | 1060 | } |
1239 | 1061 | ||
1240 | static void | 1062 | static void |
@@ -1243,20 +1065,22 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1243 | { | 1065 | { |
1244 | const char func[] = "ELS_CT_IOCB"; | 1066 | const char func[] = "ELS_CT_IOCB"; |
1245 | const char *type; | 1067 | const char *type; |
1068 | struct qla_hw_data *ha = vha->hw; | ||
1246 | srb_t *sp; | 1069 | srb_t *sp; |
1070 | struct srb_ctx *sp_bsg; | ||
1247 | struct fc_bsg_job *bsg_job; | 1071 | struct fc_bsg_job *bsg_job; |
1248 | uint16_t comp_status; | 1072 | uint16_t comp_status; |
1249 | uint32_t fw_status[3]; | 1073 | uint32_t fw_status[3]; |
1250 | uint8_t* fw_sts_ptr; | 1074 | uint8_t* fw_sts_ptr; |
1251 | int res; | ||
1252 | 1075 | ||
1253 | sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); | 1076 | sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); |
1254 | if (!sp) | 1077 | if (!sp) |
1255 | return; | 1078 | return; |
1256 | bsg_job = sp->u.bsg_job; | 1079 | sp_bsg = sp->ctx; |
1080 | bsg_job = sp_bsg->u.bsg_job; | ||
1257 | 1081 | ||
1258 | type = NULL; | 1082 | type = NULL; |
1259 | switch (sp->type) { | 1083 | switch (sp_bsg->type) { |
1260 | case SRB_ELS_CMD_RPT: | 1084 | case SRB_ELS_CMD_RPT: |
1261 | case SRB_ELS_CMD_HST: | 1085 | case SRB_ELS_CMD_HST: |
1262 | type = "els"; | 1086 | type = "els"; |
@@ -1265,8 +1089,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1265 | type = "ct pass-through"; | 1089 | type = "ct pass-through"; |
1266 | break; | 1090 | break; |
1267 | default: | 1091 | default: |
1268 | ql_dbg(ql_dbg_user, vha, 0x503e, | 1092 | ql_log(ql_log_warn, vha, 0x503e, |
1269 | "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); | 1093 | "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type); |
1270 | return; | 1094 | return; |
1271 | } | 1095 | } |
1272 | 1096 | ||
@@ -1282,43 +1106,54 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1282 | 1106 | ||
1283 | if (comp_status != CS_COMPLETE) { | 1107 | if (comp_status != CS_COMPLETE) { |
1284 | if (comp_status == CS_DATA_UNDERRUN) { | 1108 | if (comp_status == CS_DATA_UNDERRUN) { |
1285 | res = DID_OK << 16; | 1109 | bsg_job->reply->result = DID_OK << 16; |
1286 | bsg_job->reply->reply_payload_rcv_len = | 1110 | bsg_job->reply->reply_payload_rcv_len = |
1287 | le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count); | 1111 | le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count); |
1288 | 1112 | ||
1289 | ql_dbg(ql_dbg_user, vha, 0x503f, | 1113 | ql_log(ql_log_info, vha, 0x503f, |
1290 | "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " | 1114 | "ELS-CT pass-through-%s error comp_status-status=0x%x " |
1291 | "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", | 1115 | "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", |
1292 | type, sp->handle, comp_status, fw_status[1], fw_status[2], | 1116 | type, comp_status, fw_status[1], fw_status[2], |
1293 | le16_to_cpu(((struct els_sts_entry_24xx *) | 1117 | le16_to_cpu(((struct els_sts_entry_24xx *) |
1294 | pkt)->total_byte_count)); | 1118 | pkt)->total_byte_count)); |
1295 | fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); | 1119 | fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); |
1296 | memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); | 1120 | memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); |
1297 | } | 1121 | } |
1298 | else { | 1122 | else { |
1299 | ql_dbg(ql_dbg_user, vha, 0x5040, | 1123 | ql_log(ql_log_info, vha, 0x5040, |
1300 | "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " | 1124 | "ELS-CT pass-through-%s error comp_status-status=0x%x " |
1301 | "error subcode 1=0x%x error subcode 2=0x%x.\n", | 1125 | "error subcode 1=0x%x error subcode 2=0x%x.\n", |
1302 | type, sp->handle, comp_status, | 1126 | type, comp_status, |
1303 | le16_to_cpu(((struct els_sts_entry_24xx *) | 1127 | le16_to_cpu(((struct els_sts_entry_24xx *) |
1304 | pkt)->error_subcode_1), | 1128 | pkt)->error_subcode_1), |
1305 | le16_to_cpu(((struct els_sts_entry_24xx *) | 1129 | le16_to_cpu(((struct els_sts_entry_24xx *) |
1306 | pkt)->error_subcode_2)); | 1130 | pkt)->error_subcode_2)); |
1307 | res = DID_ERROR << 16; | 1131 | bsg_job->reply->result = DID_ERROR << 16; |
1308 | bsg_job->reply->reply_payload_rcv_len = 0; | 1132 | bsg_job->reply->reply_payload_rcv_len = 0; |
1309 | fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); | 1133 | fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); |
1310 | memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); | 1134 | memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); |
1311 | } | 1135 | } |
1312 | ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, | 1136 | ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056, |
1313 | (uint8_t *)pkt, sizeof(*pkt)); | 1137 | (uint8_t *)pkt, sizeof(*pkt)); |
1314 | } | 1138 | } |
1315 | else { | 1139 | else { |
1316 | res = DID_OK << 16; | 1140 | bsg_job->reply->result = DID_OK << 16; |
1317 | bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; | 1141 | bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; |
1318 | bsg_job->reply_len = 0; | 1142 | bsg_job->reply_len = 0; |
1319 | } | 1143 | } |
1320 | 1144 | ||
1321 | sp->done(vha, sp, res); | 1145 | dma_unmap_sg(&ha->pdev->dev, |
1146 | bsg_job->request_payload.sg_list, | ||
1147 | bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
1148 | dma_unmap_sg(&ha->pdev->dev, | ||
1149 | bsg_job->reply_payload.sg_list, | ||
1150 | bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
1151 | if ((sp_bsg->type == SRB_ELS_CMD_HST) || | ||
1152 | (sp_bsg->type == SRB_CT_CMD)) | ||
1153 | kfree(sp->fcport); | ||
1154 | kfree(sp->ctx); | ||
1155 | mempool_free(sp, ha->srb_mempool); | ||
1156 | bsg_job->job_done(bsg_job); | ||
1322 | } | 1157 | } |
1323 | 1158 | ||
1324 | static void | 1159 | static void |
@@ -1330,6 +1165,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1330 | fc_port_t *fcport; | 1165 | fc_port_t *fcport; |
1331 | srb_t *sp; | 1166 | srb_t *sp; |
1332 | struct srb_iocb *lio; | 1167 | struct srb_iocb *lio; |
1168 | struct srb_ctx *ctx; | ||
1333 | uint16_t *data; | 1169 | uint16_t *data; |
1334 | uint32_t iop[2]; | 1170 | uint32_t iop[2]; |
1335 | 1171 | ||
@@ -1337,8 +1173,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1337 | if (!sp) | 1173 | if (!sp) |
1338 | return; | 1174 | return; |
1339 | 1175 | ||
1340 | lio = &sp->u.iocb_cmd; | 1176 | ctx = sp->ctx; |
1341 | type = sp->name; | 1177 | lio = ctx->u.iocb_cmd; |
1178 | type = ctx->name; | ||
1342 | fcport = sp->fcport; | 1179 | fcport = sp->fcport; |
1343 | data = lio->u.logio.data; | 1180 | data = lio->u.logio.data; |
1344 | 1181 | ||
@@ -1346,27 +1183,27 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1346 | data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? | 1183 | data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? |
1347 | QLA_LOGIO_LOGIN_RETRIED : 0; | 1184 | QLA_LOGIO_LOGIN_RETRIED : 0; |
1348 | if (logio->entry_status) { | 1185 | if (logio->entry_status) { |
1349 | ql_log(ql_log_warn, fcport->vha, 0x5034, | 1186 | ql_log(ql_log_warn, vha, 0x5034, |
1350 | "Async-%s error entry - hdl=%x" | 1187 | "Async-%s error entry - " |
1351 | "portid=%02x%02x%02x entry-status=%x.\n", | 1188 | "portid=%02x%02x%02x entry-status=%x.\n", |
1352 | type, sp->handle, fcport->d_id.b.domain, | 1189 | type, fcport->d_id.b.domain, fcport->d_id.b.area, |
1353 | fcport->d_id.b.area, fcport->d_id.b.al_pa, | 1190 | fcport->d_id.b.al_pa, logio->entry_status); |
1354 | logio->entry_status); | 1191 | ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059, |
1355 | ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, | ||
1356 | (uint8_t *)logio, sizeof(*logio)); | 1192 | (uint8_t *)logio, sizeof(*logio)); |
1357 | 1193 | ||
1358 | goto logio_done; | 1194 | goto logio_done; |
1359 | } | 1195 | } |
1360 | 1196 | ||
1361 | if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { | 1197 | if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { |
1362 | ql_dbg(ql_dbg_async, fcport->vha, 0x5036, | 1198 | ql_dbg(ql_dbg_async, vha, 0x5036, |
1363 | "Async-%s complete - hdl=%x portid=%02x%02x%02x " | 1199 | "Async-%s complete - portid=%02x%02x%02x " |
1364 | "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain, | 1200 | "iop0=%x.\n", |
1365 | fcport->d_id.b.area, fcport->d_id.b.al_pa, | 1201 | type, fcport->d_id.b.domain, fcport->d_id.b.area, |
1202 | fcport->d_id.b.al_pa, | ||
1366 | le32_to_cpu(logio->io_parameter[0])); | 1203 | le32_to_cpu(logio->io_parameter[0])); |
1367 | 1204 | ||
1368 | data[0] = MBS_COMMAND_COMPLETE; | 1205 | data[0] = MBS_COMMAND_COMPLETE; |
1369 | if (sp->type != SRB_LOGIN_CMD) | 1206 | if (ctx->type != SRB_LOGIN_CMD) |
1370 | goto logio_done; | 1207 | goto logio_done; |
1371 | 1208 | ||
1372 | iop[0] = le32_to_cpu(logio->io_parameter[0]); | 1209 | iop[0] = le32_to_cpu(logio->io_parameter[0]); |
@@ -1377,9 +1214,6 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1377 | } else if (iop[0] & BIT_5) | 1214 | } else if (iop[0] & BIT_5) |
1378 | fcport->port_type = FCT_INITIATOR; | 1215 | fcport->port_type = FCT_INITIATOR; |
1379 | 1216 | ||
1380 | if (iop[0] & BIT_7) | ||
1381 | fcport->flags |= FCF_CONF_COMP_SUPPORTED; | ||
1382 | |||
1383 | if (logio->io_parameter[7] || logio->io_parameter[8]) | 1217 | if (logio->io_parameter[7] || logio->io_parameter[8]) |
1384 | fcport->supported_classes |= FC_COS_CLASS2; | 1218 | fcport->supported_classes |= FC_COS_CLASS2; |
1385 | if (logio->io_parameter[9] || logio->io_parameter[10]) | 1219 | if (logio->io_parameter[9] || logio->io_parameter[10]) |
@@ -1403,16 +1237,17 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1403 | break; | 1237 | break; |
1404 | } | 1238 | } |
1405 | 1239 | ||
1406 | ql_dbg(ql_dbg_async, fcport->vha, 0x5037, | 1240 | ql_dbg(ql_dbg_async, vha, 0x5037, |
1407 | "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x " | 1241 | "Async-%s failed - portid=%02x%02x%02x comp=%x " |
1408 | "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain, | 1242 | "iop0=%x iop1=%x.\n", |
1243 | type, fcport->d_id.b.domain, | ||
1409 | fcport->d_id.b.area, fcport->d_id.b.al_pa, | 1244 | fcport->d_id.b.area, fcport->d_id.b.al_pa, |
1410 | le16_to_cpu(logio->comp_status), | 1245 | le16_to_cpu(logio->comp_status), |
1411 | le32_to_cpu(logio->io_parameter[0]), | 1246 | le32_to_cpu(logio->io_parameter[0]), |
1412 | le32_to_cpu(logio->io_parameter[1])); | 1247 | le32_to_cpu(logio->io_parameter[1])); |
1413 | 1248 | ||
1414 | logio_done: | 1249 | logio_done: |
1415 | sp->done(vha, sp, 0); | 1250 | lio->done(sp); |
1416 | } | 1251 | } |
1417 | 1252 | ||
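In the login-IOCB hunk above, bit 5 of io_parameter[0] marks the remote port as an initiator, and non-zero io_parameter[7]/[8] and [9]/[10] pairs advertise class-2 and class-3 service. A small sketch of that flag derivation; the numeric flag values and the target-by-default assumption are invented for illustration rather than taken from the driver headers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values; the real FCT_ and FC_COS_ constants live in the
 * driver and SCSI transport headers. */
#define BIT_5            (1u << 5)
#define FCT_INITIATOR    0x1
#define FCT_TARGET       0x2
#define FC_COS_CLASS2    0x4
#define FC_COS_CLASS3    0x8

struct remote_port {
    unsigned int port_type;
    unsigned int supported_classes;
};

static void decode_login_iop(struct remote_port *p, const uint32_t *iop)
{
    p->port_type = FCT_TARGET;          /* assumed default for the sketch */
    if (iop[0] & BIT_5)
        p->port_type = FCT_INITIATOR;

    p->supported_classes = 0;
    if (iop[7] || iop[8])
        p->supported_classes |= FC_COS_CLASS2;
    if (iop[9] || iop[10])
        p->supported_classes |= FC_COS_CLASS3;
}

int main(void)
{
    uint32_t iop[11] = { 0 };
    struct remote_port p;

    iop[9] = 0x800;                     /* class-3 parameters present */
    decode_login_iop(&p, iop);
    printf("port_type=%u classes=0x%x\n", p.port_type, p.supported_classes);
    return 0;
}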
1418 | static void | 1253 | static void |
@@ -1424,6 +1259,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1424 | fc_port_t *fcport; | 1259 | fc_port_t *fcport; |
1425 | srb_t *sp; | 1260 | srb_t *sp; |
1426 | struct srb_iocb *iocb; | 1261 | struct srb_iocb *iocb; |
1262 | struct srb_ctx *ctx; | ||
1427 | struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; | 1263 | struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; |
1428 | int error = 1; | 1264 | int error = 1; |
1429 | 1265 | ||
@@ -1431,31 +1267,32 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1431 | if (!sp) | 1267 | if (!sp) |
1432 | return; | 1268 | return; |
1433 | 1269 | ||
1434 | iocb = &sp->u.iocb_cmd; | 1270 | ctx = sp->ctx; |
1435 | type = sp->name; | 1271 | iocb = ctx->u.iocb_cmd; |
1272 | type = ctx->name; | ||
1436 | fcport = sp->fcport; | 1273 | fcport = sp->fcport; |
1437 | 1274 | ||
1438 | if (sts->entry_status) { | 1275 | if (sts->entry_status) { |
1439 | ql_log(ql_log_warn, fcport->vha, 0x5038, | 1276 | ql_log(ql_log_warn, vha, 0x5038, |
1440 | "Async-%s error - hdl=%x entry-status(%x).\n", | 1277 | "Async-%s error - entry-status(%x).\n", |
1441 | type, sp->handle, sts->entry_status); | 1278 | type, sts->entry_status); |
1442 | } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { | 1279 | } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { |
1443 | ql_log(ql_log_warn, fcport->vha, 0x5039, | 1280 | ql_log(ql_log_warn, vha, 0x5039, |
1444 | "Async-%s error - hdl=%x completion status(%x).\n", | 1281 | "Async-%s error - completion status(%x).\n", |
1445 | type, sp->handle, sts->comp_status); | 1282 | type, sts->comp_status); |
1446 | } else if (!(le16_to_cpu(sts->scsi_status) & | 1283 | } else if (!(le16_to_cpu(sts->scsi_status) & |
1447 | SS_RESPONSE_INFO_LEN_VALID)) { | 1284 | SS_RESPONSE_INFO_LEN_VALID)) { |
1448 | ql_log(ql_log_warn, fcport->vha, 0x503a, | 1285 | ql_log(ql_log_warn, vha, 0x503a, |
1449 | "Async-%s error - hdl=%x no response info(%x).\n", | 1286 | "Async-%s error - no response info(%x).\n", |
1450 | type, sp->handle, sts->scsi_status); | 1287 | type, sts->scsi_status); |
1451 | } else if (le32_to_cpu(sts->rsp_data_len) < 4) { | 1288 | } else if (le32_to_cpu(sts->rsp_data_len) < 4) { |
1452 | ql_log(ql_log_warn, fcport->vha, 0x503b, | 1289 | ql_log(ql_log_warn, vha, 0x503b, |
1453 | "Async-%s error - hdl=%x not enough response(%d).\n", | 1290 | "Async-%s error - not enough response(%d).\n", |
1454 | type, sp->handle, sts->rsp_data_len); | 1291 | type, sts->rsp_data_len); |
1455 | } else if (sts->data[3]) { | 1292 | } else if (sts->data[3]) { |
1456 | ql_log(ql_log_warn, fcport->vha, 0x503c, | 1293 | ql_log(ql_log_warn, vha, 0x503c, |
1457 | "Async-%s error - hdl=%x response(%x).\n", | 1294 | "Async-%s error - response(%x).\n", |
1458 | type, sp->handle, sts->data[3]); | 1295 | type, sts->data[3]); |
1459 | } else { | 1296 | } else { |
1460 | error = 0; | 1297 | error = 0; |
1461 | } | 1298 | } |
@@ -1466,7 +1303,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
1466 | (uint8_t *)sts, sizeof(*sts)); | 1303 | (uint8_t *)sts, sizeof(*sts)); |
1467 | } | 1304 | } |
1468 | 1305 | ||
1469 | sp->done(vha, sp, 0); | 1306 | iocb->done(sp); |
1470 | } | 1307 | } |
1471 | 1308 | ||
1472 | /** | 1309 | /** |
@@ -1500,6 +1337,9 @@ qla2x00_process_response_queue(struct rsp_que *rsp) | |||
1500 | } | 1337 | } |
1501 | 1338 | ||
1502 | if (pkt->entry_status != 0) { | 1339 | if (pkt->entry_status != 0) { |
1340 | ql_log(ql_log_warn, vha, 0x5035, | ||
1341 | "Process error entry.\n"); | ||
1342 | |||
1503 | qla2x00_error_entry(vha, rsp, pkt); | 1343 | qla2x00_error_entry(vha, rsp, pkt); |
1504 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; | 1344 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; |
1505 | wmb(); | 1345 | wmb(); |
@@ -1551,47 +1391,40 @@ qla2x00_process_response_queue(struct rsp_que *rsp) | |||
1551 | } | 1391 | } |
1552 | 1392 | ||
1553 | static inline void | 1393 | static inline void |
1394 | |||
1554 | qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, | 1395 | qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, |
1555 | uint32_t sense_len, struct rsp_que *rsp, int res) | 1396 | uint32_t sense_len, struct rsp_que *rsp) |
1556 | { | 1397 | { |
1557 | struct scsi_qla_host *vha = sp->fcport->vha; | 1398 | struct scsi_qla_host *vha = sp->fcport->vha; |
1558 | struct scsi_cmnd *cp = GET_CMD_SP(sp); | 1399 | struct scsi_cmnd *cp = sp->cmd; |
1559 | uint32_t track_sense_len; | ||
1560 | 1400 | ||
1561 | if (sense_len >= SCSI_SENSE_BUFFERSIZE) | 1401 | if (sense_len >= SCSI_SENSE_BUFFERSIZE) |
1562 | sense_len = SCSI_SENSE_BUFFERSIZE; | 1402 | sense_len = SCSI_SENSE_BUFFERSIZE; |
1563 | 1403 | ||
1564 | SET_CMD_SENSE_LEN(sp, sense_len); | 1404 | sp->request_sense_length = sense_len; |
1565 | SET_CMD_SENSE_PTR(sp, cp->sense_buffer); | 1405 | sp->request_sense_ptr = cp->sense_buffer; |
1566 | track_sense_len = sense_len; | 1406 | if (sp->request_sense_length > par_sense_len) |
1567 | |||
1568 | if (sense_len > par_sense_len) | ||
1569 | sense_len = par_sense_len; | 1407 | sense_len = par_sense_len; |
1570 | 1408 | ||
1571 | memcpy(cp->sense_buffer, sense_data, sense_len); | 1409 | memcpy(cp->sense_buffer, sense_data, sense_len); |
1572 | 1410 | ||
1573 | SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); | 1411 | sp->request_sense_ptr += sense_len; |
1574 | track_sense_len -= sense_len; | 1412 | sp->request_sense_length -= sense_len; |
1575 | SET_CMD_SENSE_LEN(sp, track_sense_len); | 1413 | if (sp->request_sense_length != 0) |
1576 | |||
1577 | if (track_sense_len != 0) { | ||
1578 | rsp->status_srb = sp; | 1414 | rsp->status_srb = sp; |
1579 | cp->result = res; | ||
1580 | } | ||
1581 | 1415 | ||
1582 | if (sense_len) { | 1416 | ql_dbg(ql_dbg_io, vha, 0x301c, |
1583 | ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, | 1417 | "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n", |
1584 | "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n", | 1418 | sp->fcport->vha->host_no, cp->device->channel, cp->device->id, |
1585 | sp->fcport->vha->host_no, cp->device->id, cp->device->lun, | 1419 | cp->device->lun, cp); |
1586 | cp); | 1420 | if (sense_len) |
1587 | ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, | 1421 | ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, |
1588 | cp->sense_buffer, sense_len); | 1422 | cp->sense_buffer, sense_len); |
1589 | } | ||
1590 | } | 1423 | } |
1591 | 1424 | ||
1592 | struct scsi_dif_tuple { | 1425 | struct scsi_dif_tuple { |
1593 | __be16 guard; /* Checksum */ | 1426 | __be16 guard; /* Checksum */ |
1594 | __be16 app_tag; /* APPL identifier */ | 1427 | __be16 app_tag; /* APPL identifier */ |
1595 | __be32 ref_tag; /* Target LBA or indirect LBA */ | 1428 | __be32 ref_tag; /* Target LBA or indirect LBA */ |
1596 | }; | 1429 | }; |
1597 | 1430 | ||
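qla2x00_handle_sense() in the hunk above copies at most one IOCB's worth of sense data per pass and keeps pointer and length bookkeeping (request_sense_ptr/length on the right, the SET_CMD_SENSE_* accessors on the left) so later status-continuation entries can append the rest. A user-space sketch of that chunked copy; the buffer size and fragment contents are arbitrary.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SENSE_BUFFERSIZE 96     /* stand-in for SCSI_SENSE_BUFFERSIZE */

struct sense_state {
    uint8_t buffer[SENSE_BUFFERSIZE];
    uint8_t *next;              /* where the next fragment lands */
    unsigned int remaining;     /* bytes still expected */
};

/* First fragment: clamp the total length, copy what this entry carries
 * (par_len bytes at most) and remember how much is still owed. */
static int handle_sense(struct sense_state *s, const uint8_t *data,
                        unsigned int par_len, unsigned int sense_len)
{
    if (sense_len > SENSE_BUFFERSIZE)
        sense_len = SENSE_BUFFERSIZE;

    s->next = s->buffer;
    s->remaining = sense_len;

    if (sense_len > par_len)
        sense_len = par_len;

    memcpy(s->next, data, sense_len);
    s->next += sense_len;
    s->remaining -= sense_len;

    return s->remaining != 0;   /* non-zero: wait for continuation entries */
}

int main(void)
{
    uint8_t frag[32] = { 0x70, 0x00, 0x05 };    /* fake sense fragment */
    struct sense_state s = { 0 };
    int more = handle_sense(&s, frag, sizeof(frag), 64);

    printf("copied=%td remaining=%u more=%d\n",
           s.next - s.buffer, s.remaining, more);
    return 0;
}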
@@ -1605,7 +1438,7 @@ static inline int | |||
1605 | qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) | 1438 | qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) |
1606 | { | 1439 | { |
1607 | struct scsi_qla_host *vha = sp->fcport->vha; | 1440 | struct scsi_qla_host *vha = sp->fcport->vha; |
1608 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); | 1441 | struct scsi_cmnd *cmd = sp->cmd; |
1609 | uint8_t *ap = &sts24->data[12]; | 1442 | uint8_t *ap = &sts24->data[12]; |
1610 | uint8_t *ep = &sts24->data[20]; | 1443 | uint8_t *ep = &sts24->data[20]; |
1611 | uint32_t e_ref_tag, a_ref_tag; | 1444 | uint32_t e_ref_tag, a_ref_tag; |
@@ -1673,7 +1506,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) | |||
1673 | } | 1506 | } |
1674 | 1507 | ||
1675 | if (k != blocks_done) { | 1508 | if (k != blocks_done) { |
1676 | ql_log(ql_log_warn, vha, 0x302f, | 1509 | qla_printk(KERN_WARNING, sp->fcport->vha->hw, |
1677 | "unexpected tag values tag:lba=%x:%llx)\n", | 1510 | "unexpected tag values tag:lba=%x:%llx)\n", |
1678 | e_ref_tag, (unsigned long long)lba_s); | 1511 | e_ref_tag, (unsigned long long)lba_s); |
1679 | return 1; | 1512 | return 1; |
@@ -1723,149 +1556,6 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) | |||
1723 | return 1; | 1556 | return 1; |
1724 | } | 1557 | } |
1725 | 1558 | ||
1726 | static void | ||
1727 | qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, | ||
1728 | struct req_que *req, uint32_t index) | ||
1729 | { | ||
1730 | struct qla_hw_data *ha = vha->hw; | ||
1731 | srb_t *sp; | ||
1732 | uint16_t comp_status; | ||
1733 | uint16_t scsi_status; | ||
1734 | uint16_t thread_id; | ||
1735 | uint32_t rval = EXT_STATUS_OK; | ||
1736 | struct fc_bsg_job *bsg_job = NULL; | ||
1737 | sts_entry_t *sts; | ||
1738 | struct sts_entry_24xx *sts24; | ||
1739 | sts = (sts_entry_t *) pkt; | ||
1740 | sts24 = (struct sts_entry_24xx *) pkt; | ||
1741 | |||
1742 | /* Validate handle. */ | ||
1743 | if (index >= MAX_OUTSTANDING_COMMANDS) { | ||
1744 | ql_log(ql_log_warn, vha, 0x70af, | ||
1745 | "Invalid SCSI completion handle 0x%x.\n", index); | ||
1746 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | ||
1747 | return; | ||
1748 | } | ||
1749 | |||
1750 | sp = req->outstanding_cmds[index]; | ||
1751 | if (sp) { | ||
1752 | /* Free outstanding command slot. */ | ||
1753 | req->outstanding_cmds[index] = NULL; | ||
1754 | bsg_job = sp->u.bsg_job; | ||
1755 | } else { | ||
1756 | ql_log(ql_log_warn, vha, 0x70b0, | ||
1757 | "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", | ||
1758 | req->id, index); | ||
1759 | |||
1760 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | ||
1761 | return; | ||
1762 | } | ||
1763 | |||
1764 | if (IS_FWI2_CAPABLE(ha)) { | ||
1765 | comp_status = le16_to_cpu(sts24->comp_status); | ||
1766 | scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; | ||
1767 | } else { | ||
1768 | comp_status = le16_to_cpu(sts->comp_status); | ||
1769 | scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; | ||
1770 | } | ||
1771 | |||
1772 | thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; | ||
1773 | switch (comp_status) { | ||
1774 | case CS_COMPLETE: | ||
1775 | if (scsi_status == 0) { | ||
1776 | bsg_job->reply->reply_payload_rcv_len = | ||
1777 | bsg_job->reply_payload.payload_len; | ||
1778 | rval = EXT_STATUS_OK; | ||
1779 | } | ||
1780 | goto done; | ||
1781 | |||
1782 | case CS_DATA_OVERRUN: | ||
1783 | ql_dbg(ql_dbg_user, vha, 0x70b1, | ||
1784 | "Command completed with date overrun thread_id=%d\n", | ||
1785 | thread_id); | ||
1786 | rval = EXT_STATUS_DATA_OVERRUN; | ||
1787 | break; | ||
1788 | |||
1789 | case CS_DATA_UNDERRUN: | ||
1790 | ql_dbg(ql_dbg_user, vha, 0x70b2, | ||
1791 | "Command completed with date underrun thread_id=%d\n", | ||
1792 | thread_id); | ||
1793 | rval = EXT_STATUS_DATA_UNDERRUN; | ||
1794 | break; | ||
1795 | case CS_BIDIR_RD_OVERRUN: | ||
1796 | ql_dbg(ql_dbg_user, vha, 0x70b3, | ||
1797 | "Command completed with read data overrun thread_id=%d\n", | ||
1798 | thread_id); | ||
1799 | rval = EXT_STATUS_DATA_OVERRUN; | ||
1800 | break; | ||
1801 | |||
1802 | case CS_BIDIR_RD_WR_OVERRUN: | ||
1803 | ql_dbg(ql_dbg_user, vha, 0x70b4, | ||
1804 | "Command completed with read and write data overrun " | ||
1805 | "thread_id=%d\n", thread_id); | ||
1806 | rval = EXT_STATUS_DATA_OVERRUN; | ||
1807 | break; | ||
1808 | |||
1809 | case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN: | ||
1810 | ql_dbg(ql_dbg_user, vha, 0x70b5, | ||
1811 | "Command completed with read data over and write data " | ||
1812 | "underrun thread_id=%d\n", thread_id); | ||
1813 | rval = EXT_STATUS_DATA_OVERRUN; | ||
1814 | break; | ||
1815 | |||
1816 | case CS_BIDIR_RD_UNDERRUN: | ||
1817 | ql_dbg(ql_dbg_user, vha, 0x70b6, | ||
1818 | "Command completed with read data data underrun " | ||
1819 | "thread_id=%d\n", thread_id); | ||
1820 | rval = EXT_STATUS_DATA_UNDERRUN; | ||
1821 | break; | ||
1822 | |||
1823 | case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN: | ||
1824 | ql_dbg(ql_dbg_user, vha, 0x70b7, | ||
1825 | "Command completed with read data under and write data " | ||
1826 | "overrun thread_id=%d\n", thread_id); | ||
1827 | rval = EXT_STATUS_DATA_UNDERRUN; | ||
1828 | break; | ||
1829 | |||
1830 | case CS_BIDIR_RD_WR_UNDERRUN: | ||
1831 | ql_dbg(ql_dbg_user, vha, 0x70b8, | ||
1832 | "Command completed with read and write data underrun " | ||
1833 | "thread_id=%d\n", thread_id); | ||
1834 | rval = EXT_STATUS_DATA_UNDERRUN; | ||
1835 | break; | ||
1836 | |||
1837 | case CS_BIDIR_DMA: | ||
1838 | ql_dbg(ql_dbg_user, vha, 0x70b9, | ||
1839 | "Command completed with data DMA error thread_id=%d\n", | ||
1840 | thread_id); | ||
1841 | rval = EXT_STATUS_DMA_ERR; | ||
1842 | break; | ||
1843 | |||
1844 | case CS_TIMEOUT: | ||
1845 | ql_dbg(ql_dbg_user, vha, 0x70ba, | ||
1846 | "Command completed with timeout thread_id=%d\n", | ||
1847 | thread_id); | ||
1848 | rval = EXT_STATUS_TIMEOUT; | ||
1849 | break; | ||
1850 | default: | ||
1851 | ql_dbg(ql_dbg_user, vha, 0x70bb, | ||
1852 | "Command completed with completion status=0x%x " | ||
1853 | "thread_id=%d\n", comp_status, thread_id); | ||
1854 | rval = EXT_STATUS_ERR; | ||
1855 | break; | ||
1856 | } | ||
1857 | bsg_job->reply->reply_payload_rcv_len = 0; | ||
1858 | |||
1859 | done: | ||
1860 | /* Return the vendor specific reply to API */ | ||
1861 | bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval; | ||
1862 | bsg_job->reply_len = sizeof(struct fc_bsg_reply); | ||
1863 | /* Always return DID_OK, bsg will send the vendor specific response | ||
1864 | * in this case only */ | ||
1865 | sp->done(vha, sp, (DID_OK << 16)); | ||
1866 | |||
1867 | } | ||
1868 | |||
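The bidirectional-command completion handler removed above boils down to a mapping from the firmware completion status to the extended status placed in the BSG vendor reply, with only CS_COMPLETE preserving the received payload length. A minimal standalone sketch of that mapping follows; the CS_* and EXT_STATUS_* values are placeholder stand-ins, not the driver's real definitions from qla_def.h and qla_bsg.h.

    /* Placeholder values; the driver's real CS_* and EXT_STATUS_* constants
     * live in qla_def.h and qla_bsg.h. */
    enum cs {
            CS_COMPLETE, CS_DATA_OVERRUN, CS_DATA_UNDERRUN,
            CS_BIDIR_RD_OVERRUN, CS_BIDIR_RD_WR_OVERRUN,
            CS_BIDIR_RD_OVERRUN_WR_UNDERRUN, CS_BIDIR_RD_UNDERRUN,
            CS_BIDIR_RD_UNDERRUN_WR_OVERRUN, CS_BIDIR_RD_WR_UNDERRUN,
            CS_BIDIR_DMA, CS_TIMEOUT
    };
    enum ext_status {
            EXT_STATUS_OK, EXT_STATUS_DATA_OVERRUN, EXT_STATUS_DATA_UNDERRUN,
            EXT_STATUS_DMA_ERR, EXT_STATUS_TIMEOUT, EXT_STATUS_ERR
    };

    /* Reduce the removed switch to its essential mapping. The real handler
     * additionally requires scsi_status == 0 before treating CS_COMPLETE as
     * success and zeroes the received length for every other code. */
    static enum ext_status bidir_ext_status(enum cs comp_status)
    {
            switch (comp_status) {
            case CS_COMPLETE:
                    return EXT_STATUS_OK;
            case CS_DATA_OVERRUN:
            case CS_BIDIR_RD_OVERRUN:
            case CS_BIDIR_RD_WR_OVERRUN:
            case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
                    return EXT_STATUS_DATA_OVERRUN;
            case CS_DATA_UNDERRUN:
            case CS_BIDIR_RD_UNDERRUN:
            case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
            case CS_BIDIR_RD_WR_UNDERRUN:
                    return EXT_STATUS_DATA_UNDERRUN;
            case CS_BIDIR_DMA:
                    return EXT_STATUS_DMA_ERR;
            case CS_TIMEOUT:
                    return EXT_STATUS_TIMEOUT;
            default:
                    return EXT_STATUS_ERR;
            }
    }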
1869 | /** | 1559 | /** |
1870 | * qla2x00_status_entry() - Process a Status IOCB entry. | 1560 | * qla2x00_status_entry() - Process a Status IOCB entry. |
1871 | * @ha: SCSI driver HA context | 1561 | * @ha: SCSI driver HA context |
@@ -1892,15 +1582,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) | |||
1892 | uint16_t que; | 1582 | uint16_t que; |
1893 | struct req_que *req; | 1583 | struct req_que *req; |
1894 | int logit = 1; | 1584 | int logit = 1; |
1895 | int res = 0; | ||
1896 | uint16_t state_flags = 0; | ||
1897 | 1585 | ||
1898 | sts = (sts_entry_t *) pkt; | 1586 | sts = (sts_entry_t *) pkt; |
1899 | sts24 = (struct sts_entry_24xx *) pkt; | 1587 | sts24 = (struct sts_entry_24xx *) pkt; |
1900 | if (IS_FWI2_CAPABLE(ha)) { | 1588 | if (IS_FWI2_CAPABLE(ha)) { |
1901 | comp_status = le16_to_cpu(sts24->comp_status); | 1589 | comp_status = le16_to_cpu(sts24->comp_status); |
1902 | scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; | 1590 | scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; |
1903 | state_flags = le16_to_cpu(sts24->state_flags); | ||
1904 | } else { | 1591 | } else { |
1905 | comp_status = le16_to_cpu(sts->comp_status); | 1592 | comp_status = le16_to_cpu(sts->comp_status); |
1906 | scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; | 1593 | scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; |
@@ -1909,14 +1596,22 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) | |||
1909 | que = MSW(sts->handle); | 1596 | que = MSW(sts->handle); |
1910 | req = ha->req_q_map[que]; | 1597 | req = ha->req_q_map[que]; |
1911 | 1598 | ||
1599 | /* Fast path completion. */ | ||
1600 | if (comp_status == CS_COMPLETE && scsi_status == 0) { | ||
1601 | qla2x00_process_completed_request(vha, req, handle); | ||
1602 | |||
1603 | return; | ||
1604 | } | ||
1605 | |||
1912 | /* Validate handle. */ | 1606 | /* Validate handle. */ |
1913 | if (handle < MAX_OUTSTANDING_COMMANDS) { | 1607 | if (handle < MAX_OUTSTANDING_COMMANDS) { |
1914 | sp = req->outstanding_cmds[handle]; | 1608 | sp = req->outstanding_cmds[handle]; |
1609 | req->outstanding_cmds[handle] = NULL; | ||
1915 | } else | 1610 | } else |
1916 | sp = NULL; | 1611 | sp = NULL; |
1917 | 1612 | ||
1918 | if (sp == NULL) { | 1613 | if (sp == NULL) { |
1919 | ql_dbg(ql_dbg_io, vha, 0x3017, | 1614 | ql_log(ql_log_warn, vha, 0x3017, |
1920 | "Invalid status handle (0x%x).\n", sts->handle); | 1615 | "Invalid status handle (0x%x).\n", sts->handle); |
1921 | 1616 | ||
1922 | if (IS_QLA82XX(ha)) | 1617 | if (IS_QLA82XX(ha)) |
@@ -1926,23 +1621,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) | |||
1926 | qla2xxx_wake_dpc(vha); | 1621 | qla2xxx_wake_dpc(vha); |
1927 | return; | 1622 | return; |
1928 | } | 1623 | } |
1929 | 1624 | cp = sp->cmd; | |
1930 | if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { | ||
1931 | qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); | ||
1932 | return; | ||
1933 | } | ||
1934 | |||
1935 | /* Fast path completion. */ | ||
1936 | if (comp_status == CS_COMPLETE && scsi_status == 0) { | ||
1937 | qla2x00_process_completed_request(vha, req, handle); | ||
1938 | |||
1939 | return; | ||
1940 | } | ||
1941 | |||
1942 | req->outstanding_cmds[handle] = NULL; | ||
1943 | cp = GET_CMD_SP(sp); | ||
1944 | if (cp == NULL) { | 1625 | if (cp == NULL) { |
1945 | ql_dbg(ql_dbg_io, vha, 0x3018, | 1626 | ql_log(ql_log_warn, vha, 0x3018, |
1946 | "Command already returned (0x%x/%p).\n", | 1627 | "Command already returned (0x%x/%p).\n", |
1947 | sts->handle, sp); | 1628 | sts->handle, sp); |
1948 | 1629 | ||
@@ -1989,11 +1670,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) | |||
1989 | par_sense_len -= rsp_info_len; | 1670 | par_sense_len -= rsp_info_len; |
1990 | } | 1671 | } |
1991 | if (rsp_info_len > 3 && rsp_info[3]) { | 1672 | if (rsp_info_len > 3 && rsp_info[3]) { |
1992 | ql_dbg(ql_dbg_io, fcport->vha, 0x3019, | 1673 | ql_log(ql_log_warn, vha, 0x3019, |
1993 | "FCP I/O protocol failure (0x%x/0x%x).\n", | 1674 | "FCP I/O protocol failure (0x%x/0x%x).\n", |
1994 | rsp_info_len, rsp_info[3]); | 1675 | rsp_info_len, rsp_info[3]); |
1995 | 1676 | ||
1996 | res = DID_BUS_BUSY << 16; | 1677 | cp->result = DID_BUS_BUSY << 16; |
1997 | goto out; | 1678 | goto out; |
1998 | } | 1679 | } |
1999 | } | 1680 | } |
@@ -2010,7 +1691,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) | |||
2010 | case CS_COMPLETE: | 1691 | case CS_COMPLETE: |
2011 | case CS_QUEUE_FULL: | 1692 | case CS_QUEUE_FULL: |
2012 | if (scsi_status == 0) { | 1693 | if (scsi_status == 0) { |
2013 | res = DID_OK << 16; | 1694 | cp->result = DID_OK << 16; |
2014 | break; | 1695 | break; |
2015 | } | 1696 | } |
2016 | if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { | 1697 | if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { |
@@ -2020,19 +1701,19 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) | |||
2020 | if (!lscsi_status && | 1701 | if (!lscsi_status && |
2021 | ((unsigned)(scsi_bufflen(cp) - resid) < | 1702 | ((unsigned)(scsi_bufflen(cp) - resid) < |
2022 | cp->underflow)) { | 1703 | cp->underflow)) { |
2023 | ql_dbg(ql_dbg_io, fcport->vha, 0x301a, | 1704 | ql_log(ql_log_warn, vha, 0x301a, |
2024 | "Mid-layer underflow " | 1705 | "Mid-layer underflow " |
2025 | "detected (0x%x of 0x%x bytes).\n", | 1706 | "detected (0x%x of 0x%x bytes).\n", |
2026 | resid, scsi_bufflen(cp)); | 1707 | resid, scsi_bufflen(cp)); |
2027 | 1708 | ||
2028 | res = DID_ERROR << 16; | 1709 | cp->result = DID_ERROR << 16; |
2029 | break; | 1710 | break; |
2030 | } | 1711 | } |
2031 | } | 1712 | } |
2032 | res = DID_OK << 16 | lscsi_status; | 1713 | cp->result = DID_OK << 16 | lscsi_status; |
2033 | 1714 | ||
2034 | if (lscsi_status == SAM_STAT_TASK_SET_FULL) { | 1715 | if (lscsi_status == SAM_STAT_TASK_SET_FULL) { |
2035 | ql_dbg(ql_dbg_io, fcport->vha, 0x301b, | 1716 | ql_log(ql_log_warn, vha, 0x301b, |
2036 | "QUEUE FULL detected.\n"); | 1717 | "QUEUE FULL detected.\n"); |
2037 | break; | 1718 | break; |
2038 | } | 1719 | } |
@@ -2045,7 +1726,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) | |||
2045 | break; | 1726 | break; |
2046 | 1727 | ||
2047 | qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, | 1728 | qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, |
2048 | rsp, res); | 1729 | rsp); |
2049 | break; | 1730 | break; |
2050 | 1731 | ||
2051 | case CS_DATA_UNDERRUN: | 1732 | case CS_DATA_UNDERRUN: |
@@ -2054,47 +1735,36 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) | |||
2054 | scsi_set_resid(cp, resid); | 1735 | scsi_set_resid(cp, resid); |
2055 | if (scsi_status & SS_RESIDUAL_UNDER) { | 1736 | if (scsi_status & SS_RESIDUAL_UNDER) { |
2056 | if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { | 1737 | if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { |
2057 | ql_dbg(ql_dbg_io, fcport->vha, 0x301d, | 1738 | ql_log(ql_log_warn, vha, 0x301d, |
2058 | "Dropped frame(s) detected " | 1739 | "Dropped frame(s) detected " |
2059 | "(0x%x of 0x%x bytes).\n", | 1740 | "(0x%x of 0x%x bytes).\n", |
2060 | resid, scsi_bufflen(cp)); | 1741 | resid, scsi_bufflen(cp)); |
2061 | 1742 | ||
2062 | res = DID_ERROR << 16 | lscsi_status; | 1743 | cp->result = DID_ERROR << 16 | lscsi_status; |
2063 | goto check_scsi_status; | 1744 | break; |
2064 | } | 1745 | } |
2065 | 1746 | ||
2066 | if (!lscsi_status && | 1747 | if (!lscsi_status && |
2067 | ((unsigned)(scsi_bufflen(cp) - resid) < | 1748 | ((unsigned)(scsi_bufflen(cp) - resid) < |
2068 | cp->underflow)) { | 1749 | cp->underflow)) { |
2069 | ql_dbg(ql_dbg_io, fcport->vha, 0x301e, | 1750 | ql_log(ql_log_warn, vha, 0x301e, |
2070 | "Mid-layer underflow " | 1751 | "Mid-layer underflow " |
2071 | "detected (0x%x of 0x%x bytes).\n", | 1752 | "detected (0x%x of 0x%x bytes).\n", |
2072 | resid, scsi_bufflen(cp)); | 1753 | resid, scsi_bufflen(cp)); |
2073 | 1754 | ||
2074 | res = DID_ERROR << 16; | 1755 | cp->result = DID_ERROR << 16; |
2075 | break; | 1756 | break; |
2076 | } | 1757 | } |
2077 | } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && | 1758 | } else { |
2078 | lscsi_status != SAM_STAT_BUSY) { | 1759 | ql_log(ql_log_warn, vha, 0x301f, |
2079 | /* | ||
2080 | * scsi status of task set and busy are considered to be | ||
2081 | * task not completed. | ||
2082 | */ | ||
2083 | |||
2084 | ql_dbg(ql_dbg_io, fcport->vha, 0x301f, | ||
2085 | "Dropped frame(s) detected (0x%x " | 1760 | "Dropped frame(s) detected (0x%x " |
2086 | "of 0x%x bytes).\n", resid, | 1761 | "of 0x%x bytes).\n", resid, scsi_bufflen(cp)); |
2087 | scsi_bufflen(cp)); | ||
2088 | 1762 | ||
2089 | res = DID_ERROR << 16 | lscsi_status; | 1763 | cp->result = DID_ERROR << 16 | lscsi_status; |
2090 | goto check_scsi_status; | 1764 | goto check_scsi_status; |
2091 | } else { | ||
2092 | ql_dbg(ql_dbg_io, fcport->vha, 0x3030, | ||
2093 | "scsi_status: 0x%x, lscsi_status: 0x%x\n", | ||
2094 | scsi_status, lscsi_status); | ||
2095 | } | 1765 | } |
2096 | 1766 | ||
2097 | res = DID_OK << 16 | lscsi_status; | 1767 | cp->result = DID_OK << 16 | lscsi_status; |
2098 | logit = 0; | 1768 | logit = 0; |
2099 | 1769 | ||
2100 | check_scsi_status: | 1770 | check_scsi_status: |
@@ -2104,7 +1774,7 @@ check_scsi_status: | |||
2104 | */ | 1774 | */ |
2105 | if (lscsi_status != 0) { | 1775 | if (lscsi_status != 0) { |
2106 | if (lscsi_status == SAM_STAT_TASK_SET_FULL) { | 1776 | if (lscsi_status == SAM_STAT_TASK_SET_FULL) { |
2107 | ql_dbg(ql_dbg_io, fcport->vha, 0x3020, | 1777 | ql_log(ql_log_warn, vha, 0x3020, |
2108 | "QUEUE FULL detected.\n"); | 1778 | "QUEUE FULL detected.\n"); |
2109 | logit = 1; | 1779 | logit = 1; |
2110 | break; | 1780 | break; |
@@ -2117,7 +1787,7 @@ check_scsi_status: | |||
2117 | break; | 1787 | break; |
2118 | 1788 | ||
2119 | qla2x00_handle_sense(sp, sense_data, par_sense_len, | 1789 | qla2x00_handle_sense(sp, sense_data, par_sense_len, |
2120 | sense_len, rsp, res); | 1790 | sense_len, rsp); |
2121 | } | 1791 | } |
2122 | break; | 1792 | break; |
2123 | 1793 | ||
@@ -2134,7 +1804,7 @@ check_scsi_status: | |||
2134 | * while we try to recover so instruct the mid layer | 1804 | * while we try to recover so instruct the mid layer |
2135 | * to requeue until the class decides how to handle this. | 1805 | * to requeue until the class decides how to handle this. |
2136 | */ | 1806 | */ |
2137 | res = DID_TRANSPORT_DISRUPTED << 16; | 1807 | cp->result = DID_TRANSPORT_DISRUPTED << 16; |
2138 | 1808 | ||
2139 | if (comp_status == CS_TIMEOUT) { | 1809 | if (comp_status == CS_TIMEOUT) { |
2140 | if (IS_FWI2_CAPABLE(ha)) | 1810 | if (IS_FWI2_CAPABLE(ha)) |
@@ -2144,7 +1814,7 @@ check_scsi_status: | |||
2144 | break; | 1814 | break; |
2145 | } | 1815 | } |
2146 | 1816 | ||
2147 | ql_dbg(ql_dbg_io, fcport->vha, 0x3021, | 1817 | ql_dbg(ql_dbg_io, vha, 0x3021, |
2148 | "Port down status: port-state=0x%x.\n", | 1818 | "Port down status: port-state=0x%x.\n", |
2149 | atomic_read(&fcport->state)); | 1819 | atomic_read(&fcport->state)); |
2150 | 1820 | ||
@@ -2153,48 +1823,29 @@ check_scsi_status: | |||
2153 | break; | 1823 | break; |
2154 | 1824 | ||
2155 | case CS_ABORTED: | 1825 | case CS_ABORTED: |
2156 | res = DID_RESET << 16; | 1826 | cp->result = DID_RESET << 16; |
2157 | break; | 1827 | break; |
2158 | 1828 | ||
2159 | case CS_DIF_ERROR: | 1829 | case CS_DIF_ERROR: |
2160 | logit = qla2x00_handle_dif_error(sp, sts24); | 1830 | logit = qla2x00_handle_dif_error(sp, sts24); |
2161 | res = cp->result; | ||
2162 | break; | ||
2163 | |||
2164 | case CS_TRANSPORT: | ||
2165 | res = DID_ERROR << 16; | ||
2166 | |||
2167 | if (!IS_PI_SPLIT_DET_CAPABLE(ha)) | ||
2168 | break; | ||
2169 | |||
2170 | if (state_flags & BIT_4) | ||
2171 | scmd_printk(KERN_WARNING, cp, | ||
2172 | "Unsupported device '%s' found.\n", | ||
2173 | cp->device->vendor); | ||
2174 | break; | 1831 | break; |
2175 | |||
2176 | default: | 1832 | default: |
2177 | res = DID_ERROR << 16; | 1833 | cp->result = DID_ERROR << 16; |
2178 | break; | 1834 | break; |
2179 | } | 1835 | } |
2180 | 1836 | ||
2181 | out: | 1837 | out: |
2182 | if (logit) | 1838 | if (logit) |
2183 | ql_dbg(ql_dbg_io, fcport->vha, 0x3022, | 1839 | ql_dbg(ql_dbg_io, vha, 0x3022, |
2184 | "FCP command status: 0x%x-0x%x (0x%x) " | 1840 | "FCP command status: 0x%x-0x%x (0x%x) " |
2185 | "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x " | 1841 | "oxid=0x%x cdb=%02x%02x%02x len=0x%x " |
2186 | "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x " | ||
2187 | "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", | 1842 | "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", |
2188 | comp_status, scsi_status, res, vha->host_no, | 1843 | comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0], |
2189 | cp->device->id, cp->device->lun, fcport->d_id.b.domain, | 1844 | cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, |
2190 | fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, | ||
2191 | cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3], | ||
2192 | cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7], | ||
2193 | cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len, | ||
2194 | resid_len, fw_resid_len); | 1845 | resid_len, fw_resid_len); |
2195 | 1846 | ||
2196 | if (rsp->status_srb == NULL) | 1847 | if (rsp->status_srb == NULL) |
2197 | sp->done(ha, sp, res); | 1848 | qla2x00_sp_compl(ha, sp); |
2198 | } | 1849 | } |
2199 | 1850 | ||
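Throughout qla2x00_status_entry() both versions build the midlayer result word with expressions such as DID_ERROR << 16 | lscsi_status: the host byte sits in bits 16-23 and the SCSI status byte in the low byte. A small self-contained illustration of the packing; the DID_* and SAM_STAT_* values shown are the usual kernel ones and are included here only for demonstration.

    #include <stdint.h>
    #include <stdio.h>

    #define DID_OK        0x00  /* host byte: no error       */
    #define DID_ERROR     0x07  /* host byte: internal error */
    #define SAM_STAT_BUSY 0x08  /* SCSI status byte          */

    /* Pack host byte and SCSI status the way the status handler does with
     * "res = DID_xxx << 16 | lscsi_status". */
    static uint32_t pack_result(uint8_t host_byte, uint8_t scsi_status)
    {
            return ((uint32_t)host_byte << 16) | scsi_status;
    }

    int main(void)
    {
            uint32_t res = pack_result(DID_ERROR, SAM_STAT_BUSY);

            printf("host=0x%02x scsi=0x%02x\n", (res >> 16) & 0xff, res & 0xff);
            return 0;
    }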
2200 | /** | 1851 | /** |
@@ -2207,51 +1858,44 @@ out: | |||
2207 | static void | 1858 | static void |
2208 | qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) | 1859 | qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) |
2209 | { | 1860 | { |
2210 | uint8_t sense_sz = 0; | 1861 | uint8_t sense_sz = 0; |
2211 | struct qla_hw_data *ha = rsp->hw; | 1862 | struct qla_hw_data *ha = rsp->hw; |
2212 | struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); | 1863 | struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); |
2213 | srb_t *sp = rsp->status_srb; | 1864 | srb_t *sp = rsp->status_srb; |
2214 | struct scsi_cmnd *cp; | 1865 | struct scsi_cmnd *cp; |
2215 | uint32_t sense_len; | ||
2216 | uint8_t *sense_ptr; | ||
2217 | |||
2218 | if (!sp || !GET_CMD_SENSE_LEN(sp)) | ||
2219 | return; | ||
2220 | |||
2221 | sense_len = GET_CMD_SENSE_LEN(sp); | ||
2222 | sense_ptr = GET_CMD_SENSE_PTR(sp); | ||
2223 | |||
2224 | cp = GET_CMD_SP(sp); | ||
2225 | if (cp == NULL) { | ||
2226 | ql_log(ql_log_warn, vha, 0x3025, | ||
2227 | "cmd is NULL: already returned to OS (sp=%p).\n", sp); | ||
2228 | 1866 | ||
2229 | rsp->status_srb = NULL; | 1867 | if (sp != NULL && sp->request_sense_length != 0) { |
2230 | return; | 1868 | cp = sp->cmd; |
2231 | } | 1869 | if (cp == NULL) { |
2232 | 1870 | ql_log(ql_log_warn, vha, 0x3025, | |
2233 | if (sense_len > sizeof(pkt->data)) | 1871 | "cmd is NULL: already returned to OS (sp=%p).\n", |
2234 | sense_sz = sizeof(pkt->data); | 1872 | sp); |
2235 | else | ||
2236 | sense_sz = sense_len; | ||
2237 | 1873 | ||
2238 | /* Move sense data. */ | 1874 | rsp->status_srb = NULL; |
2239 | if (IS_FWI2_CAPABLE(ha)) | 1875 | return; |
2240 | host_to_fcp_swap(pkt->data, sizeof(pkt->data)); | 1876 | } |
2241 | memcpy(sense_ptr, pkt->data, sense_sz); | ||
2242 | ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, | ||
2243 | sense_ptr, sense_sz); | ||
2244 | |||
2245 | sense_len -= sense_sz; | ||
2246 | sense_ptr += sense_sz; | ||
2247 | 1877 | ||
2248 | SET_CMD_SENSE_PTR(sp, sense_ptr); | 1878 | if (sp->request_sense_length > sizeof(pkt->data)) { |
2249 | SET_CMD_SENSE_LEN(sp, sense_len); | 1879 | sense_sz = sizeof(pkt->data); |
1880 | } else { | ||
1881 | sense_sz = sp->request_sense_length; | ||
1882 | } | ||
2250 | 1883 | ||
2251 | /* Place command on done queue. */ | 1884 | /* Move sense data. */ |
2252 | if (sense_len == 0) { | 1885 | if (IS_FWI2_CAPABLE(ha)) |
2253 | rsp->status_srb = NULL; | 1886 | host_to_fcp_swap(pkt->data, sizeof(pkt->data)); |
2254 | sp->done(ha, sp, cp->result); | 1887 | memcpy(sp->request_sense_ptr, pkt->data, sense_sz); |
1888 | ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, | ||
1889 | sp->request_sense_ptr, sense_sz); | ||
1890 | |||
1891 | sp->request_sense_ptr += sense_sz; | ||
1892 | sp->request_sense_length -= sense_sz; | ||
1893 | |||
1894 | /* Place command on done queue. */ | ||
1895 | if (sp->request_sense_length == 0) { | ||
1896 | rsp->status_srb = NULL; | ||
1897 | qla2x00_sp_compl(ha, sp); | ||
1898 | } | ||
2255 | } | 1899 | } |
2256 | } | 1900 | } |
2257 | 1901 | ||
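Both versions of qla2x00_status_cont_entry() accumulate sense bytes the same way: each status-continuation IOCB carries a fixed-size chunk that is copied behind the previous one, the cursor advances, and the command is completed once the expected length reaches zero. A rough standalone model of that accumulation, with CONT_DATA_SIZE standing in for sizeof(pkt->data):

    #include <stddef.h>
    #include <string.h>

    #define CONT_DATA_SIZE 44  /* placeholder for sizeof(pkt->data) */

    struct sense_cursor {
            unsigned char *ptr;   /* where the next chunk lands */
            size_t remaining;     /* sense bytes still expected */
    };

    /* Consume one continuation packet's worth of sense data and report
     * whether another continuation IOCB is still expected. */
    static int consume_cont_packet(struct sense_cursor *c,
                                   const unsigned char pkt_data[CONT_DATA_SIZE])
    {
            size_t chunk = c->remaining < CONT_DATA_SIZE ?
                           c->remaining : CONT_DATA_SIZE;

            memcpy(c->ptr, pkt_data, chunk);
            c->ptr += chunk;
            c->remaining -= chunk;

            return c->remaining != 0;  /* non-zero: more data to come */
    }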
@@ -2265,36 +1909,63 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) | |||
2265 | { | 1909 | { |
2266 | srb_t *sp; | 1910 | srb_t *sp; |
2267 | struct qla_hw_data *ha = vha->hw; | 1911 | struct qla_hw_data *ha = vha->hw; |
2268 | const char func[] = "ERROR-IOCB"; | 1912 | uint32_t handle = LSW(pkt->handle); |
2269 | uint16_t que = MSW(pkt->handle); | 1913 | uint16_t que = MSW(pkt->handle); |
2270 | struct req_que *req = NULL; | 1914 | struct req_que *req = ha->req_q_map[que]; |
2271 | int res = DID_ERROR << 16; | 1915 | |
2272 | 1916 | if (pkt->entry_status & RF_INV_E_ORDER) | |
2273 | ql_dbg(ql_dbg_async, vha, 0x502a, | 1917 | ql_dbg(ql_dbg_async, vha, 0x502a, |
2274 | "type of error status in response: 0x%x\n", pkt->entry_status); | 1918 | "Invalid Entry Order.\n"); |
1919 | else if (pkt->entry_status & RF_INV_E_COUNT) | ||
1920 | ql_dbg(ql_dbg_async, vha, 0x502b, | ||
1921 | "Invalid Entry Count.\n"); | ||
1922 | else if (pkt->entry_status & RF_INV_E_PARAM) | ||
1923 | ql_dbg(ql_dbg_async, vha, 0x502c, | ||
1924 | "Invalid Entry Parameter.\n"); | ||
1925 | else if (pkt->entry_status & RF_INV_E_TYPE) | ||
1926 | ql_dbg(ql_dbg_async, vha, 0x502d, | ||
1927 | "Invalid Entry Type.\n"); | ||
1928 | else if (pkt->entry_status & RF_BUSY) | ||
1929 | ql_dbg(ql_dbg_async, vha, 0x502e, | ||
1930 | "Busy.\n"); | ||
1931 | else | ||
1932 | ql_dbg(ql_dbg_async, vha, 0x502f, | ||
1933 | "UNKNOWN flag error.\n"); | ||
2275 | 1934 | ||
2276 | if (que >= ha->max_req_queues || !ha->req_q_map[que]) | 1935 | /* Validate handle. */ |
2277 | goto fatal; | 1936 | if (handle < MAX_OUTSTANDING_COMMANDS) |
1937 | sp = req->outstanding_cmds[handle]; | ||
1938 | else | ||
1939 | sp = NULL; | ||
2278 | 1940 | ||
2279 | req = ha->req_q_map[que]; | 1941 | if (sp) { |
1942 | /* Free outstanding command slot. */ | ||
1943 | req->outstanding_cmds[handle] = NULL; | ||
1944 | |||
1945 | /* Bad payload or header */ | ||
1946 | if (pkt->entry_status & | ||
1947 | (RF_INV_E_ORDER | RF_INV_E_COUNT | | ||
1948 | RF_INV_E_PARAM | RF_INV_E_TYPE)) { | ||
1949 | sp->cmd->result = DID_ERROR << 16; | ||
1950 | } else if (pkt->entry_status & RF_BUSY) { | ||
1951 | sp->cmd->result = DID_BUS_BUSY << 16; | ||
1952 | } else { | ||
1953 | sp->cmd->result = DID_ERROR << 16; | ||
1954 | } | ||
1955 | qla2x00_sp_compl(ha, sp); | ||
2280 | 1956 | ||
2281 | if (pkt->entry_status & RF_BUSY) | 1957 | } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == |
2282 | res = DID_BUS_BUSY << 16; | 1958 | COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7 |
1959 | || pkt->entry_type == COMMAND_TYPE_6) { | ||
1960 | ql_log(ql_log_warn, vha, 0x5030, | ||
1961 | "Error entry - invalid handle.\n"); | ||
2283 | 1962 | ||
2284 | sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); | 1963 | if (IS_QLA82XX(ha)) |
2285 | if (sp) { | 1964 | set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); |
2286 | sp->done(ha, sp, res); | 1965 | else |
2287 | return; | 1966 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
1967 | qla2xxx_wake_dpc(vha); | ||
2288 | } | 1968 | } |
2289 | fatal: | ||
2290 | ql_log(ql_log_warn, vha, 0x5030, | ||
2291 | "Error entry - invalid handle/queue.\n"); | ||
2292 | |||
2293 | if (IS_QLA82XX(ha)) | ||
2294 | set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); | ||
2295 | else | ||
2296 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | ||
2297 | qla2xxx_wake_dpc(vha); | ||
2298 | } | 1969 | } |
2299 | 1970 | ||
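The restored qla2x00_error_entry() names the offending entry_status flag with an if/else chain before failing or retrying the command. A compact model of that decoding; the RF_* bit values below are placeholders, the real definitions come from the firmware interface header.

    #include <stdint.h>

    /* Placeholder bit positions; the driver defines the RF_* flags in its
     * firmware interface header. */
    #define RF_INV_E_ORDER  0x10
    #define RF_INV_E_COUNT  0x20
    #define RF_INV_E_PARAM  0x40
    #define RF_INV_E_TYPE   0x80
    #define RF_BUSY         0x08

    /* Name the first entry_status flag that is set, mirroring the restored
     * handler's if/else chain. */
    static const char *error_entry_reason(uint8_t entry_status)
    {
            if (entry_status & RF_INV_E_ORDER)
                    return "Invalid Entry Order";
            if (entry_status & RF_INV_E_COUNT)
                    return "Invalid Entry Count";
            if (entry_status & RF_INV_E_PARAM)
                    return "Invalid Entry Parameter";
            if (entry_status & RF_INV_E_TYPE)
                    return "Invalid Entry Type";
            if (entry_status & RF_BUSY)
                    return "Busy";
            return "UNKNOWN flag error";
    }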
2300 | /** | 1971 | /** |
@@ -2306,31 +1977,27 @@ static void | |||
2306 | qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) | 1977 | qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) |
2307 | { | 1978 | { |
2308 | uint16_t cnt; | 1979 | uint16_t cnt; |
2309 | uint32_t mboxes; | ||
2310 | uint16_t __iomem *wptr; | 1980 | uint16_t __iomem *wptr; |
2311 | struct qla_hw_data *ha = vha->hw; | 1981 | struct qla_hw_data *ha = vha->hw; |
2312 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | 1982 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; |
2313 | 1983 | ||
2314 | /* Read all mbox registers? */ | ||
2315 | mboxes = (1 << ha->mbx_count) - 1; | ||
2316 | if (!ha->mcp) | ||
2317 | ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); | ||
2318 | else | ||
2319 | mboxes = ha->mcp->in_mb; | ||
2320 | |||
2321 | /* Load return mailbox registers. */ | 1984 | /* Load return mailbox registers. */ |
2322 | ha->flags.mbox_int = 1; | 1985 | ha->flags.mbox_int = 1; |
2323 | ha->mailbox_out[0] = mb0; | 1986 | ha->mailbox_out[0] = mb0; |
2324 | mboxes >>= 1; | ||
2325 | wptr = (uint16_t __iomem *)®->mailbox1; | 1987 | wptr = (uint16_t __iomem *)®->mailbox1; |
2326 | 1988 | ||
2327 | for (cnt = 1; cnt < ha->mbx_count; cnt++) { | 1989 | for (cnt = 1; cnt < ha->mbx_count; cnt++) { |
2328 | if (mboxes & BIT_0) | 1990 | ha->mailbox_out[cnt] = RD_REG_WORD(wptr); |
2329 | ha->mailbox_out[cnt] = RD_REG_WORD(wptr); | ||
2330 | |||
2331 | mboxes >>= 1; | ||
2332 | wptr++; | 1991 | wptr++; |
2333 | } | 1992 | } |
1993 | |||
1994 | if (ha->mcp) { | ||
1995 | ql_dbg(ql_dbg_async, vha, 0x504d, | ||
1996 | "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]); | ||
1997 | } else { | ||
1998 | ql_dbg(ql_dbg_async, vha, 0x504e, | ||
1999 | "MBX pointer ERROR.\n"); | ||
2000 | } | ||
2334 | } | 2001 | } |
2335 | 2002 | ||
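The removed qla24xx_mbx_completion() reads back only the mailbox registers the caller asked for, walking a bitmask (mcp->in_mb) alongside the register pointer, whereas the restored version copies every register. A sketch of the selective readback, with MBX_COUNT standing in for ha->mbx_count:

    #include <stdint.h>

    #define MBX_COUNT 32  /* placeholder for ha->mbx_count */

    /* Copy mailbox 0 (already latched from the status word) and then only
     * those registers whose bit is set in 'mboxes'; the others keep their
     * previous contents, mirroring the removed loop. */
    static void read_mailboxes(const volatile uint16_t *wptr, uint32_t mboxes,
                               uint16_t mb0, uint16_t out[MBX_COUNT])
    {
            out[0] = mb0;
            mboxes >>= 1;

            for (unsigned int cnt = 1; cnt < MBX_COUNT; cnt++, wptr++) {
                    if (mboxes & 1)
                            out[cnt] = *wptr;
                    mboxes >>= 1;
            }
    }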
2336 | /** | 2003 | /** |
@@ -2358,10 +2025,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, | |||
2358 | } | 2025 | } |
2359 | 2026 | ||
2360 | if (pkt->entry_status != 0) { | 2027 | if (pkt->entry_status != 0) { |
2361 | qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); | 2028 | ql_dbg(ql_dbg_async, vha, 0x5029, |
2362 | 2029 | "Process error entry.\n"); | |
2363 | (void)qlt_24xx_process_response_error(vha, pkt); | ||
2364 | 2030 | ||
2031 | qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); | ||
2365 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; | 2032 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; |
2366 | wmb(); | 2033 | wmb(); |
2367 | continue; | 2034 | continue; |
@@ -2388,23 +2055,11 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, | |||
2388 | break; | 2055 | break; |
2389 | case CT_IOCB_TYPE: | 2056 | case CT_IOCB_TYPE: |
2390 | qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); | 2057 | qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); |
2058 | clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags); | ||
2391 | break; | 2059 | break; |
2392 | case ELS_IOCB_TYPE: | 2060 | case ELS_IOCB_TYPE: |
2393 | qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); | 2061 | qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); |
2394 | break; | 2062 | break; |
2395 | case ABTS_RECV_24XX: | ||
2396 | /* ensure that the ATIO queue is empty */ | ||
2397 | qlt_24xx_process_atio_queue(vha); | ||
2398 | case ABTS_RESP_24XX: | ||
2399 | case CTIO_TYPE7: | ||
2400 | case NOTIFY_ACK_TYPE: | ||
2401 | qlt_response_pkt_all_vps(vha, (response_t *)pkt); | ||
2402 | break; | ||
2403 | case MARKER_TYPE: | ||
2404 | /* Do nothing in this case, this check is to prevent it | ||
2405 | * from falling into default case | ||
2406 | */ | ||
2407 | break; | ||
2408 | default: | 2063 | default: |
2409 | /* Type Not Supported. */ | 2064 | /* Type Not Supported. */ |
2410 | ql_dbg(ql_dbg_async, vha, 0x5042, | 2065 | ql_dbg(ql_dbg_async, vha, 0x5042, |
@@ -2433,7 +2088,7 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha) | |||
2433 | struct qla_hw_data *ha = vha->hw; | 2088 | struct qla_hw_data *ha = vha->hw; |
2434 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | 2089 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; |
2435 | 2090 | ||
2436 | if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) | 2091 | if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) |
2437 | return; | 2092 | return; |
2438 | 2093 | ||
2439 | rval = QLA_SUCCESS; | 2094 | rval = QLA_SUCCESS; |
@@ -2474,7 +2129,7 @@ done: | |||
2474 | } | 2129 | } |
2475 | 2130 | ||
2476 | /** | 2131 | /** |
2477 | * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx. | 2132 | * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. |
2478 | * @irq: | 2133 | * @irq: |
2479 | * @dev_id: SCSI driver HA context | 2134 | * @dev_id: SCSI driver HA context |
2480 | * | 2135 | * |
@@ -2492,14 +2147,14 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
2492 | unsigned long iter; | 2147 | unsigned long iter; |
2493 | uint32_t stat; | 2148 | uint32_t stat; |
2494 | uint32_t hccr; | 2149 | uint32_t hccr; |
2495 | uint16_t mb[8]; | 2150 | uint16_t mb[4]; |
2496 | struct rsp_que *rsp; | 2151 | struct rsp_que *rsp; |
2497 | unsigned long flags; | 2152 | unsigned long flags; |
2498 | 2153 | ||
2499 | rsp = (struct rsp_que *) dev_id; | 2154 | rsp = (struct rsp_que *) dev_id; |
2500 | if (!rsp) { | 2155 | if (!rsp) { |
2501 | ql_log(ql_log_info, NULL, 0x5059, | 2156 | printk(KERN_INFO |
2502 | "%s: NULL response queue pointer.\n", __func__); | 2157 | "%s(): NULL response queue pointer.\n", __func__); |
2503 | return IRQ_NONE; | 2158 | return IRQ_NONE; |
2504 | } | 2159 | } |
2505 | 2160 | ||
@@ -2533,30 +2188,23 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
2533 | break; | 2188 | break; |
2534 | 2189 | ||
2535 | switch (stat & 0xff) { | 2190 | switch (stat & 0xff) { |
2536 | case INTR_ROM_MB_SUCCESS: | 2191 | case 0x1: |
2537 | case INTR_ROM_MB_FAILED: | 2192 | case 0x2: |
2538 | case INTR_MB_SUCCESS: | 2193 | case 0x10: |
2539 | case INTR_MB_FAILED: | 2194 | case 0x11: |
2540 | qla24xx_mbx_completion(vha, MSW(stat)); | 2195 | qla24xx_mbx_completion(vha, MSW(stat)); |
2541 | status |= MBX_INTERRUPT; | 2196 | status |= MBX_INTERRUPT; |
2542 | 2197 | ||
2543 | break; | 2198 | break; |
2544 | case INTR_ASYNC_EVENT: | 2199 | case 0x12: |
2545 | mb[0] = MSW(stat); | 2200 | mb[0] = MSW(stat); |
2546 | mb[1] = RD_REG_WORD(®->mailbox1); | 2201 | mb[1] = RD_REG_WORD(®->mailbox1); |
2547 | mb[2] = RD_REG_WORD(®->mailbox2); | 2202 | mb[2] = RD_REG_WORD(®->mailbox2); |
2548 | mb[3] = RD_REG_WORD(®->mailbox3); | 2203 | mb[3] = RD_REG_WORD(®->mailbox3); |
2549 | qla2x00_async_event(vha, rsp, mb); | 2204 | qla2x00_async_event(vha, rsp, mb); |
2550 | break; | 2205 | break; |
2551 | case INTR_RSP_QUE_UPDATE: | 2206 | case 0x13: |
2552 | case INTR_RSP_QUE_UPDATE_83XX: | 2207 | case 0x14: |
2553 | qla24xx_process_response_queue(vha, rsp); | ||
2554 | break; | ||
2555 | case INTR_ATIO_QUE_UPDATE: | ||
2556 | qlt_24xx_process_atio_queue(vha); | ||
2557 | break; | ||
2558 | case INTR_ATIO_RSP_QUE_UPDATE: | ||
2559 | qlt_24xx_process_atio_queue(vha); | ||
2560 | qla24xx_process_response_queue(vha, rsp); | 2208 | qla24xx_process_response_queue(vha, rsp); |
2561 | break; | 2209 | break; |
2562 | default: | 2210 | default: |
@@ -2566,8 +2214,6 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
2566 | } | 2214 | } |
2567 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | 2215 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); |
2568 | RD_REG_DWORD_RELAXED(®->hccr); | 2216 | RD_REG_DWORD_RELAXED(®->hccr); |
2569 | if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) | ||
2570 | ndelay(3500); | ||
2571 | } | 2217 | } |
2572 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 2218 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
2573 | 2219 | ||
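In both variants of qla24xx_intr_handler() the 32-bit RISC status word is split the same way: the low byte selects the event class (mailbox completion, asynchronous event, response-queue update) and the upper half carries mailbox 0. The removed version spells the codes with INTR_* names and adds ATIO-queue cases; the restored one uses the literals. A small helper pair showing the split, with enum values taken from the literals visible above:

    #include <stdint.h>

    /* Event codes as they appear literally in the restored switch. */
    enum risc_event {
            RISC_ROM_MB_SUCCESS     = 0x1,
            RISC_ROM_MB_FAILED      = 0x2,
            RISC_MB_SUCCESS         = 0x10,
            RISC_MB_FAILED          = 0x11,
            RISC_ASYNC_EVENT        = 0x12,
            RISC_RSP_QUE_UPDATE     = 0x13,
            RISC_RSP_QUE_UPDATE_ALT = 0x14,
    };

    /* stat layout used by the handler: MSW(stat) is mailbox 0. */
    static inline uint8_t risc_event_code(uint32_t stat)
    {
            return stat & 0xff;
    }

    static inline uint16_t risc_mb0(uint32_t stat)
    {
            return (uint16_t)(stat >> 16);
    }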
@@ -2591,8 +2237,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) | |||
2591 | 2237 | ||
2592 | rsp = (struct rsp_que *) dev_id; | 2238 | rsp = (struct rsp_que *) dev_id; |
2593 | if (!rsp) { | 2239 | if (!rsp) { |
2594 | ql_log(ql_log_info, NULL, 0x505a, | 2240 | printk(KERN_INFO |
2595 | "%s: NULL response queue pointer.\n", __func__); | 2241 | "%s(): NULL response queue pointer.\n", __func__); |
2596 | return IRQ_NONE; | 2242 | return IRQ_NONE; |
2597 | } | 2243 | } |
2598 | ha = rsp->hw; | 2244 | ha = rsp->hw; |
@@ -2621,14 +2267,14 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) | |||
2621 | 2267 | ||
2622 | rsp = (struct rsp_que *) dev_id; | 2268 | rsp = (struct rsp_que *) dev_id; |
2623 | if (!rsp) { | 2269 | if (!rsp) { |
2624 | ql_log(ql_log_info, NULL, 0x505b, | 2270 | printk(KERN_INFO |
2625 | "%s: NULL response queue pointer.\n", __func__); | 2271 | "%s(): NULL response queue pointer.\n", __func__); |
2626 | return IRQ_NONE; | 2272 | return IRQ_NONE; |
2627 | } | 2273 | } |
2628 | ha = rsp->hw; | 2274 | ha = rsp->hw; |
2629 | 2275 | ||
2630 | /* Clear the interrupt, if enabled, for this response queue */ | 2276 | /* Clear the interrupt, if enabled, for this response queue */ |
2631 | if (!ha->flags.disable_msix_handshake) { | 2277 | if (rsp->options & ~BIT_6) { |
2632 | reg = &ha->iobase->isp24; | 2278 | reg = &ha->iobase->isp24; |
2633 | spin_lock_irqsave(&ha->hardware_lock, flags); | 2279 | spin_lock_irqsave(&ha->hardware_lock, flags); |
2634 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | 2280 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); |
@@ -2650,13 +2296,13 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
2650 | int status; | 2296 | int status; |
2651 | uint32_t stat; | 2297 | uint32_t stat; |
2652 | uint32_t hccr; | 2298 | uint32_t hccr; |
2653 | uint16_t mb[8]; | 2299 | uint16_t mb[4]; |
2654 | unsigned long flags; | 2300 | unsigned long flags; |
2655 | 2301 | ||
2656 | rsp = (struct rsp_que *) dev_id; | 2302 | rsp = (struct rsp_que *) dev_id; |
2657 | if (!rsp) { | 2303 | if (!rsp) { |
2658 | ql_log(ql_log_info, NULL, 0x505c, | 2304 | printk(KERN_INFO |
2659 | "%s: NULL response queue pointer.\n", __func__); | 2305 | "%s(): NULL response queue pointer.\n", __func__); |
2660 | return IRQ_NONE; | 2306 | return IRQ_NONE; |
2661 | } | 2307 | } |
2662 | ha = rsp->hw; | 2308 | ha = rsp->hw; |
@@ -2686,30 +2332,23 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
2686 | break; | 2332 | break; |
2687 | 2333 | ||
2688 | switch (stat & 0xff) { | 2334 | switch (stat & 0xff) { |
2689 | case INTR_ROM_MB_SUCCESS: | 2335 | case 0x1: |
2690 | case INTR_ROM_MB_FAILED: | 2336 | case 0x2: |
2691 | case INTR_MB_SUCCESS: | 2337 | case 0x10: |
2692 | case INTR_MB_FAILED: | 2338 | case 0x11: |
2693 | qla24xx_mbx_completion(vha, MSW(stat)); | 2339 | qla24xx_mbx_completion(vha, MSW(stat)); |
2694 | status |= MBX_INTERRUPT; | 2340 | status |= MBX_INTERRUPT; |
2695 | 2341 | ||
2696 | break; | 2342 | break; |
2697 | case INTR_ASYNC_EVENT: | 2343 | case 0x12: |
2698 | mb[0] = MSW(stat); | 2344 | mb[0] = MSW(stat); |
2699 | mb[1] = RD_REG_WORD(®->mailbox1); | 2345 | mb[1] = RD_REG_WORD(®->mailbox1); |
2700 | mb[2] = RD_REG_WORD(®->mailbox2); | 2346 | mb[2] = RD_REG_WORD(®->mailbox2); |
2701 | mb[3] = RD_REG_WORD(®->mailbox3); | 2347 | mb[3] = RD_REG_WORD(®->mailbox3); |
2702 | qla2x00_async_event(vha, rsp, mb); | 2348 | qla2x00_async_event(vha, rsp, mb); |
2703 | break; | 2349 | break; |
2704 | case INTR_RSP_QUE_UPDATE: | 2350 | case 0x13: |
2705 | case INTR_RSP_QUE_UPDATE_83XX: | 2351 | case 0x14: |
2706 | qla24xx_process_response_queue(vha, rsp); | ||
2707 | break; | ||
2708 | case INTR_ATIO_QUE_UPDATE: | ||
2709 | qlt_24xx_process_atio_queue(vha); | ||
2710 | break; | ||
2711 | case INTR_ATIO_RSP_QUE_UPDATE: | ||
2712 | qlt_24xx_process_atio_queue(vha); | ||
2713 | qla24xx_process_response_queue(vha, rsp); | 2352 | qla24xx_process_response_queue(vha, rsp); |
2714 | break; | 2353 | break; |
2715 | default: | 2354 | default: |
@@ -2852,14 +2491,8 @@ msix_failed: | |||
2852 | } | 2491 | } |
2853 | 2492 | ||
2854 | /* Enable MSI-X vector for response queue update for queue 0 */ | 2493 | /* Enable MSI-X vector for response queue update for queue 0 */ |
2855 | if (IS_QLA83XX(ha)) { | 2494 | if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) |
2856 | if (ha->msixbase && ha->mqiobase && | 2495 | ha->mqenable = 1; |
2857 | (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) | ||
2858 | ha->mqenable = 1; | ||
2859 | } else | ||
2860 | if (ha->mqiobase | ||
2861 | && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) | ||
2862 | ha->mqenable = 1; | ||
2863 | ql_dbg(ql_dbg_multiq, vha, 0xc005, | 2496 | ql_dbg(ql_dbg_multiq, vha, 0xc005, |
2864 | "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", | 2497 | "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", |
2865 | ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); | 2498 | ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); |
@@ -2880,8 +2513,8 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) | |||
2880 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); | 2513 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); |
2881 | 2514 | ||
2882 | /* If possible, enable MSI-X. */ | 2515 | /* If possible, enable MSI-X. */ |
2883 | if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && | 2516 | if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && |
2884 | !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) | 2517 | !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha)) |
2885 | goto skip_msi; | 2518 | goto skip_msi; |
2886 | 2519 | ||
2887 | if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && | 2520 | if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && |
@@ -2914,7 +2547,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) | |||
2914 | skip_msix: | 2547 | skip_msix: |
2915 | 2548 | ||
2916 | if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && | 2549 | if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && |
2917 | !IS_QLA8001(ha) && !IS_QLA82XX(ha)) | 2550 | !IS_QLA8001(ha)) |
2918 | goto skip_msi; | 2551 | goto skip_msi; |
2919 | 2552 | ||
2920 | ret = pci_enable_msi(ha->pdev); | 2553 | ret = pci_enable_msi(ha->pdev); |
@@ -2925,11 +2558,6 @@ skip_msix: | |||
2925 | } else | 2558 | } else |
2926 | ql_log(ql_log_warn, vha, 0x0039, | 2559 | ql_log(ql_log_warn, vha, 0x0039, |
2927 | "MSI-X; Falling back-to INTa mode -- %d.\n", ret); | 2560 | "MSI-X; Falling back-to INTa mode -- %d.\n", ret); |
2928 | |||
2929 | /* Skip INTx on ISP82xx. */ | ||
2930 | if (!ha->flags.msi_enabled && IS_QLA82XX(ha)) | ||
2931 | return QLA_FUNCTION_FAILED; | ||
2932 | |||
2933 | skip_msi: | 2561 | skip_msi: |
2934 | 2562 | ||
2935 | ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, | 2563 | ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, |
@@ -2940,15 +2568,25 @@ skip_msi: | |||
2940 | "Failed to reserve interrupt %d already in use.\n", | 2568 | "Failed to reserve interrupt %d already in use.\n", |
2941 | ha->pdev->irq); | 2569 | ha->pdev->irq); |
2942 | goto fail; | 2570 | goto fail; |
2943 | } else if (!ha->flags.msi_enabled) | 2571 | } |
2944 | ql_dbg(ql_dbg_init, vha, 0x0125, | ||
2945 | "INTa mode: Enabled.\n"); | ||
2946 | 2572 | ||
2947 | clear_risc_ints: | 2573 | clear_risc_ints: |
2948 | 2574 | ||
2575 | /* | ||
2576 | * FIXME: Noted that 8014s were being dropped during NK testing. | ||
2577 | * Timing deltas during MSI-X/INTa transitions? | ||
2578 | */ | ||
2579 | if (IS_QLA81XX(ha) || IS_QLA82XX(ha)) | ||
2580 | goto fail; | ||
2949 | spin_lock_irq(&ha->hardware_lock); | 2581 | spin_lock_irq(&ha->hardware_lock); |
2950 | if (!IS_FWI2_CAPABLE(ha)) | 2582 | if (IS_FWI2_CAPABLE(ha)) { |
2583 | WRT_REG_DWORD(®->isp24.hccr, HCCRX_CLR_HOST_INT); | ||
2584 | WRT_REG_DWORD(®->isp24.hccr, HCCRX_CLR_RISC_INT); | ||
2585 | } else { | ||
2951 | WRT_REG_WORD(®->isp.semaphore, 0); | 2586 | WRT_REG_WORD(®->isp.semaphore, 0); |
2587 | WRT_REG_WORD(®->isp.hccr, HCCR_CLR_RISC_INT); | ||
2588 | WRT_REG_WORD(®->isp.hccr, HCCR_CLR_HOST_INT); | ||
2589 | } | ||
2952 | spin_unlock_irq(&ha->hardware_lock); | 2590 | spin_unlock_irq(&ha->hardware_lock); |
2953 | 2591 | ||
2954 | fail: | 2592 | fail: |
@@ -2959,15 +2597,7 @@ void | |||
2959 | qla2x00_free_irqs(scsi_qla_host_t *vha) | 2597 | qla2x00_free_irqs(scsi_qla_host_t *vha) |
2960 | { | 2598 | { |
2961 | struct qla_hw_data *ha = vha->hw; | 2599 | struct qla_hw_data *ha = vha->hw; |
2962 | struct rsp_que *rsp; | 2600 | struct rsp_que *rsp = ha->rsp_q_map[0]; |
2963 | |||
2964 | /* | ||
2965 | * We need to check that ha->rsp_q_map is valid in case we are called | ||
2966 | * from a probe failure context. | ||
2967 | */ | ||
2968 | if (!ha->rsp_q_map || !ha->rsp_q_map[0]) | ||
2969 | return; | ||
2970 | rsp = ha->rsp_q_map[0]; | ||
2971 | 2601 | ||
2972 | if (ha->flags.msix_enabled) | 2602 | if (ha->flags.msix_enabled) |
2973 | qla24xx_disable_msix(ha); | 2603 | qla24xx_disable_msix(ha); |
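The qla2x00_request_irqs() hunks above implement a fallback ladder: try MSI-X, fall back to MSI, and finally claim a legacy INTx line with request_irq(), with chip-capability checks gating each step. A minimal sketch of that control flow, using stub probe functions in place of the real PCI/IRQ calls:

    #include <stdio.h>

    /* Stubs standing in for qla24xx_enable_msix(), pci_enable_msi() and
     * request_irq(); 0 means success, as for the real calls. */
    static int try_msix(void) { return -1; }  /* pretend MSI-X is unavailable */
    static int try_msi(void)  { return 0;  }
    static int try_intx(void) { return 0;  }

    /* MSI-X first, then MSI, then legacy INTx; in the driver each step is
     * skipped for chips that do not support it. */
    static const char *setup_interrupts(void)
    {
            if (try_msix() == 0)
                    return "msi-x";
            if (try_msi() == 0)
                    return "msi";
            if (try_intx() == 0)
                    return "intx";
            return "none";
    }

    int main(void)
    {
            printf("interrupt mode: %s\n", setup_interrupts());
            return 0;
    }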
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 68c55eaa318..f7604ea1af8 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -1,11 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | #include "qla_def.h" | 7 | #include "qla_def.h" |
8 | #include "qla_target.h" | ||
9 | 8 | ||
10 | #include <linux/delay.h> | 9 | #include <linux/delay.h> |
11 | #include <linux/gfp.h> | 10 | #include <linux/gfp.h> |
@@ -47,17 +46,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
47 | struct qla_hw_data *ha = vha->hw; | 46 | struct qla_hw_data *ha = vha->hw; |
48 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); | 47 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); |
49 | 48 | ||
50 | ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__); | 49 | ql_dbg(ql_dbg_mbx, base_vha, 0x1000, "Entered %s.\n", __func__); |
51 | 50 | ||
52 | if (ha->pdev->error_state > pci_channel_io_frozen) { | 51 | if (ha->pdev->error_state > pci_channel_io_frozen) { |
53 | ql_log(ql_log_warn, vha, 0x1001, | 52 | ql_log(ql_log_warn, base_vha, 0x1001, |
54 | "error_state is greater than pci_channel_io_frozen, " | 53 | "error_state is greater than pci_channel_io_frozen, " |
55 | "exiting.\n"); | 54 | "exiting.\n"); |
56 | return QLA_FUNCTION_TIMEOUT; | 55 | return QLA_FUNCTION_TIMEOUT; |
57 | } | 56 | } |
58 | 57 | ||
59 | if (vha->device_flags & DFLG_DEV_FAILED) { | 58 | if (vha->device_flags & DFLG_DEV_FAILED) { |
60 | ql_log(ql_log_warn, vha, 0x1002, | 59 | ql_log(ql_log_warn, base_vha, 0x1002, |
61 | "Device in failed state, exiting.\n"); | 60 | "Device in failed state, exiting.\n"); |
62 | return QLA_FUNCTION_TIMEOUT; | 61 | return QLA_FUNCTION_TIMEOUT; |
63 | } | 62 | } |
@@ -70,17 +69,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
70 | 69 | ||
71 | 70 | ||
72 | if (ha->flags.pci_channel_io_perm_failure) { | 71 | if (ha->flags.pci_channel_io_perm_failure) { |
73 | ql_log(ql_log_warn, vha, 0x1003, | 72 | ql_log(ql_log_warn, base_vha, 0x1003, |
74 | "Perm failure on EEH timeout MBX, exiting.\n"); | 73 | "Perm failure on EEH timeout MBX, exiting.\n"); |
75 | return QLA_FUNCTION_TIMEOUT; | 74 | return QLA_FUNCTION_TIMEOUT; |
76 | } | 75 | } |
77 | 76 | ||
78 | if (IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung) { | 77 | if (ha->flags.isp82xx_fw_hung) { |
79 | /* Setting Link-Down error */ | 78 | /* Setting Link-Down error */ |
80 | mcp->mb[0] = MBS_LINK_DOWN_ERROR; | 79 | mcp->mb[0] = MBS_LINK_DOWN_ERROR; |
81 | ql_log(ql_log_warn, vha, 0x1004, | 80 | ql_log(ql_log_warn, base_vha, 0x1004, |
82 | "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); | 81 | "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); |
83 | return QLA_FUNCTION_TIMEOUT; | 82 | rval = QLA_FUNCTION_FAILED; |
83 | goto premature_exit; | ||
84 | } | 84 | } |
85 | 85 | ||
86 | /* | 86 | /* |
@@ -90,9 +90,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
90 | */ | 90 | */ |
91 | if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { | 91 | if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { |
92 | /* Timeout occurred. Return error. */ | 92 | /* Timeout occurred. Return error. */ |
93 | ql_log(ql_log_warn, vha, 0x1005, | 93 | ql_log(ql_log_warn, base_vha, 0x1005, |
94 | "Cmd access timeout, cmd=0x%x, Exiting.\n", | 94 | "Cmd access timeout, Exiting.\n"); |
95 | mcp->mb[0]); | ||
96 | return QLA_FUNCTION_TIMEOUT; | 95 | return QLA_FUNCTION_TIMEOUT; |
97 | } | 96 | } |
98 | 97 | ||
@@ -100,7 +99,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
100 | /* Save mailbox command for debug */ | 99 | /* Save mailbox command for debug */ |
101 | ha->mcp = mcp; | 100 | ha->mcp = mcp; |
102 | 101 | ||
103 | ql_dbg(ql_dbg_mbx, vha, 0x1006, | 102 | ql_dbg(ql_dbg_mbx, base_vha, 0x1006, |
104 | "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]); | 103 | "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]); |
105 | 104 | ||
106 | spin_lock_irqsave(&ha->hardware_lock, flags); | 105 | spin_lock_irqsave(&ha->hardware_lock, flags); |
@@ -129,28 +128,28 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
129 | iptr++; | 128 | iptr++; |
130 | } | 129 | } |
131 | 130 | ||
132 | ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111, | 131 | ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1111, |
133 | "Loaded MBX registers (displayed in bytes) =.\n"); | 132 | "Loaded MBX registers (displayed in bytes) =.\n"); |
134 | ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1112, | 133 | ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1112, |
135 | (uint8_t *)mcp->mb, 16); | 134 | (uint8_t *)mcp->mb, 16); |
136 | ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1113, | 135 | ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1113, |
137 | ".\n"); | 136 | ".\n"); |
138 | ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1114, | 137 | ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1114, |
139 | ((uint8_t *)mcp->mb + 0x10), 16); | 138 | ((uint8_t *)mcp->mb + 0x10), 16); |
140 | ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1115, | 139 | ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1115, |
141 | ".\n"); | 140 | ".\n"); |
142 | ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1116, | 141 | ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1116, |
143 | ((uint8_t *)mcp->mb + 0x20), 8); | 142 | ((uint8_t *)mcp->mb + 0x20), 8); |
144 | ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117, | 143 | ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1117, |
145 | "I/O Address = %p.\n", optr); | 144 | "I/O Address = %p.\n", optr); |
146 | ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x100e); | 145 | ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x100e); |
147 | 146 | ||
148 | /* Issue set host interrupt command to send cmd out. */ | 147 | /* Issue set host interrupt command to send cmd out. */ |
149 | ha->flags.mbox_int = 0; | 148 | ha->flags.mbox_int = 0; |
150 | clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); | 149 | clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); |
151 | 150 | ||
152 | /* Unlock mbx registers and wait for interrupt */ | 151 | /* Unlock mbx registers and wait for interrupt */ |
153 | ql_dbg(ql_dbg_mbx, vha, 0x100f, | 152 | ql_dbg(ql_dbg_mbx, base_vha, 0x100f, |
154 | "Going to unlock irq & waiting for interrupts. " | 153 | "Going to unlock irq & waiting for interrupts. " |
155 | "jiffies=%lx.\n", jiffies); | 154 | "jiffies=%lx.\n", jiffies); |
156 | 155 | ||
@@ -164,8 +163,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
164 | HINT_MBX_INT_PENDING) { | 163 | HINT_MBX_INT_PENDING) { |
165 | spin_unlock_irqrestore(&ha->hardware_lock, | 164 | spin_unlock_irqrestore(&ha->hardware_lock, |
166 | flags); | 165 | flags); |
167 | ha->flags.mbox_busy = 0; | 166 | ql_dbg(ql_dbg_mbx, base_vha, 0x1010, |
168 | ql_dbg(ql_dbg_mbx, vha, 0x1010, | ||
169 | "Pending mailbox timeout, exiting.\n"); | 167 | "Pending mailbox timeout, exiting.\n"); |
170 | rval = QLA_FUNCTION_TIMEOUT; | 168 | rval = QLA_FUNCTION_TIMEOUT; |
171 | goto premature_exit; | 169 | goto premature_exit; |
@@ -182,7 +180,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
182 | clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); | 180 | clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); |
183 | 181 | ||
184 | } else { | 182 | } else { |
185 | ql_dbg(ql_dbg_mbx, vha, 0x1011, | 183 | ql_dbg(ql_dbg_mbx, base_vha, 0x1011, |
186 | "Cmd=%x Polling Mode.\n", command); | 184 | "Cmd=%x Polling Mode.\n", command); |
187 | 185 | ||
188 | if (IS_QLA82XX(ha)) { | 186 | if (IS_QLA82XX(ha)) { |
@@ -190,8 +188,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
190 | HINT_MBX_INT_PENDING) { | 188 | HINT_MBX_INT_PENDING) { |
191 | spin_unlock_irqrestore(&ha->hardware_lock, | 189 | spin_unlock_irqrestore(&ha->hardware_lock, |
192 | flags); | 190 | flags); |
193 | ha->flags.mbox_busy = 0; | 191 | ql_dbg(ql_dbg_mbx, base_vha, 0x1012, |
194 | ql_dbg(ql_dbg_mbx, vha, 0x1012, | ||
195 | "Pending mailbox timeout, exiting.\n"); | 192 | "Pending mailbox timeout, exiting.\n"); |
196 | rval = QLA_FUNCTION_TIMEOUT; | 193 | rval = QLA_FUNCTION_TIMEOUT; |
197 | goto premature_exit; | 194 | goto premature_exit; |
@@ -216,7 +213,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
216 | command == MBC_LOAD_RISC_RAM_EXTENDED)) | 213 | command == MBC_LOAD_RISC_RAM_EXTENDED)) |
217 | msleep(10); | 214 | msleep(10); |
218 | } /* while */ | 215 | } /* while */ |
219 | ql_dbg(ql_dbg_mbx, vha, 0x1013, | 216 | ql_dbg(ql_dbg_mbx, base_vha, 0x1013, |
220 | "Waited %d sec.\n", | 217 | "Waited %d sec.\n", |
221 | (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)); | 218 | (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)); |
222 | } | 219 | } |
@@ -225,20 +222,20 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
225 | if (ha->flags.mbox_int) { | 222 | if (ha->flags.mbox_int) { |
226 | uint16_t *iptr2; | 223 | uint16_t *iptr2; |
227 | 224 | ||
228 | ql_dbg(ql_dbg_mbx, vha, 0x1014, | 225 | ql_dbg(ql_dbg_mbx, base_vha, 0x1014, |
229 | "Cmd=%x completed.\n", command); | 226 | "Cmd=%x completed.\n", command); |
230 | 227 | ||
231 | /* Got interrupt. Clear the flag. */ | 228 | /* Got interrupt. Clear the flag. */ |
232 | ha->flags.mbox_int = 0; | 229 | ha->flags.mbox_int = 0; |
233 | clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); | 230 | clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); |
234 | 231 | ||
235 | if ((IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung)) { | 232 | if (ha->flags.isp82xx_fw_hung) { |
236 | ha->flags.mbox_busy = 0; | 233 | ha->flags.mbox_busy = 0; |
237 | /* Setting Link-Down error */ | 234 | /* Setting Link-Down error */ |
238 | mcp->mb[0] = MBS_LINK_DOWN_ERROR; | 235 | mcp->mb[0] = MBS_LINK_DOWN_ERROR; |
239 | ha->mcp = NULL; | 236 | ha->mcp = NULL; |
240 | rval = QLA_FUNCTION_FAILED; | 237 | rval = QLA_FUNCTION_FAILED; |
241 | ql_log(ql_log_warn, vha, 0x1015, | 238 | ql_log(ql_log_warn, base_vha, 0x1015, |
242 | "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); | 239 | "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); |
243 | goto premature_exit; | 240 | goto premature_exit; |
244 | } | 241 | } |
@@ -270,16 +267,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
270 | mb0 = RD_MAILBOX_REG(ha, ®->isp, 0); | 267 | mb0 = RD_MAILBOX_REG(ha, ®->isp, 0); |
271 | ictrl = RD_REG_WORD(®->isp.ictrl); | 268 | ictrl = RD_REG_WORD(®->isp.ictrl); |
272 | } | 269 | } |
273 | ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, | 270 | ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1119, |
274 | "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " | 271 | "MBX Command timeout for cmd %x.\n", command); |
275 | "mb[0]=0x%x\n", command, ictrl, jiffies, mb0); | 272 | ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111a, |
276 | ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); | 273 | "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies); |
277 | 274 | ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111b, | |
278 | /* | 275 | "mb[0] = 0x%x.\n", mb0); |
279 | * Attempt to capture a firmware dump for further analysis | 276 | ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1019); |
280 | * of the current firmware state | ||
281 | */ | ||
282 | ha->isp_ops->fw_dump(vha, 0); | ||
283 | 277 | ||
284 | rval = QLA_FUNCTION_TIMEOUT; | 278 | rval = QLA_FUNCTION_TIMEOUT; |
285 | } | 279 | } |
@@ -290,7 +284,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
290 | ha->mcp = NULL; | 284 | ha->mcp = NULL; |
291 | 285 | ||
292 | if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { | 286 | if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { |
293 | ql_dbg(ql_dbg_mbx, vha, 0x101a, | 287 | ql_dbg(ql_dbg_mbx, base_vha, 0x101a, |
294 | "Checking for additional resp interrupt.\n"); | 288 | "Checking for additional resp interrupt.\n"); |
295 | 289 | ||
296 | /* polling mode for non isp_abort commands. */ | 290 | /* polling mode for non isp_abort commands. */ |
@@ -302,63 +296,43 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
302 | if (!io_lock_on || (mcp->flags & IOCTL_CMD) || | 296 | if (!io_lock_on || (mcp->flags & IOCTL_CMD) || |
303 | ha->flags.eeh_busy) { | 297 | ha->flags.eeh_busy) { |
304 | /* not in dpc. schedule it for dpc to take over. */ | 298 | /* not in dpc. schedule it for dpc to take over. */ |
305 | ql_dbg(ql_dbg_mbx, vha, 0x101b, | 299 | ql_dbg(ql_dbg_mbx, base_vha, 0x101b, |
306 | "Timeout, schedule isp_abort_needed.\n"); | 300 | "Timeout, schedule isp_abort_needed.\n"); |
307 | 301 | ||
308 | if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && | 302 | if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && |
309 | !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && | 303 | !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && |
310 | !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { | 304 | !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { |
311 | if (IS_QLA82XX(ha)) { | 305 | |
312 | ql_dbg(ql_dbg_mbx, vha, 0x112a, | ||
313 | "disabling pause transmit on port " | ||
314 | "0 & 1.\n"); | ||
315 | qla82xx_wr_32(ha, | ||
316 | QLA82XX_CRB_NIU + 0x98, | ||
317 | CRB_NIU_XG_PAUSE_CTL_P0| | ||
318 | CRB_NIU_XG_PAUSE_CTL_P1); | ||
319 | } | ||
320 | ql_log(ql_log_info, base_vha, 0x101c, | 306 | ql_log(ql_log_info, base_vha, 0x101c, |
321 | "Mailbox cmd timeout occurred, cmd=0x%x, " | 307 | "Mailbox cmd timeout occured. " |
322 | "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP " | 308 | "Scheduling ISP abort eeh_busy=0x%x.\n", |
323 | "abort.\n", command, mcp->mb[0], | 309 | ha->flags.eeh_busy); |
324 | ha->flags.eeh_busy); | ||
325 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | 310 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
326 | qla2xxx_wake_dpc(vha); | 311 | qla2xxx_wake_dpc(vha); |
327 | } | 312 | } |
328 | } else if (!abort_active) { | 313 | } else if (!abort_active) { |
329 | /* call abort directly since we are in the DPC thread */ | 314 | /* call abort directly since we are in the DPC thread */ |
330 | ql_dbg(ql_dbg_mbx, vha, 0x101d, | 315 | ql_dbg(ql_dbg_mbx, base_vha, 0x101d, |
331 | "Timeout, calling abort_isp.\n"); | 316 | "Timeout, calling abort_isp.\n"); |
332 | 317 | ||
333 | if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && | 318 | if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && |
334 | !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && | 319 | !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && |
335 | !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { | 320 | !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { |
336 | if (IS_QLA82XX(ha)) { | 321 | |
337 | ql_dbg(ql_dbg_mbx, vha, 0x112b, | ||
338 | "disabling pause transmit on port " | ||
339 | "0 & 1.\n"); | ||
340 | qla82xx_wr_32(ha, | ||
341 | QLA82XX_CRB_NIU + 0x98, | ||
342 | CRB_NIU_XG_PAUSE_CTL_P0| | ||
343 | CRB_NIU_XG_PAUSE_CTL_P1); | ||
344 | } | ||
345 | ql_log(ql_log_info, base_vha, 0x101e, | 322 | ql_log(ql_log_info, base_vha, 0x101e, |
346 | "Mailbox cmd timeout occurred, cmd=0x%x, " | 323 | "Mailbox cmd timeout occured. " |
347 | "mb[0]=0x%x. Scheduling ISP abort ", | 324 | "Scheduling ISP abort.\n"); |
348 | command, mcp->mb[0]); | 325 | |
349 | set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); | 326 | set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); |
350 | clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | 327 | clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
351 | /* Allow next mbx cmd to come in. */ | ||
352 | complete(&ha->mbx_cmd_comp); | ||
353 | if (ha->isp_ops->abort_isp(vha)) { | 328 | if (ha->isp_ops->abort_isp(vha)) { |
354 | /* Failed. retry later. */ | 329 | /* Failed. retry later. */ |
355 | set_bit(ISP_ABORT_NEEDED, | 330 | set_bit(ISP_ABORT_NEEDED, |
356 | &vha->dpc_flags); | 331 | &vha->dpc_flags); |
357 | } | 332 | } |
358 | clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); | 333 | clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); |
359 | ql_dbg(ql_dbg_mbx, vha, 0x101f, | 334 | ql_dbg(ql_dbg_mbx, base_vha, 0x101f, |
360 | "Finished abort_isp.\n"); | 335 | "Finished abort_isp.\n"); |
361 | goto mbx_done; | ||
362 | } | 336 | } |
363 | } | 337 | } |
364 | } | 338 | } |
@@ -367,11 +341,10 @@ premature_exit: | |||
367 | /* Allow next mbx cmd to come in. */ | 341 | /* Allow next mbx cmd to come in. */ |
368 | complete(&ha->mbx_cmd_comp); | 342 | complete(&ha->mbx_cmd_comp); |
369 | 343 | ||
370 | mbx_done: | ||
371 | if (rval) { | 344 | if (rval) { |
372 | ql_log(ql_log_warn, base_vha, 0x1020, | 345 | ql_dbg(ql_dbg_mbx, base_vha, 0x1020, |
373 | "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n", | 346 | "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, cmd=%x ****.\n", |
374 | mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command); | 347 | mcp->mb[0], mcp->mb[1], mcp->mb[2], command); |
375 | } else { | 348 | } else { |
376 | ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__); | 349 | ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__); |
377 | } | 350 | } |
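When interrupts are unavailable, qla2x00_mailbox_command() falls back to the polling branch shown above: it spins on the mailbox-interrupt flag until the command's timeout (mcp->tov) elapses, sleeping briefly between checks for long-running commands. A simplified userspace model of that wait, assuming a plain flag in place of the hardware registers:

    #include <stdbool.h>
    #include <time.h>

    /* Poll a completion flag until it is set or 'tov_seconds' pass; the
     * caller maps a false return to QLA_FUNCTION_TIMEOUT, as the driver
     * does. */
    static bool poll_mbx_complete(const volatile int *mbox_int,
                                  unsigned int tov_seconds)
    {
            time_t deadline = time(NULL) + tov_seconds;

            while (!*mbox_int) {
                    if (time(NULL) > deadline)
                            return false;
                    /* the driver sleeps ~10 ms here (msleep(10)) for
                     * RAM-load and similarly slow commands */
            }
            return true;
    }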
@@ -388,8 +361,7 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr, | |||
388 | mbx_cmd_t mc; | 361 | mbx_cmd_t mc; |
389 | mbx_cmd_t *mcp = &mc; | 362 | mbx_cmd_t *mcp = &mc; |
390 | 363 | ||
391 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022, | 364 | ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__); |
392 | "Entered %s.\n", __func__); | ||
393 | 365 | ||
394 | if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { | 366 | if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { |
395 | mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; | 367 | mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; |
@@ -423,8 +395,7 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr, | |||
423 | ql_dbg(ql_dbg_mbx, vha, 0x1023, | 395 | ql_dbg(ql_dbg_mbx, vha, 0x1023, |
424 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | 396 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); |
425 | } else { | 397 | } else { |
426 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024, | 398 | ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__); |
427 | "Done %s.\n", __func__); | ||
428 | } | 399 | } |
429 | 400 | ||
430 | return rval; | 401 | return rval; |
@@ -454,8 +425,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) | |||
454 | mbx_cmd_t mc; | 425 | mbx_cmd_t mc; |
455 | mbx_cmd_t *mcp = &mc; | 426 | mbx_cmd_t *mcp = &mc; |
456 | 427 | ||
457 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025, | 428 | ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__); |
458 | "Entered %s.\n", __func__); | ||
459 | 429 | ||
460 | mcp->mb[0] = MBC_EXECUTE_FIRMWARE; | 430 | mcp->mb[0] = MBC_EXECUTE_FIRMWARE; |
461 | mcp->out_mb = MBX_0; | 431 | mcp->out_mb = MBX_0; |
@@ -464,7 +434,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) | |||
464 | mcp->mb[1] = MSW(risc_addr); | 434 | mcp->mb[1] = MSW(risc_addr); |
465 | mcp->mb[2] = LSW(risc_addr); | 435 | mcp->mb[2] = LSW(risc_addr); |
466 | mcp->mb[3] = 0; | 436 | mcp->mb[3] = 0; |
467 | if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) { | 437 | if (IS_QLA81XX(ha)) { |
468 | struct nvram_81xx *nv = ha->nvram; | 438 | struct nvram_81xx *nv = ha->nvram; |
469 | mcp->mb[4] = (nv->enhanced_features & | 439 | mcp->mb[4] = (nv->enhanced_features & |
470 | EXTENDED_BB_CREDITS); | 440 | EXTENDED_BB_CREDITS); |
@@ -490,11 +460,10 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) | |||
490 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | 460 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); |
491 | } else { | 461 | } else { |
492 | if (IS_FWI2_CAPABLE(ha)) { | 462 | if (IS_FWI2_CAPABLE(ha)) { |
493 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027, | 463 | ql_dbg(ql_dbg_mbx, vha, 0x1027, |
494 | "Done exchanges=%x.\n", mcp->mb[1]); | 464 | "Done exchanges=%x.\n", mcp->mb[1]); |
495 | } else { | 465 | } else { |
496 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028, | 466 | ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__); |
497 | "Done %s.\n", __func__); | ||
498 | } | 467 | } |
499 | } | 468 | } |
500 | 469 | ||
@@ -518,23 +487,21 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) | |||
518 | * Kernel context. | 487 | * Kernel context. |
519 | */ | 488 | */ |
520 | int | 489 | int |
521 | qla2x00_get_fw_version(scsi_qla_host_t *vha) | 490 | qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, |
491 | uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi, | ||
492 | uint32_t *mpi_caps, uint8_t *phy) | ||
522 | { | 493 | { |
523 | int rval; | 494 | int rval; |
524 | mbx_cmd_t mc; | 495 | mbx_cmd_t mc; |
525 | mbx_cmd_t *mcp = &mc; | 496 | mbx_cmd_t *mcp = &mc; |
526 | struct qla_hw_data *ha = vha->hw; | ||
527 | 497 | ||
528 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029, | 498 | ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__); |
529 | "Entered %s.\n", __func__); | ||
530 | 499 | ||
531 | mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; | 500 | mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; |
532 | mcp->out_mb = MBX_0; | 501 | mcp->out_mb = MBX_0; |
533 | mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | 502 | mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; |
534 | if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha)) | 503 | if (IS_QLA81XX(vha->hw)) |
535 | mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8; | 504 | mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8; |
536 | if (IS_FWI2_CAPABLE(ha)) | ||
537 | mcp->in_mb |= MBX_17|MBX_16|MBX_15; | ||
538 | mcp->flags = 0; | 505 | mcp->flags = 0; |
539 | mcp->tov = MBX_TOV_SECONDS; | 506 | mcp->tov = MBX_TOV_SECONDS; |
540 | rval = qla2x00_mailbox_command(vha, mcp); | 507 | rval = qla2x00_mailbox_command(vha, mcp); |
@@ -542,43 +509,30 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) | |||
542 | goto failed; | 509 | goto failed; |
543 | 510 | ||
544 | /* Return mailbox data. */ | 511 | /* Return mailbox data. */ |
545 | ha->fw_major_version = mcp->mb[1]; | 512 | *major = mcp->mb[1]; |
546 | ha->fw_minor_version = mcp->mb[2]; | 513 | *minor = mcp->mb[2]; |
547 | ha->fw_subminor_version = mcp->mb[3]; | 514 | *subminor = mcp->mb[3]; |
548 | ha->fw_attributes = mcp->mb[6]; | 515 | *attributes = mcp->mb[6]; |
549 | if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw)) | 516 | if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw)) |
550 | ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */ | 517 | *memory = 0x1FFFF; /* Defaults to 128KB. */ |
551 | else | 518 | else |
552 | ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4]; | 519 | *memory = (mcp->mb[5] << 16) | mcp->mb[4]; |
553 | if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) { | 520 | if (IS_QLA81XX(vha->hw)) { |
554 | ha->mpi_version[0] = mcp->mb[10] & 0xff; | 521 | mpi[0] = mcp->mb[10] & 0xff; |
555 | ha->mpi_version[1] = mcp->mb[11] >> 8; | 522 | mpi[1] = mcp->mb[11] >> 8; |
556 | ha->mpi_version[2] = mcp->mb[11] & 0xff; | 523 | mpi[2] = mcp->mb[11] & 0xff; |
557 | ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13]; | 524 | *mpi_caps = (mcp->mb[12] << 16) | mcp->mb[13]; |
558 | ha->phy_version[0] = mcp->mb[8] & 0xff; | 525 | phy[0] = mcp->mb[8] & 0xff; |
559 | ha->phy_version[1] = mcp->mb[9] >> 8; | 526 | phy[1] = mcp->mb[9] >> 8; |
560 | ha->phy_version[2] = mcp->mb[9] & 0xff; | 527 | phy[2] = mcp->mb[9] & 0xff; |
561 | } | 528 | } |
562 | if (IS_FWI2_CAPABLE(ha)) { | ||
563 | ha->fw_attributes_h = mcp->mb[15]; | ||
564 | ha->fw_attributes_ext[0] = mcp->mb[16]; | ||
565 | ha->fw_attributes_ext[1] = mcp->mb[17]; | ||
566 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139, | ||
567 | "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n", | ||
568 | __func__, mcp->mb[15], mcp->mb[6]); | ||
569 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f, | ||
570 | "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n", | ||
571 | __func__, mcp->mb[17], mcp->mb[16]); | ||
572 | } | ||
573 | |||
574 | failed: | 529 | failed: |
575 | if (rval != QLA_SUCCESS) { | 530 | if (rval != QLA_SUCCESS) { |
576 | /*EMPTY*/ | 531 | /*EMPTY*/ |
577 | ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval); | 532 | ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval); |
578 | } else { | 533 | } else { |
579 | /*EMPTY*/ | 534 | /*EMPTY*/ |
580 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b, | 535 | ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__); |
581 | "Done %s.\n", __func__); | ||
582 | } | 536 | } |
583 | return rval; | 537 | return rval; |
584 | } | 538 | } |
@@ -604,8 +558,7 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) | |||
604 | mbx_cmd_t mc; | 558 | mbx_cmd_t mc; |
605 | mbx_cmd_t *mcp = &mc; | 559 | mbx_cmd_t *mcp = &mc; |
606 | 560 | ||
607 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c, | 561 | ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__); |
608 | "Entered %s.\n", __func__); | ||
609 | 562 | ||
610 | mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; | 563 | mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; |
611 | mcp->out_mb = MBX_0; | 564 | mcp->out_mb = MBX_0; |
@@ -623,8 +576,7 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) | |||
623 | fwopts[2] = mcp->mb[2]; | 576 | fwopts[2] = mcp->mb[2]; |
624 | fwopts[3] = mcp->mb[3]; | 577 | fwopts[3] = mcp->mb[3]; |
625 | 578 | ||
626 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e, | 579 | ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__); |
627 | "Done %s.\n", __func__); | ||
628 | } | 580 | } |
629 | 581 | ||
630 | return rval; | 582 | return rval; |
@@ -652,8 +604,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) | |||
652 | mbx_cmd_t mc; | 604 | mbx_cmd_t mc; |
653 | mbx_cmd_t *mcp = &mc; | 605 | mbx_cmd_t *mcp = &mc; |
654 | 606 | ||
655 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f, | 607 | ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__); |
656 | "Entered %s.\n", __func__); | ||
657 | 608 | ||
658 | mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; | 609 | mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; |
659 | mcp->mb[1] = fwopts[1]; | 610 | mcp->mb[1] = fwopts[1]; |
@@ -681,8 +632,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) | |||
681 | "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]); | 632 | "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]); |
682 | } else { | 633 | } else { |
683 | /*EMPTY*/ | 634 | /*EMPTY*/ |
684 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031, | 635 | ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__); |
685 | "Done %s.\n", __func__); | ||
686 | } | 636 | } |
687 | 637 | ||
688 | return rval; | 638 | return rval; |
@@ -710,8 +660,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha) | |||
710 | mbx_cmd_t mc; | 660 | mbx_cmd_t mc; |
711 | mbx_cmd_t *mcp = &mc; | 661 | mbx_cmd_t *mcp = &mc; |
712 | 662 | ||
713 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032, | 663 | ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__); |
714 | "Entered %s.\n", __func__); | ||
715 | 664 | ||
716 | mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; | 665 | mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; |
717 | mcp->mb[1] = 0xAAAA; | 666 | mcp->mb[1] = 0xAAAA; |
@@ -741,8 +690,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha) | |||
741 | ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval); | 690 | ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval); |
742 | } else { | 691 | } else { |
743 | /*EMPTY*/ | 692 | /*EMPTY*/ |
744 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034, | 693 | ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__); |
745 | "Done %s.\n", __func__); | ||
746 | } | 694 | } |
747 | 695 | ||
748 | return rval; | 696 | return rval; |
@@ -770,8 +718,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr) | |||
770 | mbx_cmd_t mc; | 718 | mbx_cmd_t mc; |
771 | mbx_cmd_t *mcp = &mc; | 719 | mbx_cmd_t *mcp = &mc; |
772 | 720 | ||
773 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035, | 721 | ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__); |
774 | "Entered %s.\n", __func__); | ||
775 | 722 | ||
776 | mcp->mb[0] = MBC_VERIFY_CHECKSUM; | 723 | mcp->mb[0] = MBC_VERIFY_CHECKSUM; |
777 | mcp->out_mb = MBX_0; | 724 | mcp->out_mb = MBX_0; |
@@ -796,8 +743,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr) | |||
796 | "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ? | 743 | "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ? |
797 | (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]); | 744 | (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]); |
798 | } else { | 745 | } else { |
799 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037, | 746 | ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__); |
800 | "Done %s.\n", __func__); | ||
801 | } | 747 | } |
802 | 748 | ||
803 | return rval; | 749 | return rval; |
@@ -829,8 +775,7 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer, | |||
829 | mbx_cmd_t mc; | 775 | mbx_cmd_t mc; |
830 | mbx_cmd_t *mcp = &mc; | 776 | mbx_cmd_t *mcp = &mc; |
831 | 777 | ||
832 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038, | 778 | ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__); |
833 | "Entered %s.\n", __func__); | ||
834 | 779 | ||
835 | mcp->mb[0] = MBC_IOCB_COMMAND_A64; | 780 | mcp->mb[0] = MBC_IOCB_COMMAND_A64; |
836 | mcp->mb[1] = 0; | 781 | mcp->mb[1] = 0; |
@@ -853,8 +798,7 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer, | |||
853 | /* Mask reserved bits. */ | 798 | /* Mask reserved bits. */ |
854 | sts_entry->entry_status &= | 799 | sts_entry->entry_status &= |
855 | IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK; | 800 | IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK; |
856 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a, | 801 | ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__); |
857 | "Done %s.\n", __func__); | ||
858 | } | 802 | } |
859 | 803 | ||
860 | return rval; | 804 | return rval; |
@@ -894,10 +838,8 @@ qla2x00_abort_command(srb_t *sp) | |||
894 | scsi_qla_host_t *vha = fcport->vha; | 838 | scsi_qla_host_t *vha = fcport->vha; |
895 | struct qla_hw_data *ha = vha->hw; | 839 | struct qla_hw_data *ha = vha->hw; |
896 | struct req_que *req = vha->req; | 840 | struct req_que *req = vha->req; |
897 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); | ||
898 | 841 | ||
899 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b, | 842 | ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__); |
900 | "Entered %s.\n", __func__); | ||
901 | 843 | ||
902 | spin_lock_irqsave(&ha->hardware_lock, flags); | 844 | spin_lock_irqsave(&ha->hardware_lock, flags); |
903 | for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { | 845 | for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { |
@@ -918,7 +860,7 @@ qla2x00_abort_command(srb_t *sp) | |||
918 | mcp->mb[1] = fcport->loop_id << 8; | 860 | mcp->mb[1] = fcport->loop_id << 8; |
919 | mcp->mb[2] = (uint16_t)handle; | 861 | mcp->mb[2] = (uint16_t)handle; |
920 | mcp->mb[3] = (uint16_t)(handle >> 16); | 862 | mcp->mb[3] = (uint16_t)(handle >> 16); |
921 | mcp->mb[6] = (uint16_t)cmd->device->lun; | 863 | mcp->mb[6] = (uint16_t)sp->cmd->device->lun; |
922 | mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; | 864 | mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; |
923 | mcp->in_mb = MBX_0; | 865 | mcp->in_mb = MBX_0; |
924 | mcp->tov = MBX_TOV_SECONDS; | 866 | mcp->tov = MBX_TOV_SECONDS; |
@@ -928,8 +870,7 @@ qla2x00_abort_command(srb_t *sp) | |||
928 | if (rval != QLA_SUCCESS) { | 870 | if (rval != QLA_SUCCESS) { |
929 | ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval); | 871 | ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval); |
930 | } else { | 872 | } else { |
931 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d, | 873 | ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__); |
932 | "Done %s.\n", __func__); | ||
933 | } | 874 | } |
934 | 875 | ||
935 | return rval; | 876 | return rval; |
@@ -948,8 +889,7 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag) | |||
948 | l = l; | 889 | l = l; |
949 | vha = fcport->vha; | 890 | vha = fcport->vha; |
950 | 891 | ||
951 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e, | 892 | ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__); |
952 | "Entered %s.\n", __func__); | ||
953 | 893 | ||
954 | req = vha->hw->req_q_map[0]; | 894 | req = vha->hw->req_q_map[0]; |
955 | rsp = req->rsp; | 895 | rsp = req->rsp; |
@@ -970,8 +910,7 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag) | |||
970 | mcp->flags = 0; | 910 | mcp->flags = 0; |
971 | rval = qla2x00_mailbox_command(vha, mcp); | 911 | rval = qla2x00_mailbox_command(vha, mcp); |
972 | if (rval != QLA_SUCCESS) { | 912 | if (rval != QLA_SUCCESS) { |
973 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f, | 913 | ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval); |
974 | "Failed=%x.\n", rval); | ||
975 | } | 914 | } |
976 | 915 | ||
977 | /* Issue marker IOCB. */ | 916 | /* Issue marker IOCB. */ |
@@ -981,8 +920,7 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag) | |||
981 | ql_dbg(ql_dbg_mbx, vha, 0x1040, | 920 | ql_dbg(ql_dbg_mbx, vha, 0x1040, |
982 | "Failed to issue marker IOCB (%x).\n", rval2); | 921 | "Failed to issue marker IOCB (%x).\n", rval2); |
983 | } else { | 922 | } else { |
984 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041, | 923 | ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__); |
985 | "Done %s.\n", __func__); | ||
986 | } | 924 | } |
987 | 925 | ||
988 | return rval; | 926 | return rval; |
@@ -1000,8 +938,7 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag) | |||
1000 | 938 | ||
1001 | vha = fcport->vha; | 939 | vha = fcport->vha; |
1002 | 940 | ||
1003 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042, | 941 | ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__); |
1004 | "Entered %s.\n", __func__); | ||
1005 | 942 | ||
1006 | req = vha->hw->req_q_map[0]; | 943 | req = vha->hw->req_q_map[0]; |
1007 | rsp = req->rsp; | 944 | rsp = req->rsp; |
@@ -1030,8 +967,7 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag) | |||
1030 | ql_dbg(ql_dbg_mbx, vha, 0x1044, | 967 | ql_dbg(ql_dbg_mbx, vha, 0x1044, |
1031 | "Failed to issue marker IOCB (%x).\n", rval2); | 968 | "Failed to issue marker IOCB (%x).\n", rval2); |
1032 | } else { | 969 | } else { |
1033 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045, | 970 | ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__); |
1034 | "Done %s.\n", __func__); | ||
1035 | } | 971 | } |
1036 | 972 | ||
1037 | return rval; | 973 | return rval; |
@@ -1065,14 +1001,13 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, | |||
1065 | mbx_cmd_t mc; | 1001 | mbx_cmd_t mc; |
1066 | mbx_cmd_t *mcp = &mc; | 1002 | mbx_cmd_t *mcp = &mc; |
1067 | 1003 | ||
1068 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046, | 1004 | ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__); |
1069 | "Entered %s.\n", __func__); | ||
1070 | 1005 | ||
1071 | mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; | 1006 | mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; |
1072 | mcp->mb[9] = vha->vp_idx; | 1007 | mcp->mb[9] = vha->vp_idx; |
1073 | mcp->out_mb = MBX_9|MBX_0; | 1008 | mcp->out_mb = MBX_9|MBX_0; |
1074 | mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; | 1009 | mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; |
1075 | if (IS_CNA_CAPABLE(vha->hw)) | 1010 | if (IS_QLA8XXX_TYPE(vha->hw)) |
1076 | mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; | 1011 | mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; |
1077 | mcp->tov = MBX_TOV_SECONDS; | 1012 | mcp->tov = MBX_TOV_SECONDS; |
1078 | mcp->flags = 0; | 1013 | mcp->flags = 0; |
@@ -1094,10 +1029,9 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, | |||
1094 | /*EMPTY*/ | 1029 | /*EMPTY*/ |
1095 | ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval); | 1030 | ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval); |
1096 | } else { | 1031 | } else { |
1097 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048, | 1032 | ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__); |
1098 | "Done %s.\n", __func__); | ||
1099 | 1033 | ||
1100 | if (IS_CNA_CAPABLE(vha->hw)) { | 1034 | if (IS_QLA8XXX_TYPE(vha->hw)) { |
1101 | vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; | 1035 | vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; |
1102 | vha->fcoe_fcf_idx = mcp->mb[10]; | 1036 | vha->fcoe_fcf_idx = mcp->mb[10]; |
1103 | vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8; | 1037 | vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8; |
@@ -1136,8 +1070,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov, | |||
1136 | mbx_cmd_t mc; | 1070 | mbx_cmd_t mc; |
1137 | mbx_cmd_t *mcp = &mc; | 1071 | mbx_cmd_t *mcp = &mc; |
1138 | 1072 | ||
1139 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049, | 1073 | ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__); |
1140 | "Entered %s.\n", __func__); | ||
1141 | 1074 | ||
1142 | mcp->mb[0] = MBC_GET_RETRY_COUNT; | 1075 | mcp->mb[0] = MBC_GET_RETRY_COUNT; |
1143 | mcp->out_mb = MBX_0; | 1076 | mcp->out_mb = MBX_0; |
@@ -1160,7 +1093,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov, | |||
1160 | *tov = ratov; | 1093 | *tov = ratov; |
1161 | } | 1094 | } |
1162 | 1095 | ||
1163 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b, | 1096 | ql_dbg(ql_dbg_mbx, vha, 0x104b, |
1164 | "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov); | 1097 | "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov); |
1165 | } | 1098 | } |
1166 | 1099 | ||
@@ -1192,8 +1125,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) | |||
1192 | mbx_cmd_t *mcp = &mc; | 1125 | mbx_cmd_t *mcp = &mc; |
1193 | struct qla_hw_data *ha = vha->hw; | 1126 | struct qla_hw_data *ha = vha->hw; |
1194 | 1127 | ||
1195 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c, | 1128 | ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__); |
1196 | "Entered %s.\n", __func__); | ||
1197 | 1129 | ||
1198 | if (IS_QLA82XX(ha) && ql2xdbwr) | 1130 | if (IS_QLA82XX(ha) && ql2xdbwr) |
1199 | qla82xx_wr_32(ha, ha->nxdb_wr_ptr, | 1131 | qla82xx_wr_32(ha, ha->nxdb_wr_ptr, |
@@ -1210,7 +1142,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) | |||
1210 | mcp->mb[6] = MSW(MSD(ha->init_cb_dma)); | 1142 | mcp->mb[6] = MSW(MSD(ha->init_cb_dma)); |
1211 | mcp->mb[7] = LSW(MSD(ha->init_cb_dma)); | 1143 | mcp->mb[7] = LSW(MSD(ha->init_cb_dma)); |
1212 | mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; | 1144 | mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; |
1213 | if ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) && ha->ex_init_cb->ex_version) { | 1145 | if (IS_QLA81XX(ha) && ha->ex_init_cb->ex_version) { |
1214 | mcp->mb[1] = BIT_0; | 1146 | mcp->mb[1] = BIT_0; |
1215 | mcp->mb[10] = MSW(ha->ex_init_cb_dma); | 1147 | mcp->mb[10] = MSW(ha->ex_init_cb_dma); |
1216 | mcp->mb[11] = LSW(ha->ex_init_cb_dma); | 1148 | mcp->mb[11] = LSW(ha->ex_init_cb_dma); |
@@ -1219,11 +1151,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) | |||
1219 | mcp->mb[14] = sizeof(*ha->ex_init_cb); | 1151 | mcp->mb[14] = sizeof(*ha->ex_init_cb); |
1220 | mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10; | 1152 | mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10; |
1221 | } | 1153 | } |
1222 | /* 1 and 2 should normally be captured. */ | 1154 | mcp->in_mb = MBX_0; |
1223 | mcp->in_mb = MBX_2|MBX_1|MBX_0; | ||
1224 | if (IS_QLA83XX(ha)) | ||
1225 | /* mb3 is additional info about the installed SFP. */ | ||
1226 | mcp->in_mb |= MBX_3; | ||
1227 | mcp->buf_size = size; | 1155 | mcp->buf_size = size; |
1228 | mcp->flags = MBX_DMA_OUT; | 1156 | mcp->flags = MBX_DMA_OUT; |
1229 | mcp->tov = MBX_TOV_SECONDS; | 1157 | mcp->tov = MBX_TOV_SECONDS; |
@@ -1232,104 +1160,12 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) | |||
1232 | if (rval != QLA_SUCCESS) { | 1160 | if (rval != QLA_SUCCESS) { |
1233 | /*EMPTY*/ | 1161 | /*EMPTY*/ |
1234 | ql_dbg(ql_dbg_mbx, vha, 0x104d, | 1162 | ql_dbg(ql_dbg_mbx, vha, 0x104d, |
1235 | "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n", | 1163 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); |
1236 | rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]); | ||
1237 | } else { | 1164 | } else { |
1238 | /*EMPTY*/ | 1165 | /*EMPTY*/ |
1239 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e, | 1166 | ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__); |
1240 | "Done %s.\n", __func__); | ||
1241 | } | ||
1242 | |||
1243 | return rval; | ||
1244 | } | ||
1245 | |||
1246 | /* | ||
1247 | * qla2x00_get_node_name_list | ||
1248 | * Issue get node name list mailbox command, kmalloc() | ||
1249 | * and return the resulting list. Caller must kfree() it! | ||
1250 | * | ||
1251 | * Input: | ||
1252 | * ha = adapter state pointer. | ||
1253 | * out_data = resulting list | ||
1254 | * out_len = length of the resulting list | ||
1255 | * | ||
1256 | * Returns: | ||
1257 | * qla2x00 local function return status code. | ||
1258 | * | ||
1259 | * Context: | ||
1260 | * Kernel context. | ||
1261 | */ | ||
1262 | int | ||
1263 | qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len) | ||
1264 | { | ||
1265 | struct qla_hw_data *ha = vha->hw; | ||
1266 | struct qla_port_24xx_data *list = NULL; | ||
1267 | void *pmap; | ||
1268 | mbx_cmd_t mc; | ||
1269 | dma_addr_t pmap_dma; | ||
1270 | ulong dma_size; | ||
1271 | int rval, left; | ||
1272 | |||
1273 | left = 1; | ||
1274 | while (left > 0) { | ||
1275 | dma_size = left * sizeof(*list); | ||
1276 | pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size, | ||
1277 | &pmap_dma, GFP_KERNEL); | ||
1278 | if (!pmap) { | ||
1279 | ql_log(ql_log_warn, vha, 0x113f, | ||
1280 | "%s(%ld): DMA Alloc failed of %ld\n", | ||
1281 | __func__, vha->host_no, dma_size); | ||
1282 | rval = QLA_MEMORY_ALLOC_FAILED; | ||
1283 | goto out; | ||
1284 | } | ||
1285 | |||
1286 | mc.mb[0] = MBC_PORT_NODE_NAME_LIST; | ||
1287 | mc.mb[1] = BIT_1 | BIT_3; | ||
1288 | mc.mb[2] = MSW(pmap_dma); | ||
1289 | mc.mb[3] = LSW(pmap_dma); | ||
1290 | mc.mb[6] = MSW(MSD(pmap_dma)); | ||
1291 | mc.mb[7] = LSW(MSD(pmap_dma)); | ||
1292 | mc.mb[8] = dma_size; | ||
1293 | mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8; | ||
1294 | mc.in_mb = MBX_0|MBX_1; | ||
1295 | mc.tov = 30; | ||
1296 | mc.flags = MBX_DMA_IN; | ||
1297 | |||
1298 | rval = qla2x00_mailbox_command(vha, &mc); | ||
1299 | if (rval != QLA_SUCCESS) { | ||
1300 | if ((mc.mb[0] == MBS_COMMAND_ERROR) && | ||
1301 | (mc.mb[1] == 0xA)) { | ||
1302 | left += le16_to_cpu(mc.mb[2]) / | ||
1303 | sizeof(struct qla_port_24xx_data); | ||
1304 | goto restart; | ||
1305 | } | ||
1306 | goto out_free; | ||
1307 | } | ||
1308 | |||
1309 | left = 0; | ||
1310 | |||
1311 | list = kzalloc(dma_size, GFP_KERNEL); | ||
1312 | if (!list) { | ||
1313 | ql_log(ql_log_warn, vha, 0x1140, | ||
1314 | "%s(%ld): failed to allocate node names list " | ||
1315 | "structure.\n", __func__, vha->host_no); | ||
1316 | rval = QLA_MEMORY_ALLOC_FAILED; | ||
1317 | goto out_free; | ||
1318 | } | ||
1319 | |||
1320 | memcpy(list, pmap, dma_size); | ||
1321 | restart: | ||
1322 | dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma); | ||
1323 | } | 1167 | } |
1324 | 1168 | ||
1325 | *out_data = list; | ||
1326 | *out_len = dma_size; | ||
1327 | |||
1328 | out: | ||
1329 | return rval; | ||
1330 | |||
1331 | out_free: | ||
1332 | dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma); | ||
1333 | return rval; | 1169 | return rval; |
1334 | } | 1170 | } |
1335 | 1171 | ||
@@ -1360,8 +1196,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) | |||
1360 | dma_addr_t pd_dma; | 1196 | dma_addr_t pd_dma; |
1361 | struct qla_hw_data *ha = vha->hw; | 1197 | struct qla_hw_data *ha = vha->hw; |
1362 | 1198 | ||
1363 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f, | 1199 | ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__); |
1364 | "Entered %s.\n", __func__); | ||
1365 | 1200 | ||
1366 | pd24 = NULL; | 1201 | pd24 = NULL; |
1367 | pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); | 1202 | pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); |
@@ -1404,7 +1239,6 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) | |||
1404 | goto gpd_error_out; | 1239 | goto gpd_error_out; |
1405 | 1240 | ||
1406 | if (IS_FWI2_CAPABLE(ha)) { | 1241 | if (IS_FWI2_CAPABLE(ha)) { |
1407 | uint64_t zero = 0; | ||
1408 | pd24 = (struct port_database_24xx *) pd; | 1242 | pd24 = (struct port_database_24xx *) pd; |
1409 | 1243 | ||
1410 | /* Check for logged in state. */ | 1244 | /* Check for logged in state. */ |
@@ -1418,14 +1252,6 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) | |||
1418 | goto gpd_error_out; | 1252 | goto gpd_error_out; |
1419 | } | 1253 | } |
1420 | 1254 | ||
1421 | if (fcport->loop_id == FC_NO_LOOP_ID || | ||
1422 | (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && | ||
1423 | memcmp(fcport->port_name, pd24->port_name, 8))) { | ||
1424 | /* We lost the device mid way. */ | ||
1425 | rval = QLA_NOT_LOGGED_IN; | ||
1426 | goto gpd_error_out; | ||
1427 | } | ||
1428 | |||
1429 | /* Names are little-endian. */ | 1255 | /* Names are little-endian. */ |
1430 | memcpy(fcport->node_name, pd24->node_name, WWN_SIZE); | 1256 | memcpy(fcport->node_name, pd24->node_name, WWN_SIZE); |
1431 | memcpy(fcport->port_name, pd24->port_name, WWN_SIZE); | 1257 | memcpy(fcport->port_name, pd24->port_name, WWN_SIZE); |
@@ -1441,16 +1267,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) | |||
1441 | fcport->port_type = FCT_INITIATOR; | 1267 | fcport->port_type = FCT_INITIATOR; |
1442 | else | 1268 | else |
1443 | fcport->port_type = FCT_TARGET; | 1269 | fcport->port_type = FCT_TARGET; |
1444 | |||
1445 | /* Passback COS information. */ | ||
1446 | fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ? | ||
1447 | FC_COS_CLASS2 : FC_COS_CLASS3; | ||
1448 | |||
1449 | if (pd24->prli_svc_param_word_3[0] & BIT_7) | ||
1450 | fcport->flags |= FCF_CONF_COMP_SUPPORTED; | ||
1451 | } else { | 1270 | } else { |
1452 | uint64_t zero = 0; | ||
1453 | |||
1454 | /* Check for logged in state. */ | 1271 | /* Check for logged in state. */ |
1455 | if (pd->master_state != PD_STATE_PORT_LOGGED_IN && | 1272 | if (pd->master_state != PD_STATE_PORT_LOGGED_IN && |
1456 | pd->slave_state != PD_STATE_PORT_LOGGED_IN) { | 1273 | pd->slave_state != PD_STATE_PORT_LOGGED_IN) { |
@@ -1463,14 +1280,6 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) | |||
1463 | goto gpd_error_out; | 1280 | goto gpd_error_out; |
1464 | } | 1281 | } |
1465 | 1282 | ||
1466 | if (fcport->loop_id == FC_NO_LOOP_ID || | ||
1467 | (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && | ||
1468 | memcmp(fcport->port_name, pd->port_name, 8))) { | ||
1469 | /* We lost the device mid way. */ | ||
1470 | rval = QLA_NOT_LOGGED_IN; | ||
1471 | goto gpd_error_out; | ||
1472 | } | ||
1473 | |||
1474 | /* Names are little-endian. */ | 1283 | /* Names are little-endian. */ |
1475 | memcpy(fcport->node_name, pd->node_name, WWN_SIZE); | 1284 | memcpy(fcport->node_name, pd->node_name, WWN_SIZE); |
1476 | memcpy(fcport->port_name, pd->port_name, WWN_SIZE); | 1285 | memcpy(fcport->port_name, pd->port_name, WWN_SIZE); |
@@ -1500,8 +1309,7 @@ gpd_error_out: | |||
1500 | "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, | 1309 | "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, |
1501 | mcp->mb[0], mcp->mb[1]); | 1310 | mcp->mb[0], mcp->mb[1]); |
1502 | } else { | 1311 | } else { |
1503 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053, | 1312 | ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__); |
1504 | "Done %s.\n", __func__); | ||
1505 | } | 1313 | } |
1506 | 1314 | ||
1507 | return rval; | 1315 | return rval; |
@@ -1530,8 +1338,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) | |||
1530 | mbx_cmd_t mc; | 1338 | mbx_cmd_t mc; |
1531 | mbx_cmd_t *mcp = &mc; | 1339 | mbx_cmd_t *mcp = &mc; |
1532 | 1340 | ||
1533 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054, | 1341 | ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__); |
1534 | "Entered %s.\n", __func__); | ||
1535 | 1342 | ||
1536 | mcp->mb[0] = MBC_GET_FIRMWARE_STATE; | 1343 | mcp->mb[0] = MBC_GET_FIRMWARE_STATE; |
1537 | mcp->out_mb = MBX_0; | 1344 | mcp->out_mb = MBX_0; |
@@ -1557,8 +1364,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) | |||
1557 | ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); | 1364 | ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); |
1558 | } else { | 1365 | } else { |
1559 | /*EMPTY*/ | 1366 | /*EMPTY*/ |
1560 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056, | 1367 | ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__); |
1561 | "Done %s.\n", __func__); | ||
1562 | } | 1368 | } |
1563 | 1369 | ||
1564 | return rval; | 1370 | return rval; |
@@ -1590,8 +1396,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, | |||
1590 | mbx_cmd_t mc; | 1396 | mbx_cmd_t mc; |
1591 | mbx_cmd_t *mcp = &mc; | 1397 | mbx_cmd_t *mcp = &mc; |
1592 | 1398 | ||
1593 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057, | 1399 | ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__); |
1594 | "Entered %s.\n", __func__); | ||
1595 | 1400 | ||
1596 | mcp->mb[0] = MBC_GET_PORT_NAME; | 1401 | mcp->mb[0] = MBC_GET_PORT_NAME; |
1597 | mcp->mb[9] = vha->vp_idx; | 1402 | mcp->mb[9] = vha->vp_idx; |
@@ -1625,8 +1430,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, | |||
1625 | name[7] = LSB(mcp->mb[7]); | 1430 | name[7] = LSB(mcp->mb[7]); |
1626 | } | 1431 | } |
1627 | 1432 | ||
1628 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059, | 1433 | ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__); |
1629 | "Done %s.\n", __func__); | ||
1630 | } | 1434 | } |
1631 | 1435 | ||
1632 | return rval; | 1436 | return rval; |
@@ -1654,10 +1458,9 @@ qla2x00_lip_reset(scsi_qla_host_t *vha) | |||
1654 | mbx_cmd_t mc; | 1458 | mbx_cmd_t mc; |
1655 | mbx_cmd_t *mcp = &mc; | 1459 | mbx_cmd_t *mcp = &mc; |
1656 | 1460 | ||
1657 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a, | 1461 | ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__); |
1658 | "Entered %s.\n", __func__); | ||
1659 | 1462 | ||
1660 | if (IS_CNA_CAPABLE(vha->hw)) { | 1463 | if (IS_QLA8XXX_TYPE(vha->hw)) { |
1661 | /* Logout across all FCFs. */ | 1464 | /* Logout across all FCFs. */ |
1662 | mcp->mb[0] = MBC_LIP_FULL_LOGIN; | 1465 | mcp->mb[0] = MBC_LIP_FULL_LOGIN; |
1663 | mcp->mb[1] = BIT_1; | 1466 | mcp->mb[1] = BIT_1; |
@@ -1692,8 +1495,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha) | |||
1692 | ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); | 1495 | ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); |
1693 | } else { | 1496 | } else { |
1694 | /*EMPTY*/ | 1497 | /*EMPTY*/ |
1695 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c, | 1498 | ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__); |
1696 | "Done %s.\n", __func__); | ||
1697 | } | 1499 | } |
1698 | 1500 | ||
1699 | return rval; | 1501 | return rval; |
@@ -1725,10 +1527,9 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, | |||
1725 | mbx_cmd_t mc; | 1527 | mbx_cmd_t mc; |
1726 | mbx_cmd_t *mcp = &mc; | 1528 | mbx_cmd_t *mcp = &mc; |
1727 | 1529 | ||
1728 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d, | 1530 | ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__); |
1729 | "Entered %s.\n", __func__); | ||
1730 | 1531 | ||
1731 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e, | 1532 | ql_dbg(ql_dbg_mbx, vha, 0x105e, |
1732 | "Retry cnt=%d ratov=%d total tov=%d.\n", | 1533 | "Retry cnt=%d ratov=%d total tov=%d.\n", |
1733 | vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); | 1534 | vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); |
1734 | 1535 | ||
@@ -1752,8 +1553,7 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, | |||
1752 | rval, mcp->mb[0], mcp->mb[1]); | 1553 | rval, mcp->mb[0], mcp->mb[1]); |
1753 | } else { | 1554 | } else { |
1754 | /*EMPTY*/ | 1555 | /*EMPTY*/ |
1755 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060, | 1556 | ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__); |
1756 | "Done %s.\n", __func__); | ||
1757 | } | 1557 | } |
1758 | 1558 | ||
1759 | return rval; | 1559 | return rval; |
@@ -1772,8 +1572,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, | |||
1772 | struct req_que *req; | 1572 | struct req_que *req; |
1773 | struct rsp_que *rsp; | 1573 | struct rsp_que *rsp; |
1774 | 1574 | ||
1775 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, | 1575 | ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__); |
1776 | "Entered %s.\n", __func__); | ||
1777 | 1576 | ||
1778 | if (ha->flags.cpu_affinity_enabled) | 1577 | if (ha->flags.cpu_affinity_enabled) |
1779 | req = ha->req_q_map[0]; | 1578 | req = ha->req_q_map[0]; |
@@ -1802,8 +1601,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, | |||
1802 | lg->port_id[1] = area; | 1601 | lg->port_id[1] = area; |
1803 | lg->port_id[2] = domain; | 1602 | lg->port_id[2] = domain; |
1804 | lg->vp_index = vha->vp_idx; | 1603 | lg->vp_index = vha->vp_idx; |
1805 | rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, | 1604 | rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); |
1806 | (ha->r_a_tov / 10 * 2) + 2); | ||
1807 | if (rval != QLA_SUCCESS) { | 1605 | if (rval != QLA_SUCCESS) { |
1808 | ql_dbg(ql_dbg_mbx, vha, 0x1063, | 1606 | ql_dbg(ql_dbg_mbx, vha, 0x1063, |
1809 | "Failed to issue login IOCB (%x).\n", rval); | 1607 | "Failed to issue login IOCB (%x).\n", rval); |
@@ -1847,8 +1645,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, | |||
1847 | break; | 1645 | break; |
1848 | } | 1646 | } |
1849 | } else { | 1647 | } else { |
1850 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, | 1648 | ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__); |
1851 | "Done %s.\n", __func__); | ||
1852 | 1649 | ||
1853 | iop[0] = le32_to_cpu(lg->io_parameter[0]); | 1650 | iop[0] = le32_to_cpu(lg->io_parameter[0]); |
1854 | 1651 | ||
@@ -1866,10 +1663,6 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, | |||
1866 | mb[10] |= BIT_0; /* Class 2. */ | 1663 | mb[10] |= BIT_0; /* Class 2. */ |
1867 | if (lg->io_parameter[9] || lg->io_parameter[10]) | 1664 | if (lg->io_parameter[9] || lg->io_parameter[10]) |
1868 | mb[10] |= BIT_1; /* Class 3. */ | 1665 | mb[10] |= BIT_1; /* Class 3. */ |
1869 | if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7)) | ||
1870 | mb[10] |= BIT_7; /* Confirmed Completion | ||
1871 | * Allowed | ||
1872 | */ | ||
1873 | } | 1666 | } |
1874 | 1667 | ||
1875 | dma_pool_free(ha->s_dma_pool, lg, lg_dma); | 1668 | dma_pool_free(ha->s_dma_pool, lg, lg_dma); |
@@ -1907,8 +1700,7 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, | |||
1907 | mbx_cmd_t *mcp = &mc; | 1700 | mbx_cmd_t *mcp = &mc; |
1908 | struct qla_hw_data *ha = vha->hw; | 1701 | struct qla_hw_data *ha = vha->hw; |
1909 | 1702 | ||
1910 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067, | 1703 | ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__); |
1911 | "Entered %s.\n", __func__); | ||
1912 | 1704 | ||
1913 | mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; | 1705 | mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; |
1914 | mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; | 1706 | mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; |
@@ -1956,8 +1748,7 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, | |||
1956 | rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); | 1748 | rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); |
1957 | } else { | 1749 | } else { |
1958 | /*EMPTY*/ | 1750 | /*EMPTY*/ |
1959 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069, | 1751 | ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__); |
1960 | "Done %s.\n", __func__); | ||
1961 | } | 1752 | } |
1962 | 1753 | ||
1963 | return rval; | 1754 | return rval; |
@@ -1988,8 +1779,7 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, | |||
1988 | mbx_cmd_t *mcp = &mc; | 1779 | mbx_cmd_t *mcp = &mc; |
1989 | struct qla_hw_data *ha = vha->hw; | 1780 | struct qla_hw_data *ha = vha->hw; |
1990 | 1781 | ||
1991 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a, | 1782 | ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__); |
1992 | "Entered %s.\n", __func__); | ||
1993 | 1783 | ||
1994 | if (IS_FWI2_CAPABLE(ha)) | 1784 | if (IS_FWI2_CAPABLE(ha)) |
1995 | return qla24xx_login_fabric(vha, fcport->loop_id, | 1785 | return qla24xx_login_fabric(vha, fcport->loop_id, |
@@ -2031,8 +1821,7 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, | |||
2031 | rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); | 1821 | rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); |
2032 | } else { | 1822 | } else { |
2033 | /*EMPTY*/ | 1823 | /*EMPTY*/ |
2034 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c, | 1824 | ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__); |
2035 | "Done %s.\n", __func__); | ||
2036 | } | 1825 | } |
2037 | 1826 | ||
2038 | return (rval); | 1827 | return (rval); |
@@ -2049,8 +1838,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, | |||
2049 | struct req_que *req; | 1838 | struct req_que *req; |
2050 | struct rsp_que *rsp; | 1839 | struct rsp_que *rsp; |
2051 | 1840 | ||
2052 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, | 1841 | ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__); |
2053 | "Entered %s.\n", __func__); | ||
2054 | 1842 | ||
2055 | lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); | 1843 | lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); |
2056 | if (lg == NULL) { | 1844 | if (lg == NULL) { |
@@ -2076,8 +1864,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, | |||
2076 | lg->port_id[1] = area; | 1864 | lg->port_id[1] = area; |
2077 | lg->port_id[2] = domain; | 1865 | lg->port_id[2] = domain; |
2078 | lg->vp_index = vha->vp_idx; | 1866 | lg->vp_index = vha->vp_idx; |
2079 | rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, | 1867 | |
2080 | (ha->r_a_tov / 10 * 2) + 2); | 1868 | rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); |
2081 | if (rval != QLA_SUCCESS) { | 1869 | if (rval != QLA_SUCCESS) { |
2082 | ql_dbg(ql_dbg_mbx, vha, 0x106f, | 1870 | ql_dbg(ql_dbg_mbx, vha, 0x106f, |
2083 | "Failed to issue logout IOCB (%x).\n", rval); | 1871 | "Failed to issue logout IOCB (%x).\n", rval); |
@@ -2094,8 +1882,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, | |||
2094 | le32_to_cpu(lg->io_parameter[1])); | 1882 | le32_to_cpu(lg->io_parameter[1])); |
2095 | } else { | 1883 | } else { |
2096 | /*EMPTY*/ | 1884 | /*EMPTY*/ |
2097 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072, | 1885 | ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__); |
2098 | "Done %s.\n", __func__); | ||
2099 | } | 1886 | } |
2100 | 1887 | ||
2101 | dma_pool_free(ha->s_dma_pool, lg, lg_dma); | 1888 | dma_pool_free(ha->s_dma_pool, lg, lg_dma); |
@@ -2127,8 +1914,7 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, | |||
2127 | mbx_cmd_t mc; | 1914 | mbx_cmd_t mc; |
2128 | mbx_cmd_t *mcp = &mc; | 1915 | mbx_cmd_t *mcp = &mc; |
2129 | 1916 | ||
2130 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073, | 1917 | ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__); |
2131 | "Entered %s.\n", __func__); | ||
2132 | 1918 | ||
2133 | mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; | 1919 | mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; |
2134 | mcp->out_mb = MBX_1|MBX_0; | 1920 | mcp->out_mb = MBX_1|MBX_0; |
@@ -2151,8 +1937,7 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, | |||
2151 | "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); | 1937 | "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); |
2152 | } else { | 1938 | } else { |
2153 | /*EMPTY*/ | 1939 | /*EMPTY*/ |
2154 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075, | 1940 | ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__); |
2155 | "Done %s.\n", __func__); | ||
2156 | } | 1941 | } |
2157 | 1942 | ||
2158 | return rval; | 1943 | return rval; |
@@ -2180,8 +1965,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha) | |||
2180 | mbx_cmd_t mc; | 1965 | mbx_cmd_t mc; |
2181 | mbx_cmd_t *mcp = &mc; | 1966 | mbx_cmd_t *mcp = &mc; |
2182 | 1967 | ||
2183 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076, | 1968 | ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__); |
2184 | "Entered %s.\n", __func__); | ||
2185 | 1969 | ||
2186 | mcp->mb[0] = MBC_LIP_FULL_LOGIN; | 1970 | mcp->mb[0] = MBC_LIP_FULL_LOGIN; |
2187 | mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0; | 1971 | mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0; |
@@ -2198,8 +1982,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha) | |||
2198 | ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); | 1982 | ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); |
2199 | } else { | 1983 | } else { |
2200 | /*EMPTY*/ | 1984 | /*EMPTY*/ |
2201 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078, | 1985 | ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__); |
2202 | "Done %s.\n", __func__); | ||
2203 | } | 1986 | } |
2204 | 1987 | ||
2205 | return rval; | 1988 | return rval; |
@@ -2225,8 +2008,7 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, | |||
2225 | mbx_cmd_t mc; | 2008 | mbx_cmd_t mc; |
2226 | mbx_cmd_t *mcp = &mc; | 2009 | mbx_cmd_t *mcp = &mc; |
2227 | 2010 | ||
2228 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079, | 2011 | ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__); |
2229 | "Entered %s.\n", __func__); | ||
2230 | 2012 | ||
2231 | if (id_list == NULL) | 2013 | if (id_list == NULL) |
2232 | return QLA_FUNCTION_FAILED; | 2014 | return QLA_FUNCTION_FAILED; |
@@ -2258,8 +2040,7 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, | |||
2258 | ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); | 2040 | ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); |
2259 | } else { | 2041 | } else { |
2260 | *entries = mcp->mb[1]; | 2042 | *entries = mcp->mb[1]; |
2261 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b, | 2043 | ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__); |
2262 | "Done %s.\n", __func__); | ||
2263 | } | 2044 | } |
2264 | 2045 | ||
2265 | return rval; | 2046 | return rval; |
@@ -2287,13 +2068,12 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, | |||
2287 | mbx_cmd_t mc; | 2068 | mbx_cmd_t mc; |
2288 | mbx_cmd_t *mcp = &mc; | 2069 | mbx_cmd_t *mcp = &mc; |
2289 | 2070 | ||
2290 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c, | 2071 | ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__); |
2291 | "Entered %s.\n", __func__); | ||
2292 | 2072 | ||
2293 | mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; | 2073 | mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; |
2294 | mcp->out_mb = MBX_0; | 2074 | mcp->out_mb = MBX_0; |
2295 | mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; | 2075 | mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; |
2296 | if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) | 2076 | if (IS_QLA81XX(vha->hw)) |
2297 | mcp->in_mb |= MBX_12; | 2077 | mcp->in_mb |= MBX_12; |
2298 | mcp->tov = MBX_TOV_SECONDS; | 2078 | mcp->tov = MBX_TOV_SECONDS; |
2299 | mcp->flags = 0; | 2079 | mcp->flags = 0; |
@@ -2304,7 +2084,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, | |||
2304 | ql_dbg(ql_dbg_mbx, vha, 0x107d, | 2084 | ql_dbg(ql_dbg_mbx, vha, 0x107d, |
2305 | "Failed mb[0]=%x.\n", mcp->mb[0]); | 2085 | "Failed mb[0]=%x.\n", mcp->mb[0]); |
2306 | } else { | 2086 | } else { |
2307 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e, | 2087 | ql_dbg(ql_dbg_mbx, vha, 0x107e, |
2308 | "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " | 2088 | "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " |
2309 | "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], | 2089 | "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], |
2310 | mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], | 2090 | mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], |
@@ -2320,7 +2100,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, | |||
2320 | *orig_iocb_cnt = mcp->mb[10]; | 2100 | *orig_iocb_cnt = mcp->mb[10]; |
2321 | if (vha->hw->flags.npiv_supported && max_npiv_vports) | 2101 | if (vha->hw->flags.npiv_supported && max_npiv_vports) |
2322 | *max_npiv_vports = mcp->mb[11]; | 2102 | *max_npiv_vports = mcp->mb[11]; |
2323 | if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs) | 2103 | if (IS_QLA81XX(vha->hw) && max_fcfs) |
2324 | *max_fcfs = mcp->mb[12]; | 2104 | *max_fcfs = mcp->mb[12]; |
2325 | } | 2105 | } |
2326 | 2106 | ||
@@ -2351,8 +2131,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) | |||
2351 | dma_addr_t pmap_dma; | 2131 | dma_addr_t pmap_dma; |
2352 | struct qla_hw_data *ha = vha->hw; | 2132 | struct qla_hw_data *ha = vha->hw; |
2353 | 2133 | ||
2354 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, | 2134 | ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__); |
2355 | "Entered %s.\n", __func__); | ||
2356 | 2135 | ||
2357 | pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); | 2136 | pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); |
2358 | if (pmap == NULL) { | 2137 | if (pmap == NULL) { |
@@ -2375,7 +2154,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) | |||
2375 | rval = qla2x00_mailbox_command(vha, mcp); | 2154 | rval = qla2x00_mailbox_command(vha, mcp); |
2376 | 2155 | ||
2377 | if (rval == QLA_SUCCESS) { | 2156 | if (rval == QLA_SUCCESS) { |
2378 | ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081, | 2157 | ql_dbg(ql_dbg_mbx, vha, 0x1081, |
2379 | "mb0/mb1=%x/%X FC/AL position map size (%x).\n", | 2158 | "mb0/mb1=%x/%X FC/AL position map size (%x).\n", |
2380 | mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); | 2159 | mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); |
2381 | ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, | 2160 | ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, |
@@ -2389,8 +2168,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) | |||
2389 | if (rval != QLA_SUCCESS) { | 2168 | if (rval != QLA_SUCCESS) { |
2390 | ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); | 2169 | ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); |
2391 | } else { | 2170 | } else { |
2392 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083, | 2171 | ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__); |
2393 | "Done %s.\n", __func__); | ||
2394 | } | 2172 | } |
2395 | 2173 | ||
2396 | return rval; | 2174 | return rval; |
@@ -2419,8 +2197,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, | |||
2419 | uint32_t *siter, *diter, dwords; | 2197 | uint32_t *siter, *diter, dwords; |
2420 | struct qla_hw_data *ha = vha->hw; | 2198 | struct qla_hw_data *ha = vha->hw; |
2421 | 2199 | ||
2422 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, | 2200 | ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__); |
2423 | "Entered %s.\n", __func__); | ||
2424 | 2201 | ||
2425 | mcp->mb[0] = MBC_GET_LINK_STATUS; | 2202 | mcp->mb[0] = MBC_GET_LINK_STATUS; |
2426 | mcp->mb[2] = MSW(stats_dma); | 2203 | mcp->mb[2] = MSW(stats_dma); |
@@ -2454,8 +2231,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, | |||
2454 | rval = QLA_FUNCTION_FAILED; | 2231 | rval = QLA_FUNCTION_FAILED; |
2455 | } else { | 2232 | } else { |
2456 | /* Copy over data -- firmware data is LE. */ | 2233 | /* Copy over data -- firmware data is LE. */ |
2457 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, | 2234 | ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__); |
2458 | "Done %s.\n", __func__); | ||
2459 | dwords = offsetof(struct link_statistics, unused1) / 4; | 2235 | dwords = offsetof(struct link_statistics, unused1) / 4; |
2460 | siter = diter = &stats->link_fail_cnt; | 2236 | siter = diter = &stats->link_fail_cnt; |
2461 | while (dwords--) | 2237 | while (dwords--) |
@@ -2478,8 +2254,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, | |||
2478 | mbx_cmd_t *mcp = &mc; | 2254 | mbx_cmd_t *mcp = &mc; |
2479 | uint32_t *siter, *diter, dwords; | 2255 | uint32_t *siter, *diter, dwords; |
2480 | 2256 | ||
2481 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, | 2257 | ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__); |
2482 | "Entered %s.\n", __func__); | ||
2483 | 2258 | ||
2484 | mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; | 2259 | mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; |
2485 | mcp->mb[2] = MSW(stats_dma); | 2260 | mcp->mb[2] = MSW(stats_dma); |
@@ -2501,8 +2276,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, | |||
2501 | "Failed mb[0]=%x.\n", mcp->mb[0]); | 2276 | "Failed mb[0]=%x.\n", mcp->mb[0]); |
2502 | rval = QLA_FUNCTION_FAILED; | 2277 | rval = QLA_FUNCTION_FAILED; |
2503 | } else { | 2278 | } else { |
2504 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, | 2279 | ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__); |
2505 | "Done %s.\n", __func__); | ||
2506 | /* Copy over data -- firmware data is LE. */ | 2280 | /* Copy over data -- firmware data is LE. */ |
2507 | dwords = sizeof(struct link_statistics) / 4; | 2281 | dwords = sizeof(struct link_statistics) / 4; |
2508 | siter = diter = &stats->link_fail_cnt; | 2282 | siter = diter = &stats->link_fail_cnt; |
@@ -2531,8 +2305,7 @@ qla24xx_abort_command(srb_t *sp) | |||
2531 | struct qla_hw_data *ha = vha->hw; | 2305 | struct qla_hw_data *ha = vha->hw; |
2532 | struct req_que *req = vha->req; | 2306 | struct req_que *req = vha->req; |
2533 | 2307 | ||
2534 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, | 2308 | ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__); |
2535 | "Entered %s.\n", __func__); | ||
2536 | 2309 | ||
2537 | spin_lock_irqsave(&ha->hardware_lock, flags); | 2310 | spin_lock_irqsave(&ha->hardware_lock, flags); |
2538 | for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { | 2311 | for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { |
@@ -2561,7 +2334,7 @@ qla24xx_abort_command(srb_t *sp) | |||
2561 | abt->port_id[0] = fcport->d_id.b.al_pa; | 2334 | abt->port_id[0] = fcport->d_id.b.al_pa; |
2562 | abt->port_id[1] = fcport->d_id.b.area; | 2335 | abt->port_id[1] = fcport->d_id.b.area; |
2563 | abt->port_id[2] = fcport->d_id.b.domain; | 2336 | abt->port_id[2] = fcport->d_id.b.domain; |
2564 | abt->vp_index = fcport->vha->vp_idx; | 2337 | abt->vp_index = fcport->vp_idx; |
2565 | 2338 | ||
2566 | abt->req_que_no = cpu_to_le16(req->id); | 2339 | abt->req_que_no = cpu_to_le16(req->id); |
2567 | 2340 | ||
@@ -2580,8 +2353,7 @@ qla24xx_abort_command(srb_t *sp) | |||
2580 | le16_to_cpu(abt->nport_handle)); | 2353 | le16_to_cpu(abt->nport_handle)); |
2581 | rval = QLA_FUNCTION_FAILED; | 2354 | rval = QLA_FUNCTION_FAILED; |
2582 | } else { | 2355 | } else { |
2583 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, | 2356 | ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__); |
2584 | "Done %s.\n", __func__); | ||
2585 | } | 2357 | } |
2586 | 2358 | ||
2587 | dma_pool_free(ha->s_dma_pool, abt, abt_dma); | 2359 | dma_pool_free(ha->s_dma_pool, abt, abt_dma); |
@@ -2613,8 +2385,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, | |||
2613 | ha = vha->hw; | 2385 | ha = vha->hw; |
2614 | req = vha->req; | 2386 | req = vha->req; |
2615 | 2387 | ||
2616 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092, | 2388 | ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__); |
2617 | "Entered %s.\n", __func__); | ||
2618 | 2389 | ||
2619 | if (ha->flags.cpu_affinity_enabled) | 2390 | if (ha->flags.cpu_affinity_enabled) |
2620 | rsp = ha->rsp_q_map[tag + 1]; | 2391 | rsp = ha->rsp_q_map[tag + 1]; |
@@ -2637,7 +2408,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, | |||
2637 | tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; | 2408 | tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; |
2638 | tsk->p.tsk.port_id[1] = fcport->d_id.b.area; | 2409 | tsk->p.tsk.port_id[1] = fcport->d_id.b.area; |
2639 | tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; | 2410 | tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; |
2640 | tsk->p.tsk.vp_index = fcport->vha->vp_idx; | 2411 | tsk->p.tsk.vp_index = fcport->vp_idx; |
2641 | if (type == TCF_LUN_RESET) { | 2412 | if (type == TCF_LUN_RESET) { |
2642 | int_to_scsilun(l, &tsk->p.tsk.lun); | 2413 | int_to_scsilun(l, &tsk->p.tsk.lun); |
2643 | host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, | 2414 | host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, |
@@ -2663,7 +2434,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, | |||
2663 | } else if (le16_to_cpu(sts->scsi_status) & | 2434 | } else if (le16_to_cpu(sts->scsi_status) & |
2664 | SS_RESPONSE_INFO_LEN_VALID) { | 2435 | SS_RESPONSE_INFO_LEN_VALID) { |
2665 | if (le32_to_cpu(sts->rsp_data_len) < 4) { | 2436 | if (le32_to_cpu(sts->rsp_data_len) < 4) { |
2666 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097, | 2437 | ql_dbg(ql_dbg_mbx, vha, 0x1097, |
2667 | "Ignoring inconsistent data length -- not enough " | 2438 | "Ignoring inconsistent data length -- not enough " |
2668 | "response info (%d).\n", | 2439 | "response info (%d).\n", |
2669 | le32_to_cpu(sts->rsp_data_len)); | 2440 | le32_to_cpu(sts->rsp_data_len)); |
@@ -2682,8 +2453,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, | |||
2682 | ql_dbg(ql_dbg_mbx, vha, 0x1099, | 2453 | ql_dbg(ql_dbg_mbx, vha, 0x1099, |
2683 | "Failed to issue marker IOCB (%x).\n", rval2); | 2454 | "Failed to issue marker IOCB (%x).\n", rval2); |
2684 | } else { | 2455 | } else { |
2685 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a, | 2456 | ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__); |
2686 | "Done %s.\n", __func__); | ||
2687 | } | 2457 | } |
2688 | 2458 | ||
2689 | dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); | 2459 | dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); |
@@ -2724,8 +2494,7 @@ qla2x00_system_error(scsi_qla_host_t *vha) | |||
2724 | if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) | 2494 | if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) |
2725 | return QLA_FUNCTION_FAILED; | 2495 | return QLA_FUNCTION_FAILED; |
2726 | 2496 | ||
2727 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b, | 2497 | ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__); |
2728 | "Entered %s.\n", __func__); | ||
2729 | 2498 | ||
2730 | mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; | 2499 | mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; |
2731 | mcp->out_mb = MBX_0; | 2500 | mcp->out_mb = MBX_0; |
@@ -2737,8 +2506,7 @@ qla2x00_system_error(scsi_qla_host_t *vha) | |||
2737 | if (rval != QLA_SUCCESS) { | 2506 | if (rval != QLA_SUCCESS) { |
2738 | ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); | 2507 | ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); |
2739 | } else { | 2508 | } else { |
2740 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d, | 2509 | ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__); |
2741 | "Done %s.\n", __func__); | ||
2742 | } | 2510 | } |
2743 | 2511 | ||
2744 | return rval; | 2512 | return rval; |
@@ -2758,8 +2526,7 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, | |||
2758 | mbx_cmd_t mc; | 2526 | mbx_cmd_t mc; |
2759 | mbx_cmd_t *mcp = &mc; | 2527 | mbx_cmd_t *mcp = &mc; |
2760 | 2528 | ||
2761 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e, | 2529 | ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__); |
2762 | "Entered %s.\n", __func__); | ||
2763 | 2530 | ||
2764 | mcp->mb[0] = MBC_SERDES_PARAMS; | 2531 | mcp->mb[0] = MBC_SERDES_PARAMS; |
2765 | mcp->mb[1] = BIT_0; | 2532 | mcp->mb[1] = BIT_0; |
@@ -2778,8 +2545,7 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, | |||
2778 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | 2545 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); |
2779 | } else { | 2546 | } else { |
2780 | /*EMPTY*/ | 2547 | /*EMPTY*/ |
2781 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0, | 2548 | ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__); |
2782 | "Done %s.\n", __func__); | ||
2783 | } | 2549 | } |
2784 | 2550 | ||
2785 | return rval; | 2551 | return rval; |
@@ -2795,12 +2561,10 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha) | |||
2795 | if (!IS_FWI2_CAPABLE(vha->hw)) | 2561 | if (!IS_FWI2_CAPABLE(vha->hw)) |
2796 | return QLA_FUNCTION_FAILED; | 2562 | return QLA_FUNCTION_FAILED; |
2797 | 2563 | ||
2798 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1, | 2564 | ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__); |
2799 | "Entered %s.\n", __func__); | ||
2800 | 2565 | ||
2801 | mcp->mb[0] = MBC_STOP_FIRMWARE; | 2566 | mcp->mb[0] = MBC_STOP_FIRMWARE; |
2802 | mcp->mb[1] = 0; | 2567 | mcp->out_mb = MBX_0; |
2803 | mcp->out_mb = MBX_1|MBX_0; | ||
2804 | mcp->in_mb = MBX_0; | 2568 | mcp->in_mb = MBX_0; |
2805 | mcp->tov = 5; | 2569 | mcp->tov = 5; |
2806 | mcp->flags = 0; | 2570 | mcp->flags = 0; |
@@ -2811,8 +2575,7 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha) | |||
2811 | if (mcp->mb[0] == MBS_INVALID_COMMAND) | 2575 | if (mcp->mb[0] == MBS_INVALID_COMMAND) |
2812 | rval = QLA_INVALID_COMMAND; | 2576 | rval = QLA_INVALID_COMMAND; |
2813 | } else { | 2577 | } else { |
2814 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3, | 2578 | ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__); |
2815 | "Done %s.\n", __func__); | ||
2816 | } | 2579 | } |
2817 | 2580 | ||
2818 | return rval; | 2581 | return rval; |
@@ -2826,8 +2589,7 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, | |||
2826 | mbx_cmd_t mc; | 2589 | mbx_cmd_t mc; |
2827 | mbx_cmd_t *mcp = &mc; | 2590 | mbx_cmd_t *mcp = &mc; |
2828 | 2591 | ||
2829 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4, | 2592 | ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__); |
2830 | "Entered %s.\n", __func__); | ||
2831 | 2593 | ||
2832 | if (!IS_FWI2_CAPABLE(vha->hw)) | 2594 | if (!IS_FWI2_CAPABLE(vha->hw)) |
2833 | return QLA_FUNCTION_FAILED; | 2595 | return QLA_FUNCTION_FAILED; |
@@ -2853,8 +2615,7 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, | |||
2853 | "Failed=%x mb[0]=%x mb[1]=%x.\n", | 2615 | "Failed=%x mb[0]=%x mb[1]=%x.\n", |
2854 | rval, mcp->mb[0], mcp->mb[1]); | 2616 | rval, mcp->mb[0], mcp->mb[1]); |
2855 | } else { | 2617 | } else { |
2856 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6, | 2618 | ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__); |
2857 | "Done %s.\n", __func__); | ||
2858 | } | 2619 | } |
2859 | 2620 | ||
2860 | return rval; | 2621 | return rval; |
@@ -2867,8 +2628,7 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha) | |||
2867 | mbx_cmd_t mc; | 2628 | mbx_cmd_t mc; |
2868 | mbx_cmd_t *mcp = &mc; | 2629 | mbx_cmd_t *mcp = &mc; |
2869 | 2630 | ||
2870 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7, | 2631 | ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__); |
2871 | "Entered %s.\n", __func__); | ||
2872 | 2632 | ||
2873 | if (!IS_FWI2_CAPABLE(vha->hw)) | 2633 | if (!IS_FWI2_CAPABLE(vha->hw)) |
2874 | return QLA_FUNCTION_FAILED; | 2634 | return QLA_FUNCTION_FAILED; |
@@ -2888,8 +2648,7 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha) | |||
2888 | "Failed=%x mb[0]=%x mb[1]=%x.\n", | 2648 | "Failed=%x mb[0]=%x mb[1]=%x.\n", |
2889 | rval, mcp->mb[0], mcp->mb[1]); | 2649 | rval, mcp->mb[0], mcp->mb[1]); |
2890 | } else { | 2650 | } else { |
2891 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9, | 2651 | ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__); |
2892 | "Done %s.\n", __func__); | ||
2893 | } | 2652 | } |
2894 | 2653 | ||
2895 | return rval; | 2654 | return rval; |
@@ -2903,11 +2662,9 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, | |||
2903 | mbx_cmd_t mc; | 2662 | mbx_cmd_t mc; |
2904 | mbx_cmd_t *mcp = &mc; | 2663 | mbx_cmd_t *mcp = &mc; |
2905 | 2664 | ||
2906 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa, | 2665 | ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__); |
2907 | "Entered %s.\n", __func__); | ||
2908 | 2666 | ||
2909 | if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && | 2667 | if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw)) |
2910 | !IS_QLA83XX(vha->hw)) | ||
2911 | return QLA_FUNCTION_FAILED; | 2668 | return QLA_FUNCTION_FAILED; |
2912 | 2669 | ||
2913 | if (unlikely(pci_channel_offline(vha->hw->pdev))) | 2670 | if (unlikely(pci_channel_offline(vha->hw->pdev))) |
@@ -2935,8 +2692,7 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, | |||
2935 | "Failed=%x mb[0]=%x mb[1]=%x.\n", | 2692 | "Failed=%x mb[0]=%x mb[1]=%x.\n", |
2936 | rval, mcp->mb[0], mcp->mb[1]); | 2693 | rval, mcp->mb[0], mcp->mb[1]); |
2937 | } else { | 2694 | } else { |
2938 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac, | 2695 | ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__); |
2939 | "Done %s.\n", __func__); | ||
2940 | 2696 | ||
2941 | if (mb) | 2697 | if (mb) |
2942 | memcpy(mb, mcp->mb, 8 * sizeof(*mb)); | 2698 | memcpy(mb, mcp->mb, 8 * sizeof(*mb)); |
@@ -2954,8 +2710,7 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) | |||
2954 | mbx_cmd_t mc; | 2710 | mbx_cmd_t mc; |
2955 | mbx_cmd_t *mcp = &mc; | 2711 | mbx_cmd_t *mcp = &mc; |
2956 | 2712 | ||
2957 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad, | 2713 | ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__); |
2958 | "Entered %s.\n", __func__); | ||
2959 | 2714 | ||
2960 | if (!IS_FWI2_CAPABLE(vha->hw)) | 2715 | if (!IS_FWI2_CAPABLE(vha->hw)) |
2961 | return QLA_FUNCTION_FAILED; | 2716 | return QLA_FUNCTION_FAILED; |
@@ -2977,8 +2732,7 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) | |||
2977 | "Failed=%x mb[0]=%x mb[1]=%x.\n", | 2732 | "Failed=%x mb[0]=%x mb[1]=%x.\n", |
2978 | rval, mcp->mb[0], mcp->mb[1]); | 2733 | rval, mcp->mb[0], mcp->mb[1]); |
2979 | } else { | 2734 | } else { |
2980 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af, | 2735 | ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__); |
2981 | "Done %s.\n", __func__); | ||
2982 | 2736 | ||
2983 | if (wr) | 2737 | if (wr) |
2984 | *wr = (uint64_t) mcp->mb[5] << 48 | | 2738 | *wr = (uint64_t) mcp->mb[5] << 48 | |
@@ -3003,8 +2757,7 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, | |||
3003 | mbx_cmd_t mc; | 2757 | mbx_cmd_t mc; |
3004 | mbx_cmd_t *mcp = &mc; | 2758 | mbx_cmd_t *mcp = &mc; |
3005 | 2759 | ||
3006 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0, | 2760 | ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__); |
3007 | "Entered %s.\n", __func__); | ||
3008 | 2761 | ||
3009 | if (!IS_IIDMA_CAPABLE(vha->hw)) | 2762 | if (!IS_IIDMA_CAPABLE(vha->hw)) |
3010 | return QLA_FUNCTION_FAILED; | 2763 | return QLA_FUNCTION_FAILED; |
@@ -3029,8 +2782,7 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, | |||
3029 | if (rval != QLA_SUCCESS) { | 2782 | if (rval != QLA_SUCCESS) { |
3030 | ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); | 2783 | ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); |
3031 | } else { | 2784 | } else { |
3032 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, | 2785 | ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__); |
3033 | "Done %s.\n", __func__); | ||
3034 | if (port_speed) | 2786 | if (port_speed) |
3035 | *port_speed = mcp->mb[3]; | 2787 | *port_speed = mcp->mb[3]; |
3036 | } | 2788 | } |
@@ -3046,8 +2798,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, | |||
3046 | mbx_cmd_t mc; | 2798 | mbx_cmd_t mc; |
3047 | mbx_cmd_t *mcp = &mc; | 2799 | mbx_cmd_t *mcp = &mc; |
3048 | 2800 | ||
3049 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, | 2801 | ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__); |
3050 | "Entered %s.\n", __func__); | ||
3051 | 2802 | ||
3052 | if (!IS_IIDMA_CAPABLE(vha->hw)) | 2803 | if (!IS_IIDMA_CAPABLE(vha->hw)) |
3053 | return QLA_FUNCTION_FAILED; | 2804 | return QLA_FUNCTION_FAILED; |
@@ -3055,7 +2806,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, | |||
3055 | mcp->mb[0] = MBC_PORT_PARAMS; | 2806 | mcp->mb[0] = MBC_PORT_PARAMS; |
3056 | mcp->mb[1] = loop_id; | 2807 | mcp->mb[1] = loop_id; |
3057 | mcp->mb[2] = BIT_0; | 2808 | mcp->mb[2] = BIT_0; |
3058 | if (IS_CNA_CAPABLE(vha->hw)) | 2809 | if (IS_QLA8XXX_TYPE(vha->hw)) |
3059 | mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); | 2810 | mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); |
3060 | else | 2811 | else |
3061 | mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); | 2812 | mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); |
@@ -3074,11 +2825,9 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, | |||
3074 | } | 2825 | } |
3075 | 2826 | ||
3076 | if (rval != QLA_SUCCESS) { | 2827 | if (rval != QLA_SUCCESS) { |
3077 | ql_dbg(ql_dbg_mbx, vha, 0x10b4, | 2828 | ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval); |
3078 | "Failed=%x.\n", rval); | ||
3079 | } else { | 2829 | } else { |
3080 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, | 2830 | ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__); |
3081 | "Done %s.\n", __func__); | ||
3082 | } | 2831 | } |
3083 | 2832 | ||
3084 | return rval; | 2833 | return rval; |
@@ -3094,25 +2843,24 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
3094 | scsi_qla_host_t *vp; | 2843 | scsi_qla_host_t *vp; |
3095 | unsigned long flags; | 2844 | unsigned long flags; |
3096 | 2845 | ||
3097 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, | 2846 | ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__); |
3098 | "Entered %s.\n", __func__); | ||
3099 | 2847 | ||
3100 | if (rptid_entry->entry_status != 0) | 2848 | if (rptid_entry->entry_status != 0) |
3101 | return; | 2849 | return; |
3102 | 2850 | ||
3103 | if (rptid_entry->format == 0) { | 2851 | if (rptid_entry->format == 0) { |
3104 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7, | 2852 | ql_dbg(ql_dbg_mbx, vha, 0x10b7, |
3105 | "Format 0 : Number of VPs setup %d, number of " | 2853 | "Format 0 : Number of VPs setup %d, number of " |
3106 | "VPs acquired %d.\n", | 2854 | "VPs acquired %d.\n", |
3107 | MSB(le16_to_cpu(rptid_entry->vp_count)), | 2855 | MSB(le16_to_cpu(rptid_entry->vp_count)), |
3108 | LSB(le16_to_cpu(rptid_entry->vp_count))); | 2856 | LSB(le16_to_cpu(rptid_entry->vp_count))); |
3109 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8, | 2857 | ql_dbg(ql_dbg_mbx, vha, 0x10b8, |
3110 | "Primary port id %02x%02x%02x.\n", | 2858 | "Primary port id %02x%02x%02x.\n", |
3111 | rptid_entry->port_id[2], rptid_entry->port_id[1], | 2859 | rptid_entry->port_id[2], rptid_entry->port_id[1], |
3112 | rptid_entry->port_id[0]); | 2860 | rptid_entry->port_id[0]); |
3113 | } else if (rptid_entry->format == 1) { | 2861 | } else if (rptid_entry->format == 1) { |
3114 | vp_idx = LSB(stat); | 2862 | vp_idx = LSB(stat); |
3115 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9, | 2863 | ql_dbg(ql_dbg_mbx, vha, 0x10b9, |
3116 | "Format 1: VP[%d] enabled - status %d - with " | 2864 | "Format 1: VP[%d] enabled - status %d - with " |
3117 | "port id %02x%02x%02x.\n", vp_idx, MSB(stat), | 2865 | "port id %02x%02x%02x.\n", vp_idx, MSB(stat), |
3118 | rptid_entry->port_id[2], rptid_entry->port_id[1], | 2866 | rptid_entry->port_id[2], rptid_entry->port_id[1], |
@@ -3122,7 +2870,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
3122 | if (vp_idx == 0 && (MSB(stat) != 1)) | 2870 | if (vp_idx == 0 && (MSB(stat) != 1)) |
3123 | goto reg_needed; | 2871 | goto reg_needed; |
3124 | 2872 | ||
3125 | if (MSB(stat) != 0 && MSB(stat) != 2) { | 2873 | if (MSB(stat) == 1) { |
3126 | ql_dbg(ql_dbg_mbx, vha, 0x10ba, | 2874 | ql_dbg(ql_dbg_mbx, vha, 0x10ba, |
3127 | "Could not acquire ID for VP[%d].\n", vp_idx); | 2875 | "Could not acquire ID for VP[%d].\n", vp_idx); |
3128 | return; | 2876 | return; |
@@ -3179,8 +2927,7 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha) | |||
3179 | 2927 | ||
3180 | /* This can be called by the parent */ | 2928 | /* This can be called by the parent */ |
3181 | 2929 | ||
3182 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, | 2930 | ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__); |
3183 | "Entered %s.\n", __func__); | ||
3184 | 2931 | ||
3185 | vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); | 2932 | vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); |
3186 | if (!vpmod) { | 2933 | if (!vpmod) { |
@@ -3196,9 +2943,6 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha) | |||
3196 | vpmod->vp_count = 1; | 2943 | vpmod->vp_count = 1; |
3197 | vpmod->vp_index1 = vha->vp_idx; | 2944 | vpmod->vp_index1 = vha->vp_idx; |
3198 | vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; | 2945 | vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; |
3199 | |||
3200 | qlt_modify_vp_config(vha, vpmod); | ||
3201 | |||
3202 | memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); | 2946 | memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); |
3203 | memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); | 2947 | memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); |
3204 | vpmod->entry_count = 1; | 2948 | vpmod->entry_count = 1; |
@@ -3219,8 +2963,7 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha) | |||
3219 | rval = QLA_FUNCTION_FAILED; | 2963 | rval = QLA_FUNCTION_FAILED; |
3220 | } else { | 2964 | } else { |
3221 | /* EMPTY */ | 2965 | /* EMPTY */ |
3222 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, | 2966 | ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__); |
3223 | "Done %s.\n", __func__); | ||
3224 | fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); | 2967 | fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); |
3225 | } | 2968 | } |
3226 | dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); | 2969 | dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); |
@@ -3254,7 +2997,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) | |||
3254 | int vp_index = vha->vp_idx; | 2997 | int vp_index = vha->vp_idx; |
3255 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); | 2998 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); |
3256 | 2999 | ||
3257 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1, | 3000 | ql_dbg(ql_dbg_mbx, vha, 0x10c1, |
3258 | "Entered %s enabling index %d.\n", __func__, vp_index); | 3001 | "Entered %s enabling index %d.\n", __func__, vp_index); |
3259 | 3002 | ||
3260 | if (vp_index == 0 || vp_index >= ha->max_npiv_vports) | 3003 | if (vp_index == 0 || vp_index >= ha->max_npiv_vports) |
@@ -3297,8 +3040,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) | |||
3297 | le16_to_cpu(vce->comp_status)); | 3040 | le16_to_cpu(vce->comp_status)); |
3298 | rval = QLA_FUNCTION_FAILED; | 3041 | rval = QLA_FUNCTION_FAILED; |
3299 | } else { | 3042 | } else { |
3300 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6, | 3043 | ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__); |
3301 | "Done %s.\n", __func__); | ||
3302 | } | 3044 | } |
3303 | 3045 | ||
3304 | dma_pool_free(ha->s_dma_pool, vce, vce_dma); | 3046 | dma_pool_free(ha->s_dma_pool, vce, vce_dma); |
@@ -3335,8 +3077,14 @@ qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, | |||
3335 | mbx_cmd_t mc; | 3077 | mbx_cmd_t mc; |
3336 | mbx_cmd_t *mcp = &mc; | 3078 | mbx_cmd_t *mcp = &mc; |
3337 | 3079 | ||
3338 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, | 3080 | ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__); |
3339 | "Entered %s.\n", __func__); | 3081 | |
3082 | /* | ||
3083 | * This command is implicitly executed by firmware during login for the | ||
3084 | * physical hosts | ||
3085 | */ | ||
3086 | if (vp_idx == 0) | ||
3087 | return QLA_FUNCTION_FAILED; | ||
3340 | 3088 | ||
3341 | mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; | 3089 | mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; |
3342 | mcp->mb[1] = format; | 3090 | mcp->mb[1] = format; |
@@ -3365,8 +3113,7 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, | |||
3365 | mbx_cmd_t mc; | 3113 | mbx_cmd_t mc; |
3366 | mbx_cmd_t *mcp = &mc; | 3114 | mbx_cmd_t *mcp = &mc; |
3367 | 3115 | ||
3368 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, | 3116 | ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__); |
3369 | "Entered %s.\n", __func__); | ||
3370 | 3117 | ||
3371 | if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { | 3118 | if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { |
3372 | mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; | 3119 | mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; |
@@ -3400,12 +3147,12 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, | |||
3400 | ql_dbg(ql_dbg_mbx, vha, 0x1008, | 3147 | ql_dbg(ql_dbg_mbx, vha, 0x1008, |
3401 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | 3148 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); |
3402 | } else { | 3149 | } else { |
3403 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, | 3150 | ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__); |
3404 | "Done %s.\n", __func__); | ||
3405 | } | 3151 | } |
3406 | 3152 | ||
3407 | return rval; | 3153 | return rval; |
3408 | } | 3154 | } |
3155 | |||
3409 | /* 84XX Support **************************************************************/ | 3156 | /* 84XX Support **************************************************************/ |
3410 | 3157 | ||
3411 | struct cs84xx_mgmt_cmd { | 3158 | struct cs84xx_mgmt_cmd { |
@@ -3425,8 +3172,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) | |||
3425 | unsigned long flags; | 3172 | unsigned long flags; |
3426 | struct qla_hw_data *ha = vha->hw; | 3173 | struct qla_hw_data *ha = vha->hw; |
3427 | 3174 | ||
3428 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, | 3175 | ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__); |
3429 | "Entered %s.\n", __func__); | ||
3430 | 3176 | ||
3431 | mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); | 3177 | mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); |
3432 | if (mn == NULL) { | 3178 | if (mn == NULL) { |
@@ -3467,7 +3213,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) | |||
3467 | status[0] = le16_to_cpu(mn->p.rsp.comp_status); | 3213 | status[0] = le16_to_cpu(mn->p.rsp.comp_status); |
3468 | status[1] = status[0] == CS_VCS_CHIP_FAILURE ? | 3214 | status[1] = status[0] == CS_VCS_CHIP_FAILURE ? |
3469 | le16_to_cpu(mn->p.rsp.failure_code) : 0; | 3215 | le16_to_cpu(mn->p.rsp.failure_code) : 0; |
3470 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, | 3216 | ql_dbg(ql_dbg_mbx, vha, 0x10ce, |
3471 | "cs=%x fc=%x.\n", status[0], status[1]); | 3217 | "cs=%x fc=%x.\n", status[0], status[1]); |
3472 | 3218 | ||
3473 | if (status[0] != CS_COMPLETE) { | 3219 | if (status[0] != CS_COMPLETE) { |
@@ -3481,7 +3227,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) | |||
3481 | retry = 1; | 3227 | retry = 1; |
3482 | } | 3228 | } |
3483 | } else { | 3229 | } else { |
3484 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, | 3230 | ql_dbg(ql_dbg_mbx, vha, 0x10d0, |
3485 | "Firmware updated to %x.\n", | 3231 | "Firmware updated to %x.\n", |
3486 | le32_to_cpu(mn->p.rsp.fw_ver)); | 3232 | le32_to_cpu(mn->p.rsp.fw_ver)); |
3487 | 3233 | ||
@@ -3498,11 +3244,9 @@ verify_done: | |||
3498 | dma_pool_free(ha->s_dma_pool, mn, mn_dma); | 3244 | dma_pool_free(ha->s_dma_pool, mn, mn_dma); |
3499 | 3245 | ||
3500 | if (rval != QLA_SUCCESS) { | 3246 | if (rval != QLA_SUCCESS) { |
3501 | ql_dbg(ql_dbg_mbx, vha, 0x10d1, | 3247 | ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval); |
3502 | "Failed=%x.\n", rval); | ||
3503 | } else { | 3248 | } else { |
3504 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, | 3249 | ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__); |
3505 | "Done %s.\n", __func__); | ||
3506 | } | 3250 | } |
3507 | 3251 | ||
3508 | return rval; | 3252 | return rval; |
@@ -3518,8 +3262,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) | |||
3518 | struct device_reg_25xxmq __iomem *reg; | 3262 | struct device_reg_25xxmq __iomem *reg; |
3519 | struct qla_hw_data *ha = vha->hw; | 3263 | struct qla_hw_data *ha = vha->hw; |
3520 | 3264 | ||
3521 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, | 3265 | ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__); |
3522 | "Entered %s.\n", __func__); | ||
3523 | 3266 | ||
3524 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; | 3267 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; |
3525 | mcp->mb[1] = req->options; | 3268 | mcp->mb[1] = req->options; |
@@ -3533,10 +3276,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) | |||
3533 | mcp->mb[12] = req->qos; | 3276 | mcp->mb[12] = req->qos; |
3534 | mcp->mb[11] = req->vp_idx; | 3277 | mcp->mb[11] = req->vp_idx; |
3535 | mcp->mb[13] = req->rid; | 3278 | mcp->mb[13] = req->rid; |
3536 | if (IS_QLA83XX(ha)) | ||
3537 | mcp->mb[15] = 0; | ||
3538 | 3279 | ||
3539 | reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) + | 3280 | reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) + |
3540 | QLA_QUE_PAGE * req->id); | 3281 | QLA_QUE_PAGE * req->id); |
3541 | 3282 | ||
3542 | mcp->mb[4] = req->id; | 3283 | mcp->mb[4] = req->id; |
@@ -3548,21 +3289,12 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) | |||
3548 | MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | 3289 | MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; |
3549 | mcp->in_mb = MBX_0; | 3290 | mcp->in_mb = MBX_0; |
3550 | mcp->flags = MBX_DMA_OUT; | 3291 | mcp->flags = MBX_DMA_OUT; |
3551 | mcp->tov = MBX_TOV_SECONDS * 2; | 3292 | mcp->tov = 60; |
3552 | |||
3553 | if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) | ||
3554 | mcp->in_mb |= MBX_1; | ||
3555 | if (IS_QLA83XX(ha)) { | ||
3556 | mcp->out_mb |= MBX_15; | ||
3557 | /* debug q create issue in SR-IOV */ | ||
3558 | mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; | ||
3559 | } | ||
3560 | 3293 | ||
3561 | spin_lock_irqsave(&ha->hardware_lock, flags); | 3294 | spin_lock_irqsave(&ha->hardware_lock, flags); |
3562 | if (!(req->options & BIT_0)) { | 3295 | if (!(req->options & BIT_0)) { |
3563 | WRT_REG_DWORD(&reg->req_q_in, 0); | 3296 | WRT_REG_DWORD(&reg->req_q_in, 0); |
3564 | if (!IS_QLA83XX(ha)) | 3297 | WRT_REG_DWORD(&reg->req_q_out, 0); |
3565 | WRT_REG_DWORD(&reg->req_q_out, 0); | ||
3566 | } | 3298 | } |
3567 | req->req_q_in = &reg->req_q_in; | 3299 | req->req_q_in = &reg->req_q_in; |
3568 | req->req_q_out = &reg->req_q_out; | 3300 | req->req_q_out = &reg->req_q_out; |
@@ -3573,8 +3305,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) | |||
3573 | ql_dbg(ql_dbg_mbx, vha, 0x10d4, | 3305 | ql_dbg(ql_dbg_mbx, vha, 0x10d4, |
3574 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | 3306 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); |
3575 | } else { | 3307 | } else { |
3576 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, | 3308 | ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__); |
3577 | "Done %s.\n", __func__); | ||
3578 | } | 3309 | } |
3579 | 3310 | ||
3580 | return rval; | 3311 | return rval; |
@@ -3590,8 +3321,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) | |||
3590 | struct device_reg_25xxmq __iomem *reg; | 3321 | struct device_reg_25xxmq __iomem *reg; |
3591 | struct qla_hw_data *ha = vha->hw; | 3322 | struct qla_hw_data *ha = vha->hw; |
3592 | 3323 | ||
3593 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, | 3324 | ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__); |
3594 | "Entered %s.\n", __func__); | ||
3595 | 3325 | ||
3596 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; | 3326 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; |
3597 | mcp->mb[1] = rsp->options; | 3327 | mcp->mb[1] = rsp->options; |
@@ -3602,10 +3332,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) | |||
3602 | mcp->mb[5] = rsp->length; | 3332 | mcp->mb[5] = rsp->length; |
3603 | mcp->mb[14] = rsp->msix->entry; | 3333 | mcp->mb[14] = rsp->msix->entry; |
3604 | mcp->mb[13] = rsp->rid; | 3334 | mcp->mb[13] = rsp->rid; |
3605 | if (IS_QLA83XX(ha)) | ||
3606 | mcp->mb[15] = 0; | ||
3607 | 3335 | ||
3608 | reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) + | 3336 | reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) + |
3609 | QLA_QUE_PAGE * rsp->id); | 3337 | QLA_QUE_PAGE * rsp->id); |
3610 | 3338 | ||
3611 | mcp->mb[4] = rsp->id; | 3339 | mcp->mb[4] = rsp->id; |
@@ -3617,23 +3345,12 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) | |||
3617 | |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | 3345 | |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; |
3618 | mcp->in_mb = MBX_0; | 3346 | mcp->in_mb = MBX_0; |
3619 | mcp->flags = MBX_DMA_OUT; | 3347 | mcp->flags = MBX_DMA_OUT; |
3620 | mcp->tov = MBX_TOV_SECONDS * 2; | 3348 | mcp->tov = 60; |
3621 | |||
3622 | if (IS_QLA81XX(ha)) { | ||
3623 | mcp->out_mb |= MBX_12|MBX_11|MBX_10; | ||
3624 | mcp->in_mb |= MBX_1; | ||
3625 | } else if (IS_QLA83XX(ha)) { | ||
3626 | mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; | ||
3627 | mcp->in_mb |= MBX_1; | ||
3628 | /* debug q create issue in SR-IOV */ | ||
3629 | mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; | ||
3630 | } | ||
3631 | 3349 | ||
3632 | spin_lock_irqsave(&ha->hardware_lock, flags); | 3350 | spin_lock_irqsave(&ha->hardware_lock, flags); |
3633 | if (!(rsp->options & BIT_0)) { | 3351 | if (!(rsp->options & BIT_0)) { |
3634 | WRT_REG_DWORD(&reg->rsp_q_out, 0); | 3352 | WRT_REG_DWORD(&reg->rsp_q_out, 0); |
3635 | if (!IS_QLA83XX(ha)) | 3353 | WRT_REG_DWORD(&reg->rsp_q_in, 0); |
3636 | WRT_REG_DWORD(&reg->rsp_q_in, 0); | ||
3637 | } | 3354 | } |
3638 | 3355 | ||
3639 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 3356 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
@@ -3643,8 +3360,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) | |||
3643 | ql_dbg(ql_dbg_mbx, vha, 0x10d7, | 3360 | ql_dbg(ql_dbg_mbx, vha, 0x10d7, |
3644 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | 3361 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); |
3645 | } else { | 3362 | } else { |
3646 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, | 3363 | ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__); |
3647 | "Done %s.\n", __func__); | ||
3648 | } | 3364 | } |
3649 | 3365 | ||
3650 | return rval; | 3366 | return rval; |
@@ -3657,8 +3373,7 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) | |||
3657 | mbx_cmd_t mc; | 3373 | mbx_cmd_t mc; |
3658 | mbx_cmd_t *mcp = &mc; | 3374 | mbx_cmd_t *mcp = &mc; |
3659 | 3375 | ||
3660 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, | 3376 | ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__); |
3661 | "Entered %s.\n", __func__); | ||
3662 | 3377 | ||
3663 | mcp->mb[0] = MBC_IDC_ACK; | 3378 | mcp->mb[0] = MBC_IDC_ACK; |
3664 | memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); | 3379 | memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); |
@@ -3672,8 +3387,7 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) | |||
3672 | ql_dbg(ql_dbg_mbx, vha, 0x10da, | 3387 | ql_dbg(ql_dbg_mbx, vha, 0x10da, |
3673 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | 3388 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); |
3674 | } else { | 3389 | } else { |
3675 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db, | 3390 | ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__); |
3676 | "Done %s.\n", __func__); | ||
3677 | } | 3391 | } |
3678 | 3392 | ||
3679 | return rval; | 3393 | return rval; |
@@ -3686,10 +3400,9 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) | |||
3686 | mbx_cmd_t mc; | 3400 | mbx_cmd_t mc; |
3687 | mbx_cmd_t *mcp = &mc; | 3401 | mbx_cmd_t *mcp = &mc; |
3688 | 3402 | ||
3689 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, | 3403 | ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__); |
3690 | "Entered %s.\n", __func__); | ||
3691 | 3404 | ||
3692 | if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) | 3405 | if (!IS_QLA81XX(vha->hw)) |
3693 | return QLA_FUNCTION_FAILED; | 3406 | return QLA_FUNCTION_FAILED; |
3694 | 3407 | ||
3695 | mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; | 3408 | mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; |
@@ -3705,8 +3418,7 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) | |||
3705 | "Failed=%x mb[0]=%x mb[1]=%x.\n", | 3418 | "Failed=%x mb[0]=%x mb[1]=%x.\n", |
3706 | rval, mcp->mb[0], mcp->mb[1]); | 3419 | rval, mcp->mb[0], mcp->mb[1]); |
3707 | } else { | 3420 | } else { |
3708 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, | 3421 | ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__); |
3709 | "Done %s.\n", __func__); | ||
3710 | *sector_size = mcp->mb[1]; | 3422 | *sector_size = mcp->mb[1]; |
3711 | } | 3423 | } |
3712 | 3424 | ||
@@ -3720,11 +3432,10 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) | |||
3720 | mbx_cmd_t mc; | 3432 | mbx_cmd_t mc; |
3721 | mbx_cmd_t *mcp = &mc; | 3433 | mbx_cmd_t *mcp = &mc; |
3722 | 3434 | ||
3723 | if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) | 3435 | if (!IS_QLA81XX(vha->hw)) |
3724 | return QLA_FUNCTION_FAILED; | 3436 | return QLA_FUNCTION_FAILED; |
3725 | 3437 | ||
3726 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, | 3438 | ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__); |
3727 | "Entered %s.\n", __func__); | ||
3728 | 3439 | ||
3729 | mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; | 3440 | mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; |
3730 | mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : | 3441 | mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : |
@@ -3740,8 +3451,7 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) | |||
3740 | "Failed=%x mb[0]=%x mb[1]=%x.\n", | 3451 | "Failed=%x mb[0]=%x mb[1]=%x.\n", |
3741 | rval, mcp->mb[0], mcp->mb[1]); | 3452 | rval, mcp->mb[0], mcp->mb[1]); |
3742 | } else { | 3453 | } else { |
3743 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, | 3454 | ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__); |
3744 | "Done %s.\n", __func__); | ||
3745 | } | 3455 | } |
3746 | 3456 | ||
3747 | return rval; | 3457 | return rval; |
@@ -3754,11 +3464,10 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) | |||
3754 | mbx_cmd_t mc; | 3464 | mbx_cmd_t mc; |
3755 | mbx_cmd_t *mcp = &mc; | 3465 | mbx_cmd_t *mcp = &mc; |
3756 | 3466 | ||
3757 | if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) | 3467 | if (!IS_QLA81XX(vha->hw)) |
3758 | return QLA_FUNCTION_FAILED; | 3468 | return QLA_FUNCTION_FAILED; |
3759 | 3469 | ||
3760 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, | 3470 | ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__); |
3761 | "Entered %s.\n", __func__); | ||
3762 | 3471 | ||
3763 | mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; | 3472 | mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; |
3764 | mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; | 3473 | mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; |
@@ -3777,8 +3486,7 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) | |||
3777 | "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", | 3486 | "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", |
3778 | rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); | 3487 | rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); |
3779 | } else { | 3488 | } else { |
3780 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, | 3489 | ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__); |
3781 | "Done %s.\n", __func__); | ||
3782 | } | 3490 | } |
3783 | 3491 | ||
3784 | return rval; | 3492 | return rval; |
@@ -3791,8 +3499,7 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) | |||
3791 | mbx_cmd_t mc; | 3499 | mbx_cmd_t mc; |
3792 | mbx_cmd_t *mcp = &mc; | 3500 | mbx_cmd_t *mcp = &mc; |
3793 | 3501 | ||
3794 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, | 3502 | ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__); |
3795 | "Entered %s.\n", __func__); | ||
3796 | 3503 | ||
3797 | mcp->mb[0] = MBC_RESTART_MPI_FW; | 3504 | mcp->mb[0] = MBC_RESTART_MPI_FW; |
3798 | mcp->out_mb = MBX_0; | 3505 | mcp->out_mb = MBX_0; |
@@ -3806,8 +3513,7 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) | |||
3806 | "Failed=%x mb[0]=%x mb[1]=%x.\n", | 3513 | "Failed=%x mb[0]=%x mb[1]=%x.\n", |
3807 | rval, mcp->mb[0], mcp->mb[1]); | 3514 | rval, mcp->mb[0], mcp->mb[1]); |
3808 | } else { | 3515 | } else { |
3809 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, | 3516 | ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__); |
3810 | "Done %s.\n", __func__); | ||
3811 | } | 3517 | } |
3812 | 3518 | ||
3813 | return rval; | 3519 | return rval; |
@@ -3822,8 +3528,7 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, | |||
3822 | mbx_cmd_t *mcp = &mc; | 3528 | mbx_cmd_t *mcp = &mc; |
3823 | struct qla_hw_data *ha = vha->hw; | 3529 | struct qla_hw_data *ha = vha->hw; |
3824 | 3530 | ||
3825 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, | 3531 | ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__); |
3826 | "Entered %s.\n", __func__); | ||
3827 | 3532 | ||
3828 | if (!IS_FWI2_CAPABLE(ha)) | 3533 | if (!IS_FWI2_CAPABLE(ha)) |
3829 | return QLA_FUNCTION_FAILED; | 3534 | return QLA_FUNCTION_FAILED; |
@@ -3853,8 +3558,7 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, | |||
3853 | ql_dbg(ql_dbg_mbx, vha, 0x10e9, | 3558 | ql_dbg(ql_dbg_mbx, vha, 0x10e9, |
3854 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | 3559 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); |
3855 | } else { | 3560 | } else { |
3856 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, | 3561 | ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__); |
3857 | "Done %s.\n", __func__); | ||
3858 | } | 3562 | } |
3859 | 3563 | ||
3860 | return rval; | 3564 | return rval; |
@@ -3869,8 +3573,7 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, | |||
3869 | mbx_cmd_t *mcp = &mc; | 3573 | mbx_cmd_t *mcp = &mc; |
3870 | struct qla_hw_data *ha = vha->hw; | 3574 | struct qla_hw_data *ha = vha->hw; |
3871 | 3575 | ||
3872 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, | 3576 | ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__); |
3873 | "Entered %s.\n", __func__); | ||
3874 | 3577 | ||
3875 | if (!IS_FWI2_CAPABLE(ha)) | 3578 | if (!IS_FWI2_CAPABLE(ha)) |
3876 | return QLA_FUNCTION_FAILED; | 3579 | return QLA_FUNCTION_FAILED; |
@@ -3900,8 +3603,7 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, | |||
3900 | ql_dbg(ql_dbg_mbx, vha, 0x10ec, | 3603 | ql_dbg(ql_dbg_mbx, vha, 0x10ec, |
3901 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | 3604 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); |
3902 | } else { | 3605 | } else { |
3903 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, | 3606 | ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__); |
3904 | "Done %s.\n", __func__); | ||
3905 | } | 3607 | } |
3906 | 3608 | ||
3907 | return rval; | 3609 | return rval; |
@@ -3915,10 +3617,9 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, | |||
3915 | mbx_cmd_t mc; | 3617 | mbx_cmd_t mc; |
3916 | mbx_cmd_t *mcp = &mc; | 3618 | mbx_cmd_t *mcp = &mc; |
3917 | 3619 | ||
3918 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, | 3620 | ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__); |
3919 | "Entered %s.\n", __func__); | ||
3920 | 3621 | ||
3921 | if (!IS_CNA_CAPABLE(vha->hw)) | 3622 | if (!IS_QLA8XXX_TYPE(vha->hw)) |
3922 | return QLA_FUNCTION_FAILED; | 3623 | return QLA_FUNCTION_FAILED; |
3923 | 3624 | ||
3924 | mcp->mb[0] = MBC_GET_XGMAC_STATS; | 3625 | mcp->mb[0] = MBC_GET_XGMAC_STATS; |
@@ -3938,8 +3639,7 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, | |||
3938 | "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", | 3639 | "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", |
3939 | rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); | 3640 | rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); |
3940 | } else { | 3641 | } else { |
3941 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0, | 3642 | ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__); |
3942 | "Done %s.\n", __func__); | ||
3943 | 3643 | ||
3944 | 3644 | ||
3945 | *actual_size = mcp->mb[2] << 2; | 3645 | *actual_size = mcp->mb[2] << 2; |
@@ -3956,10 +3656,9 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, | |||
3956 | mbx_cmd_t mc; | 3656 | mbx_cmd_t mc; |
3957 | mbx_cmd_t *mcp = &mc; | 3657 | mbx_cmd_t *mcp = &mc; |
3958 | 3658 | ||
3959 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, | 3659 | ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__); |
3960 | "Entered %s.\n", __func__); | ||
3961 | 3660 | ||
3962 | if (!IS_CNA_CAPABLE(vha->hw)) | 3661 | if (!IS_QLA8XXX_TYPE(vha->hw)) |
3963 | return QLA_FUNCTION_FAILED; | 3662 | return QLA_FUNCTION_FAILED; |
3964 | 3663 | ||
3965 | mcp->mb[0] = MBC_GET_DCBX_PARAMS; | 3664 | mcp->mb[0] = MBC_GET_DCBX_PARAMS; |
@@ -3980,8 +3679,7 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, | |||
3980 | "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", | 3679 | "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", |
3981 | rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); | 3680 | rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); |
3982 | } else { | 3681 | } else { |
3983 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, | 3682 | ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__); |
3984 | "Done %s.\n", __func__); | ||
3985 | } | 3683 | } |
3986 | 3684 | ||
3987 | return rval; | 3685 | return rval; |
@@ -3994,8 +3692,7 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) | |||
3994 | mbx_cmd_t mc; | 3692 | mbx_cmd_t mc; |
3995 | mbx_cmd_t *mcp = &mc; | 3693 | mbx_cmd_t *mcp = &mc; |
3996 | 3694 | ||
3997 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, | 3695 | ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__); |
3998 | "Entered %s.\n", __func__); | ||
3999 | 3696 | ||
4000 | if (!IS_FWI2_CAPABLE(vha->hw)) | 3697 | if (!IS_FWI2_CAPABLE(vha->hw)) |
4001 | return QLA_FUNCTION_FAILED; | 3698 | return QLA_FUNCTION_FAILED; |
@@ -4012,8 +3709,7 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) | |||
4012 | ql_dbg(ql_dbg_mbx, vha, 0x10f5, | 3709 | ql_dbg(ql_dbg_mbx, vha, 0x10f5, |
4013 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | 3710 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); |
4014 | } else { | 3711 | } else { |
4015 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, | 3712 | ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__); |
4016 | "Done %s.\n", __func__); | ||
4017 | *data = mcp->mb[3] << 16 | mcp->mb[2]; | 3713 | *data = mcp->mb[3] << 16 | mcp->mb[2]; |
4018 | } | 3714 | } |
4019 | 3715 | ||
@@ -4029,8 +3725,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, | |||
4029 | mbx_cmd_t *mcp = &mc; | 3725 | mbx_cmd_t *mcp = &mc; |
4030 | uint32_t iter_cnt = 0x1; | 3726 | uint32_t iter_cnt = 0x1; |
4031 | 3727 | ||
4032 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, | 3728 | ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__); |
4033 | "Entered %s.\n", __func__); | ||
4034 | 3729 | ||
4035 | memset(mcp->mb, 0 , sizeof(mcp->mb)); | 3730 | memset(mcp->mb, 0 , sizeof(mcp->mb)); |
4036 | mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; | 3731 | mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; |
@@ -4058,7 +3753,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, | |||
4058 | 3753 | ||
4059 | mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| | 3754 | mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| |
4060 | MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; | 3755 | MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; |
4061 | if (IS_CNA_CAPABLE(vha->hw)) | 3756 | if (IS_QLA8XXX_TYPE(vha->hw)) |
4062 | mcp->out_mb |= MBX_2; | 3757 | mcp->out_mb |= MBX_2; |
4063 | mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; | 3758 | mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; |
4064 | 3759 | ||
@@ -4074,8 +3769,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, | |||
4074 | "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], | 3769 | "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], |
4075 | mcp->mb[3], mcp->mb[18], mcp->mb[19]); | 3770 | mcp->mb[3], mcp->mb[18], mcp->mb[19]); |
4076 | } else { | 3771 | } else { |
4077 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, | 3772 | ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__); |
4078 | "Done %s.\n", __func__); | ||
4079 | } | 3773 | } |
4080 | 3774 | ||
4081 | /* Copy mailbox information */ | 3775 | /* Copy mailbox information */ |
@@ -4092,13 +3786,12 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, | |||
4092 | mbx_cmd_t *mcp = &mc; | 3786 | mbx_cmd_t *mcp = &mc; |
4093 | struct qla_hw_data *ha = vha->hw; | 3787 | struct qla_hw_data *ha = vha->hw; |
4094 | 3788 | ||
4095 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa, | 3789 | ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__); |
4096 | "Entered %s.\n", __func__); | ||
4097 | 3790 | ||
4098 | memset(mcp->mb, 0 , sizeof(mcp->mb)); | 3791 | memset(mcp->mb, 0 , sizeof(mcp->mb)); |
4099 | mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; | 3792 | mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; |
4100 | mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */ | 3793 | mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */ |
4101 | if (IS_CNA_CAPABLE(ha)) { | 3794 | if (IS_QLA8XXX_TYPE(ha)) { |
4102 | mcp->mb[1] |= BIT_15; | 3795 | mcp->mb[1] |= BIT_15; |
4103 | mcp->mb[2] = vha->fcoe_fcf_idx; | 3796 | mcp->mb[2] = vha->fcoe_fcf_idx; |
4104 | } | 3797 | } |
@@ -4116,14 +3809,13 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, | |||
4116 | 3809 | ||
4117 | mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| | 3810 | mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| |
4118 | MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; | 3811 | MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; |
4119 | if (IS_CNA_CAPABLE(ha)) | 3812 | if (IS_QLA8XXX_TYPE(ha)) |
4120 | mcp->out_mb |= MBX_2; | 3813 | mcp->out_mb |= MBX_2; |
4121 | 3814 | ||
4122 | mcp->in_mb = MBX_0; | 3815 | mcp->in_mb = MBX_0; |
4123 | if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || | 3816 | if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha)) |
4124 | IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) | ||
4125 | mcp->in_mb |= MBX_1; | 3817 | mcp->in_mb |= MBX_1; |
4126 | if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) | 3818 | if (IS_QLA8XXX_TYPE(ha)) |
4127 | mcp->in_mb |= MBX_3; | 3819 | mcp->in_mb |= MBX_3; |
4128 | 3820 | ||
4129 | mcp->tov = MBX_TOV_SECONDS; | 3821 | mcp->tov = MBX_TOV_SECONDS; |
@@ -4137,8 +3829,7 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, | |||
4137 | "Failed=%x mb[0]=%x mb[1]=%x.\n", | 3829 | "Failed=%x mb[0]=%x mb[1]=%x.\n", |
4138 | rval, mcp->mb[0], mcp->mb[1]); | 3830 | rval, mcp->mb[0], mcp->mb[1]); |
4139 | } else { | 3831 | } else { |
4140 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc, | 3832 | ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__); |
4141 | "Done %s.\n", __func__); | ||
4142 | } | 3833 | } |
4143 | 3834 | ||
4144 | /* Copy mailbox information */ | 3835 | /* Copy mailbox information */ |
@@ -4153,7 +3844,7 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) | |||
4153 | mbx_cmd_t mc; | 3844 | mbx_cmd_t mc; |
4154 | mbx_cmd_t *mcp = &mc; | 3845 | mbx_cmd_t *mcp = &mc; |
4155 | 3846 | ||
4156 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd, | 3847 | ql_dbg(ql_dbg_mbx, vha, 0x10fd, |
4157 | "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); | 3848 | "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); |
4158 | 3849 | ||
4159 | mcp->mb[0] = MBC_ISP84XX_RESET; | 3850 | mcp->mb[0] = MBC_ISP84XX_RESET; |
@@ -4167,8 +3858,7 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) | |||
4167 | if (rval != QLA_SUCCESS) | 3858 | if (rval != QLA_SUCCESS) |
4168 | ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); | 3859 | ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); |
4169 | else | 3860 | else |
4170 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff, | 3861 | ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__); |
4171 | "Done %s.\n", __func__); | ||
4172 | 3862 | ||
4173 | return rval; | 3863 | return rval; |
4174 | } | 3864 | } |
@@ -4180,8 +3870,7 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) | |||
4180 | mbx_cmd_t mc; | 3870 | mbx_cmd_t mc; |
4181 | mbx_cmd_t *mcp = &mc; | 3871 | mbx_cmd_t *mcp = &mc; |
4182 | 3872 | ||
4183 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100, | 3873 | ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__); |
4184 | "Entered %s.\n", __func__); | ||
4185 | 3874 | ||
4186 | if (!IS_FWI2_CAPABLE(vha->hw)) | 3875 | if (!IS_FWI2_CAPABLE(vha->hw)) |
4187 | return QLA_FUNCTION_FAILED; | 3876 | return QLA_FUNCTION_FAILED; |
@@ -4200,8 +3889,7 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) | |||
4200 | ql_dbg(ql_dbg_mbx, vha, 0x1101, | 3889 | ql_dbg(ql_dbg_mbx, vha, 0x1101, |
4201 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | 3890 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); |
4202 | } else { | 3891 | } else { |
4203 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, | 3892 | ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__); |
4204 | "Done %s.\n", __func__); | ||
4205 | } | 3893 | } |
4206 | 3894 | ||
4207 | return rval; | 3895 | return rval; |
@@ -4218,8 +3906,7 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) | |||
4218 | 3906 | ||
4219 | rval = QLA_SUCCESS; | 3907 | rval = QLA_SUCCESS; |
4220 | 3908 | ||
4221 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103, | 3909 | ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__); |
4222 | "Entered %s.\n", __func__); | ||
4223 | 3910 | ||
4224 | clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); | 3911 | clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); |
4225 | 3912 | ||
@@ -4262,13 +3949,11 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) | |||
4262 | ql_dbg(ql_dbg_mbx, vha, 0x1104, | 3949 | ql_dbg(ql_dbg_mbx, vha, 0x1104, |
4263 | "Failed=%x mb[0]=%x.\n", rval, mb[0]); | 3950 | "Failed=%x mb[0]=%x.\n", rval, mb[0]); |
4264 | } else { | 3951 | } else { |
4265 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105, | 3952 | ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__); |
4266 | "Done %s.\n", __func__); | ||
4267 | } | 3953 | } |
4268 | 3954 | ||
4269 | return rval; | 3955 | return rval; |
4270 | } | 3956 | } |
4271 | |||
4272 | int | 3957 | int |
4273 | qla2x00_get_data_rate(scsi_qla_host_t *vha) | 3958 | qla2x00_get_data_rate(scsi_qla_host_t *vha) |
4274 | { | 3959 | { |
@@ -4277,8 +3962,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha) | |||
4277 | mbx_cmd_t *mcp = &mc; | 3962 | mbx_cmd_t *mcp = &mc; |
4278 | struct qla_hw_data *ha = vha->hw; | 3963 | struct qla_hw_data *ha = vha->hw; |
4279 | 3964 | ||
4280 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, | 3965 | ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__); |
4281 | "Entered %s.\n", __func__); | ||
4282 | 3966 | ||
4283 | if (!IS_FWI2_CAPABLE(ha)) | 3967 | if (!IS_FWI2_CAPABLE(ha)) |
4284 | return QLA_FUNCTION_FAILED; | 3968 | return QLA_FUNCTION_FAILED; |
@@ -4287,8 +3971,6 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha) | |||
4287 | mcp->mb[1] = 0; | 3971 | mcp->mb[1] = 0; |
4288 | mcp->out_mb = MBX_1|MBX_0; | 3972 | mcp->out_mb = MBX_1|MBX_0; |
4289 | mcp->in_mb = MBX_2|MBX_1|MBX_0; | 3973 | mcp->in_mb = MBX_2|MBX_1|MBX_0; |
4290 | if (IS_QLA83XX(ha)) | ||
4291 | mcp->in_mb |= MBX_3; | ||
4292 | mcp->tov = MBX_TOV_SECONDS; | 3974 | mcp->tov = MBX_TOV_SECONDS; |
4293 | mcp->flags = 0; | 3975 | mcp->flags = 0; |
4294 | rval = qla2x00_mailbox_command(vha, mcp); | 3976 | rval = qla2x00_mailbox_command(vha, mcp); |
@@ -4296,8 +3978,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha) | |||
4296 | ql_dbg(ql_dbg_mbx, vha, 0x1107, | 3978 | ql_dbg(ql_dbg_mbx, vha, 0x1107, |
4297 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | 3979 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); |
4298 | } else { | 3980 | } else { |
4299 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, | 3981 | ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__); |
4300 | "Done %s.\n", __func__); | ||
4301 | if (mcp->mb[1] != 0x7) | 3982 | if (mcp->mb[1] != 0x7) |
4302 | ha->link_data_rate = mcp->mb[1]; | 3983 | ha->link_data_rate = mcp->mb[1]; |
4303 | } | 3984 | } |
@@ -4313,10 +3994,9 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) | |||
4313 | mbx_cmd_t *mcp = &mc; | 3994 | mbx_cmd_t *mcp = &mc; |
4314 | struct qla_hw_data *ha = vha->hw; | 3995 | struct qla_hw_data *ha = vha->hw; |
4315 | 3996 | ||
4316 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, | 3997 | ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__); |
4317 | "Entered %s.\n", __func__); | ||
4318 | 3998 | ||
4319 | if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) | 3999 | if (!IS_QLA81XX(ha)) |
4320 | return QLA_FUNCTION_FAILED; | 4000 | return QLA_FUNCTION_FAILED; |
4321 | mcp->mb[0] = MBC_GET_PORT_CONFIG; | 4001 | mcp->mb[0] = MBC_GET_PORT_CONFIG; |
4322 | mcp->out_mb = MBX_0; | 4002 | mcp->out_mb = MBX_0; |
@@ -4333,8 +4013,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) | |||
4333 | /* Copy all bits to preserve original value */ | 4013 | /* Copy all bits to preserve original value */ |
4334 | memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); | 4014 | memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); |
4335 | 4015 | ||
4336 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, | 4016 | ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__); |
4337 | "Done %s.\n", __func__); | ||
4338 | } | 4017 | } |
4339 | return rval; | 4018 | return rval; |
4340 | } | 4019 | } |
@@ -4346,8 +4025,7 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) | |||
4346 | mbx_cmd_t mc; | 4025 | mbx_cmd_t mc; |
4347 | mbx_cmd_t *mcp = &mc; | 4026 | mbx_cmd_t *mcp = &mc; |
4348 | 4027 | ||
4349 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, | 4028 | ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__); |
4350 | "Entered %s.\n", __func__); | ||
4351 | 4029 | ||
4352 | mcp->mb[0] = MBC_SET_PORT_CONFIG; | 4030 | mcp->mb[0] = MBC_SET_PORT_CONFIG; |
4353 | /* Copy all bits to preserve original setting */ | 4031 | /* Copy all bits to preserve original setting */ |
@@ -4362,8 +4040,7 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) | |||
4362 | ql_dbg(ql_dbg_mbx, vha, 0x110d, | 4040 | ql_dbg(ql_dbg_mbx, vha, 0x110d, |
4363 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | 4041 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); |
4364 | } else | 4042 | } else |
4365 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e, | 4043 | ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__); |
4366 | "Done %s.\n", __func__); | ||
4367 | 4044 | ||
4368 | return rval; | 4045 | return rval; |
4369 | } | 4046 | } |
@@ -4378,8 +4055,7 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, | |||
4378 | mbx_cmd_t *mcp = &mc; | 4055 | mbx_cmd_t *mcp = &mc; |
4379 | struct qla_hw_data *ha = vha->hw; | 4056 | struct qla_hw_data *ha = vha->hw; |
4380 | 4057 | ||
4381 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, | 4058 | ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__); |
4382 | "Entered %s.\n", __func__); | ||
4383 | 4059 | ||
4384 | if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) | 4060 | if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) |
4385 | return QLA_FUNCTION_FAILED; | 4061 | return QLA_FUNCTION_FAILED; |
@@ -4407,8 +4083,7 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, | |||
4407 | if (rval != QLA_SUCCESS) { | 4083 | if (rval != QLA_SUCCESS) { |
4408 | ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); | 4084 | ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); |
4409 | } else { | 4085 | } else { |
4410 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, | 4086 | ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__); |
4411 | "Done %s.\n", __func__); | ||
4412 | } | 4087 | } |
4413 | 4088 | ||
4414 | return rval; | 4089 | return rval; |
@@ -4421,12 +4096,10 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac) | |||
4421 | uint8_t byte; | 4096 | uint8_t byte; |
4422 | struct qla_hw_data *ha = vha->hw; | 4097 | struct qla_hw_data *ha = vha->hw; |
4423 | 4098 | ||
4424 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca, | 4099 | ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__); |
4425 | "Entered %s.\n", __func__); | ||
4426 | 4100 | ||
4427 | /* Integer part */ | 4101 | /* Integer part */ |
4428 | rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, | 4102 | rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0); |
4429 | BIT_13|BIT_12|BIT_0); | ||
4430 | if (rval != QLA_SUCCESS) { | 4103 | if (rval != QLA_SUCCESS) { |
4431 | ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval); | 4104 | ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval); |
4432 | ha->flags.thermal_supported = 0; | 4105 | ha->flags.thermal_supported = 0; |
@@ -4435,8 +4108,7 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac) | |||
4435 | *temp = byte; | 4108 | *temp = byte; |
4436 | 4109 | ||
4437 | /* Fraction part */ | 4110 | /* Fraction part */ |
4438 | rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, | 4111 | rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0); |
4439 | BIT_13|BIT_12|BIT_0); | ||
4440 | if (rval != QLA_SUCCESS) { | 4112 | if (rval != QLA_SUCCESS) { |
4441 | ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval); | 4113 | ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval); |
4442 | ha->flags.thermal_supported = 0; | 4114 | ha->flags.thermal_supported = 0; |
@@ -4444,8 +4116,7 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac) | |||
4444 | } | 4116 | } |
4445 | *frac = (byte >> 6) * 25; | 4117 | *frac = (byte >> 6) * 25; |
4446 | 4118 | ||
4447 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018, | 4119 | ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__); |
4448 | "Done %s.\n", __func__); | ||
4449 | fail: | 4120 | fail: |
4450 | return rval; | 4121 | return rval; |
4451 | } | 4122 | } |
@@ -4458,8 +4129,7 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) | |||
4458 | mbx_cmd_t mc; | 4129 | mbx_cmd_t mc; |
4459 | mbx_cmd_t *mcp = &mc; | 4130 | mbx_cmd_t *mcp = &mc; |
4460 | 4131 | ||
4461 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, | 4132 | ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__); |
4462 | "Entered %s.\n", __func__); | ||
4463 | 4133 | ||
4464 | if (!IS_FWI2_CAPABLE(ha)) | 4134 | if (!IS_FWI2_CAPABLE(ha)) |
4465 | return QLA_FUNCTION_FAILED; | 4135 | return QLA_FUNCTION_FAILED; |
@@ -4478,8 +4148,7 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) | |||
4478 | ql_dbg(ql_dbg_mbx, vha, 0x1016, | 4148 | ql_dbg(ql_dbg_mbx, vha, 0x1016, |
4479 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | 4149 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); |
4480 | } else { | 4150 | } else { |
4481 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, | 4151 | ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__); |
4482 | "Done %s.\n", __func__); | ||
4483 | } | 4152 | } |
4484 | 4153 | ||
4485 | return rval; | 4154 | return rval; |
@@ -4493,8 +4162,7 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) | |||
4493 | mbx_cmd_t mc; | 4162 | mbx_cmd_t mc; |
4494 | mbx_cmd_t *mcp = &mc; | 4163 | mbx_cmd_t *mcp = &mc; |
4495 | 4164 | ||
4496 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, | 4165 | ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__); |
4497 | "Entered %s.\n", __func__); | ||
4498 | 4166 | ||
4499 | if (!IS_QLA82XX(ha)) | 4167 | if (!IS_QLA82XX(ha)) |
4500 | return QLA_FUNCTION_FAILED; | 4168 | return QLA_FUNCTION_FAILED; |
@@ -4513,485 +4181,7 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) | |||
4513 | ql_dbg(ql_dbg_mbx, vha, 0x100c, | 4181 | ql_dbg(ql_dbg_mbx, vha, 0x100c, |
4514 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | 4182 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); |
4515 | } else { | 4183 | } else { |
4516 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b, | 4184 | ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__); |
4517 | "Done %s.\n", __func__); | ||
4518 | } | ||
4519 | |||
4520 | return rval; | ||
4521 | } | ||
4522 | |||
4523 | int | ||
4524 | qla82xx_md_get_template_size(scsi_qla_host_t *vha) | ||
4525 | { | ||
4526 | struct qla_hw_data *ha = vha->hw; | ||
4527 | mbx_cmd_t mc; | ||
4528 | mbx_cmd_t *mcp = &mc; | ||
4529 | int rval = QLA_FUNCTION_FAILED; | ||
4530 | |||
4531 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, | ||
4532 | "Entered %s.\n", __func__); | ||
4533 | |||
4534 | memset(mcp->mb, 0 , sizeof(mcp->mb)); | ||
4535 | mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); | ||
4536 | mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); | ||
4537 | mcp->mb[2] = LSW(RQST_TMPLT_SIZE); | ||
4538 | mcp->mb[3] = MSW(RQST_TMPLT_SIZE); | ||
4539 | |||
4540 | mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; | ||
4541 | mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| | ||
4542 | MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | ||
4543 | |||
4544 | mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; | ||
4545 | mcp->tov = MBX_TOV_SECONDS; | ||
4546 | rval = qla2x00_mailbox_command(vha, mcp); | ||
4547 | |||
4548 | /* Always copy back return mailbox values. */ | ||
4549 | if (rval != QLA_SUCCESS) { | ||
4550 | ql_dbg(ql_dbg_mbx, vha, 0x1120, | ||
4551 | "mailbox command FAILED=0x%x, subcode=%x.\n", | ||
4552 | (mcp->mb[1] << 16) | mcp->mb[0], | ||
4553 | (mcp->mb[3] << 16) | mcp->mb[2]); | ||
4554 | } else { | ||
4555 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, | ||
4556 | "Done %s.\n", __func__); | ||
4557 | ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); | ||
4558 | if (!ha->md_template_size) { | ||
4559 | ql_dbg(ql_dbg_mbx, vha, 0x1122, | ||
4560 | "Null template size obtained.\n"); | ||
4561 | rval = QLA_FUNCTION_FAILED; | ||
4562 | } | ||
4563 | } | ||
4564 | return rval; | ||
4565 | } | ||
4566 | |||
4567 | int | ||
4568 | qla82xx_md_get_template(scsi_qla_host_t *vha) | ||
4569 | { | ||
4570 | struct qla_hw_data *ha = vha->hw; | ||
4571 | mbx_cmd_t mc; | ||
4572 | mbx_cmd_t *mcp = &mc; | ||
4573 | int rval = QLA_FUNCTION_FAILED; | ||
4574 | |||
4575 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, | ||
4576 | "Entered %s.\n", __func__); | ||
4577 | |||
4578 | ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, | ||
4579 | ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); | ||
4580 | if (!ha->md_tmplt_hdr) { | ||
4581 | ql_log(ql_log_warn, vha, 0x1124, | ||
4582 | "Unable to allocate memory for Minidump template.\n"); | ||
4583 | return rval; | ||
4584 | } | ||
4585 | |||
4586 | memset(mcp->mb, 0 , sizeof(mcp->mb)); | ||
4587 | mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); | ||
4588 | mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); | ||
4589 | mcp->mb[2] = LSW(RQST_TMPLT); | ||
4590 | mcp->mb[3] = MSW(RQST_TMPLT); | ||
4591 | mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); | ||
4592 | mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); | ||
4593 | mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); | ||
4594 | mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); | ||
4595 | mcp->mb[8] = LSW(ha->md_template_size); | ||
4596 | mcp->mb[9] = MSW(ha->md_template_size); | ||
4597 | |||
4598 | mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; | ||
4599 | mcp->tov = MBX_TOV_SECONDS; | ||
4600 | mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| | ||
4601 | MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | ||
4602 | mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; | ||
4603 | rval = qla2x00_mailbox_command(vha, mcp); | ||
4604 | |||
4605 | if (rval != QLA_SUCCESS) { | ||
4606 | ql_dbg(ql_dbg_mbx, vha, 0x1125, | ||
4607 | "mailbox command FAILED=0x%x, subcode=%x.\n", | ||
4608 | ((mcp->mb[1] << 16) | mcp->mb[0]), | ||
4609 | ((mcp->mb[3] << 16) | mcp->mb[2])); | ||
4610 | } else | ||
4611 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, | ||
4612 | "Done %s.\n", __func__); | ||
4613 | return rval; | ||
4614 | } | ||
4615 | |||
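The two minidump-template routines above (qla82xx_md_get_template_size / qla82xx_md_get_template) pack a 32-bit mailbox command code, a 64-bit DMA address and a 32-bit length into 16-bit mailbox registers through the driver's LSW/MSW/LSD/MSD helpers. The stand-alone sketch below shows only that word-splitting, assuming the conventional macro semantics (low/high 16 bits of a 32-bit value, low/high 32 bits of a 64-bit value); the command code, address and size are made-up values, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* Assumed equivalents of the driver's splitting macros. */
#define LSW(x)  ((uint16_t)((uint32_t)(x) & 0xffffu))
#define MSW(x)  ((uint16_t)((uint32_t)(x) >> 16))
#define LSD(x)  ((uint32_t)((uint64_t)(x) & 0xffffffffull))
#define MSD(x)  ((uint32_t)((uint64_t)(x) >> 32))

int main(void)
{
    uint16_t mb[16] = { 0 };
    uint32_t cmd = 0x0000012f;            /* hypothetical command code  */
    uint64_t dma = 0x00000001c0ffee00ull; /* hypothetical DMA address   */
    uint32_t len = 0x00024000;            /* hypothetical template size */

    mb[0] = LSW(cmd);      mb[1] = MSW(cmd);
    mb[4] = LSW(LSD(dma)); mb[5] = MSW(LSD(dma));
    mb[6] = LSW(MSD(dma)); mb[7] = MSW(MSD(dma));
    mb[8] = LSW(len);      mb[9] = MSW(len);

    for (int i = 0; i < 10; i++)
        printf("mb[%d] = 0x%04x\n", i, mb[i]);
    return 0;
}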
4616 | int | ||
4617 | qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) | ||
4618 | { | ||
4619 | int rval; | ||
4620 | struct qla_hw_data *ha = vha->hw; | ||
4621 | mbx_cmd_t mc; | ||
4622 | mbx_cmd_t *mcp = &mc; | ||
4623 | |||
4624 | if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) | ||
4625 | return QLA_FUNCTION_FAILED; | ||
4626 | |||
4627 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, | ||
4628 | "Entered %s.\n", __func__); | ||
4629 | |||
4630 | memset(mcp, 0, sizeof(mbx_cmd_t)); | ||
4631 | mcp->mb[0] = MBC_SET_LED_CONFIG; | ||
4632 | mcp->mb[1] = led_cfg[0]; | ||
4633 | mcp->mb[2] = led_cfg[1]; | ||
4634 | if (IS_QLA8031(ha)) { | ||
4635 | mcp->mb[3] = led_cfg[2]; | ||
4636 | mcp->mb[4] = led_cfg[3]; | ||
4637 | mcp->mb[5] = led_cfg[4]; | ||
4638 | mcp->mb[6] = led_cfg[5]; | ||
4639 | } | ||
4640 | |||
4641 | mcp->out_mb = MBX_2|MBX_1|MBX_0; | ||
4642 | if (IS_QLA8031(ha)) | ||
4643 | mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; | ||
4644 | mcp->in_mb = MBX_0; | ||
4645 | mcp->tov = 30; | ||
4646 | mcp->flags = 0; | ||
4647 | |||
4648 | rval = qla2x00_mailbox_command(vha, mcp); | ||
4649 | if (rval != QLA_SUCCESS) { | ||
4650 | ql_dbg(ql_dbg_mbx, vha, 0x1134, | ||
4651 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | ||
4652 | } else { | ||
4653 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, | ||
4654 | "Done %s.\n", __func__); | ||
4655 | } | ||
4656 | |||
4657 | return rval; | ||
4658 | } | ||
4659 | |||
4660 | int | ||
4661 | qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) | ||
4662 | { | ||
4663 | int rval; | ||
4664 | struct qla_hw_data *ha = vha->hw; | ||
4665 | mbx_cmd_t mc; | ||
4666 | mbx_cmd_t *mcp = &mc; | ||
4667 | |||
4668 | if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) | ||
4669 | return QLA_FUNCTION_FAILED; | ||
4670 | |||
4671 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, | ||
4672 | "Entered %s.\n", __func__); | ||
4673 | |||
4674 | memset(mcp, 0, sizeof(mbx_cmd_t)); | ||
4675 | mcp->mb[0] = MBC_GET_LED_CONFIG; | ||
4676 | |||
4677 | mcp->out_mb = MBX_0; | ||
4678 | mcp->in_mb = MBX_2|MBX_1|MBX_0; | ||
4679 | if (IS_QLA8031(ha)) | ||
4680 | mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; | ||
4681 | mcp->tov = 30; | ||
4682 | mcp->flags = 0; | ||
4683 | |||
4684 | rval = qla2x00_mailbox_command(vha, mcp); | ||
4685 | if (rval != QLA_SUCCESS) { | ||
4686 | ql_dbg(ql_dbg_mbx, vha, 0x1137, | ||
4687 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | ||
4688 | } else { | ||
4689 | led_cfg[0] = mcp->mb[1]; | ||
4690 | led_cfg[1] = mcp->mb[2]; | ||
4691 | if (IS_QLA8031(ha)) { | ||
4692 | led_cfg[2] = mcp->mb[3]; | ||
4693 | led_cfg[3] = mcp->mb[4]; | ||
4694 | led_cfg[4] = mcp->mb[5]; | ||
4695 | led_cfg[5] = mcp->mb[6]; | ||
4696 | } | ||
4697 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, | ||
4698 | "Done %s.\n", __func__); | ||
4699 | } | ||
4700 | |||
4701 | return rval; | ||
4702 | } | ||
4703 | |||
4704 | int | ||
4705 | qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) | ||
4706 | { | ||
4707 | int rval; | ||
4708 | struct qla_hw_data *ha = vha->hw; | ||
4709 | mbx_cmd_t mc; | ||
4710 | mbx_cmd_t *mcp = &mc; | ||
4711 | |||
4712 | if (!IS_QLA82XX(ha)) | ||
4713 | return QLA_FUNCTION_FAILED; | ||
4714 | |||
4715 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, | ||
4716 | "Entered %s.\n", __func__); | ||
4717 | |||
4718 | memset(mcp, 0, sizeof(mbx_cmd_t)); | ||
4719 | mcp->mb[0] = MBC_SET_LED_CONFIG; | ||
4720 | if (enable) | ||
4721 | mcp->mb[7] = 0xE; | ||
4722 | else | ||
4723 | mcp->mb[7] = 0xD; | ||
4724 | |||
4725 | mcp->out_mb = MBX_7|MBX_0; | ||
4726 | mcp->in_mb = MBX_0; | ||
4727 | mcp->tov = MBX_TOV_SECONDS; | ||
4728 | mcp->flags = 0; | ||
4729 | |||
4730 | rval = qla2x00_mailbox_command(vha, mcp); | ||
4731 | if (rval != QLA_SUCCESS) { | ||
4732 | ql_dbg(ql_dbg_mbx, vha, 0x1128, | ||
4733 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | ||
4734 | } else { | ||
4735 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129, | ||
4736 | "Done %s.\n", __func__); | ||
4737 | } | ||
4738 | |||
4739 | return rval; | ||
4740 | } | ||
4741 | |||
4742 | int | ||
4743 | qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) | ||
4744 | { | ||
4745 | int rval; | ||
4746 | struct qla_hw_data *ha = vha->hw; | ||
4747 | mbx_cmd_t mc; | ||
4748 | mbx_cmd_t *mcp = &mc; | ||
4749 | |||
4750 | if (!IS_QLA83XX(ha)) | ||
4751 | return QLA_FUNCTION_FAILED; | ||
4752 | |||
4753 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, | ||
4754 | "Entered %s.\n", __func__); | ||
4755 | |||
4756 | mcp->mb[0] = MBC_WRITE_REMOTE_REG; | ||
4757 | mcp->mb[1] = LSW(reg); | ||
4758 | mcp->mb[2] = MSW(reg); | ||
4759 | mcp->mb[3] = LSW(data); | ||
4760 | mcp->mb[4] = MSW(data); | ||
4761 | mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | ||
4762 | |||
4763 | mcp->in_mb = MBX_1|MBX_0; | ||
4764 | mcp->tov = MBX_TOV_SECONDS; | ||
4765 | mcp->flags = 0; | ||
4766 | rval = qla2x00_mailbox_command(vha, mcp); | ||
4767 | |||
4768 | if (rval != QLA_SUCCESS) { | ||
4769 | ql_dbg(ql_dbg_mbx, vha, 0x1131, | ||
4770 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | ||
4771 | } else { | ||
4772 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132, | ||
4773 | "Done %s.\n", __func__); | ||
4774 | } | ||
4775 | |||
4776 | return rval; | ||
4777 | } | ||
4778 | |||
4779 | int | ||
4780 | qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport) | ||
4781 | { | ||
4782 | int rval; | ||
4783 | struct qla_hw_data *ha = vha->hw; | ||
4784 | mbx_cmd_t mc; | ||
4785 | mbx_cmd_t *mcp = &mc; | ||
4786 | |||
4787 | if (IS_QLA2100(ha) || IS_QLA2200(ha)) { | ||
4788 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b, | ||
4789 | "Implicit LOGO Unsupported.\n"); | ||
4790 | return QLA_FUNCTION_FAILED; | ||
4791 | } | ||
4792 | |||
4793 | |||
4794 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c, | ||
4795 | "Entering %s.\n", __func__); | ||
4796 | |||
4797 | /* Perform Implicit LOGO. */ | ||
4798 | mcp->mb[0] = MBC_PORT_LOGOUT; | ||
4799 | mcp->mb[1] = fcport->loop_id; | ||
4800 | mcp->mb[10] = BIT_15; | ||
4801 | mcp->out_mb = MBX_10|MBX_1|MBX_0; | ||
4802 | mcp->in_mb = MBX_0; | ||
4803 | mcp->tov = MBX_TOV_SECONDS; | ||
4804 | mcp->flags = 0; | ||
4805 | rval = qla2x00_mailbox_command(vha, mcp); | ||
4806 | if (rval != QLA_SUCCESS) | ||
4807 | ql_dbg(ql_dbg_mbx, vha, 0x113d, | ||
4808 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | ||
4809 | else | ||
4810 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e, | ||
4811 | "Done %s.\n", __func__); | ||
4812 | |||
4813 | return rval; | ||
4814 | } | ||
4815 | |||
4816 | int | ||
4817 | qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data) | ||
4818 | { | ||
4819 | int rval; | ||
4820 | mbx_cmd_t mc; | ||
4821 | mbx_cmd_t *mcp = &mc; | ||
4822 | struct qla_hw_data *ha = vha->hw; | ||
4823 | unsigned long retry_max_time = jiffies + (2 * HZ); | ||
4824 | |||
4825 | if (!IS_QLA83XX(ha)) | ||
4826 | return QLA_FUNCTION_FAILED; | ||
4827 | |||
4828 | ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); | ||
4829 | |||
4830 | retry_rd_reg: | ||
4831 | mcp->mb[0] = MBC_READ_REMOTE_REG; | ||
4832 | mcp->mb[1] = LSW(reg); | ||
4833 | mcp->mb[2] = MSW(reg); | ||
4834 | mcp->out_mb = MBX_2|MBX_1|MBX_0; | ||
4835 | mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; | ||
4836 | mcp->tov = MBX_TOV_SECONDS; | ||
4837 | mcp->flags = 0; | ||
4838 | rval = qla2x00_mailbox_command(vha, mcp); | ||
4839 | |||
4840 | if (rval != QLA_SUCCESS) { | ||
4841 | ql_dbg(ql_dbg_mbx, vha, 0x114c, | ||
4842 | "Failed=%x mb[0]=%x mb[1]=%x.\n", | ||
4843 | rval, mcp->mb[0], mcp->mb[1]); | ||
4844 | } else { | ||
4845 | *data = (mcp->mb[3] | (mcp->mb[4] << 16)); | ||
4846 | if (*data == QLA8XXX_BAD_VALUE) { | ||
4847 | /* | ||
4848 | * During soft-reset CAMRAM register reads might | ||
4849 | * return 0xbad0bad0. So retry for MAX of 2 sec | ||
4850 | * while reading camram registers. | ||
4851 | */ | ||
4852 | if (time_after(jiffies, retry_max_time)) { | ||
4853 | ql_dbg(ql_dbg_mbx, vha, 0x1141, | ||
4854 | "Failure to read CAMRAM register. " | ||
4855 | "data=0x%x.\n", *data); | ||
4856 | return QLA_FUNCTION_FAILED; | ||
4857 | } | ||
4858 | msleep(100); | ||
4859 | goto retry_rd_reg; | ||
4860 | } | ||
4861 | ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__); | ||
4862 | } | ||
4863 | |||
4864 | return rval; | ||
4865 | } | ||
4866 | |||
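qla83xx_rd_reg() above treats a CAMRAM read of 0xbad0bad0 as transient during soft reset and retries for up to two seconds, sleeping 100 ms between attempts. A userspace analogue of that bounded-retry loop, with a stubbed register read standing in for the real mailbox command:

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define BAD_VALUE 0xbad0bad0u   /* mirrors QLA8XXX_BAD_VALUE */

/* Stub: return the bad marker twice, then real data. */
static uint32_t read_reg_stub(void)
{
    static int calls;
    return (++calls < 3) ? BAD_VALUE : 0x12345678u;
}

/* Retry for roughly two seconds, pausing 100 ms between attempts. */
static int read_with_retry(uint32_t *data)
{
    time_t deadline = time(NULL) + 2;

    for (;;) {
        *data = read_reg_stub();
        if (*data != BAD_VALUE)
            return 0;               /* good read */
        if (time(NULL) > deadline)
            return -1;              /* give up, like QLA_FUNCTION_FAILED */
        usleep(100 * 1000);
    }
}

int main(void)
{
    uint32_t v = 0;
    int rc = read_with_retry(&v);

    printf("rc=%d data=0x%08x\n", rc, v);
    return 0;
}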
4867 | int | ||
4868 | qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) | ||
4869 | { | ||
4870 | int rval; | ||
4871 | mbx_cmd_t mc; | ||
4872 | mbx_cmd_t *mcp = &mc; | ||
4873 | struct qla_hw_data *ha = vha->hw; | ||
4874 | |||
4875 | if (!IS_QLA83XX(ha)) | ||
4876 | return QLA_FUNCTION_FAILED; | ||
4877 | |||
4878 | ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); | ||
4879 | |||
4880 | mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE; | ||
4881 | mcp->out_mb = MBX_0; | ||
4882 | mcp->in_mb = MBX_1|MBX_0; | ||
4883 | mcp->tov = MBX_TOV_SECONDS; | ||
4884 | mcp->flags = 0; | ||
4885 | rval = qla2x00_mailbox_command(vha, mcp); | ||
4886 | |||
4887 | if (rval != QLA_SUCCESS) { | ||
4888 | ql_dbg(ql_dbg_mbx, vha, 0x1144, | ||
4889 | "Failed=%x mb[0]=%x mb[1]=%x.\n", | ||
4890 | rval, mcp->mb[0], mcp->mb[1]); | ||
4891 | ha->isp_ops->fw_dump(vha, 0); | ||
4892 | } else { | ||
4893 | ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__); | ||
4894 | } | ||
4895 | |||
4896 | return rval; | ||
4897 | } | ||
4898 | |||
4899 | int | ||
4900 | qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options, | ||
4901 | uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size) | ||
4902 | { | ||
4903 | int rval; | ||
4904 | mbx_cmd_t mc; | ||
4905 | mbx_cmd_t *mcp = &mc; | ||
4906 | uint8_t subcode = (uint8_t)options; | ||
4907 | struct qla_hw_data *ha = vha->hw; | ||
4908 | |||
4909 | if (!IS_QLA8031(ha)) | ||
4910 | return QLA_FUNCTION_FAILED; | ||
4911 | |||
4912 | ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__); | ||
4913 | |||
4914 | mcp->mb[0] = MBC_SET_ACCESS_CONTROL; | ||
4915 | mcp->mb[1] = options; | ||
4916 | mcp->out_mb = MBX_1|MBX_0; | ||
4917 | if (subcode & BIT_2) { | ||
4918 | mcp->mb[2] = LSW(start_addr); | ||
4919 | mcp->mb[3] = MSW(start_addr); | ||
4920 | mcp->mb[4] = LSW(end_addr); | ||
4921 | mcp->mb[5] = MSW(end_addr); | ||
4922 | mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2; | ||
4923 | } | ||
4924 | mcp->in_mb = MBX_2|MBX_1|MBX_0; | ||
4925 | if (!(subcode & (BIT_2 | BIT_5))) | ||
4926 | mcp->in_mb |= MBX_4|MBX_3; | ||
4927 | mcp->tov = MBX_TOV_SECONDS; | ||
4928 | mcp->flags = 0; | ||
4929 | rval = qla2x00_mailbox_command(vha, mcp); | ||
4930 | |||
4931 | if (rval != QLA_SUCCESS) { | ||
4932 | ql_dbg(ql_dbg_mbx, vha, 0x1147, | ||
4933 | "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", | ||
4934 | rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], | ||
4935 | mcp->mb[4]); | ||
4936 | ha->isp_ops->fw_dump(vha, 0); | ||
4937 | } else { | ||
4938 | if (subcode & BIT_5) | ||
4939 | *sector_size = mcp->mb[1]; | ||
4940 | else if (subcode & (BIT_6 | BIT_7)) { | ||
4941 | ql_dbg(ql_dbg_mbx, vha, 0x1148, | ||
4942 | "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]); | ||
4943 | } else if (subcode & (BIT_3 | BIT_4)) { | ||
4944 | ql_dbg(ql_dbg_mbx, vha, 0x1149, | ||
4945 | "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]); | ||
4946 | } | ||
4947 | ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__); | ||
4948 | } | ||
4949 | |||
4950 | return rval; | ||
4951 | } | ||
4952 | |||
4953 | int | ||
4954 | qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, | ||
4955 | uint32_t size) | ||
4956 | { | ||
4957 | int rval; | ||
4958 | mbx_cmd_t mc; | ||
4959 | mbx_cmd_t *mcp = &mc; | ||
4960 | |||
4961 | if (!IS_MCTP_CAPABLE(vha->hw)) | ||
4962 | return QLA_FUNCTION_FAILED; | ||
4963 | |||
4964 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f, | ||
4965 | "Entered %s.\n", __func__); | ||
4966 | |||
4967 | mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; | ||
4968 | mcp->mb[1] = LSW(addr); | ||
4969 | mcp->mb[2] = MSW(req_dma); | ||
4970 | mcp->mb[3] = LSW(req_dma); | ||
4971 | mcp->mb[4] = MSW(size); | ||
4972 | mcp->mb[5] = LSW(size); | ||
4973 | mcp->mb[6] = MSW(MSD(req_dma)); | ||
4974 | mcp->mb[7] = LSW(MSD(req_dma)); | ||
4975 | mcp->mb[8] = MSW(addr); | ||
4976 | /* Setting RAM ID to valid */ | ||
4977 | mcp->mb[10] |= BIT_7; | ||
4978 | /* For MCTP RAM ID is 0x40 */ | ||
4979 | mcp->mb[10] |= 0x40; | ||
4980 | |||
4981 | mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1| | ||
4982 | MBX_0; | ||
4983 | |||
4984 | mcp->in_mb = MBX_0; | ||
4985 | mcp->tov = MBX_TOV_SECONDS; | ||
4986 | mcp->flags = 0; | ||
4987 | rval = qla2x00_mailbox_command(vha, mcp); | ||
4988 | |||
4989 | if (rval != QLA_SUCCESS) { | ||
4990 | ql_dbg(ql_dbg_mbx, vha, 0x114e, | ||
4991 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | ||
4992 | } else { | ||
4993 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d, | ||
4994 | "Done %s.\n", __func__); | ||
4995 | } | 4185 | } |
4996 | 4186 | ||
4997 | return rval; | 4187 | return rval; |
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 20fd974f903..f488cc69fc7 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
@@ -1,12 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | #include "qla_def.h" | 7 | #include "qla_def.h" |
8 | #include "qla_gbl.h" | 8 | #include "qla_gbl.h" |
9 | #include "qla_target.h" | ||
10 | 9 | ||
11 | #include <linux/moduleparam.h> | 10 | #include <linux/moduleparam.h> |
12 | #include <linux/vmalloc.h> | 11 | #include <linux/vmalloc.h> |
@@ -50,9 +49,6 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha) | |||
50 | 49 | ||
51 | spin_lock_irqsave(&ha->vport_slock, flags); | 50 | spin_lock_irqsave(&ha->vport_slock, flags); |
52 | list_add_tail(&vha->list, &ha->vp_list); | 51 | list_add_tail(&vha->list, &ha->vp_list); |
53 | |||
54 | qlt_update_vp_map(vha, SET_VP_IDX); | ||
55 | |||
56 | spin_unlock_irqrestore(&ha->vport_slock, flags); | 52 | spin_unlock_irqrestore(&ha->vport_slock, flags); |
57 | 53 | ||
58 | mutex_unlock(&ha->vport_lock); | 54 | mutex_unlock(&ha->vport_lock); |
@@ -83,7 +79,6 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) | |||
83 | spin_lock_irqsave(&ha->vport_slock, flags); | 79 | spin_lock_irqsave(&ha->vport_slock, flags); |
84 | } | 80 | } |
85 | list_del(&vha->list); | 81 | list_del(&vha->list); |
86 | qlt_update_vp_map(vha, RESET_VP_IDX); | ||
87 | spin_unlock_irqrestore(&ha->vport_slock, flags); | 82 | spin_unlock_irqrestore(&ha->vport_slock, flags); |
88 | 83 | ||
89 | vp_id = vha->vp_idx; | 84 | vp_id = vha->vp_idx; |
@@ -139,7 +134,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) | |||
139 | list_for_each_entry(fcport, &vha->vp_fcports, list) { | 134 | list_for_each_entry(fcport, &vha->vp_fcports, list) { |
140 | ql_dbg(ql_dbg_vport, vha, 0xa001, | 135 | ql_dbg(ql_dbg_vport, vha, 0xa001, |
141 | "Marking port dead, loop_id=0x%04x : %x.\n", | 136 | "Marking port dead, loop_id=0x%04x : %x.\n", |
142 | fcport->loop_id, fcport->vha->vp_idx); | 137 | fcport->loop_id, fcport->vp_idx); |
143 | 138 | ||
144 | qla2x00_mark_device_lost(vha, fcport, 0, 0); | 139 | qla2x00_mark_device_lost(vha, fcport, 0, 0); |
145 | qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); | 140 | qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); |
@@ -149,18 +144,12 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) | |||
149 | int | 144 | int |
150 | qla24xx_disable_vp(scsi_qla_host_t *vha) | 145 | qla24xx_disable_vp(scsi_qla_host_t *vha) |
151 | { | 146 | { |
152 | unsigned long flags; | ||
153 | int ret; | 147 | int ret; |
154 | 148 | ||
155 | ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); | 149 | ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); |
156 | atomic_set(&vha->loop_state, LOOP_DOWN); | 150 | atomic_set(&vha->loop_state, LOOP_DOWN); |
157 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); | 151 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); |
158 | 152 | ||
159 | /* Remove port id from vp target map */ | ||
160 | spin_lock_irqsave(&vha->hw->vport_slock, flags); | ||
161 | qlt_update_vp_map(vha, RESET_AL_PA); | ||
162 | spin_unlock_irqrestore(&vha->hw->vport_slock, flags); | ||
163 | |||
164 | qla2x00_mark_vp_devices_dead(vha); | 153 | qla2x00_mark_vp_devices_dead(vha); |
165 | atomic_set(&vha->vp_state, VP_FAILED); | 154 | atomic_set(&vha->vp_state, VP_FAILED); |
166 | vha->flags.management_server_logged_in = 0; | 155 | vha->flags.management_server_logged_in = 0; |
@@ -306,8 +295,10 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha) | |||
306 | static int | 295 | static int |
307 | qla2x00_do_dpc_vp(scsi_qla_host_t *vha) | 296 | qla2x00_do_dpc_vp(scsi_qla_host_t *vha) |
308 | { | 297 | { |
309 | ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012, | 298 | ql_dbg(ql_dbg_dpc, vha, 0x4012, |
310 | "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags); | 299 | "Entering %s.\n", __func__); |
300 | ql_dbg(ql_dbg_dpc, vha, 0x4013, | ||
301 | "vp_flags: 0x%lx.\n", vha->vp_flags); | ||
311 | 302 | ||
312 | qla2x00_do_work(vha); | 303 | qla2x00_do_work(vha); |
313 | 304 | ||
@@ -357,7 +348,7 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha) | |||
357 | } | 348 | } |
358 | } | 349 | } |
359 | 350 | ||
360 | ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c, | 351 | ql_dbg(ql_dbg_dpc, vha, 0x401c, |
361 | "Exiting %s.\n", __func__); | 352 | "Exiting %s.\n", __func__); |
362 | return 0; | 353 | return 0; |
363 | } | 354 | } |
@@ -479,6 +470,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) | |||
479 | 470 | ||
480 | vha->req = base_vha->req; | 471 | vha->req = base_vha->req; |
481 | host->can_queue = base_vha->req->length + 128; | 472 | host->can_queue = base_vha->req->length + 128; |
473 | host->this_id = 255; | ||
482 | host->cmd_per_lun = 3; | 474 | host->cmd_per_lun = 3; |
483 | if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) | 475 | if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) |
484 | host->max_cmd_len = 32; | 476 | host->max_cmd_len = 32; |
@@ -487,7 +479,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) | |||
487 | host->max_channel = MAX_BUSES - 1; | 479 | host->max_channel = MAX_BUSES - 1; |
488 | host->max_lun = ql2xmaxlun; | 480 | host->max_lun = ql2xmaxlun; |
489 | host->unique_id = host->host_no; | 481 | host->unique_id = host->host_no; |
490 | host->max_id = ha->max_fibre_devices; | 482 | host->max_id = MAX_TARGETS_2200; |
491 | host->transportt = qla2xxx_transport_vport_template; | 483 | host->transportt = qla2xxx_transport_vport_template; |
492 | 484 | ||
493 | ql_dbg(ql_dbg_vport, vha, 0xa007, | 485 | ql_dbg(ql_dbg_vport, vha, 0xa007, |
@@ -645,7 +637,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, | |||
645 | &req->dma, GFP_KERNEL); | 637 | &req->dma, GFP_KERNEL); |
646 | if (req->ring == NULL) { | 638 | if (req->ring == NULL) { |
647 | ql_log(ql_log_fatal, base_vha, 0x00da, | 639 | ql_log(ql_log_fatal, base_vha, 0x00da, |
648 | "Failed to allocate memory for request_ring.\n"); | 640 | "Failed to allocte memory for request_ring.\n"); |
649 | goto que_failed; | 641 | goto que_failed; |
650 | } | 642 | } |
651 | 643 | ||
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 3e3f593bada..049807cda41 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c | |||
@@ -1,14 +1,12 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | #include "qla_def.h" | 7 | #include "qla_def.h" |
8 | #include <linux/delay.h> | 8 | #include <linux/delay.h> |
9 | #include <linux/pci.h> | 9 | #include <linux/pci.h> |
10 | #include <linux/ratelimit.h> | ||
11 | #include <linux/vmalloc.h> | ||
12 | #include <scsi/scsi_tcq.h> | 10 | #include <scsi/scsi_tcq.h> |
13 | 11 | ||
14 | #define MASK(n) ((1ULL<<(n))-1) | 12 | #define MASK(n) ((1ULL<<(n))-1) |
@@ -36,7 +34,7 @@ | |||
36 | 34 | ||
37 | #define MAX_CRB_XFORM 60 | 35 | #define MAX_CRB_XFORM 60 |
38 | static unsigned long crb_addr_xform[MAX_CRB_XFORM]; | 36 | static unsigned long crb_addr_xform[MAX_CRB_XFORM]; |
39 | static int qla82xx_crb_table_initialized; | 37 | int qla82xx_crb_table_initialized; |
40 | 38 | ||
41 | #define qla82xx_crb_addr_transform(name) \ | 39 | #define qla82xx_crb_addr_transform(name) \ |
42 | (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \ | 40 | (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \ |
@@ -102,7 +100,7 @@ static void qla82xx_crb_addr_transform_setup(void) | |||
102 | qla82xx_crb_table_initialized = 1; | 100 | qla82xx_crb_table_initialized = 1; |
103 | } | 101 | } |
104 | 102 | ||
105 | static struct crb_128M_2M_block_map crb_128M_2M_map[64] = { | 103 | struct crb_128M_2M_block_map crb_128M_2M_map[64] = { |
106 | {{{0, 0, 0, 0} } }, | 104 | {{{0, 0, 0, 0} } }, |
107 | {{{1, 0x0100000, 0x0102000, 0x120000}, | 105 | {{{1, 0x0100000, 0x0102000, 0x120000}, |
108 | {1, 0x0110000, 0x0120000, 0x130000}, | 106 | {1, 0x0110000, 0x0120000, 0x130000}, |
@@ -262,7 +260,7 @@ static struct crb_128M_2M_block_map crb_128M_2M_map[64] = { | |||
262 | /* | 260 | /* |
263 | * top 12 bits of crb internal address (hub, agent) | 261 | * top 12 bits of crb internal address (hub, agent) |
264 | */ | 262 | */ |
265 | static unsigned qla82xx_crb_hub_agt[64] = { | 263 | unsigned qla82xx_crb_hub_agt[64] = { |
266 | 0, | 264 | 0, |
267 | QLA82XX_HW_CRB_HUB_AGT_ADR_PS, | 265 | QLA82XX_HW_CRB_HUB_AGT_ADR_PS, |
268 | QLA82XX_HW_CRB_HUB_AGT_ADR_MN, | 266 | QLA82XX_HW_CRB_HUB_AGT_ADR_MN, |
@@ -330,7 +328,7 @@ static unsigned qla82xx_crb_hub_agt[64] = { | |||
330 | }; | 328 | }; |
331 | 329 | ||
332 | /* Device states */ | 330 | /* Device states */ |
333 | static char *q_dev_state[] = { | 331 | char *qdev_state[] = { |
334 | "Unknown", | 332 | "Unknown", |
335 | "Cold", | 333 | "Cold", |
336 | "Initializing", | 334 | "Initializing", |
@@ -341,11 +339,6 @@ static char *q_dev_state[] = { | |||
341 | "Quiescent", | 339 | "Quiescent", |
342 | }; | 340 | }; |
343 | 341 | ||
344 | char *qdev_state(uint32_t dev_state) | ||
345 | { | ||
346 | return q_dev_state[dev_state]; | ||
347 | } | ||
348 | |||
349 | /* | 342 | /* |
350 | * In: 'off' is offset from CRB space in 128M pci map | 343 | * In: 'off' is offset from CRB space in 128M pci map |
351 | * Out: 'off' is 2M pci map addr | 344 | * Out: 'off' is 2M pci map addr |
@@ -359,18 +352,17 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off) | |||
359 | 352 | ||
360 | ha->crb_win = CRB_HI(*off); | 353 | ha->crb_win = CRB_HI(*off); |
361 | writel(ha->crb_win, | 354 | writel(ha->crb_win, |
362 | (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); | 355 | (void *)(CRB_WINDOW_2M + ha->nx_pcibase)); |
363 | 356 | ||
364 | /* Read back value to make sure write has gone through before trying | 357 | /* Read back value to make sure write has gone through before trying |
365 | * to use it. | 358 | * to use it. |
366 | */ | 359 | */ |
367 | win_read = RD_REG_DWORD((void __iomem *) | 360 | win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase)); |
368 | (CRB_WINDOW_2M + ha->nx_pcibase)); | ||
369 | if (win_read != ha->crb_win) { | 361 | if (win_read != ha->crb_win) { |
370 | ql_dbg(ql_dbg_p3p, vha, 0xb000, | 362 | ql_dbg(ql_dbg_p3p, vha, 0xb000, |
371 | "%s: Written crbwin (0x%x) " | 363 | "%s: Written crbwin (0x%x) " |
372 | "!= Read crbwin (0x%x), off=0x%lx.\n", | 364 | "!= Read crbwin (0x%x), off=0x%lx.\n", |
373 | __func__, ha->crb_win, win_read, *off); | 365 | ha->crb_win, win_read, *off); |
374 | } | 366 | } |
375 | *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; | 367 | *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; |
376 | } | 368 | } |
@@ -410,7 +402,7 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off) | |||
410 | } | 402 | } |
411 | /* strange address given */ | 403 | /* strange address given */ |
412 | ql_dbg(ql_dbg_p3p, vha, 0xb001, | 404 | ql_dbg(ql_dbg_p3p, vha, 0xb001, |
413 | "%s: Warning: unm_nic_pci_set_crbwindow " | 405 | "%x: Warning: unm_nic_pci_set_crbwindow " |
414 | "called with an unknown address(%llx).\n", | 406 | "called with an unknown address(%llx).\n", |
415 | QLA2XXX_DRIVER_NAME, off); | 407 | QLA2XXX_DRIVER_NAME, off); |
416 | return off; | 408 | return off; |
@@ -568,7 +560,7 @@ qla82xx_pci_mem_bound_check(struct qla_hw_data *ha, | |||
568 | return 1; | 560 | return 1; |
569 | } | 561 | } |
570 | 562 | ||
571 | static int qla82xx_pci_set_window_warning_count; | 563 | int qla82xx_pci_set_window_warning_count; |
572 | 564 | ||
573 | static unsigned long | 565 | static unsigned long |
574 | qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) | 566 | qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) |
@@ -678,10 +670,10 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha, | |||
678 | u64 off, void *data, int size) | 670 | u64 off, void *data, int size) |
679 | { | 671 | { |
680 | unsigned long flags; | 672 | unsigned long flags; |
681 | void __iomem *addr = NULL; | 673 | void *addr = NULL; |
682 | int ret = 0; | 674 | int ret = 0; |
683 | u64 start; | 675 | u64 start; |
684 | uint8_t __iomem *mem_ptr = NULL; | 676 | uint8_t *mem_ptr = NULL; |
685 | unsigned long mem_base; | 677 | unsigned long mem_base; |
686 | unsigned long mem_page; | 678 | unsigned long mem_page; |
687 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); | 679 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); |
@@ -713,7 +705,7 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha, | |||
713 | mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2); | 705 | mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2); |
714 | else | 706 | else |
715 | mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); | 707 | mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); |
716 | if (mem_ptr == NULL) { | 708 | if (mem_ptr == 0UL) { |
717 | *(u8 *)data = 0; | 709 | *(u8 *)data = 0; |
718 | return -1; | 710 | return -1; |
719 | } | 711 | } |
@@ -750,10 +742,10 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha, | |||
750 | u64 off, void *data, int size) | 742 | u64 off, void *data, int size) |
751 | { | 743 | { |
752 | unsigned long flags; | 744 | unsigned long flags; |
753 | void __iomem *addr = NULL; | 745 | void *addr = NULL; |
754 | int ret = 0; | 746 | int ret = 0; |
755 | u64 start; | 747 | u64 start; |
756 | uint8_t __iomem *mem_ptr = NULL; | 748 | uint8_t *mem_ptr = NULL; |
757 | unsigned long mem_base; | 749 | unsigned long mem_base; |
758 | unsigned long mem_page; | 750 | unsigned long mem_page; |
759 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); | 751 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); |
@@ -785,7 +777,7 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha, | |||
785 | mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2); | 777 | mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2); |
786 | else | 778 | else |
787 | mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); | 779 | mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); |
788 | if (mem_ptr == NULL) | 780 | if (mem_ptr == 0UL) |
789 | return -1; | 781 | return -1; |
790 | 782 | ||
791 | addr = mem_ptr; | 783 | addr = mem_ptr; |
@@ -910,36 +902,26 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha) | |||
910 | } | 902 | } |
911 | 903 | ||
912 | static int | 904 | static int |
913 | qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag) | ||
914 | { | ||
915 | uint32_t off_value, rval = 0; | ||
916 | |||
917 | WRT_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase), | ||
918 | (off & 0xFFFF0000)); | ||
919 | |||
920 | /* Read back value to make sure write has gone through */ | ||
921 | RD_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); | ||
922 | off_value = (off & 0x0000FFFF); | ||
923 | |||
924 | if (flag) | ||
925 | WRT_REG_DWORD((void __iomem *) | ||
926 | (off_value + CRB_INDIRECT_2M + ha->nx_pcibase), | ||
927 | data); | ||
928 | else | ||
929 | rval = RD_REG_DWORD((void __iomem *) | ||
930 | (off_value + CRB_INDIRECT_2M + ha->nx_pcibase)); | ||
931 | |||
932 | return rval; | ||
933 | } | ||
934 | |||
935 | static int | ||
936 | qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) | 905 | qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) |
937 | { | 906 | { |
938 | /* Dword reads to flash. */ | 907 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); |
939 | qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (addr & 0xFFFF0000), 1); | ||
940 | *valp = qla82xx_md_rw_32(ha, MD_DIRECT_ROM_READ_BASE + | ||
941 | (addr & 0x0000FFFF), 0, 0); | ||
942 | 908 | ||
909 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr); | ||
910 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); | ||
911 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); | ||
912 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb); | ||
913 | qla82xx_wait_rom_busy(ha); | ||
914 | if (qla82xx_wait_rom_done(ha)) { | ||
915 | ql_log(ql_log_fatal, vha, 0x00ba, | ||
916 | "Error waiting for rom done.\n"); | ||
917 | return -1; | ||
918 | } | ||
919 | /* Reset abyte_cnt and dummy_byte_cnt */ | ||
920 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); | ||
921 | udelay(10); | ||
922 | cond_resched(); | ||
923 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); | ||
924 | *valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA); | ||
943 | return 0; | 925 | return 0; |
944 | } | 926 | } |
945 | 927 | ||
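The qla82xx_md_rw_32() helper removed above (like qla82xx_pci_set_crbwindow_2M() earlier in this file) follows a windowed register-access pattern: program the upper 16 bits of the CRB offset into the 2M window register, read the value back so the write is known to have posted, then access the low 16 bits through the indirect aperture. A toy, memory-backed model of that scheme; sizes and names are illustrative, not the hardware's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SPACE_SIZE (1u << 20)       /* pretend CRB space */

static uint8_t  crb_space[SPACE_SIZE];
static uint32_t crb_window;         /* stands in for CRB_WINDOW_2M */

static void set_window(uint32_t off)
{
    crb_window = off & 0xffff0000u; /* program the window base */
    (void)crb_window;               /* "read back" to confirm it posted */
}

static uint32_t indirect_read(uint32_t off)
{
    uint32_t v;

    set_window(off);
    /* the low 16 bits index the aperture, which the window maps onto */
    memcpy(&v, &crb_space[crb_window + (off & 0xffffu)], sizeof(v));
    return v;
}

int main(void)
{
    uint32_t probe = 0xdeadbeefu;

    memcpy(&crb_space[0x00031000u], &probe, sizeof(probe));
    printf("0x%08x\n", indirect_read(0x00031000u));
    return 0;
}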
@@ -956,7 +938,7 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) | |||
956 | } | 938 | } |
957 | if (loops >= 50000) { | 939 | if (loops >= 50000) { |
958 | ql_log(ql_log_fatal, vha, 0x00b9, | 940 | ql_log(ql_log_fatal, vha, 0x00b9, |
959 | "Failed to acquire SEM2 lock.\n"); | 941 | "Failed to aquire SEM2 lock.\n"); |
960 | return -1; | 942 | return -1; |
961 | } | 943 | } |
962 | ret = qla82xx_do_rom_fast_read(ha, addr, valp); | 944 | ret = qla82xx_do_rom_fast_read(ha, addr, valp); |
@@ -1066,7 +1048,7 @@ ql82xx_rom_lock_d(struct qla_hw_data *ha) | |||
1066 | "ROM lock failed.\n"); | 1048 | "ROM lock failed.\n"); |
1067 | return -1; | 1049 | return -1; |
1068 | } | 1050 | } |
1069 | return 0; | 1051 | return 0; |
1070 | } | 1052 | } |
1071 | 1053 | ||
1072 | static int | 1054 | static int |
@@ -1123,7 +1105,7 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) | |||
1123 | long data; | 1105 | long data; |
1124 | }; | 1106 | }; |
1125 | 1107 | ||
1126 | /* Halt all the individual PEGs and other blocks of the ISP */ | 1108 | /* Halt all the individual PEGs and other blocks of the ISP */ |
1127 | qla82xx_rom_lock(ha); | 1109 | qla82xx_rom_lock(ha); |
1128 | 1110 | ||
1129 | /* disable all I2Q */ | 1111 | /* disable all I2Q */ |
@@ -1176,6 +1158,19 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) | |||
1176 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); | 1158 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); |
1177 | else | 1159 | else |
1178 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); | 1160 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); |
1161 | |||
1162 | /* reset ms */ | ||
1163 | val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4); | ||
1164 | val |= (1 << 1); | ||
1165 | qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val); | ||
1166 | msleep(20); | ||
1167 | |||
1168 | /* unreset ms */ | ||
1169 | val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4); | ||
1170 | val &= ~(1 << 1); | ||
1171 | qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val); | ||
1172 | msleep(20); | ||
1173 | |||
1179 | qla82xx_rom_unlock(ha); | 1174 | qla82xx_rom_unlock(ha); |
1180 | 1175 | ||
1181 | /* Read the signature value from the flash. | 1176 | /* Read the signature value from the flash. |
@@ -1191,12 +1186,12 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) | |||
1191 | } | 1186 | } |
1192 | 1187 | ||
1193 | /* Offset in flash = lower 16 bits | 1188 | /* Offset in flash = lower 16 bits |
1194 | * Number of entries = upper 16 bits | 1189 | * Number of entries = upper 16 bits |
1195 | */ | 1190 | */ |
1196 | offset = n & 0xffffU; | 1191 | offset = n & 0xffffU; |
1197 | n = (n >> 16) & 0xffffU; | 1192 | n = (n >> 16) & 0xffffU; |
1198 | 1193 | ||
1199 | /* number of addr/value pair should not exceed 1024 entries */ | 1194 | /* number of addr/value pair should not exceed 1024 entries */ |
1200 | if (n >= 1024) { | 1195 | if (n >= 1024) { |
1201 | ql_log(ql_log_fatal, vha, 0x0071, | 1196 | ql_log(ql_log_fatal, vha, 0x0071, |
1202 | "Card flash not initialized:n=0x%x.\n", n); | 1197 | "Card flash not initialized:n=0x%x.\n", n); |
@@ -1613,6 +1608,25 @@ qla82xx_get_fw_offs(struct qla_hw_data *ha) | |||
1613 | } | 1608 | } |
1614 | 1609 | ||
1615 | /* PCI related functions */ | 1610 | /* PCI related functions */ |
1611 | char * | ||
1612 | qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str) | ||
1613 | { | ||
1614 | int pcie_reg; | ||
1615 | struct qla_hw_data *ha = vha->hw; | ||
1616 | char lwstr[6]; | ||
1617 | uint16_t lnk; | ||
1618 | |||
1619 | pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); | ||
1620 | pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk); | ||
1621 | ha->link_width = (lnk >> 4) & 0x3f; | ||
1622 | |||
1623 | strcpy(str, "PCIe ("); | ||
1624 | strcat(str, "2.5Gb/s "); | ||
1625 | snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width); | ||
1626 | strcat(str, lwstr); | ||
1627 | return str; | ||
1628 | } | ||
1629 | |||
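The qla82xx_pci_info_str() added back above derives the negotiated PCIe link width from the Link Status register, where the width occupies bits [9:4]. A minimal sketch of that extraction; the register value below is a hypothetical example:

#include <stdint.h>
#include <stdio.h>

/* Negotiated link width lives in bits [9:4] of PCI_EXP_LNKSTA. */
static unsigned int link_width(uint16_t lnksta)
{
    return (lnksta >> 4) & 0x3f;
}

int main(void)
{
    uint16_t lnksta = 0x1041;   /* hypothetical Link Status value -> x4 */

    printf("PCIe x%u\n", link_width(lnksta));
    return 0;
}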
1616 | int qla82xx_pci_region_offset(struct pci_dev *pdev, int region) | 1630 | int qla82xx_pci_region_offset(struct pci_dev *pdev, int region) |
1617 | { | 1631 | { |
1618 | unsigned long val = 0; | 1632 | unsigned long val = 0; |
@@ -1655,6 +1669,7 @@ qla82xx_iospace_config(struct qla_hw_data *ha) | |||
1655 | if (!ha->nx_pcibase) { | 1669 | if (!ha->nx_pcibase) { |
1656 | ql_log_pci(ql_log_fatal, ha->pdev, 0x000e, | 1670 | ql_log_pci(ql_log_fatal, ha->pdev, 0x000e, |
1657 | "Cannot remap pcibase MMIO, aborting.\n"); | 1671 | "Cannot remap pcibase MMIO, aborting.\n"); |
1672 | pci_release_regions(ha->pdev); | ||
1658 | goto iospace_error_exit; | 1673 | goto iospace_error_exit; |
1659 | } | 1674 | } |
1660 | 1675 | ||
@@ -1669,6 +1684,7 @@ qla82xx_iospace_config(struct qla_hw_data *ha) | |||
1669 | if (!ha->nxdb_wr_ptr) { | 1684 | if (!ha->nxdb_wr_ptr) { |
1670 | ql_log_pci(ql_log_fatal, ha->pdev, 0x000f, | 1685 | ql_log_pci(ql_log_fatal, ha->pdev, 0x000f, |
1671 | "Cannot remap MMIO, aborting.\n"); | 1686 | "Cannot remap MMIO, aborting.\n"); |
1687 | pci_release_regions(ha->pdev); | ||
1672 | goto iospace_error_exit; | 1688 | goto iospace_error_exit; |
1673 | } | 1689 | } |
1674 | 1690 | ||
@@ -1688,12 +1704,12 @@ qla82xx_iospace_config(struct qla_hw_data *ha) | |||
1688 | ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006, | 1704 | ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006, |
1689 | "nx_pci_base=%p iobase=%p " | 1705 | "nx_pci_base=%p iobase=%p " |
1690 | "max_req_queues=%d msix_count=%d.\n", | 1706 | "max_req_queues=%d msix_count=%d.\n", |
1691 | (void *)ha->nx_pcibase, ha->iobase, | 1707 | ha->nx_pcibase, ha->iobase, |
1692 | ha->max_req_queues, ha->msix_count); | 1708 | ha->max_req_queues, ha->msix_count); |
1693 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010, | 1709 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010, |
1694 | "nx_pci_base=%p iobase=%p " | 1710 | "nx_pci_base=%p iobase=%p " |
1695 | "max_req_queues=%d msix_count=%d.\n", | 1711 | "max_req_queues=%d msix_count=%d.\n", |
1696 | (void *)ha->nx_pcibase, ha->iobase, | 1712 | ha->nx_pcibase, ha->iobase, |
1697 | ha->max_req_queues, ha->msix_count); | 1713 | ha->max_req_queues, ha->msix_count); |
1698 | return 0; | 1714 | return 0; |
1699 | 1715 | ||
@@ -1721,7 +1737,7 @@ qla82xx_pci_config(scsi_qla_host_t *vha) | |||
1721 | ret = pci_set_mwi(ha->pdev); | 1737 | ret = pci_set_mwi(ha->pdev); |
1722 | ha->chip_revision = ha->pdev->revision; | 1738 | ha->chip_revision = ha->pdev->revision; |
1723 | ql_dbg(ql_dbg_init, vha, 0x0043, | 1739 | ql_dbg(ql_dbg_init, vha, 0x0043, |
1724 | "Chip revision:%d.\n", | 1740 | "Chip revision:%ld.\n", |
1725 | ha->chip_revision); | 1741 | ha->chip_revision); |
1726 | return 0; | 1742 | return 0; |
1727 | } | 1743 | } |
@@ -1763,6 +1779,14 @@ void qla82xx_config_rings(struct scsi_qla_host *vha) | |||
1763 | WRT_REG_DWORD((unsigned long __iomem *)®->rsp_q_out[0], 0); | 1779 | WRT_REG_DWORD((unsigned long __iomem *)®->rsp_q_out[0], 0); |
1764 | } | 1780 | } |
1765 | 1781 | ||
1782 | void qla82xx_reset_adapter(struct scsi_qla_host *vha) | ||
1783 | { | ||
1784 | struct qla_hw_data *ha = vha->hw; | ||
1785 | vha->flags.online = 0; | ||
1786 | qla2x00_try_to_stop_firmware(vha); | ||
1787 | ha->isp_ops->disable_intrs(ha); | ||
1788 | } | ||
1789 | |||
1766 | static int | 1790 | static int |
1767 | qla82xx_fw_load_from_blob(struct qla_hw_data *ha) | 1791 | qla82xx_fw_load_from_blob(struct qla_hw_data *ha) |
1768 | { | 1792 | { |
@@ -1847,7 +1871,7 @@ qla82xx_set_product_offset(struct qla_hw_data *ha) | |||
1847 | return -1; | 1871 | return -1; |
1848 | } | 1872 | } |
1849 | 1873 | ||
1850 | static int | 1874 | int |
1851 | qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type) | 1875 | qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type) |
1852 | { | 1876 | { |
1853 | __le32 val; | 1877 | __le32 val; |
@@ -1952,6 +1976,20 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha) | |||
1952 | } | 1976 | } |
1953 | 1977 | ||
1954 | /* ISR related functions */ | 1978 | /* ISR related functions */ |
1979 | uint32_t qla82xx_isr_int_target_mask_enable[8] = { | ||
1980 | ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1, | ||
1981 | ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3, | ||
1982 | ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5, | ||
1983 | ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7 | ||
1984 | }; | ||
1985 | |||
1986 | uint32_t qla82xx_isr_int_target_status[8] = { | ||
1987 | ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, | ||
1988 | ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3, | ||
1989 | ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5, | ||
1990 | ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7 | ||
1991 | }; | ||
1992 | |||
1955 | static struct qla82xx_legacy_intr_set legacy_intr[] = \ | 1993 | static struct qla82xx_legacy_intr_set legacy_intr[] = \ |
1956 | QLA82XX_LEGACY_INTR_CONFIG; | 1994 | QLA82XX_LEGACY_INTR_CONFIG; |
1957 | 1995 | ||
@@ -1978,9 +2016,13 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) | |||
1978 | wptr++; | 2016 | wptr++; |
1979 | } | 2017 | } |
1980 | 2018 | ||
1981 | if (!ha->mcp) | 2019 | if (ha->mcp) { |
2020 | ql_dbg(ql_dbg_async, vha, 0x5052, | ||
2021 | "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]); | ||
2022 | } else { | ||
1982 | ql_dbg(ql_dbg_async, vha, 0x5053, | 2023 | ql_dbg(ql_dbg_async, vha, 0x5053, |
1983 | "MBX pointer ERROR.\n"); | 2024 | "MBX pointer ERROR.\n"); |
2025 | } | ||
1984 | } | 2026 | } |
1985 | 2027 | ||
1986 | /* | 2028 | /* |
@@ -2008,8 +2050,8 @@ qla82xx_intr_handler(int irq, void *dev_id) | |||
2008 | 2050 | ||
2009 | rsp = (struct rsp_que *) dev_id; | 2051 | rsp = (struct rsp_que *) dev_id; |
2010 | if (!rsp) { | 2052 | if (!rsp) { |
2011 | ql_log(ql_log_info, NULL, 0xb053, | 2053 | printk(KERN_INFO |
2012 | "%s: NULL response queue pointer.\n", __func__); | 2054 | "%s(): NULL response queue pointer.\n", __func__); |
2013 | return IRQ_NONE; | 2055 | return IRQ_NONE; |
2014 | } | 2056 | } |
2015 | ha = rsp->hw; | 2057 | ha = rsp->hw; |
@@ -2280,29 +2322,6 @@ void qla82xx_init_flags(struct qla_hw_data *ha) | |||
2280 | } | 2322 | } |
2281 | 2323 | ||
2282 | inline void | 2324 | inline void |
2283 | qla82xx_set_idc_version(scsi_qla_host_t *vha) | ||
2284 | { | ||
2285 | int idc_ver; | ||
2286 | uint32_t drv_active; | ||
2287 | struct qla_hw_data *ha = vha->hw; | ||
2288 | |||
2289 | drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); | ||
2290 | if (drv_active == (QLA82XX_DRV_ACTIVE << (ha->portnum * 4))) { | ||
2291 | qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, | ||
2292 | QLA82XX_IDC_VERSION); | ||
2293 | ql_log(ql_log_info, vha, 0xb082, | ||
2294 | "IDC version updated to %d\n", QLA82XX_IDC_VERSION); | ||
2295 | } else { | ||
2296 | idc_ver = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_IDC_VERSION); | ||
2297 | if (idc_ver != QLA82XX_IDC_VERSION) | ||
2298 | ql_log(ql_log_info, vha, 0xb083, | ||
2299 | "qla2xxx driver IDC version %d is not compatible " | ||
2300 | "with IDC version %d of the other drivers\n", | ||
2301 | QLA82XX_IDC_VERSION, idc_ver); | ||
2302 | } | ||
2303 | } | ||
2304 | |||
2305 | inline void | ||
2306 | qla82xx_set_drv_active(scsi_qla_host_t *vha) | 2325 | qla82xx_set_drv_active(scsi_qla_host_t *vha) |
2307 | { | 2326 | { |
2308 | uint32_t drv_active; | 2327 | uint32_t drv_active; |
@@ -2336,13 +2355,9 @@ qla82xx_need_reset(struct qla_hw_data *ha) | |||
2336 | uint32_t drv_state; | 2355 | uint32_t drv_state; |
2337 | int rval; | 2356 | int rval; |
2338 | 2357 | ||
2339 | if (ha->flags.nic_core_reset_owner) | 2358 | drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); |
2340 | return 1; | 2359 | rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); |
2341 | else { | 2360 | return rval; |
2342 | drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); | ||
2343 | rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); | ||
2344 | return rval; | ||
2345 | } | ||
2346 | } | 2361 | } |
2347 | 2362 | ||
2348 | static inline void | 2363 | static inline void |
@@ -2359,8 +2374,8 @@ qla82xx_set_rst_ready(struct qla_hw_data *ha) | |||
2359 | drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); | 2374 | drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); |
2360 | } | 2375 | } |
2361 | drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); | 2376 | drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); |
2362 | ql_dbg(ql_dbg_init, vha, 0x00bb, | 2377 | ql_log(ql_log_info, vha, 0x00bb, |
2363 | "drv_state = 0x%08x.\n", drv_state); | 2378 | "drv_state = 0x%x.\n", drv_state); |
2364 | qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); | 2379 | qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); |
2365 | } | 2380 | } |
2366 | 2381 | ||
@@ -2427,7 +2442,7 @@ qla82xx_load_fw(scsi_qla_host_t *vha) | |||
2427 | 2442 | ||
2428 | if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { | 2443 | if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { |
2429 | ql_log(ql_log_info, vha, 0x00a1, | 2444 | ql_log(ql_log_info, vha, 0x00a1, |
2430 | "Firmware loaded successfully from flash.\n"); | 2445 | "Firmware loaded successully from flash.\n"); |
2431 | return QLA_SUCCESS; | 2446 | return QLA_SUCCESS; |
2432 | } else { | 2447 | } else { |
2433 | ql_log(ql_log_warn, vha, 0x0108, | 2448 | ql_log(ql_log_warn, vha, 0x0108, |
@@ -2442,7 +2457,7 @@ try_blob_fw: | |||
2442 | blob = ha->hablob = qla2x00_request_firmware(vha); | 2457 | blob = ha->hablob = qla2x00_request_firmware(vha); |
2443 | if (!blob) { | 2458 | if (!blob) { |
2444 | ql_log(ql_log_fatal, vha, 0x00a3, | 2459 | ql_log(ql_log_fatal, vha, 0x00a3, |
2445 | "Firmware image not present.\n"); | 2460 | "Firmware image not preset.\n"); |
2446 | goto fw_load_failed; | 2461 | goto fw_load_failed; |
2447 | } | 2462 | } |
2448 | 2463 | ||
@@ -2478,6 +2493,7 @@ fw_load_failed: | |||
2478 | int | 2493 | int |
2479 | qla82xx_start_firmware(scsi_qla_host_t *vha) | 2494 | qla82xx_start_firmware(scsi_qla_host_t *vha) |
2480 | { | 2495 | { |
2496 | int pcie_cap; | ||
2481 | uint16_t lnk; | 2497 | uint16_t lnk; |
2482 | struct qla_hw_data *ha = vha->hw; | 2498 | struct qla_hw_data *ha = vha->hw; |
2483 | 2499 | ||
@@ -2508,13 +2524,492 @@ qla82xx_start_firmware(scsi_qla_host_t *vha) | |||
2508 | } | 2524 | } |
2509 | 2525 | ||
2510 | /* Negotiated Link width */ | 2526 | /* Negotiated Link width */ |
2511 | pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk); | 2527 | pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); |
2528 | pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk); | ||
2512 | ha->link_width = (lnk >> 4) & 0x3f; | 2529 | ha->link_width = (lnk >> 4) & 0x3f; |
2513 | 2530 | ||
2514 | /* Synchronize with Receive peg */ | 2531 | /* Synchronize with Receive peg */ |
2515 | return qla82xx_check_rcvpeg_state(ha); | 2532 | return qla82xx_check_rcvpeg_state(ha); |
2516 | } | 2533 | } |
2517 | 2534 | ||
2535 | static inline int | ||
2536 | qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, | ||
2537 | uint16_t tot_dsds) | ||
2538 | { | ||
2539 | uint32_t *cur_dsd = NULL; | ||
2540 | scsi_qla_host_t *vha; | ||
2541 | struct qla_hw_data *ha; | ||
2542 | struct scsi_cmnd *cmd; | ||
2543 | struct scatterlist *cur_seg; | ||
2544 | uint32_t *dsd_seg; | ||
2545 | void *next_dsd; | ||
2546 | uint8_t avail_dsds; | ||
2547 | uint8_t first_iocb = 1; | ||
2548 | uint32_t dsd_list_len; | ||
2549 | struct dsd_dma *dsd_ptr; | ||
2550 | struct ct6_dsd *ctx; | ||
2551 | |||
2552 | cmd = sp->cmd; | ||
2553 | |||
2554 | /* Update entry type to indicate Command Type 3 IOCB */ | ||
2555 | *((uint32_t *)(&cmd_pkt->entry_type)) = | ||
2556 | __constant_cpu_to_le32(COMMAND_TYPE_6); | ||
2557 | |||
2558 | /* No data transfer */ | ||
2559 | if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { | ||
2560 | cmd_pkt->byte_count = __constant_cpu_to_le32(0); | ||
2561 | return 0; | ||
2562 | } | ||
2563 | |||
2564 | vha = sp->fcport->vha; | ||
2565 | ha = vha->hw; | ||
2566 | |||
2567 | /* Set transfer direction */ | ||
2568 | if (cmd->sc_data_direction == DMA_TO_DEVICE) { | ||
2569 | cmd_pkt->control_flags = | ||
2570 | __constant_cpu_to_le16(CF_WRITE_DATA); | ||
2571 | ha->qla_stats.output_bytes += scsi_bufflen(cmd); | ||
2572 | } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { | ||
2573 | cmd_pkt->control_flags = | ||
2574 | __constant_cpu_to_le16(CF_READ_DATA); | ||
2575 | ha->qla_stats.input_bytes += scsi_bufflen(cmd); | ||
2576 | } | ||
2577 | |||
2578 | cur_seg = scsi_sglist(cmd); | ||
2579 | ctx = sp->ctx; | ||
2580 | |||
2581 | while (tot_dsds) { | ||
2582 | avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ? | ||
2583 | QLA_DSDS_PER_IOCB : tot_dsds; | ||
2584 | tot_dsds -= avail_dsds; | ||
2585 | dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE; | ||
2586 | |||
2587 | dsd_ptr = list_first_entry(&ha->gbl_dsd_list, | ||
2588 | struct dsd_dma, list); | ||
2589 | next_dsd = dsd_ptr->dsd_addr; | ||
2590 | list_del(&dsd_ptr->list); | ||
2591 | ha->gbl_dsd_avail--; | ||
2592 | list_add_tail(&dsd_ptr->list, &ctx->dsd_list); | ||
2593 | ctx->dsd_use_cnt++; | ||
2594 | ha->gbl_dsd_inuse++; | ||
2595 | |||
2596 | if (first_iocb) { | ||
2597 | first_iocb = 0; | ||
2598 | dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; | ||
2599 | *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); | ||
2600 | *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); | ||
2601 | *dsd_seg++ = cpu_to_le32(dsd_list_len); | ||
2602 | } else { | ||
2603 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); | ||
2604 | *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); | ||
2605 | *cur_dsd++ = cpu_to_le32(dsd_list_len); | ||
2606 | } | ||
2607 | cur_dsd = (uint32_t *)next_dsd; | ||
2608 | while (avail_dsds) { | ||
2609 | dma_addr_t sle_dma; | ||
2610 | |||
2611 | sle_dma = sg_dma_address(cur_seg); | ||
2612 | *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); | ||
2613 | *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); | ||
2614 | *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg)); | ||
2615 | cur_seg = sg_next(cur_seg); | ||
2616 | avail_dsds--; | ||
2617 | } | ||
2618 | } | ||
2619 | |||
2620 | /* Null termination */ | ||
2621 | *cur_dsd++ = 0; | ||
2622 | *cur_dsd++ = 0; | ||
2623 | *cur_dsd++ = 0; | ||
2624 | cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE; | ||
2625 | return 0; | ||
2626 | } | ||
2627 | |||
2628 | /* | ||
2629 | * qla82xx_calc_dsd_lists() - Determine number of DSD lists required | ||
2630 | * for Command Type 6. | ||
2631 | * | ||
2632 | * @dsds: number of data segment descriptors needed | ||
2633 | * | ||
2634 | * Returns the number of dsd lists needed to store @dsds. | ||
2635 | */ | ||
2636 | inline uint16_t | ||
2637 | qla82xx_calc_dsd_lists(uint16_t dsds) | ||
2638 | { | ||
2639 | uint16_t dsd_lists = 0; | ||
2640 | |||
2641 | dsd_lists = (dsds/QLA_DSDS_PER_IOCB); | ||
2642 | if (dsds % QLA_DSDS_PER_IOCB) | ||
2643 | dsd_lists++; | ||
2644 | return dsd_lists; | ||
2645 | } | ||
2646 | |||
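As the comment block above says, the number of DSD lists is the segment-descriptor count divided by QLA_DSDS_PER_IOCB, rounded up. The same ceiling division in a compact, testable form; the per-list capacity is an assumed stand-in for the driver's constant:

#include <stdint.h>
#include <stdio.h>

#define DSDS_PER_LIST 37    /* assumed stand-in for QLA_DSDS_PER_IOCB */

/* Number of DSD lists needed to hold 'dsds' descriptors (ceiling). */
static uint16_t calc_dsd_lists(uint16_t dsds)
{
    return (uint16_t)((dsds + DSDS_PER_LIST - 1) / DSDS_PER_LIST);
}

int main(void)
{
    uint16_t samples[] = { 1, 37, 38, 111, 112 };

    for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%d dsds -> %d list(s)\n", samples[i], calc_dsd_lists(samples[i]));
    return 0;
}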
2647 | /* | ||
2648 | * qla82xx_start_scsi() - Send a SCSI command to the ISP | ||
2649 | * @sp: command to send to the ISP | ||
2650 | * | ||
2651 | * Returns non-zero if a failure occurred, else zero. | ||
2652 | */ | ||
2653 | int | ||
2654 | qla82xx_start_scsi(srb_t *sp) | ||
2655 | { | ||
2656 | int ret, nseg; | ||
2657 | unsigned long flags; | ||
2658 | struct scsi_cmnd *cmd; | ||
2659 | uint32_t *clr_ptr; | ||
2660 | uint32_t index; | ||
2661 | uint32_t handle; | ||
2662 | uint16_t cnt; | ||
2663 | uint16_t req_cnt; | ||
2664 | uint16_t tot_dsds; | ||
2665 | struct device_reg_82xx __iomem *reg; | ||
2666 | uint32_t dbval; | ||
2667 | uint32_t *fcp_dl; | ||
2668 | uint8_t additional_cdb_len; | ||
2669 | struct ct6_dsd *ctx; | ||
2670 | struct scsi_qla_host *vha = sp->fcport->vha; | ||
2671 | struct qla_hw_data *ha = vha->hw; | ||
2672 | struct req_que *req = NULL; | ||
2673 | struct rsp_que *rsp = NULL; | ||
2674 | char tag[2]; | ||
2675 | |||
2676 | /* Setup device pointers. */ | ||
2677 | ret = 0; | ||
2678 | reg = &ha->iobase->isp82; | ||
2679 | cmd = sp->cmd; | ||
2680 | req = vha->req; | ||
2681 | rsp = ha->rsp_q_map[0]; | ||
2682 | |||
2683 | /* So we know we haven't pci_map'ed anything yet */ | ||
2684 | tot_dsds = 0; | ||
2685 | |||
2686 | dbval = 0x04 | (ha->portnum << 5); | ||
2687 | |||
2688 | /* Send marker if required */ | ||
2689 | if (vha->marker_needed != 0) { | ||
2690 | if (qla2x00_marker(vha, req, | ||
2691 | rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { | ||
2692 | ql_log(ql_log_warn, vha, 0x300c, | ||
2693 | "qla2x00_marker failed for cmd=%p.\n", cmd); | ||
2694 | return QLA_FUNCTION_FAILED; | ||
2695 | } | ||
2696 | vha->marker_needed = 0; | ||
2697 | } | ||
2698 | |||
2699 | /* Acquire ring specific lock */ | ||
2700 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
2701 | |||
2702 | /* Check for room in outstanding command list. */ | ||
2703 | handle = req->current_outstanding_cmd; | ||
2704 | for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { | ||
2705 | handle++; | ||
2706 | if (handle == MAX_OUTSTANDING_COMMANDS) | ||
2707 | handle = 1; | ||
2708 | if (!req->outstanding_cmds[handle]) | ||
2709 | break; | ||
2710 | } | ||
2711 | if (index == MAX_OUTSTANDING_COMMANDS) | ||
2712 | goto queuing_error; | ||
2713 | |||
2714 | /* Map the sg table so we have an accurate count of sg entries needed */ | ||
2715 | if (scsi_sg_count(cmd)) { | ||
2716 | nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), | ||
2717 | scsi_sg_count(cmd), cmd->sc_data_direction); | ||
2718 | if (unlikely(!nseg)) | ||
2719 | goto queuing_error; | ||
2720 | } else | ||
2721 | nseg = 0; | ||
2722 | |||
2723 | tot_dsds = nseg; | ||
2724 | |||
2725 | if (tot_dsds > ql2xshiftctondsd) { | ||
2726 | struct cmd_type_6 *cmd_pkt; | ||
2727 | uint16_t more_dsd_lists = 0; | ||
2728 | struct dsd_dma *dsd_ptr; | ||
2729 | uint16_t i; | ||
2730 | |||
2731 | more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds); | ||
2732 | if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) { | ||
2733 | ql_dbg(ql_dbg_io, vha, 0x300d, | ||
2734 | "Num of DSD list %d is than %d for cmd=%p.\n", | ||
2735 | more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN, | ||
2736 | cmd); | ||
2737 | goto queuing_error; | ||
2738 | } | ||
2739 | |||
2740 | if (more_dsd_lists <= ha->gbl_dsd_avail) | ||
2741 | goto sufficient_dsds; | ||
2742 | else | ||
2743 | more_dsd_lists -= ha->gbl_dsd_avail; | ||
2744 | |||
2745 | for (i = 0; i < more_dsd_lists; i++) { | ||
2746 | dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); | ||
2747 | if (!dsd_ptr) { | ||
2748 | ql_log(ql_log_fatal, vha, 0x300e, | ||
2749 | "Failed to allocate memory for dsd_dma " | ||
2750 | "for cmd=%p.\n", cmd); | ||
2751 | goto queuing_error; | ||
2752 | } | ||
2753 | |||
2754 | dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, | ||
2755 | GFP_ATOMIC, &dsd_ptr->dsd_list_dma); | ||
2756 | if (!dsd_ptr->dsd_addr) { | ||
2757 | kfree(dsd_ptr); | ||
2758 | ql_log(ql_log_fatal, vha, 0x300f, | ||
2759 | "Failed to allocate memory for dsd_addr " | ||
2760 | "for cmd=%p.\n", cmd); | ||
2761 | goto queuing_error; | ||
2762 | } | ||
2763 | list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list); | ||
2764 | ha->gbl_dsd_avail++; | ||
2765 | } | ||
2766 | |||
2767 | sufficient_dsds: | ||
2768 | req_cnt = 1; | ||
2769 | |||
2770 | if (req->cnt < (req_cnt + 2)) { | ||
2771 | cnt = (uint16_t)RD_REG_DWORD_RELAXED( | ||
2772 | ®->req_q_out[0]); | ||
2773 | if (req->ring_index < cnt) | ||
2774 | req->cnt = cnt - req->ring_index; | ||
2775 | else | ||
2776 | req->cnt = req->length - | ||
2777 | (req->ring_index - cnt); | ||
2778 | } | ||
2779 | |||
2780 | if (req->cnt < (req_cnt + 2)) | ||
2781 | goto queuing_error; | ||
2782 | |||
2783 | ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); | ||
2784 | if (!sp->ctx) { | ||
2785 | ql_log(ql_log_fatal, vha, 0x3010, | ||
2786 | "Failed to allocate ctx for cmd=%p.\n", cmd); | ||
2787 | goto queuing_error; | ||
2788 | } | ||
2789 | memset(ctx, 0, sizeof(struct ct6_dsd)); | ||
2790 | ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool, | ||
2791 | GFP_ATOMIC, &ctx->fcp_cmnd_dma); | ||
2792 | if (!ctx->fcp_cmnd) { | ||
2793 | ql_log(ql_log_fatal, vha, 0x3011, | ||
2794 | "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); | ||
2795 | goto queuing_error_fcp_cmnd; | ||
2796 | } | ||
2797 | |||
2798 | /* Initialize the DSD list and dma handle */ | ||
2799 | INIT_LIST_HEAD(&ctx->dsd_list); | ||
2800 | ctx->dsd_use_cnt = 0; | ||
2801 | |||
2802 | if (cmd->cmd_len > 16) { | ||
2803 | additional_cdb_len = cmd->cmd_len - 16; | ||
2804 | if ((cmd->cmd_len % 4) != 0) { | ||
2805 | /* SCSI command bigger than 16 bytes must be | ||
2806 | * multiple of 4 | ||
2807 | */ | ||
2808 | ql_log(ql_log_warn, vha, 0x3012, | ||
2809 | "scsi cmd len %d not multiple of 4 " | ||
2810 | "for cmd=%p.\n", cmd->cmd_len, cmd); | ||
2811 | goto queuing_error_fcp_cmnd; | ||
2812 | } | ||
2813 | ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; | ||
2814 | } else { | ||
2815 | additional_cdb_len = 0; | ||
2816 | ctx->fcp_cmnd_len = 12 + 16 + 4; | ||
2817 | } | ||
2818 | |||
2819 | cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; | ||
2820 | cmd_pkt->handle = MAKE_HANDLE(req->id, handle); | ||
2821 | |||
2822 | /* Zero out remaining portion of packet. */ | ||
2823 | /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ | ||
2824 | clr_ptr = (uint32_t *)cmd_pkt + 2; | ||
2825 | memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); | ||
2826 | cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); | ||
2827 | |||
2828 | /* Set NPORT-ID and LUN number*/ | ||
2829 | cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); | ||
2830 | cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; | ||
2831 | cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; | ||
2832 | cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; | ||
2833 | cmd_pkt->vp_index = sp->fcport->vp_idx; | ||
2834 | |||
2835 | /* Build IOCB segments */ | ||
2836 | if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) | ||
2837 | goto queuing_error_fcp_cmnd; | ||
2838 | |||
2839 | int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); | ||
2840 | host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); | ||
2841 | |||
2842 | /* build FCP_CMND IU */ | ||
2843 | memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); | ||
2844 | int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); | ||
2845 | ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; | ||
2846 | |||
2847 | if (cmd->sc_data_direction == DMA_TO_DEVICE) | ||
2848 | ctx->fcp_cmnd->additional_cdb_len |= 1; | ||
2849 | else if (cmd->sc_data_direction == DMA_FROM_DEVICE) | ||
2850 | ctx->fcp_cmnd->additional_cdb_len |= 2; | ||
2851 | |||
2852 | /* | ||
2853 | * Update tagged queuing modifier -- default is TSK_SIMPLE (0). | ||
2854 | */ | ||
2855 | if (scsi_populate_tag_msg(cmd, tag)) { | ||
2856 | switch (tag[0]) { | ||
2857 | case HEAD_OF_QUEUE_TAG: | ||
2858 | ctx->fcp_cmnd->task_attribute = | ||
2859 | TSK_HEAD_OF_QUEUE; | ||
2860 | break; | ||
2861 | case ORDERED_QUEUE_TAG: | ||
2862 | ctx->fcp_cmnd->task_attribute = | ||
2863 | TSK_ORDERED; | ||
2864 | break; | ||
2865 | } | ||
2866 | } | ||
2867 | |||
2868 | memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); | ||
2869 | |||
2870 | fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + | ||
2871 | additional_cdb_len); | ||
2872 | *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); | ||
2873 | |||
2874 | cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); | ||
2875 | cmd_pkt->fcp_cmnd_dseg_address[0] = | ||
2876 | cpu_to_le32(LSD(ctx->fcp_cmnd_dma)); | ||
2877 | cmd_pkt->fcp_cmnd_dseg_address[1] = | ||
2878 | cpu_to_le32(MSD(ctx->fcp_cmnd_dma)); | ||
2879 | |||
2880 | sp->flags |= SRB_FCP_CMND_DMA_VALID; | ||
2881 | cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); | ||
2882 | /* Set total data segment count. */ | ||
2883 | cmd_pkt->entry_count = (uint8_t)req_cnt; | ||
2884 | /* Specify response queue number where | ||
2885 | * completion should happen | ||
2886 | */ | ||
2887 | cmd_pkt->entry_status = (uint8_t) rsp->id; | ||
2888 | } else { | ||
2889 | struct cmd_type_7 *cmd_pkt; | ||
2890 | req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); | ||
2891 | if (req->cnt < (req_cnt + 2)) { | ||
2892 | cnt = (uint16_t)RD_REG_DWORD_RELAXED( | ||
2893 | ®->req_q_out[0]); | ||
2894 | if (req->ring_index < cnt) | ||
2895 | req->cnt = cnt - req->ring_index; | ||
2896 | else | ||
2897 | req->cnt = req->length - | ||
2898 | (req->ring_index - cnt); | ||
2899 | } | ||
2900 | if (req->cnt < (req_cnt + 2)) | ||
2901 | goto queuing_error; | ||
2902 | |||
2903 | cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; | ||
2904 | cmd_pkt->handle = MAKE_HANDLE(req->id, handle); | ||
2905 | |||
2906 | /* Zero out remaining portion of packet. */ | ||
2907 | /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ | ||
2908 | clr_ptr = (uint32_t *)cmd_pkt + 2; | ||
2909 | memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); | ||
2910 | cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); | ||
2911 | |||
2912 | /* Set NPORT-ID and LUN number*/ | ||
2913 | cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); | ||
2914 | cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; | ||
2915 | cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; | ||
2916 | cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; | ||
2917 | cmd_pkt->vp_index = sp->fcport->vp_idx; | ||
2918 | |||
2919 | int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); | ||
2920 | host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, | ||
2921 | sizeof(cmd_pkt->lun)); | ||
2922 | |||
2923 | /* | ||
2924 | * Update tagged queuing modifier -- default is TSK_SIMPLE (0). | ||
2925 | */ | ||
2926 | if (scsi_populate_tag_msg(cmd, tag)) { | ||
2927 | switch (tag[0]) { | ||
2928 | case HEAD_OF_QUEUE_TAG: | ||
2929 | cmd_pkt->task = TSK_HEAD_OF_QUEUE; | ||
2930 | break; | ||
2931 | case ORDERED_QUEUE_TAG: | ||
2932 | cmd_pkt->task = TSK_ORDERED; | ||
2933 | break; | ||
2934 | } | ||
2935 | } | ||
2936 | |||
2937 | /* Load SCSI command packet. */ | ||
2938 | memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); | ||
2939 | host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); | ||
2940 | |||
2941 | cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); | ||
2942 | |||
2943 | /* Build IOCB segments */ | ||
2944 | qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds); | ||
2945 | |||
2946 | /* Set total data segment count. */ | ||
2947 | cmd_pkt->entry_count = (uint8_t)req_cnt; | ||
2948 | /* Specify response queue number where | ||
2949 | * completion should happen. | ||
2950 | */ | ||
2951 | cmd_pkt->entry_status = (uint8_t) rsp->id; | ||
2952 | |||
2953 | } | ||
2954 | /* Build command packet. */ | ||
2955 | req->current_outstanding_cmd = handle; | ||
2956 | req->outstanding_cmds[handle] = sp; | ||
2957 | sp->handle = handle; | ||
2958 | sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; | ||
2959 | req->cnt -= req_cnt; | ||
2960 | wmb(); | ||
2961 | |||
2962 | /* Adjust ring index. */ | ||
2963 | req->ring_index++; | ||
2964 | if (req->ring_index == req->length) { | ||
2965 | req->ring_index = 0; | ||
2966 | req->ring_ptr = req->ring; | ||
2967 | } else | ||
2968 | req->ring_ptr++; | ||
2969 | |||
2970 | sp->flags |= SRB_DMA_VALID; | ||
2971 | |||
2972 | /* Set chip new ring index. */ | ||
2973 | /* write, read and verify logic */ | ||
2974 | dbval = dbval | (req->id << 8) | (req->ring_index << 16); | ||
2975 | if (ql2xdbwr) | ||
2976 | qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval); | ||
2977 | else { | ||
2978 | WRT_REG_DWORD( | ||
2979 | (unsigned long __iomem *)ha->nxdb_wr_ptr, | ||
2980 | dbval); | ||
2981 | wmb(); | ||
2982 | while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { | ||
2983 | WRT_REG_DWORD( | ||
2984 | (unsigned long __iomem *)ha->nxdb_wr_ptr, | ||
2985 | dbval); | ||
2986 | wmb(); | ||
2987 | } | ||
2988 | } | ||
2989 | |||
2990 | /* Manage unprocessed RIO/ZIO commands in response queue. */ | ||
2991 | if (vha->flags.process_response_queue && | ||
2992 | rsp->ring_ptr->signature != RESPONSE_PROCESSED) | ||
2993 | qla24xx_process_response_queue(vha, rsp); | ||
2994 | |||
2995 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
2996 | return QLA_SUCCESS; | ||
2997 | |||
2998 | queuing_error_fcp_cmnd: | ||
2999 | dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); | ||
3000 | queuing_error: | ||
3001 | if (tot_dsds) | ||
3002 | scsi_dma_unmap(cmd); | ||
3003 | |||
3004 | if (sp->ctx) { | ||
3005 | mempool_free(sp->ctx, ha->ctx_mempool); | ||
3006 | sp->ctx = NULL; | ||
3007 | } | ||
3008 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
3009 | |||
3010 | return QLA_FUNCTION_FAILED; | ||
3011 | } | ||
3012 | |||
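The removed block above closes out the ISP82xx qla82xx_start_scsi() path. Two patterns in it recur across the driver: scanning outstanding_cmds[] for a free handle starting just past the last one issued, and recomputing the free request-ring slots from the driver's ring index and the hardware's out pointer. The following standalone userspace sketch models both; ring_model, find_free_handle, free_slots and MAX_OUTSTANDING are invented names for illustration, and locking, MMIO access and the real IOCB layout are deliberately left out.

#include <stdio.h>
#include <stdint.h>

#define MAX_OUTSTANDING 32   /* stand-in for MAX_OUTSTANDING_COMMANDS */

struct ring_model {
    void *outstanding[MAX_OUTSTANDING]; /* slot 0 is never used, as in the driver */
    uint32_t current_handle;            /* last handle handed out */
    uint16_t length;                    /* total ring entries */
    uint16_t ring_index;                /* next entry the driver will write */
};

/* Find a free handle, starting just after the last one used; 0 means "full". */
static uint32_t find_free_handle(struct ring_model *r)
{
    uint32_t handle = r->current_handle;

    for (int i = 1; i < MAX_OUTSTANDING; i++) {
        handle++;
        if (handle == MAX_OUTSTANDING)
            handle = 1;
        if (!r->outstanding[handle])
            return handle;
    }
    return 0;
}

/* Recompute free slots from the hardware "out" pointer, the same arithmetic
 * the driver uses when its cached req->cnt looks too small for the request. */
static uint16_t free_slots(struct ring_model *r, uint16_t hw_out)
{
    if (r->ring_index < hw_out)
        return hw_out - r->ring_index;
    return r->length - (r->ring_index - hw_out);
}

int main(void)
{
    struct ring_model r = { .current_handle = 5, .length = 128, .ring_index = 100 };
    int dummy;

    r.outstanding[6] = &dummy;                               /* handle 6 is busy */
    printf("next free handle: %u\n", find_free_handle(&r));  /* prints 7 */
    printf("free ring slots:  %u\n", free_slots(&r, 40));    /* 128 - 60 = 68 */
    return 0;
}

Slot 0 is skipped on purpose: the driver reserves handle 0 to mean "no command", which is why both the search in this sketch and the loop above start at index 1.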
2518 | static uint32_t * | 3013 | static uint32_t * |
2519 | qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, | 3014 | qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, |
2520 | uint32_t length) | 3015 | uint32_t length) |
@@ -2668,7 +3163,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr, | |||
2668 | if (!optrom) { | 3163 | if (!optrom) { |
2669 | ql_log(ql_log_warn, vha, 0xb01b, | 3164 | ql_log(ql_log_warn, vha, 0xb01b, |
2670 | "Unable to allocate memory " | 3165 | "Unable to allocate memory " |
2671 | "for optrom burst write (%x KB).\n", | 3166 | "for optron burst write (%x KB).\n", |
2672 | OPTROM_BURST_SIZE / 1024); | 3167 | OPTROM_BURST_SIZE / 1024); |
2673 | } | 3168 | } |
2674 | } | 3169 | } |
@@ -2766,9 +3261,9 @@ qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, | |||
2766 | } | 3261 | } |
2767 | 3262 | ||
2768 | void | 3263 | void |
2769 | qla82xx_start_iocbs(scsi_qla_host_t *vha) | 3264 | qla82xx_start_iocbs(srb_t *sp) |
2770 | { | 3265 | { |
2771 | struct qla_hw_data *ha = vha->hw; | 3266 | struct qla_hw_data *ha = sp->fcport->vha->hw; |
2772 | struct req_que *req = ha->req_q_map[0]; | 3267 | struct req_que *req = ha->req_q_map[0]; |
2773 | struct device_reg_82xx __iomem *reg; | 3268 | struct device_reg_82xx __iomem *reg; |
2774 | uint32_t dbval; | 3269 | uint32_t dbval; |
@@ -2790,7 +3285,7 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha) | |||
2790 | else { | 3285 | else { |
2791 | WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval); | 3286 | WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval); |
2792 | wmb(); | 3287 | wmb(); |
2793 | while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) { | 3288 | while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { |
2794 | WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, | 3289 | WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, |
2795 | dbval); | 3290 | dbval); |
2796 | wmb(); | 3291 | wmb(); |
@@ -2798,8 +3293,7 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha) | |||
2798 | } | 3293 | } |
2799 | } | 3294 | } |
2800 | 3295 | ||
2801 | static void | 3296 | void qla82xx_rom_lock_recovery(struct qla_hw_data *ha) |
2802 | qla82xx_rom_lock_recovery(struct qla_hw_data *ha) | ||
2803 | { | 3297 | { |
2804 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); | 3298 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); |
2805 | 3299 | ||
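The qla82xx_start_iocbs() hunk above keeps the write, read back and re-write doorbell sequence also seen earlier in qla82xx_start_scsi(). A compact model of that pattern, with the device's side of the latch simulated by hw_latch() (an invented helper) and the kernel's wmb() barriers omitted, might look like this:

#include <stdint.h>
#include <stdio.h>

/* Simulated doorbell registers; on real hardware these are MMIO and the
 * readback can lag the write, which is why the driver loops. */
static volatile uint32_t nxdb_wr;
static volatile uint32_t nxdb_rd;

static void hw_latch(void) { nxdb_rd = nxdb_wr; } /* stands in for the ASIC */

/* Write the doorbell value and re-post it until the read side reflects it. */
static void doorbell_write_verify(uint32_t dbval)
{
    nxdb_wr = dbval;
    hw_latch();                    /* in the driver, the device does this */
    while (nxdb_rd != dbval) {
        nxdb_wr = dbval;           /* re-issue the write */
        hw_latch();
    }
}

int main(void)
{
    uint32_t dbval = 0x04 | (1u << 8) | (101u << 16); /* cmd | req id | ring index */
    doorbell_write_verify(dbval);
    printf("doorbell settled at 0x%08x\n", (uint32_t)nxdb_rd);
    return 0;
}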
@@ -2844,7 +3338,7 @@ qla82xx_device_bootstrap(scsi_qla_host_t *vha) | |||
2844 | timeout = msleep_interruptible(200); | 3338 | timeout = msleep_interruptible(200); |
2845 | if (timeout) { | 3339 | if (timeout) { |
2846 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, | 3340 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, |
2847 | QLA8XXX_DEV_FAILED); | 3341 | QLA82XX_DEV_FAILED); |
2848 | return QLA_FUNCTION_FAILED; | 3342 | return QLA_FUNCTION_FAILED; |
2849 | } | 3343 | } |
2850 | 3344 | ||
@@ -2875,7 +3369,10 @@ dev_initialize: | |||
2875 | /* set to DEV_INITIALIZING */ | 3369 | /* set to DEV_INITIALIZING */ |
2876 | ql_log(ql_log_info, vha, 0x009e, | 3370 | ql_log(ql_log_info, vha, 0x009e, |
2877 | "HW State: INITIALIZING.\n"); | 3371 | "HW State: INITIALIZING.\n"); |
2878 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_INITIALIZING); | 3372 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING); |
3373 | |||
3374 | /* Driver that sets device state to initializating sets IDC version */ | ||
3375 | qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION); | ||
2879 | 3376 | ||
2880 | qla82xx_idc_unlock(ha); | 3377 | qla82xx_idc_unlock(ha); |
2881 | rval = qla82xx_start_firmware(vha); | 3378 | rval = qla82xx_start_firmware(vha); |
@@ -2885,14 +3382,14 @@ dev_initialize: | |||
2885 | ql_log(ql_log_fatal, vha, 0x00ad, | 3382 | ql_log(ql_log_fatal, vha, 0x00ad, |
2886 | "HW State: FAILED.\n"); | 3383 | "HW State: FAILED.\n"); |
2887 | qla82xx_clear_drv_active(ha); | 3384 | qla82xx_clear_drv_active(ha); |
2888 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED); | 3385 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED); |
2889 | return rval; | 3386 | return rval; |
2890 | } | 3387 | } |
2891 | 3388 | ||
2892 | dev_ready: | 3389 | dev_ready: |
2893 | ql_log(ql_log_info, vha, 0x00ae, | 3390 | ql_log(ql_log_info, vha, 0x00ae, |
2894 | "HW State: READY.\n"); | 3391 | "HW State: READY.\n"); |
2895 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_READY); | 3392 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY); |
2896 | 3393 | ||
2897 | return QLA_SUCCESS; | 3394 | return QLA_SUCCESS; |
2898 | } | 3395 | } |
@@ -2916,7 +3413,7 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha) | |||
2916 | 3413 | ||
2917 | if (vha->flags.online) { | 3414 | if (vha->flags.online) { |
2918 | /*Block any further I/O and wait for pending cmnds to complete*/ | 3415 | /*Block any further I/O and wait for pending cmnds to complete*/ |
2919 | qla2x00_quiesce_io(vha); | 3416 | qla82xx_quiescent_state_cleanup(vha); |
2920 | } | 3417 | } |
2921 | 3418 | ||
2922 | /* Set the quiescence ready bit */ | 3419 | /* Set the quiescence ready bit */ |
@@ -2937,11 +3434,12 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha) | |||
2937 | * changing the state to DEV_READY | 3434 | * changing the state to DEV_READY |
2938 | */ | 3435 | */ |
2939 | ql_log(ql_log_info, vha, 0xb023, | 3436 | ql_log(ql_log_info, vha, 0xb023, |
2940 | "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d " | 3437 | "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME); |
2941 | "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME, | 3438 | ql_log(ql_log_info, vha, 0xb024, |
3439 | "DRV_ACTIVE:%d DRV_STATE:%d.\n", | ||
2942 | drv_active, drv_state); | 3440 | drv_active, drv_state); |
2943 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, | 3441 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, |
2944 | QLA8XXX_DEV_READY); | 3442 | QLA82XX_DEV_READY); |
2945 | ql_log(ql_log_info, vha, 0xb025, | 3443 | ql_log(ql_log_info, vha, 0xb025, |
2946 | "HW State: DEV_READY.\n"); | 3444 | "HW State: DEV_READY.\n"); |
2947 | qla82xx_idc_unlock(ha); | 3445 | qla82xx_idc_unlock(ha); |
@@ -2962,10 +3460,10 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha) | |||
2962 | } | 3460 | } |
2963 | dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); | 3461 | dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); |
2964 | /* everyone acked so set the state to DEV_QUIESCENCE */ | 3462 | /* everyone acked so set the state to DEV_QUIESCENCE */ |
2965 | if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { | 3463 | if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) { |
2966 | ql_log(ql_log_info, vha, 0xb026, | 3464 | ql_log(ql_log_info, vha, 0xb026, |
2967 | "HW State: DEV_QUIESCENT.\n"); | 3465 | "HW State: DEV_QUIESCENT.\n"); |
2968 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_QUIESCENT); | 3466 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT); |
2969 | } | 3467 | } |
2970 | } | 3468 | } |
2971 | 3469 | ||
@@ -2995,8 +3493,8 @@ qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state) | |||
2995 | return dev_state; | 3493 | return dev_state; |
2996 | } | 3494 | } |
2997 | 3495 | ||
2998 | void | 3496 | static void |
2999 | qla8xxx_dev_failed_handler(scsi_qla_host_t *vha) | 3497 | qla82xx_dev_failed_handler(scsi_qla_host_t *vha) |
3000 | { | 3498 | { |
3001 | struct qla_hw_data *ha = vha->hw; | 3499 | struct qla_hw_data *ha = vha->hw; |
3002 | 3500 | ||
@@ -3004,10 +3502,9 @@ qla8xxx_dev_failed_handler(scsi_qla_host_t *vha) | |||
3004 | ql_log(ql_log_fatal, vha, 0x00b8, | 3502 | ql_log(ql_log_fatal, vha, 0x00b8, |
3005 | "Disabling the board.\n"); | 3503 | "Disabling the board.\n"); |
3006 | 3504 | ||
3007 | if (IS_QLA82XX(ha)) { | 3505 | qla82xx_idc_lock(ha); |
3008 | qla82xx_clear_drv_active(ha); | 3506 | qla82xx_clear_drv_active(ha); |
3009 | qla82xx_idc_unlock(ha); | 3507 | qla82xx_idc_unlock(ha); |
3010 | } | ||
3011 | 3508 | ||
3012 | /* Set DEV_FAILED flag to disable timer */ | 3509 | /* Set DEV_FAILED flag to disable timer */ |
3013 | vha->device_flags |= DFLG_DEV_FAILED; | 3510 | vha->device_flags |= DFLG_DEV_FAILED; |
@@ -3032,7 +3529,6 @@ static void | |||
3032 | qla82xx_need_reset_handler(scsi_qla_host_t *vha) | 3529 | qla82xx_need_reset_handler(scsi_qla_host_t *vha) |
3033 | { | 3530 | { |
3034 | uint32_t dev_state, drv_state, drv_active; | 3531 | uint32_t dev_state, drv_state, drv_active; |
3035 | uint32_t active_mask = 0; | ||
3036 | unsigned long reset_timeout; | 3532 | unsigned long reset_timeout; |
3037 | struct qla_hw_data *ha = vha->hw; | 3533 | struct qla_hw_data *ha = vha->hw; |
3038 | struct req_que *req = ha->req_q_map[0]; | 3534 | struct req_que *req = ha->req_q_map[0]; |
@@ -3045,32 +3541,15 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha) | |||
3045 | qla82xx_idc_lock(ha); | 3541 | qla82xx_idc_lock(ha); |
3046 | } | 3542 | } |
3047 | 3543 | ||
3048 | drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); | 3544 | qla82xx_set_rst_ready(ha); |
3049 | if (!ha->flags.nic_core_reset_owner) { | ||
3050 | ql_dbg(ql_dbg_p3p, vha, 0xb028, | ||
3051 | "reset_acknowledged by 0x%x\n", ha->portnum); | ||
3052 | qla82xx_set_rst_ready(ha); | ||
3053 | } else { | ||
3054 | active_mask = ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); | ||
3055 | drv_active &= active_mask; | ||
3056 | ql_dbg(ql_dbg_p3p, vha, 0xb029, | ||
3057 | "active_mask: 0x%08x\n", active_mask); | ||
3058 | } | ||
3059 | 3545 | ||
3060 | /* wait for 10 seconds for reset ack from all functions */ | 3546 | /* wait for 10 seconds for reset ack from all functions */ |
3061 | reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); | 3547 | reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); |
3062 | 3548 | ||
3063 | drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); | 3549 | drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); |
3064 | drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); | 3550 | drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); |
3065 | dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); | ||
3066 | |||
3067 | ql_dbg(ql_dbg_p3p, vha, 0xb02a, | ||
3068 | "drv_state: 0x%08x, drv_active: 0x%08x, " | ||
3069 | "dev_state: 0x%08x, active_mask: 0x%08x\n", | ||
3070 | drv_state, drv_active, dev_state, active_mask); | ||
3071 | 3551 | ||
3072 | while (drv_state != drv_active && | 3552 | while (drv_state != drv_active) { |
3073 | dev_state != QLA8XXX_DEV_INITIALIZING) { | ||
3074 | if (time_after_eq(jiffies, reset_timeout)) { | 3553 | if (time_after_eq(jiffies, reset_timeout)) { |
3075 | ql_log(ql_log_warn, vha, 0x00b5, | 3554 | ql_log(ql_log_warn, vha, 0x00b5, |
3076 | "Reset timeout.\n"); | 3555 | "Reset timeout.\n"); |
@@ -3081,81 +3560,23 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha) | |||
3081 | qla82xx_idc_lock(ha); | 3560 | qla82xx_idc_lock(ha); |
3082 | drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); | 3561 | drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); |
3083 | drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); | 3562 | drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); |
3084 | if (ha->flags.nic_core_reset_owner) | ||
3085 | drv_active &= active_mask; | ||
3086 | dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); | ||
3087 | } | 3563 | } |
3088 | 3564 | ||
3089 | ql_dbg(ql_dbg_p3p, vha, 0xb02b, | 3565 | dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); |
3090 | "drv_state: 0x%08x, drv_active: 0x%08x, " | ||
3091 | "dev_state: 0x%08x, active_mask: 0x%08x\n", | ||
3092 | drv_state, drv_active, dev_state, active_mask); | ||
3093 | |||
3094 | ql_log(ql_log_info, vha, 0x00b6, | 3566 | ql_log(ql_log_info, vha, 0x00b6, |
3095 | "Device state is 0x%x = %s.\n", | 3567 | "Device state is 0x%x = %s.\n", |
3096 | dev_state, | 3568 | dev_state, |
3097 | dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown"); | 3569 | dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); |
3098 | 3570 | ||
3099 | /* Force to DEV_COLD unless someone else is starting a reset */ | 3571 | /* Force to DEV_COLD unless someone else is starting a reset */ |
3100 | if (dev_state != QLA8XXX_DEV_INITIALIZING && | 3572 | if (dev_state != QLA82XX_DEV_INITIALIZING) { |
3101 | dev_state != QLA8XXX_DEV_COLD) { | ||
3102 | ql_log(ql_log_info, vha, 0x00b7, | 3573 | ql_log(ql_log_info, vha, 0x00b7, |
3103 | "HW State: COLD/RE-INIT.\n"); | 3574 | "HW State: COLD/RE-INIT.\n"); |
3104 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD); | 3575 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); |
3105 | qla82xx_set_rst_ready(ha); | ||
3106 | if (ql2xmdenable) { | ||
3107 | if (qla82xx_md_collect(vha)) | ||
3108 | ql_log(ql_log_warn, vha, 0xb02c, | ||
3109 | "Minidump not collected.\n"); | ||
3110 | } else | ||
3111 | ql_log(ql_log_warn, vha, 0xb04f, | ||
3112 | "Minidump disabled.\n"); | ||
3113 | } | 3576 | } |
3114 | } | 3577 | } |
3115 | 3578 | ||
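The reset handler above waits up to a fixed number of seconds for every function to acknowledge the reset, re-reading the DRV_STATE and DRV_ACTIVE registers on each pass. Stripped of the IDC lock handling, that wait is an ordinary deadline poll; here is a self-contained sketch using wall-clock time in place of jiffies (poll_with_deadline and all_functions_acked are invented names for illustration):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Poll a condition until it holds or a deadline passes, the same shape as the
 * 10-second reset-ack wait above (deadline = jiffies + timeout, time_after_eq). */
static bool poll_with_deadline(bool (*done)(void), int timeout_s, int poll_ms)
{
    time_t deadline = time(NULL) + timeout_s;

    while (!done()) {
        if (time(NULL) >= deadline)
            return false;                 /* the driver logs "Reset timeout." here */
        usleep(poll_ms * 1000);           /* the driver drops the lock and sleeps */
    }
    return true;
}

static int calls;
static bool all_functions_acked(void) { return ++calls >= 3; }

int main(void)
{
    bool ok = poll_with_deadline(all_functions_acked, 10, 100);
    printf("reset ack %s after %d polls\n", ok ? "seen" : "timed out", calls);
    return 0;
}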
3116 | int | 3579 | int |
3117 | qla82xx_check_md_needed(scsi_qla_host_t *vha) | ||
3118 | { | ||
3119 | struct qla_hw_data *ha = vha->hw; | ||
3120 | uint16_t fw_major_version, fw_minor_version, fw_subminor_version; | ||
3121 | int rval = QLA_SUCCESS; | ||
3122 | |||
3123 | fw_major_version = ha->fw_major_version; | ||
3124 | fw_minor_version = ha->fw_minor_version; | ||
3125 | fw_subminor_version = ha->fw_subminor_version; | ||
3126 | |||
3127 | rval = qla2x00_get_fw_version(vha); | ||
3128 | if (rval != QLA_SUCCESS) | ||
3129 | return rval; | ||
3130 | |||
3131 | if (ql2xmdenable) { | ||
3132 | if (!ha->fw_dumped) { | ||
3133 | if (fw_major_version != ha->fw_major_version || | ||
3134 | fw_minor_version != ha->fw_minor_version || | ||
3135 | fw_subminor_version != ha->fw_subminor_version) { | ||
3136 | ql_log(ql_log_info, vha, 0xb02d, | ||
3137 | "Firmware version differs " | ||
3138 | "Previous version: %d:%d:%d - " | ||
3139 | "New version: %d:%d:%d\n", | ||
3140 | fw_major_version, fw_minor_version, | ||
3141 | fw_subminor_version, | ||
3142 | ha->fw_major_version, | ||
3143 | ha->fw_minor_version, | ||
3144 | ha->fw_subminor_version); | ||
3145 | /* Release MiniDump resources */ | ||
3146 | qla82xx_md_free(vha); | ||
3147 | /* ALlocate MiniDump resources */ | ||
3148 | qla82xx_md_prep(vha); | ||
3149 | } | ||
3150 | } else | ||
3151 | ql_log(ql_log_info, vha, 0xb02e, | ||
3152 | "Firmware dump available to retrieve\n"); | ||
3153 | } | ||
3154 | return rval; | ||
3155 | } | ||
3156 | |||
3157 | |||
3158 | static int | ||
3159 | qla82xx_check_fw_alive(scsi_qla_host_t *vha) | 3580 | qla82xx_check_fw_alive(scsi_qla_host_t *vha) |
3160 | { | 3581 | { |
3161 | uint32_t fw_heartbeat_counter; | 3582 | uint32_t fw_heartbeat_counter; |
@@ -3208,20 +3629,18 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) | |||
3208 | int loopcount = 0; | 3629 | int loopcount = 0; |
3209 | 3630 | ||
3210 | qla82xx_idc_lock(ha); | 3631 | qla82xx_idc_lock(ha); |
3211 | if (!vha->flags.init_done) { | 3632 | if (!vha->flags.init_done) |
3212 | qla82xx_set_drv_active(vha); | 3633 | qla82xx_set_drv_active(vha); |
3213 | qla82xx_set_idc_version(vha); | ||
3214 | } | ||
3215 | 3634 | ||
3216 | dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); | 3635 | dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); |
3217 | old_dev_state = dev_state; | 3636 | old_dev_state = dev_state; |
3218 | ql_log(ql_log_info, vha, 0x009b, | 3637 | ql_log(ql_log_info, vha, 0x009b, |
3219 | "Device state is 0x%x = %s.\n", | 3638 | "Device state is 0x%x = %s.\n", |
3220 | dev_state, | 3639 | dev_state, |
3221 | dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown"); | 3640 | dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); |
3222 | 3641 | ||
3223 | /* wait for 30 seconds for device to go ready */ | 3642 | /* wait for 30 seconds for device to go ready */ |
3224 | dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); | 3643 | dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); |
3225 | 3644 | ||
3226 | while (1) { | 3645 | while (1) { |
3227 | 3646 | ||
@@ -3240,56 +3659,50 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) | |||
3240 | ql_log(ql_log_info, vha, 0x009d, | 3659 | ql_log(ql_log_info, vha, 0x009d, |
3241 | "Device state is 0x%x = %s.\n", | 3660 | "Device state is 0x%x = %s.\n", |
3242 | dev_state, | 3661 | dev_state, |
3243 | dev_state < MAX_STATES ? qdev_state(dev_state) : | 3662 | dev_state < MAX_STATES ? qdev_state[dev_state] : |
3244 | "Unknown"); | 3663 | "Unknown"); |
3245 | } | 3664 | } |
3246 | 3665 | ||
3247 | switch (dev_state) { | 3666 | switch (dev_state) { |
3248 | case QLA8XXX_DEV_READY: | 3667 | case QLA82XX_DEV_READY: |
3249 | ha->flags.nic_core_reset_owner = 0; | 3668 | goto exit; |
3250 | goto rel_lock; | 3669 | case QLA82XX_DEV_COLD: |
3251 | case QLA8XXX_DEV_COLD: | ||
3252 | rval = qla82xx_device_bootstrap(vha); | 3670 | rval = qla82xx_device_bootstrap(vha); |
3253 | break; | 3671 | goto exit; |
3254 | case QLA8XXX_DEV_INITIALIZING: | 3672 | case QLA82XX_DEV_INITIALIZING: |
3255 | qla82xx_idc_unlock(ha); | 3673 | qla82xx_idc_unlock(ha); |
3256 | msleep(1000); | 3674 | msleep(1000); |
3257 | qla82xx_idc_lock(ha); | 3675 | qla82xx_idc_lock(ha); |
3258 | break; | 3676 | break; |
3259 | case QLA8XXX_DEV_NEED_RESET: | 3677 | case QLA82XX_DEV_NEED_RESET: |
3260 | if (!ql2xdontresethba) | 3678 | if (!ql2xdontresethba) |
3261 | qla82xx_need_reset_handler(vha); | 3679 | qla82xx_need_reset_handler(vha); |
3262 | else { | ||
3263 | qla82xx_idc_unlock(ha); | ||
3264 | msleep(1000); | ||
3265 | qla82xx_idc_lock(ha); | ||
3266 | } | ||
3267 | dev_init_timeout = jiffies + | 3680 | dev_init_timeout = jiffies + |
3268 | (ha->fcoe_dev_init_timeout * HZ); | 3681 | (ha->nx_dev_init_timeout * HZ); |
3269 | break; | 3682 | break; |
3270 | case QLA8XXX_DEV_NEED_QUIESCENT: | 3683 | case QLA82XX_DEV_NEED_QUIESCENT: |
3271 | qla82xx_need_qsnt_handler(vha); | 3684 | qla82xx_need_qsnt_handler(vha); |
3272 | /* Reset timeout value after quiescence handler */ | 3685 | /* Reset timeout value after quiescence handler */ |
3273 | dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\ | 3686 | dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\ |
3274 | * HZ); | 3687 | * HZ); |
3275 | break; | 3688 | break; |
3276 | case QLA8XXX_DEV_QUIESCENT: | 3689 | case QLA82XX_DEV_QUIESCENT: |
3277 | /* Owner will exit and other will wait for the state | 3690 | /* Owner will exit and other will wait for the state |
3278 | * to get changed | 3691 | * to get changed |
3279 | */ | 3692 | */ |
3280 | if (ha->flags.quiesce_owner) | 3693 | if (ha->flags.quiesce_owner) |
3281 | goto rel_lock; | 3694 | goto exit; |
3282 | 3695 | ||
3283 | qla82xx_idc_unlock(ha); | 3696 | qla82xx_idc_unlock(ha); |
3284 | msleep(1000); | 3697 | msleep(1000); |
3285 | qla82xx_idc_lock(ha); | 3698 | qla82xx_idc_lock(ha); |
3286 | 3699 | ||
3287 | /* Reset timeout value after quiescence handler */ | 3700 | /* Reset timeout value after quiescence handler */ |
3288 | dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\ | 3701 | dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\ |
3289 | * HZ); | 3702 | * HZ); |
3290 | break; | 3703 | break; |
3291 | case QLA8XXX_DEV_FAILED: | 3704 | case QLA82XX_DEV_FAILED: |
3292 | qla8xxx_dev_failed_handler(vha); | 3705 | qla82xx_dev_failed_handler(vha); |
3293 | rval = QLA_FUNCTION_FAILED; | 3706 | rval = QLA_FUNCTION_FAILED; |
3294 | goto exit; | 3707 | goto exit; |
3295 | default: | 3708 | default: |
@@ -3299,89 +3712,36 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) | |||
3299 | } | 3712 | } |
3300 | loopcount++; | 3713 | loopcount++; |
3301 | } | 3714 | } |
3302 | rel_lock: | ||
3303 | qla82xx_idc_unlock(ha); | ||
3304 | exit: | 3715 | exit: |
3716 | qla82xx_idc_unlock(ha); | ||
3305 | return rval; | 3717 | return rval; |
3306 | } | 3718 | } |
3307 | 3719 | ||
3308 | static int qla82xx_check_temp(scsi_qla_host_t *vha) | ||
3309 | { | ||
3310 | uint32_t temp, temp_state, temp_val; | ||
3311 | struct qla_hw_data *ha = vha->hw; | ||
3312 | |||
3313 | temp = qla82xx_rd_32(ha, CRB_TEMP_STATE); | ||
3314 | temp_state = qla82xx_get_temp_state(temp); | ||
3315 | temp_val = qla82xx_get_temp_val(temp); | ||
3316 | |||
3317 | if (temp_state == QLA82XX_TEMP_PANIC) { | ||
3318 | ql_log(ql_log_warn, vha, 0x600e, | ||
3319 | "Device temperature %d degrees C exceeds " | ||
3320 | " maximum allowed. Hardware has been shut down.\n", | ||
3321 | temp_val); | ||
3322 | return 1; | ||
3323 | } else if (temp_state == QLA82XX_TEMP_WARN) { | ||
3324 | ql_log(ql_log_warn, vha, 0x600f, | ||
3325 | "Device temperature %d degrees C exceeds " | ||
3326 | "operating range. Immediate action needed.\n", | ||
3327 | temp_val); | ||
3328 | } | ||
3329 | return 0; | ||
3330 | } | ||
3331 | |||
3332 | void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha) | ||
3333 | { | ||
3334 | struct qla_hw_data *ha = vha->hw; | ||
3335 | |||
3336 | if (ha->flags.mbox_busy) { | ||
3337 | ha->flags.mbox_int = 1; | ||
3338 | ha->flags.mbox_busy = 0; | ||
3339 | ql_log(ql_log_warn, vha, 0x6010, | ||
3340 | "Doing premature completion of mbx command.\n"); | ||
3341 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) | ||
3342 | complete(&ha->mbx_intr_comp); | ||
3343 | } | ||
3344 | } | ||
3345 | |||
3346 | void qla82xx_watchdog(scsi_qla_host_t *vha) | 3720 | void qla82xx_watchdog(scsi_qla_host_t *vha) |
3347 | { | 3721 | { |
3348 | uint32_t dev_state, halt_status; | 3722 | uint32_t dev_state, halt_status; |
3349 | struct qla_hw_data *ha = vha->hw; | 3723 | struct qla_hw_data *ha = vha->hw; |
3350 | 3724 | ||
3351 | /* don't poll if reset is going on */ | 3725 | /* don't poll if reset is going on */ |
3352 | if (!ha->flags.nic_core_reset_hdlr_active) { | 3726 | if (!ha->flags.isp82xx_reset_hdlr_active) { |
3353 | dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); | 3727 | dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); |
3354 | if (qla82xx_check_temp(vha)) { | 3728 | if (dev_state == QLA82XX_DEV_NEED_RESET && |
3355 | set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); | ||
3356 | ha->flags.isp82xx_fw_hung = 1; | ||
3357 | qla82xx_clear_pending_mbx(vha); | ||
3358 | } else if (dev_state == QLA8XXX_DEV_NEED_RESET && | ||
3359 | !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { | 3729 | !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { |
3360 | ql_log(ql_log_warn, vha, 0x6001, | 3730 | ql_log(ql_log_warn, vha, 0x6001, |
3361 | "Adapter reset needed.\n"); | 3731 | "Adapter reset needed.\n"); |
3362 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | 3732 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); |
3363 | } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && | 3733 | qla2xxx_wake_dpc(vha); |
3734 | } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && | ||
3364 | !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { | 3735 | !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { |
3365 | ql_log(ql_log_warn, vha, 0x6002, | 3736 | ql_log(ql_log_warn, vha, 0x6002, |
3366 | "Quiescent needed.\n"); | 3737 | "Quiescent needed.\n"); |
3367 | set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); | 3738 | set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); |
3368 | } else if (dev_state == QLA8XXX_DEV_FAILED && | 3739 | qla2xxx_wake_dpc(vha); |
3369 | !test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) && | ||
3370 | vha->flags.online == 1) { | ||
3371 | ql_log(ql_log_warn, vha, 0xb055, | ||
3372 | "Adapter state is failed. Offlining.\n"); | ||
3373 | set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); | ||
3374 | ha->flags.isp82xx_fw_hung = 1; | ||
3375 | qla82xx_clear_pending_mbx(vha); | ||
3376 | } else { | 3740 | } else { |
3377 | if (qla82xx_check_fw_alive(vha)) { | 3741 | if (qla82xx_check_fw_alive(vha)) { |
3378 | ql_dbg(ql_dbg_timer, vha, 0x6011, | ||
3379 | "disabling pause transmit on port 0 & 1.\n"); | ||
3380 | qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, | ||
3381 | CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1); | ||
3382 | halt_status = qla82xx_rd_32(ha, | 3742 | halt_status = qla82xx_rd_32(ha, |
3383 | QLA82XX_PEG_HALT_STATUS1); | 3743 | QLA82XX_PEG_HALT_STATUS1); |
3384 | ql_log(ql_log_info, vha, 0x6005, | 3744 | ql_dbg(ql_dbg_timer, vha, 0x6005, |
3385 | "dumping hw/fw registers:.\n " | 3745 | "dumping hw/fw registers:.\n " |
3386 | " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n " | 3746 | " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n " |
3387 | " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n " | 3747 | " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n " |
@@ -3398,11 +3758,6 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) | |||
3398 | QLA82XX_CRB_PEG_NET_3 + 0x3c), | 3758 | QLA82XX_CRB_PEG_NET_3 + 0x3c), |
3399 | qla82xx_rd_32(ha, | 3759 | qla82xx_rd_32(ha, |
3400 | QLA82XX_CRB_PEG_NET_4 + 0x3c)); | 3760 | QLA82XX_CRB_PEG_NET_4 + 0x3c)); |
3401 | if (((halt_status & 0x1fffff00) >> 8) == 0x67) | ||
3402 | ql_log(ql_log_warn, vha, 0xb052, | ||
3403 | "Firmware aborted with " | ||
3404 | "error code 0x00006700. Device is " | ||
3405 | "being reset.\n"); | ||
3406 | if (halt_status & HALT_STATUS_UNRECOVERABLE) { | 3761 | if (halt_status & HALT_STATUS_UNRECOVERABLE) { |
3407 | set_bit(ISP_UNRECOVERABLE, | 3762 | set_bit(ISP_UNRECOVERABLE, |
3408 | &vha->dpc_flags); | 3763 | &vha->dpc_flags); |
@@ -3412,9 +3767,18 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) | |||
3412 | set_bit(ISP_ABORT_NEEDED, | 3767 | set_bit(ISP_ABORT_NEEDED, |
3413 | &vha->dpc_flags); | 3768 | &vha->dpc_flags); |
3414 | } | 3769 | } |
3770 | qla2xxx_wake_dpc(vha); | ||
3415 | ha->flags.isp82xx_fw_hung = 1; | 3771 | ha->flags.isp82xx_fw_hung = 1; |
3416 | ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n"); | 3772 | if (ha->flags.mbox_busy) { |
3417 | qla82xx_clear_pending_mbx(vha); | 3773 | ha->flags.mbox_int = 1; |
3774 | ql_log(ql_log_warn, vha, 0x6007, | ||
3775 | "Due to FW hung, doing " | ||
3776 | "premature completion of mbx " | ||
3777 | "command.\n"); | ||
3778 | if (test_bit(MBX_INTR_WAIT, | ||
3779 | &ha->mbx_cmd_flags)) | ||
3780 | complete(&ha->mbx_intr_comp); | ||
3781 | } | ||
3418 | } | 3782 | } |
3419 | } | 3783 | } |
3420 | } | 3784 | } |
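The watchdog above escalates to an ISP abort when qla82xx_check_fw_alive() reports that the firmware heartbeat has stopped advancing. A minimal model of that heartbeat test, with an invented hb_monitor struct and an arbitrary stall threshold standing in for the driver's internal bookkeeping, is:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Heartbeat monitor in the spirit of qla82xx_check_fw_alive(): the firmware
 * bumps a counter on its own; if the value stops moving for several polls in
 * a row, declare the firmware hung. */
struct hb_monitor {
    uint32_t last;        /* counter value seen on the previous poll */
    int      stalled;     /* consecutive polls with no movement */
};

static bool fw_hung(struct hb_monitor *m, uint32_t counter_now, int threshold)
{
    if (counter_now != m->last) {
        m->last = counter_now;
        m->stalled = 0;
        return false;
    }
    return ++m->stalled >= threshold;
}

int main(void)
{
    struct hb_monitor m = { .last = 0, .stalled = 0 };
    uint32_t samples[] = { 1, 2, 3, 3, 3 };   /* firmware stalls at 3 */

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("poll %u: counter=%u hung=%d\n", i, samples[i],
               fw_hung(&m, samples[i], 2));
    return 0;
}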
@@ -3427,28 +3791,6 @@ int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) | |||
3427 | return rval; | 3791 | return rval; |
3428 | } | 3792 | } |
3429 | 3793 | ||
3430 | void | ||
3431 | qla82xx_set_reset_owner(scsi_qla_host_t *vha) | ||
3432 | { | ||
3433 | struct qla_hw_data *ha = vha->hw; | ||
3434 | uint32_t dev_state; | ||
3435 | |||
3436 | dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); | ||
3437 | if (dev_state == QLA8XXX_DEV_READY) { | ||
3438 | ql_log(ql_log_info, vha, 0xb02f, | ||
3439 | "HW State: NEED RESET\n"); | ||
3440 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, | ||
3441 | QLA8XXX_DEV_NEED_RESET); | ||
3442 | ha->flags.nic_core_reset_owner = 1; | ||
3443 | ql_dbg(ql_dbg_p3p, vha, 0xb030, | ||
3444 | "reset_owner is 0x%x\n", ha->portnum); | ||
3445 | } else | ||
3446 | ql_log(ql_log_info, vha, 0xb031, | ||
3447 | "Device state is 0x%x = %s.\n", | ||
3448 | dev_state, | ||
3449 | dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown"); | ||
3450 | } | ||
3451 | |||
3452 | /* | 3794 | /* |
3453 | * qla82xx_abort_isp | 3795 | * qla82xx_abort_isp |
3454 | * Resets ISP and aborts all outstanding commands. | 3796 | * Resets ISP and aborts all outstanding commands. |
@@ -3464,16 +3806,26 @@ qla82xx_abort_isp(scsi_qla_host_t *vha) | |||
3464 | { | 3806 | { |
3465 | int rval; | 3807 | int rval; |
3466 | struct qla_hw_data *ha = vha->hw; | 3808 | struct qla_hw_data *ha = vha->hw; |
3809 | uint32_t dev_state; | ||
3467 | 3810 | ||
3468 | if (vha->device_flags & DFLG_DEV_FAILED) { | 3811 | if (vha->device_flags & DFLG_DEV_FAILED) { |
3469 | ql_log(ql_log_warn, vha, 0x8024, | 3812 | ql_log(ql_log_warn, vha, 0x8024, |
3470 | "Device in failed state, exiting.\n"); | 3813 | "Device in failed state, exiting.\n"); |
3471 | return QLA_SUCCESS; | 3814 | return QLA_SUCCESS; |
3472 | } | 3815 | } |
3473 | ha->flags.nic_core_reset_hdlr_active = 1; | 3816 | ha->flags.isp82xx_reset_hdlr_active = 1; |
3474 | 3817 | ||
3475 | qla82xx_idc_lock(ha); | 3818 | qla82xx_idc_lock(ha); |
3476 | qla82xx_set_reset_owner(vha); | 3819 | dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); |
3820 | if (dev_state == QLA82XX_DEV_READY) { | ||
3821 | ql_log(ql_log_info, vha, 0x8025, | ||
3822 | "HW State: NEED RESET.\n"); | ||
3823 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, | ||
3824 | QLA82XX_DEV_NEED_RESET); | ||
3825 | } else | ||
3826 | ql_log(ql_log_info, vha, 0x8026, | ||
3827 | "Hw State: %s.\n", dev_state < MAX_STATES ? | ||
3828 | qdev_state[dev_state] : "Unknown"); | ||
3477 | qla82xx_idc_unlock(ha); | 3829 | qla82xx_idc_unlock(ha); |
3478 | 3830 | ||
3479 | rval = qla82xx_device_state_handler(vha); | 3831 | rval = qla82xx_device_state_handler(vha); |
@@ -3484,7 +3836,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha) | |||
3484 | 3836 | ||
3485 | if (rval == QLA_SUCCESS) { | 3837 | if (rval == QLA_SUCCESS) { |
3486 | ha->flags.isp82xx_fw_hung = 0; | 3838 | ha->flags.isp82xx_fw_hung = 0; |
3487 | ha->flags.nic_core_reset_hdlr_active = 0; | 3839 | ha->flags.isp82xx_reset_hdlr_active = 0; |
3488 | qla82xx_restart_isp(vha); | 3840 | qla82xx_restart_isp(vha); |
3489 | } | 3841 | } |
3490 | 3842 | ||
@@ -3588,7 +3940,7 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha) | |||
3588 | } | 3940 | } |
3589 | } | 3941 | } |
3590 | ql_dbg(ql_dbg_p3p, vha, 0xb027, | 3942 | ql_dbg(ql_dbg_p3p, vha, 0xb027, |
3591 | "%s: status=%d.\n", __func__, status); | 3943 | "%s status=%d.\n", status); |
3592 | 3944 | ||
3593 | return status; | 3945 | return status; |
3594 | } | 3946 | } |
@@ -3609,7 +3961,10 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) | |||
3609 | msleep(1000); | 3961 | msleep(1000); |
3610 | if (qla82xx_check_fw_alive(vha)) { | 3962 | if (qla82xx_check_fw_alive(vha)) { |
3611 | ha->flags.isp82xx_fw_hung = 1; | 3963 | ha->flags.isp82xx_fw_hung = 1; |
3612 | qla82xx_clear_pending_mbx(vha); | 3964 | if (ha->flags.mbox_busy) { |
3965 | ha->flags.mbox_int = 1; | ||
3966 | complete(&ha->mbx_intr_comp); | ||
3967 | } | ||
3613 | break; | 3968 | break; |
3614 | } | 3969 | } |
3615 | } | 3970 | } |
@@ -3632,7 +3987,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) | |||
3632 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { | 3987 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { |
3633 | sp = req->outstanding_cmds[cnt]; | 3988 | sp = req->outstanding_cmds[cnt]; |
3634 | if (sp) { | 3989 | if (sp) { |
3635 | if (!sp->u.scmd.ctx || | 3990 | if (!sp->ctx || |
3636 | (sp->flags & SRB_FCP_CMND_DMA_VALID)) { | 3991 | (sp->flags & SRB_FCP_CMND_DMA_VALID)) { |
3637 | spin_unlock_irqrestore( | 3992 | spin_unlock_irqrestore( |
3638 | &ha->hardware_lock, flags); | 3993 | &ha->hardware_lock, flags); |
@@ -3661,790 +4016,3 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) | |||
3661 | } | 4016 | } |
3662 | } | 4017 | } |
3663 | } | 4018 | } |
3664 | |||
3665 | /* Minidump related functions */ | ||
3666 | static int | ||
3667 | qla82xx_minidump_process_control(scsi_qla_host_t *vha, | ||
3668 | qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) | ||
3669 | { | ||
3670 | struct qla_hw_data *ha = vha->hw; | ||
3671 | struct qla82xx_md_entry_crb *crb_entry; | ||
3672 | uint32_t read_value, opcode, poll_time; | ||
3673 | uint32_t addr, index, crb_addr; | ||
3674 | unsigned long wtime; | ||
3675 | struct qla82xx_md_template_hdr *tmplt_hdr; | ||
3676 | uint32_t rval = QLA_SUCCESS; | ||
3677 | int i; | ||
3678 | |||
3679 | tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; | ||
3680 | crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr; | ||
3681 | crb_addr = crb_entry->addr; | ||
3682 | |||
3683 | for (i = 0; i < crb_entry->op_count; i++) { | ||
3684 | opcode = crb_entry->crb_ctrl.opcode; | ||
3685 | if (opcode & QLA82XX_DBG_OPCODE_WR) { | ||
3686 | qla82xx_md_rw_32(ha, crb_addr, | ||
3687 | crb_entry->value_1, 1); | ||
3688 | opcode &= ~QLA82XX_DBG_OPCODE_WR; | ||
3689 | } | ||
3690 | |||
3691 | if (opcode & QLA82XX_DBG_OPCODE_RW) { | ||
3692 | read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); | ||
3693 | qla82xx_md_rw_32(ha, crb_addr, read_value, 1); | ||
3694 | opcode &= ~QLA82XX_DBG_OPCODE_RW; | ||
3695 | } | ||
3696 | |||
3697 | if (opcode & QLA82XX_DBG_OPCODE_AND) { | ||
3698 | read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); | ||
3699 | read_value &= crb_entry->value_2; | ||
3700 | opcode &= ~QLA82XX_DBG_OPCODE_AND; | ||
3701 | if (opcode & QLA82XX_DBG_OPCODE_OR) { | ||
3702 | read_value |= crb_entry->value_3; | ||
3703 | opcode &= ~QLA82XX_DBG_OPCODE_OR; | ||
3704 | } | ||
3705 | qla82xx_md_rw_32(ha, crb_addr, read_value, 1); | ||
3706 | } | ||
3707 | |||
3708 | if (opcode & QLA82XX_DBG_OPCODE_OR) { | ||
3709 | read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); | ||
3710 | read_value |= crb_entry->value_3; | ||
3711 | qla82xx_md_rw_32(ha, crb_addr, read_value, 1); | ||
3712 | opcode &= ~QLA82XX_DBG_OPCODE_OR; | ||
3713 | } | ||
3714 | |||
3715 | if (opcode & QLA82XX_DBG_OPCODE_POLL) { | ||
3716 | poll_time = crb_entry->crb_strd.poll_timeout; | ||
3717 | wtime = jiffies + poll_time; | ||
3718 | read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); | ||
3719 | |||
3720 | do { | ||
3721 | if ((read_value & crb_entry->value_2) | ||
3722 | == crb_entry->value_1) | ||
3723 | break; | ||
3724 | else if (time_after_eq(jiffies, wtime)) { | ||
3725 | /* capturing dump failed */ | ||
3726 | rval = QLA_FUNCTION_FAILED; | ||
3727 | break; | ||
3728 | } else | ||
3729 | read_value = qla82xx_md_rw_32(ha, | ||
3730 | crb_addr, 0, 0); | ||
3731 | } while (1); | ||
3732 | opcode &= ~QLA82XX_DBG_OPCODE_POLL; | ||
3733 | } | ||
3734 | |||
3735 | if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) { | ||
3736 | if (crb_entry->crb_strd.state_index_a) { | ||
3737 | index = crb_entry->crb_strd.state_index_a; | ||
3738 | addr = tmplt_hdr->saved_state_array[index]; | ||
3739 | } else | ||
3740 | addr = crb_addr; | ||
3741 | |||
3742 | read_value = qla82xx_md_rw_32(ha, addr, 0, 0); | ||
3743 | index = crb_entry->crb_ctrl.state_index_v; | ||
3744 | tmplt_hdr->saved_state_array[index] = read_value; | ||
3745 | opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE; | ||
3746 | } | ||
3747 | |||
3748 | if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) { | ||
3749 | if (crb_entry->crb_strd.state_index_a) { | ||
3750 | index = crb_entry->crb_strd.state_index_a; | ||
3751 | addr = tmplt_hdr->saved_state_array[index]; | ||
3752 | } else | ||
3753 | addr = crb_addr; | ||
3754 | |||
3755 | if (crb_entry->crb_ctrl.state_index_v) { | ||
3756 | index = crb_entry->crb_ctrl.state_index_v; | ||
3757 | read_value = | ||
3758 | tmplt_hdr->saved_state_array[index]; | ||
3759 | } else | ||
3760 | read_value = crb_entry->value_1; | ||
3761 | |||
3762 | qla82xx_md_rw_32(ha, addr, read_value, 1); | ||
3763 | opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE; | ||
3764 | } | ||
3765 | |||
3766 | if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) { | ||
3767 | index = crb_entry->crb_ctrl.state_index_v; | ||
3768 | read_value = tmplt_hdr->saved_state_array[index]; | ||
3769 | read_value <<= crb_entry->crb_ctrl.shl; | ||
3770 | read_value >>= crb_entry->crb_ctrl.shr; | ||
3771 | if (crb_entry->value_2) | ||
3772 | read_value &= crb_entry->value_2; | ||
3773 | read_value |= crb_entry->value_3; | ||
3774 | read_value += crb_entry->value_1; | ||
3775 | tmplt_hdr->saved_state_array[index] = read_value; | ||
3776 | opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE; | ||
3777 | } | ||
3778 | crb_addr += crb_entry->crb_strd.addr_stride; | ||
3779 | } | ||
3780 | return rval; | ||
3781 | } | ||
3782 | |||
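Each minidump control entry above carries a bitmask of operations, and the loop services one bit at a time, clearing it as it goes, so any bit left over marks an opcode the code does not understand. A reduced sketch of that dispatch style (illustrative OP_* values, not the driver's QLA82XX_DBG_OPCODE_* constants) is:

#include <stdint.h>
#include <stdio.h>

#define OP_WR   0x01
#define OP_AND  0x04
#define OP_OR   0x08

static uint32_t reg;   /* stand-in for a CRB register */

/* Apply each requested operation and clear its bit, as the control-entry loop
 * above does; a non-zero return would mean an unhandled opcode. */
static uint32_t process_entry(uint32_t opcode, uint32_t v1, uint32_t v2, uint32_t v3)
{
    if (opcode & OP_WR)  { reg = v1;  opcode &= ~OP_WR;  }
    if (opcode & OP_AND) { reg &= v2; opcode &= ~OP_AND; }
    if (opcode & OP_OR)  { reg |= v3; opcode &= ~OP_OR;  }
    return opcode;
}

int main(void)
{
    uint32_t left = process_entry(OP_WR | OP_AND | OP_OR, 0xffff, 0x0ff0, 0x1);
    printf("reg=0x%04x unhandled=0x%x\n", reg, left);   /* reg=0x0ff1, unhandled=0x0 */
    return 0;
}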
3783 | static void | ||
3784 | qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha, | ||
3785 | qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) | ||
3786 | { | ||
3787 | struct qla_hw_data *ha = vha->hw; | ||
3788 | uint32_t r_addr, r_stride, loop_cnt, i, r_value; | ||
3789 | struct qla82xx_md_entry_rdocm *ocm_hdr; | ||
3790 | uint32_t *data_ptr = *d_ptr; | ||
3791 | |||
3792 | ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr; | ||
3793 | r_addr = ocm_hdr->read_addr; | ||
3794 | r_stride = ocm_hdr->read_addr_stride; | ||
3795 | loop_cnt = ocm_hdr->op_count; | ||
3796 | |||
3797 | for (i = 0; i < loop_cnt; i++) { | ||
3798 | r_value = RD_REG_DWORD((void __iomem *) | ||
3799 | (r_addr + ha->nx_pcibase)); | ||
3800 | *data_ptr++ = cpu_to_le32(r_value); | ||
3801 | r_addr += r_stride; | ||
3802 | } | ||
3803 | *d_ptr = data_ptr; | ||
3804 | } | ||
3805 | |||
3806 | static void | ||
3807 | qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha, | ||
3808 | qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) | ||
3809 | { | ||
3810 | struct qla_hw_data *ha = vha->hw; | ||
3811 | uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value; | ||
3812 | struct qla82xx_md_entry_mux *mux_hdr; | ||
3813 | uint32_t *data_ptr = *d_ptr; | ||
3814 | |||
3815 | mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr; | ||
3816 | r_addr = mux_hdr->read_addr; | ||
3817 | s_addr = mux_hdr->select_addr; | ||
3818 | s_stride = mux_hdr->select_value_stride; | ||
3819 | s_value = mux_hdr->select_value; | ||
3820 | loop_cnt = mux_hdr->op_count; | ||
3821 | |||
3822 | for (i = 0; i < loop_cnt; i++) { | ||
3823 | qla82xx_md_rw_32(ha, s_addr, s_value, 1); | ||
3824 | r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); | ||
3825 | *data_ptr++ = cpu_to_le32(s_value); | ||
3826 | *data_ptr++ = cpu_to_le32(r_value); | ||
3827 | s_value += s_stride; | ||
3828 | } | ||
3829 | *d_ptr = data_ptr; | ||
3830 | } | ||
3831 | |||
3832 | static void | ||
3833 | qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha, | ||
3834 | qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) | ||
3835 | { | ||
3836 | struct qla_hw_data *ha = vha->hw; | ||
3837 | uint32_t r_addr, r_stride, loop_cnt, i, r_value; | ||
3838 | struct qla82xx_md_entry_crb *crb_hdr; | ||
3839 | uint32_t *data_ptr = *d_ptr; | ||
3840 | |||
3841 | crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr; | ||
3842 | r_addr = crb_hdr->addr; | ||
3843 | r_stride = crb_hdr->crb_strd.addr_stride; | ||
3844 | loop_cnt = crb_hdr->op_count; | ||
3845 | |||
3846 | for (i = 0; i < loop_cnt; i++) { | ||
3847 | r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); | ||
3848 | *data_ptr++ = cpu_to_le32(r_addr); | ||
3849 | *data_ptr++ = cpu_to_le32(r_value); | ||
3850 | r_addr += r_stride; | ||
3851 | } | ||
3852 | *d_ptr = data_ptr; | ||
3853 | } | ||
3854 | |||
3855 | static int | ||
3856 | qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha, | ||
3857 | qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) | ||
3858 | { | ||
3859 | struct qla_hw_data *ha = vha->hw; | ||
3860 | uint32_t addr, r_addr, c_addr, t_r_addr; | ||
3861 | uint32_t i, k, loop_count, t_value, r_cnt, r_value; | ||
3862 | unsigned long p_wait, w_time, p_mask; | ||
3863 | uint32_t c_value_w, c_value_r; | ||
3864 | struct qla82xx_md_entry_cache *cache_hdr; | ||
3865 | int rval = QLA_FUNCTION_FAILED; | ||
3866 | uint32_t *data_ptr = *d_ptr; | ||
3867 | |||
3868 | cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; | ||
3869 | loop_count = cache_hdr->op_count; | ||
3870 | r_addr = cache_hdr->read_addr; | ||
3871 | c_addr = cache_hdr->control_addr; | ||
3872 | c_value_w = cache_hdr->cache_ctrl.write_value; | ||
3873 | |||
3874 | t_r_addr = cache_hdr->tag_reg_addr; | ||
3875 | t_value = cache_hdr->addr_ctrl.init_tag_value; | ||
3876 | r_cnt = cache_hdr->read_ctrl.read_addr_cnt; | ||
3877 | p_wait = cache_hdr->cache_ctrl.poll_wait; | ||
3878 | p_mask = cache_hdr->cache_ctrl.poll_mask; | ||
3879 | |||
3880 | for (i = 0; i < loop_count; i++) { | ||
3881 | qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); | ||
3882 | if (c_value_w) | ||
3883 | qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); | ||
3884 | |||
3885 | if (p_mask) { | ||
3886 | w_time = jiffies + p_wait; | ||
3887 | do { | ||
3888 | c_value_r = qla82xx_md_rw_32(ha, c_addr, 0, 0); | ||
3889 | if ((c_value_r & p_mask) == 0) | ||
3890 | break; | ||
3891 | else if (time_after_eq(jiffies, w_time)) { | ||
3892 | /* capturing dump failed */ | ||
3893 | ql_dbg(ql_dbg_p3p, vha, 0xb032, | ||
3894 | "c_value_r: 0x%x, poll_mask: 0x%lx, " | ||
3895 | "w_time: 0x%lx\n", | ||
3896 | c_value_r, p_mask, w_time); | ||
3897 | return rval; | ||
3898 | } | ||
3899 | } while (1); | ||
3900 | } | ||
3901 | |||
3902 | addr = r_addr; | ||
3903 | for (k = 0; k < r_cnt; k++) { | ||
3904 | r_value = qla82xx_md_rw_32(ha, addr, 0, 0); | ||
3905 | *data_ptr++ = cpu_to_le32(r_value); | ||
3906 | addr += cache_hdr->read_ctrl.read_addr_stride; | ||
3907 | } | ||
3908 | t_value += cache_hdr->addr_ctrl.tag_value_stride; | ||
3909 | } | ||
3910 | *d_ptr = data_ptr; | ||
3911 | return QLA_SUCCESS; | ||
3912 | } | ||
3913 | |||
3914 | static void | ||
3915 | qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha, | ||
3916 | qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) | ||
3917 | { | ||
3918 | struct qla_hw_data *ha = vha->hw; | ||
3919 | uint32_t addr, r_addr, c_addr, t_r_addr; | ||
3920 | uint32_t i, k, loop_count, t_value, r_cnt, r_value; | ||
3921 | uint32_t c_value_w; | ||
3922 | struct qla82xx_md_entry_cache *cache_hdr; | ||
3923 | uint32_t *data_ptr = *d_ptr; | ||
3924 | |||
3925 | cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; | ||
3926 | loop_count = cache_hdr->op_count; | ||
3927 | r_addr = cache_hdr->read_addr; | ||
3928 | c_addr = cache_hdr->control_addr; | ||
3929 | c_value_w = cache_hdr->cache_ctrl.write_value; | ||
3930 | |||
3931 | t_r_addr = cache_hdr->tag_reg_addr; | ||
3932 | t_value = cache_hdr->addr_ctrl.init_tag_value; | ||
3933 | r_cnt = cache_hdr->read_ctrl.read_addr_cnt; | ||
3934 | |||
3935 | for (i = 0; i < loop_count; i++) { | ||
3936 | qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); | ||
3937 | qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); | ||
3938 | addr = r_addr; | ||
3939 | for (k = 0; k < r_cnt; k++) { | ||
3940 | r_value = qla82xx_md_rw_32(ha, addr, 0, 0); | ||
3941 | *data_ptr++ = cpu_to_le32(r_value); | ||
3942 | addr += cache_hdr->read_ctrl.read_addr_stride; | ||
3943 | } | ||
3944 | t_value += cache_hdr->addr_ctrl.tag_value_stride; | ||
3945 | } | ||
3946 | *d_ptr = data_ptr; | ||
3947 | } | ||
3948 | |||
3949 | static void | ||
3950 | qla82xx_minidump_process_queue(scsi_qla_host_t *vha, | ||
3951 | qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) | ||
3952 | { | ||
3953 | struct qla_hw_data *ha = vha->hw; | ||
3954 | uint32_t s_addr, r_addr; | ||
3955 | uint32_t r_stride, r_value, r_cnt, qid = 0; | ||
3956 | uint32_t i, k, loop_cnt; | ||
3957 | struct qla82xx_md_entry_queue *q_hdr; | ||
3958 | uint32_t *data_ptr = *d_ptr; | ||
3959 | |||
3960 | q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr; | ||
3961 | s_addr = q_hdr->select_addr; | ||
3962 | r_cnt = q_hdr->rd_strd.read_addr_cnt; | ||
3963 | r_stride = q_hdr->rd_strd.read_addr_stride; | ||
3964 | loop_cnt = q_hdr->op_count; | ||
3965 | |||
3966 | for (i = 0; i < loop_cnt; i++) { | ||
3967 | qla82xx_md_rw_32(ha, s_addr, qid, 1); | ||
3968 | r_addr = q_hdr->read_addr; | ||
3969 | for (k = 0; k < r_cnt; k++) { | ||
3970 | r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); | ||
3971 | *data_ptr++ = cpu_to_le32(r_value); | ||
3972 | r_addr += r_stride; | ||
3973 | } | ||
3974 | qid += q_hdr->q_strd.queue_id_stride; | ||
3975 | } | ||
3976 | *d_ptr = data_ptr; | ||
3977 | } | ||
3978 | |||
3979 | static void | ||
3980 | qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha, | ||
3981 | qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) | ||
3982 | { | ||
3983 | struct qla_hw_data *ha = vha->hw; | ||
3984 | uint32_t r_addr, r_value; | ||
3985 | uint32_t i, loop_cnt; | ||
3986 | struct qla82xx_md_entry_rdrom *rom_hdr; | ||
3987 | uint32_t *data_ptr = *d_ptr; | ||
3988 | |||
3989 | rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr; | ||
3990 | r_addr = rom_hdr->read_addr; | ||
3991 | loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t); | ||
3992 | |||
3993 | for (i = 0; i < loop_cnt; i++) { | ||
3994 | qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, | ||
3995 | (r_addr & 0xFFFF0000), 1); | ||
3996 | r_value = qla82xx_md_rw_32(ha, | ||
3997 | MD_DIRECT_ROM_READ_BASE + | ||
3998 | (r_addr & 0x0000FFFF), 0, 0); | ||
3999 | *data_ptr++ = cpu_to_le32(r_value); | ||
4000 | r_addr += sizeof(uint32_t); | ||
4001 | } | ||
4002 | *d_ptr = data_ptr; | ||
4003 | } | ||
4004 | |||
4005 | static int | ||
4006 | qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha, | ||
4007 | qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) | ||
4008 | { | ||
4009 | struct qla_hw_data *ha = vha->hw; | ||
4010 | uint32_t r_addr, r_value, r_data; | ||
4011 | uint32_t i, j, loop_cnt; | ||
4012 | struct qla82xx_md_entry_rdmem *m_hdr; | ||
4013 | unsigned long flags; | ||
4014 | int rval = QLA_FUNCTION_FAILED; | ||
4015 | uint32_t *data_ptr = *d_ptr; | ||
4016 | |||
4017 | m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr; | ||
4018 | r_addr = m_hdr->read_addr; | ||
4019 | loop_cnt = m_hdr->read_data_size/16; | ||
4020 | |||
4021 | if (r_addr & 0xf) { | ||
4022 | ql_log(ql_log_warn, vha, 0xb033, | ||
4023 | "Read addr 0x%x not 16 bytes aligned\n", r_addr); | ||
4024 | return rval; | ||
4025 | } | ||
4026 | |||
4027 | if (m_hdr->read_data_size % 16) { | ||
4028 | ql_log(ql_log_warn, vha, 0xb034, | ||
4029 | "Read data[0x%x] not multiple of 16 bytes\n", | ||
4030 | m_hdr->read_data_size); | ||
4031 | return rval; | ||
4032 | } | ||
4033 | |||
4034 | ql_dbg(ql_dbg_p3p, vha, 0xb035, | ||
4035 | "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", | ||
4036 | __func__, r_addr, m_hdr->read_data_size, loop_cnt); | ||
4037 | |||
4038 | write_lock_irqsave(&ha->hw_lock, flags); | ||
4039 | for (i = 0; i < loop_cnt; i++) { | ||
4040 | qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1); | ||
4041 | r_value = 0; | ||
4042 | qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1); | ||
4043 | r_value = MIU_TA_CTL_ENABLE; | ||
4044 | qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); | ||
4045 | r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; | ||
4046 | qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); | ||
4047 | |||
4048 | for (j = 0; j < MAX_CTL_CHECK; j++) { | ||
4049 | r_value = qla82xx_md_rw_32(ha, | ||
4050 | MD_MIU_TEST_AGT_CTRL, 0, 0); | ||
4051 | if ((r_value & MIU_TA_CTL_BUSY) == 0) | ||
4052 | break; | ||
4053 | } | ||
4054 | |||
4055 | if (j >= MAX_CTL_CHECK) { | ||
4056 | printk_ratelimited(KERN_ERR | ||
4057 | "failed to read through agent\n"); | ||
4058 | write_unlock_irqrestore(&ha->hw_lock, flags); | ||
4059 | return rval; | ||
4060 | } | ||
4061 | |||
4062 | for (j = 0; j < 4; j++) { | ||
4063 | r_data = qla82xx_md_rw_32(ha, | ||
4064 | MD_MIU_TEST_AGT_RDDATA[j], 0, 0); | ||
4065 | *data_ptr++ = cpu_to_le32(r_data); | ||
4066 | } | ||
4067 | r_addr += 16; | ||
4068 | } | ||
4069 | write_unlock_irqrestore(&ha->hw_lock, flags); | ||
4070 | *d_ptr = data_ptr; | ||
4071 | return QLA_SUCCESS; | ||
4072 | } | ||
4073 | |||
4074 | static int | ||
4075 | qla82xx_validate_template_chksum(scsi_qla_host_t *vha) | ||
4076 | { | ||
4077 | struct qla_hw_data *ha = vha->hw; | ||
4078 | uint64_t chksum = 0; | ||
4079 | uint32_t *d_ptr = (uint32_t *)ha->md_tmplt_hdr; | ||
4080 | int count = ha->md_template_size/sizeof(uint32_t); | ||
4081 | |||
4082 | while (count-- > 0) | ||
4083 | chksum += *d_ptr++; | ||
4084 | while (chksum >> 32) | ||
4085 | chksum = (chksum & 0xFFFFFFFF) + (chksum >> 32); | ||
4086 | return ~chksum; | ||
4087 | } | ||
4088 | |||
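qla82xx_validate_template_chksum() above treats the template as an array of 32-bit words, accumulates them into 64 bits, folds the carries back into the low word and returns the complement, so a template whose embedded checksum field is consistent yields zero. The same arithmetic in standalone form, exercised with a toy four-word template:

#include <stdint.h>
#include <stdio.h>

static uint32_t template_chksum(const uint32_t *words, size_t count)
{
    uint64_t sum = 0;

    while (count-- > 0)                       /* sum all 32-bit words */
        sum += *words++;
    while (sum >> 32)                         /* fold carries back in */
        sum = (sum & 0xFFFFFFFFu) + (sum >> 32);
    return (uint32_t)~sum;                    /* 0 means the template is consistent */
}

int main(void)
{
    /* Three payload words plus a checksum word chosen so the folded total is
     * 0xFFFFFFFF; the function then reports 0 (i.e. "valid"). */
    uint32_t tmpl[4] = { 0x11111111, 0x22222222, 0x33333333, 0 };
    tmpl[3] = ~(uint32_t)(0x11111111 + 0x22222222 + 0x33333333);

    printf("checksum over template: 0x%08x\n", template_chksum(tmpl, 4));
    return 0;
}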
4089 | static void | ||
4090 | qla82xx_mark_entry_skipped(scsi_qla_host_t *vha, | ||
4091 | qla82xx_md_entry_hdr_t *entry_hdr, int index) | ||
4092 | { | ||
4093 | entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; | ||
4094 | ql_dbg(ql_dbg_p3p, vha, 0xb036, | ||
4095 | "Skipping entry[%d]: " | ||
4096 | "ETYPE[0x%x]-ELEVEL[0x%x]\n", | ||
4097 | index, entry_hdr->entry_type, | ||
4098 | entry_hdr->d_ctrl.entry_capture_mask); | ||
4099 | } | ||
4100 | |||
4101 | int | ||
4102 | qla82xx_md_collect(scsi_qla_host_t *vha) | ||
4103 | { | ||
4104 | struct qla_hw_data *ha = vha->hw; | ||
4105 | int no_entry_hdr = 0; | ||
4106 | qla82xx_md_entry_hdr_t *entry_hdr; | ||
4107 | struct qla82xx_md_template_hdr *tmplt_hdr; | ||
4108 | uint32_t *data_ptr; | ||
4109 | uint32_t total_data_size = 0, f_capture_mask, data_collected = 0; | ||
4110 | int i = 0, rval = QLA_FUNCTION_FAILED; | ||
4111 | |||
4112 | tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; | ||
4113 | data_ptr = (uint32_t *)ha->md_dump; | ||
4114 | |||
4115 | if (ha->fw_dumped) { | ||
4116 | ql_log(ql_log_warn, vha, 0xb037, | ||
4117 | "Firmware has been previously dumped (%p) " | ||
4118 | "-- ignoring request.\n", ha->fw_dump); | ||
4119 | goto md_failed; | ||
4120 | } | ||
4121 | |||
4122 | ha->fw_dumped = 0; | ||
4123 | |||
4124 | if (!ha->md_tmplt_hdr || !ha->md_dump) { | ||
4125 | ql_log(ql_log_warn, vha, 0xb038, | ||
4126 | "Memory not allocated for minidump capture\n"); | ||
4127 | goto md_failed; | ||
4128 | } | ||
4129 | |||
4130 | if (ha->flags.isp82xx_no_md_cap) { | ||
4131 | ql_log(ql_log_warn, vha, 0xb054, | ||
4132 | "Forced reset from application, " | ||
4133 | "ignore minidump capture\n"); | ||
4134 | ha->flags.isp82xx_no_md_cap = 0; | ||
4135 | goto md_failed; | ||
4136 | } | ||
4137 | |||
4138 | if (qla82xx_validate_template_chksum(vha)) { | ||
4139 | ql_log(ql_log_info, vha, 0xb039, | ||
4140 | "Template checksum validation error\n"); | ||
4141 | goto md_failed; | ||
4142 | } | ||
4143 | |||
4144 | no_entry_hdr = tmplt_hdr->num_of_entries; | ||
4145 | ql_dbg(ql_dbg_p3p, vha, 0xb03a, | ||
4146 | "No of entry headers in Template: 0x%x\n", no_entry_hdr); | ||
4147 | |||
4148 | ql_dbg(ql_dbg_p3p, vha, 0xb03b, | ||
4149 | "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level); | ||
4150 | |||
4151 | f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF; | ||
4152 | |||
4153 | /* Validate whether required debug level is set */ | ||
4154 | if ((f_capture_mask & 0x3) != 0x3) { | ||
4155 | ql_log(ql_log_warn, vha, 0xb03c, | ||
4156 | "Minimum required capture mask[0x%x] level not set\n", | ||
4157 | f_capture_mask); | ||
4158 | goto md_failed; | ||
4159 | } | ||
4160 | tmplt_hdr->driver_capture_mask = ql2xmdcapmask; | ||
4161 | |||
4162 | tmplt_hdr->driver_info[0] = vha->host_no; | ||
4163 | tmplt_hdr->driver_info[1] = (QLA_DRIVER_MAJOR_VER << 24) | | ||
4164 | (QLA_DRIVER_MINOR_VER << 16) | (QLA_DRIVER_PATCH_VER << 8) | | ||
4165 | QLA_DRIVER_BETA_VER; | ||
4166 | |||
4167 | total_data_size = ha->md_dump_size; | ||
4168 | |||
4169 | ql_dbg(ql_dbg_p3p, vha, 0xb03d, | ||
4170 | "Total minidump data_size 0x%x to be captured\n", total_data_size); | ||
4171 | |||
4172 | /* Check whether template obtained is valid */ | ||
4173 | if (tmplt_hdr->entry_type != QLA82XX_TLHDR) { | ||
4174 | ql_log(ql_log_warn, vha, 0xb04e, | ||
4175 | "Bad template header entry type: 0x%x obtained\n", | ||
4176 | tmplt_hdr->entry_type); | ||
4177 | goto md_failed; | ||
4178 | } | ||
4179 | |||
4180 | entry_hdr = (qla82xx_md_entry_hdr_t *) \ | ||
4181 | (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset); | ||
4182 | |||
4183 | /* Walk through the entry headers */ | ||
4184 | for (i = 0; i < no_entry_hdr; i++) { | ||
4185 | |||
4186 | if (data_collected > total_data_size) { | ||
4187 | ql_log(ql_log_warn, vha, 0xb03e, | ||
4188 | "More MiniDump data collected: [0x%x]\n", | ||
4189 | data_collected); | ||
4190 | goto md_failed; | ||
4191 | } | ||
4192 | |||
4193 | if (!(entry_hdr->d_ctrl.entry_capture_mask & | ||
4194 | ql2xmdcapmask)) { | ||
4195 | entry_hdr->d_ctrl.driver_flags |= | ||
4196 | QLA82XX_DBG_SKIPPED_FLAG; | ||
4197 | ql_dbg(ql_dbg_p3p, vha, 0xb03f, | ||
4198 | "Skipping entry[%d]: " | ||
4199 | "ETYPE[0x%x]-ELEVEL[0x%x]\n", | ||
4200 | i, entry_hdr->entry_type, | ||
4201 | entry_hdr->d_ctrl.entry_capture_mask); | ||
4202 | goto skip_nxt_entry; | ||
4203 | } | ||
4204 | |||
4205 | ql_dbg(ql_dbg_p3p, vha, 0xb040, | ||
4206 | "[%s]: data ptr[%d]: %p, entry_hdr: %p\n" | ||
4207 | "entry_type: 0x%x, captrue_mask: 0x%x\n", | ||
4208 | __func__, i, data_ptr, entry_hdr, | ||
4209 | entry_hdr->entry_type, | ||
4210 | entry_hdr->d_ctrl.entry_capture_mask); | ||
4211 | |||
4212 | ql_dbg(ql_dbg_p3p, vha, 0xb041, | ||
4213 | "Data collected: [0x%x], Dump size left:[0x%x]\n", | ||
4214 | data_collected, (ha->md_dump_size - data_collected)); | ||
4215 | |||
4216 | /* Decode the entry type and take | ||
4217 | * required action to capture debug data */ | ||
4218 | switch (entry_hdr->entry_type) { | ||
4219 | case QLA82XX_RDEND: | ||
4220 | qla82xx_mark_entry_skipped(vha, entry_hdr, i); | ||
4221 | break; | ||
4222 | case QLA82XX_CNTRL: | ||
4223 | rval = qla82xx_minidump_process_control(vha, | ||
4224 | entry_hdr, &data_ptr); | ||
4225 | if (rval != QLA_SUCCESS) { | ||
4226 | qla82xx_mark_entry_skipped(vha, entry_hdr, i); | ||
4227 | goto md_failed; | ||
4228 | } | ||
4229 | break; | ||
4230 | case QLA82XX_RDCRB: | ||
4231 | qla82xx_minidump_process_rdcrb(vha, | ||
4232 | entry_hdr, &data_ptr); | ||
4233 | break; | ||
4234 | case QLA82XX_RDMEM: | ||
4235 | rval = qla82xx_minidump_process_rdmem(vha, | ||
4236 | entry_hdr, &data_ptr); | ||
4237 | if (rval != QLA_SUCCESS) { | ||
4238 | qla82xx_mark_entry_skipped(vha, entry_hdr, i); | ||
4239 | goto md_failed; | ||
4240 | } | ||
4241 | break; | ||
4242 | case QLA82XX_BOARD: | ||
4243 | case QLA82XX_RDROM: | ||
4244 | qla82xx_minidump_process_rdrom(vha, | ||
4245 | entry_hdr, &data_ptr); | ||
4246 | break; | ||
4247 | case QLA82XX_L2DTG: | ||
4248 | case QLA82XX_L2ITG: | ||
4249 | case QLA82XX_L2DAT: | ||
4250 | case QLA82XX_L2INS: | ||
4251 | rval = qla82xx_minidump_process_l2tag(vha, | ||
4252 | entry_hdr, &data_ptr); | ||
4253 | if (rval != QLA_SUCCESS) { | ||
4254 | qla82xx_mark_entry_skipped(vha, entry_hdr, i); | ||
4255 | goto md_failed; | ||
4256 | } | ||
4257 | break; | ||
4258 | case QLA82XX_L1DAT: | ||
4259 | case QLA82XX_L1INS: | ||
4260 | qla82xx_minidump_process_l1cache(vha, | ||
4261 | entry_hdr, &data_ptr); | ||
4262 | break; | ||
4263 | case QLA82XX_RDOCM: | ||
4264 | qla82xx_minidump_process_rdocm(vha, | ||
4265 | entry_hdr, &data_ptr); | ||
4266 | break; | ||
4267 | case QLA82XX_RDMUX: | ||
4268 | qla82xx_minidump_process_rdmux(vha, | ||
4269 | entry_hdr, &data_ptr); | ||
4270 | break; | ||
4271 | case QLA82XX_QUEUE: | ||
4272 | qla82xx_minidump_process_queue(vha, | ||
4273 | entry_hdr, &data_ptr); | ||
4274 | break; | ||
4275 | case QLA82XX_RDNOP: | ||
4276 | default: | ||
4277 | qla82xx_mark_entry_skipped(vha, entry_hdr, i); | ||
4278 | break; | ||
4279 | } | ||
4280 | |||
4281 | ql_dbg(ql_dbg_p3p, vha, 0xb042, | ||
4282 | "[%s]: data ptr[%d]: %p\n", __func__, i, data_ptr); | ||
4283 | |||
4284 | data_collected = (uint8_t *)data_ptr - | ||
4285 | (uint8_t *)ha->md_dump; | ||
4286 | skip_nxt_entry: | ||
4287 | entry_hdr = (qla82xx_md_entry_hdr_t *) \ | ||
4288 | (((uint8_t *)entry_hdr) + entry_hdr->entry_size); | ||
4289 | } | ||
4290 | |||
4291 | if (data_collected != total_data_size) { | ||
4292 | ql_dbg(ql_dbg_p3p, vha, 0xb043, | ||
4293 | "MiniDump data mismatch: Data collected: [0x%x]," | ||
4294 | "total_data_size:[0x%x]\n", | ||
4295 | data_collected, total_data_size); | ||
4296 | goto md_failed; | ||
4297 | } | ||
4298 | |||
4299 | ql_log(ql_log_info, vha, 0xb044, | ||
4300 | "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", | ||
4301 | vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); | ||
4302 | ha->fw_dumped = 1; | ||
4303 | qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); | ||
4304 | |||
4305 | md_failed: | ||
4306 | return rval; | ||
4307 | } | ||
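
qla82xx_md_collect() never indexes the template as an array: each entry header carries its own entry_size, so the walk advances by adding that size to the current header pointer, and entries whose capture-mask bit was not requested are only flagged as skipped, never removed. Below is a hedged sketch of that traversal over a packed buffer; the header layout and the skipped flag mirror the definitions removed from qla_nx.h further down, while walk_entries() itself is illustrative.

    #include <stdint.h>

    #define QLA82XX_DBG_SKIPPED_FLAG 0x80

    struct md_entry_hdr {                   /* mirrors qla82xx_md_entry_hdr_t */
            uint32_t entry_type;
            uint32_t entry_size;
            uint32_t entry_capture_size;
            struct {
                    uint8_t entry_capture_mask;
                    uint8_t entry_code;
                    uint8_t driver_code;
                    uint8_t driver_flags;
            } d_ctrl;
    } __attribute__((packed));

    /* Visit every entry; flag the ones not selected by cap_mask as skipped. */
    static void walk_entries(uint8_t *tmpl, uint32_t first_entry_offset,
                             uint32_t num_of_entries, uint8_t cap_mask)
    {
            struct md_entry_hdr *e =
                    (struct md_entry_hdr *)(tmpl + first_entry_offset);
            uint32_t i;

            for (i = 0; i < num_of_entries; i++) {
                    if (!(e->d_ctrl.entry_capture_mask & cap_mask))
                            e->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
                    /* ...otherwise dispatch on e->entry_type and copy data... */
                    e = (struct md_entry_hdr *)((uint8_t *)e + e->entry_size);
            }
    }
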
4308 | |||
4309 | int | ||
4310 | qla82xx_md_alloc(scsi_qla_host_t *vha) | ||
4311 | { | ||
4312 | struct qla_hw_data *ha = vha->hw; | ||
4313 | int i, k; | ||
4314 | struct qla82xx_md_template_hdr *tmplt_hdr; | ||
4315 | |||
4316 | tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; | ||
4317 | |||
4318 | if (ql2xmdcapmask < 0x3 || ql2xmdcapmask > 0x7F) { | ||
4319 | ql2xmdcapmask = tmplt_hdr->capture_debug_level & 0xFF; | ||
4320 | ql_log(ql_log_info, vha, 0xb045, | ||
4321 | "Forcing driver capture mask to firmware default capture mask: 0x%x.\n", | ||
4322 | ql2xmdcapmask); | ||
4323 | } | ||
4324 | |||
4325 | for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++) { | ||
4326 | if (i & ql2xmdcapmask) | ||
4327 | ha->md_dump_size += tmplt_hdr->capture_size_array[k]; | ||
4328 | } | ||
4329 | |||
4330 | if (ha->md_dump) { | ||
4331 | ql_log(ql_log_warn, vha, 0xb046, | ||
4332 | "Firmware dump previously allocated.\n"); | ||
4333 | return 1; | ||
4334 | } | ||
4335 | |||
4336 | ha->md_dump = vmalloc(ha->md_dump_size); | ||
4337 | if (ha->md_dump == NULL) { | ||
4338 | ql_log(ql_log_warn, vha, 0xb047, | ||
4339 | "Unable to allocate memory for Minidump size " | ||
4340 | "(0x%x).\n", ha->md_dump_size); | ||
4341 | return 1; | ||
4342 | } | ||
4343 | return 0; | ||
4344 | } | ||
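
The sizing loop in qla82xx_md_alloc() starts at bit 1 (0x2) of the capture mask and, for every bit the driver mask has set, adds the matching firmware-provided capture_size_array[] slot; bit 0 is never tested. A small standalone model of that accumulation, with made-up per-level sizes:

    #include <stdint.h>
    #include <stdio.h>

    #define QLA82XX_DEFAULT_CAP_MASK 0xFF

    /* Per-level sizes as a firmware template might report them (example data). */
    static const uint32_t capture_size_array[8] = {
            0, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000, 0x20000, 0x40000
    };

    static uint32_t md_dump_size(uint32_t cap_mask)
    {
            uint32_t size = 0;
            int i, k;

            for (i = 0x2, k = 1; i & QLA82XX_DEFAULT_CAP_MASK; i <<= 1, k++)
                    if (i & cap_mask)
                            size += capture_size_array[k];
            return size;
    }

    int main(void)
    {
            /* 0x1F is the ql2xmdcapmask default: capture levels 1-4 selected. */
            printf("dump size for mask 0x1F: 0x%x\n", md_dump_size(0x1F));
            return 0;
    }
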
4345 | |||
4346 | void | ||
4347 | qla82xx_md_free(scsi_qla_host_t *vha) | ||
4348 | { | ||
4349 | struct qla_hw_data *ha = vha->hw; | ||
4350 | |||
4351 | /* Release the template header allocated */ | ||
4352 | if (ha->md_tmplt_hdr) { | ||
4353 | ql_log(ql_log_info, vha, 0xb048, | ||
4354 | "Free MiniDump template: %p, size (%d KB)\n", | ||
4355 | ha->md_tmplt_hdr, ha->md_template_size / 1024); | ||
4356 | dma_free_coherent(&ha->pdev->dev, ha->md_template_size, | ||
4357 | ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma); | ||
4358 | ha->md_tmplt_hdr = NULL; | ||
4359 | } | ||
4360 | |||
4361 | /* Release the template data buffer allocated */ | ||
4362 | if (ha->md_dump) { | ||
4363 | ql_log(ql_log_info, vha, 0xb049, | ||
4364 | "Free MiniDump memory: %p, size (%d KB)\n", | ||
4365 | ha->md_dump, ha->md_dump_size / 1024); | ||
4366 | vfree(ha->md_dump); | ||
4367 | ha->md_dump_size = 0; | ||
4368 | ha->md_dump = NULL; | ||
4369 | } | ||
4370 | } | ||
4371 | |||
4372 | void | ||
4373 | qla82xx_md_prep(scsi_qla_host_t *vha) | ||
4374 | { | ||
4375 | struct qla_hw_data *ha = vha->hw; | ||
4376 | int rval; | ||
4377 | |||
4378 | /* Get Minidump template size */ | ||
4379 | rval = qla82xx_md_get_template_size(vha); | ||
4380 | if (rval == QLA_SUCCESS) { | ||
4381 | ql_log(ql_log_info, vha, 0xb04a, | ||
4382 | "MiniDump Template size obtained (%d KB)\n", | ||
4383 | ha->md_template_size / 1024); | ||
4384 | |||
4385 | /* Get Minidump template */ | ||
4386 | rval = qla82xx_md_get_template(vha); | ||
4387 | if (rval == QLA_SUCCESS) { | ||
4388 | ql_dbg(ql_dbg_p3p, vha, 0xb04b, | ||
4389 | "MiniDump Template obtained\n"); | ||
4390 | |||
4391 | /* Allocate memory for minidump */ | ||
4392 | rval = qla82xx_md_alloc(vha); | ||
4393 | if (rval == QLA_SUCCESS) | ||
4394 | ql_log(ql_log_info, vha, 0xb04c, | ||
4395 | "MiniDump memory allocated (%d KB)\n", | ||
4396 | ha->md_dump_size / 1024); | ||
4397 | else { | ||
4398 | ql_log(ql_log_info, vha, 0xb04d, | ||
4399 | "Free MiniDump template: %p, size: (%d KB)\n", | ||
4400 | ha->md_tmplt_hdr, | ||
4401 | ha->md_template_size / 1024); | ||
4402 | dma_free_coherent(&ha->pdev->dev, | ||
4403 | ha->md_template_size, | ||
4404 | ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma); | ||
4405 | ha->md_tmplt_hdr = NULL; | ||
4406 | } | ||
4407 | |||
4408 | } | ||
4409 | } | ||
4410 | } | ||
4411 | |||
4412 | int | ||
4413 | qla82xx_beacon_on(struct scsi_qla_host *vha) | ||
4414 | { | ||
4415 | |||
4416 | int rval; | ||
4417 | struct qla_hw_data *ha = vha->hw; | ||
4418 | qla82xx_idc_lock(ha); | ||
4419 | rval = qla82xx_mbx_beacon_ctl(vha, 1); | ||
4420 | |||
4421 | if (rval) { | ||
4422 | ql_log(ql_log_warn, vha, 0xb050, | ||
4423 | "mbx set led config failed in %s\n", __func__); | ||
4424 | goto exit; | ||
4425 | } | ||
4426 | ha->beacon_blink_led = 1; | ||
4427 | exit: | ||
4428 | qla82xx_idc_unlock(ha); | ||
4429 | return rval; | ||
4430 | } | ||
4431 | |||
4432 | int | ||
4433 | qla82xx_beacon_off(struct scsi_qla_host *vha) | ||
4434 | { | ||
4435 | |||
4436 | int rval; | ||
4437 | struct qla_hw_data *ha = vha->hw; | ||
4438 | qla82xx_idc_lock(ha); | ||
4439 | rval = qla82xx_mbx_beacon_ctl(vha, 0); | ||
4440 | |||
4441 | if (rval) { | ||
4442 | ql_log(ql_log_warn, vha, 0xb051, | ||
4443 | "mbx set led config failed in %s\n", __func__); | ||
4444 | goto exit; | ||
4445 | } | ||
4446 | ha->beacon_blink_led = 0; | ||
4447 | exit: | ||
4448 | qla82xx_idc_unlock(ha); | ||
4449 | return rval; | ||
4450 | } | ||
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 6c953e8c08f..8a21832c669 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -26,7 +26,6 @@ | |||
26 | #define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) | 26 | #define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) |
27 | #define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54) | 27 | #define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54) |
28 | #define CRB_DMA_SHIFT QLA82XX_REG(0xcc) | 28 | #define CRB_DMA_SHIFT QLA82XX_REG(0xcc) |
29 | #define CRB_TEMP_STATE QLA82XX_REG(0x1b4) | ||
30 | #define QLA82XX_DMA_SHIFT_VALUE 0x55555555 | 29 | #define QLA82XX_DMA_SHIFT_VALUE 0x55555555 |
31 | 30 | ||
32 | #define QLA82XX_HW_H0_CH_HUB_ADR 0x05 | 31 | #define QLA82XX_HW_H0_CH_HUB_ADR 0x05 |
@@ -485,6 +484,8 @@ | |||
485 | #define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL) | 484 | #define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL) |
486 | #define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL) | 485 | #define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL) |
487 | #define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL) | 486 | #define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL) |
487 | |||
488 | #define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL) | ||
488 | #define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL) | 489 | #define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL) |
489 | 490 | ||
490 | #define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000 | 491 | #define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000 |
@@ -542,15 +543,14 @@ | |||
542 | #define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174)) | 543 | #define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174)) |
543 | 544 | ||
544 | /* Every driver should use these Device State */ | 545 | /* Every driver should use these Device State */ |
545 | #define QLA8XXX_DEV_COLD 1 | 546 | #define QLA82XX_DEV_COLD 1 |
546 | #define QLA8XXX_DEV_INITIALIZING 2 | 547 | #define QLA82XX_DEV_INITIALIZING 2 |
547 | #define QLA8XXX_DEV_READY 3 | 548 | #define QLA82XX_DEV_READY 3 |
548 | #define QLA8XXX_DEV_NEED_RESET 4 | 549 | #define QLA82XX_DEV_NEED_RESET 4 |
549 | #define QLA8XXX_DEV_NEED_QUIESCENT 5 | 550 | #define QLA82XX_DEV_NEED_QUIESCENT 5 |
550 | #define QLA8XXX_DEV_FAILED 6 | 551 | #define QLA82XX_DEV_FAILED 6 |
551 | #define QLA8XXX_DEV_QUIESCENT 7 | 552 | #define QLA82XX_DEV_QUIESCENT 7 |
552 | #define MAX_STATES 8 /* Increment if new state added */ | 553 | #define MAX_STATES 8 /* Increment if new state added */ |
553 | #define QLA8XXX_BAD_VALUE 0xbad0bad0 | ||
554 | 554 | ||
555 | #define QLA82XX_IDC_VERSION 1 | 555 | #define QLA82XX_IDC_VERSION 1 |
556 | #define QLA82XX_ROM_DEV_INIT_TIMEOUT 30 | 556 | #define QLA82XX_ROM_DEV_INIT_TIMEOUT 30 |
@@ -563,6 +563,7 @@ | |||
563 | #define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158)) | 563 | #define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158)) |
564 | #define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg)) | 564 | #define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg)) |
565 | 565 | ||
566 | #define PCIE_CHICKEN3 (0x120c8) | ||
566 | #define PCIE_SETUP_FUNCTION (0x12040) | 567 | #define PCIE_SETUP_FUNCTION (0x12040) |
567 | #define PCIE_SETUP_FUNCTION2 (0x12048) | 568 | #define PCIE_SETUP_FUNCTION2 (0x12048) |
568 | 569 | ||
@@ -889,8 +890,6 @@ struct ct6_dsd { | |||
889 | }; | 890 | }; |
890 | 891 | ||
891 | #define MBC_TOGGLE_INTERRUPT 0x10 | 892 | #define MBC_TOGGLE_INTERRUPT 0x10 |
892 | #define MBC_SET_LED_CONFIG 0x125 /* FCoE specific LED control */ | ||
893 | #define MBC_GET_LED_CONFIG 0x126 /* FCoE specific LED control */ | ||
894 | 893 | ||
895 | /* Flash offset */ | 894 | /* Flash offset */ |
896 | #define FLT_REG_BOOTLOAD_82XX 0x72 | 895 | #define FLT_REG_BOOTLOAD_82XX 0x72 |
@@ -923,272 +922,4 @@ struct ct6_dsd { | |||
923 | #define M25P_INSTR_DP 0xb9 | 922 | #define M25P_INSTR_DP 0xb9 |
924 | #define M25P_INSTR_RES 0xab | 923 | #define M25P_INSTR_RES 0xab |
925 | 924 | ||
926 | /* Minidump related */ | ||
927 | |||
928 | /* | ||
929 | * Version of the template | ||
930 | * 4 Bytes | ||
931 | * X.Major.Minor.RELEASE | ||
932 | */ | ||
933 | #define QLA82XX_MINIDUMP_VERSION 0x10101 | ||
934 | |||
935 | /* | ||
936 | * Entry Type Defines | ||
937 | */ | ||
938 | #define QLA82XX_RDNOP 0 | ||
939 | #define QLA82XX_RDCRB 1 | ||
940 | #define QLA82XX_RDMUX 2 | ||
941 | #define QLA82XX_QUEUE 3 | ||
942 | #define QLA82XX_BOARD 4 | ||
943 | #define QLA82XX_RDSRE 5 | ||
944 | #define QLA82XX_RDOCM 6 | ||
945 | #define QLA82XX_CACHE 10 | ||
946 | #define QLA82XX_L1DAT 11 | ||
947 | #define QLA82XX_L1INS 12 | ||
948 | #define QLA82XX_L2DTG 21 | ||
949 | #define QLA82XX_L2ITG 22 | ||
950 | #define QLA82XX_L2DAT 23 | ||
951 | #define QLA82XX_L2INS 24 | ||
952 | #define QLA82XX_RDROM 71 | ||
953 | #define QLA82XX_RDMEM 72 | ||
954 | #define QLA82XX_CNTRL 98 | ||
955 | #define QLA82XX_TLHDR 99 | ||
956 | #define QLA82XX_RDEND 255 | ||
957 | |||
958 | /* | ||
959 | * Opcodes for Control Entries. | ||
960 | * These Flags are bit fields. | ||
961 | */ | ||
962 | #define QLA82XX_DBG_OPCODE_WR 0x01 | ||
963 | #define QLA82XX_DBG_OPCODE_RW 0x02 | ||
964 | #define QLA82XX_DBG_OPCODE_AND 0x04 | ||
965 | #define QLA82XX_DBG_OPCODE_OR 0x08 | ||
966 | #define QLA82XX_DBG_OPCODE_POLL 0x10 | ||
967 | #define QLA82XX_DBG_OPCODE_RDSTATE 0x20 | ||
968 | #define QLA82XX_DBG_OPCODE_WRSTATE 0x40 | ||
969 | #define QLA82XX_DBG_OPCODE_MDSTATE 0x80 | ||
970 | |||
971 | /* | ||
972 | * Template Header and Entry Header definitions start here. | ||
973 | */ | ||
974 | |||
975 | /* | ||
976 | * Template Header | ||
977 | * Parts of the template header can be modified by the driver. | ||
978 | * These include the saved_state_array, capture_debug_level, driver_timestamp | ||
979 | */ | ||
980 | |||
981 | #define QLA82XX_DBG_STATE_ARRAY_LEN 16 | ||
982 | #define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8 | ||
983 | #define QLA82XX_DBG_RSVD_ARRAY_LEN 8 | ||
984 | |||
985 | /* | ||
986 | * Driver Flags | ||
987 | */ | ||
988 | #define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */ | ||
989 | #define QLA82XX_DEFAULT_CAP_MASK 0xFF /* default capture mask */ | ||
990 | |||
991 | struct qla82xx_md_template_hdr { | ||
992 | uint32_t entry_type; | ||
993 | uint32_t first_entry_offset; | ||
994 | uint32_t size_of_template; | ||
995 | uint32_t capture_debug_level; | ||
996 | |||
997 | uint32_t num_of_entries; | ||
998 | uint32_t version; | ||
999 | uint32_t driver_timestamp; | ||
1000 | uint32_t template_checksum; | ||
1001 | |||
1002 | uint32_t driver_capture_mask; | ||
1003 | uint32_t driver_info[3]; | ||
1004 | |||
1005 | uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN]; | ||
1006 | uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN]; | ||
1007 | |||
1008 | /* markers_array used to capture some special locations on board */ | ||
1009 | uint32_t markers_array[QLA82XX_DBG_RSVD_ARRAY_LEN]; | ||
1010 | uint32_t num_of_free_entries; /* For internal use */ | ||
1011 | uint32_t free_entry_offset; /* For internal use */ | ||
1012 | uint32_t total_table_size; /* For internal use */ | ||
1013 | uint32_t bkup_table_offset; /* For internal use */ | ||
1014 | } __packed; | ||
1015 | |||
1016 | /* | ||
1017 | * Entry Header: Common to All Entry Types | ||
1018 | */ | ||
1019 | |||
1020 | /* | ||
1021 | * Driver Code is for driver to write some info about the entry. | ||
1022 | * Currently not used. | ||
1023 | */ | ||
1024 | typedef struct qla82xx_md_entry_hdr { | ||
1025 | uint32_t entry_type; | ||
1026 | uint32_t entry_size; | ||
1027 | uint32_t entry_capture_size; | ||
1028 | struct { | ||
1029 | uint8_t entry_capture_mask; | ||
1030 | uint8_t entry_code; | ||
1031 | uint8_t driver_code; | ||
1032 | uint8_t driver_flags; | ||
1033 | } d_ctrl; | ||
1034 | } __packed qla82xx_md_entry_hdr_t; | ||
1035 | |||
1036 | /* | ||
1037 | * Read CRB entry header | ||
1038 | */ | ||
1039 | struct qla82xx_md_entry_crb { | ||
1040 | qla82xx_md_entry_hdr_t h; | ||
1041 | uint32_t addr; | ||
1042 | struct { | ||
1043 | uint8_t addr_stride; | ||
1044 | uint8_t state_index_a; | ||
1045 | uint16_t poll_timeout; | ||
1046 | } crb_strd; | ||
1047 | |||
1048 | uint32_t data_size; | ||
1049 | uint32_t op_count; | ||
1050 | |||
1051 | struct { | ||
1052 | uint8_t opcode; | ||
1053 | uint8_t state_index_v; | ||
1054 | uint8_t shl; | ||
1055 | uint8_t shr; | ||
1056 | } crb_ctrl; | ||
1057 | |||
1058 | uint32_t value_1; | ||
1059 | uint32_t value_2; | ||
1060 | uint32_t value_3; | ||
1061 | } __packed; | ||
1062 | |||
1063 | /* | ||
1064 | * Cache entry header | ||
1065 | */ | ||
1066 | struct qla82xx_md_entry_cache { | ||
1067 | qla82xx_md_entry_hdr_t h; | ||
1068 | |||
1069 | uint32_t tag_reg_addr; | ||
1070 | struct { | ||
1071 | uint16_t tag_value_stride; | ||
1072 | uint16_t init_tag_value; | ||
1073 | } addr_ctrl; | ||
1074 | |||
1075 | uint32_t data_size; | ||
1076 | uint32_t op_count; | ||
1077 | |||
1078 | uint32_t control_addr; | ||
1079 | struct { | ||
1080 | uint16_t write_value; | ||
1081 | uint8_t poll_mask; | ||
1082 | uint8_t poll_wait; | ||
1083 | } cache_ctrl; | ||
1084 | |||
1085 | uint32_t read_addr; | ||
1086 | struct { | ||
1087 | uint8_t read_addr_stride; | ||
1088 | uint8_t read_addr_cnt; | ||
1089 | uint16_t rsvd_1; | ||
1090 | } read_ctrl; | ||
1091 | } __packed; | ||
1092 | |||
1093 | /* | ||
1094 | * Read OCM | ||
1095 | */ | ||
1096 | struct qla82xx_md_entry_rdocm { | ||
1097 | qla82xx_md_entry_hdr_t h; | ||
1098 | |||
1099 | uint32_t rsvd_0; | ||
1100 | uint32_t rsvd_1; | ||
1101 | uint32_t data_size; | ||
1102 | uint32_t op_count; | ||
1103 | |||
1104 | uint32_t rsvd_2; | ||
1105 | uint32_t rsvd_3; | ||
1106 | uint32_t read_addr; | ||
1107 | uint32_t read_addr_stride; | ||
1108 | uint32_t read_addr_cntrl; | ||
1109 | } __packed; | ||
1110 | |||
1111 | /* | ||
1112 | * Read Memory | ||
1113 | */ | ||
1114 | struct qla82xx_md_entry_rdmem { | ||
1115 | qla82xx_md_entry_hdr_t h; | ||
1116 | uint32_t rsvd[6]; | ||
1117 | uint32_t read_addr; | ||
1118 | uint32_t read_data_size; | ||
1119 | } __packed; | ||
1120 | |||
1121 | /* | ||
1122 | * Read ROM | ||
1123 | */ | ||
1124 | struct qla82xx_md_entry_rdrom { | ||
1125 | qla82xx_md_entry_hdr_t h; | ||
1126 | uint32_t rsvd[6]; | ||
1127 | uint32_t read_addr; | ||
1128 | uint32_t read_data_size; | ||
1129 | } __packed; | ||
1130 | |||
1131 | struct qla82xx_md_entry_mux { | ||
1132 | qla82xx_md_entry_hdr_t h; | ||
1133 | |||
1134 | uint32_t select_addr; | ||
1135 | uint32_t rsvd_0; | ||
1136 | uint32_t data_size; | ||
1137 | uint32_t op_count; | ||
1138 | |||
1139 | uint32_t select_value; | ||
1140 | uint32_t select_value_stride; | ||
1141 | uint32_t read_addr; | ||
1142 | uint32_t rsvd_1; | ||
1143 | } __packed; | ||
1144 | |||
1145 | struct qla82xx_md_entry_queue { | ||
1146 | qla82xx_md_entry_hdr_t h; | ||
1147 | |||
1148 | uint32_t select_addr; | ||
1149 | struct { | ||
1150 | uint16_t queue_id_stride; | ||
1151 | uint16_t rsvd_0; | ||
1152 | } q_strd; | ||
1153 | |||
1154 | uint32_t data_size; | ||
1155 | uint32_t op_count; | ||
1156 | uint32_t rsvd_1; | ||
1157 | uint32_t rsvd_2; | ||
1158 | |||
1159 | uint32_t read_addr; | ||
1160 | struct { | ||
1161 | uint8_t read_addr_stride; | ||
1162 | uint8_t read_addr_cnt; | ||
1163 | uint16_t rsvd_3; | ||
1164 | } rd_strd; | ||
1165 | } __packed; | ||
1166 | |||
1167 | #define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129 | ||
1168 | #define RQST_TMPLT_SIZE 0x0 | ||
1169 | #define RQST_TMPLT 0x1 | ||
1170 | #define MD_DIRECT_ROM_WINDOW 0x42110030 | ||
1171 | #define MD_DIRECT_ROM_READ_BASE 0x42150000 | ||
1172 | #define MD_MIU_TEST_AGT_CTRL 0x41000090 | ||
1173 | #define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 | ||
1174 | #define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 | ||
1175 | |||
1176 | static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, | ||
1177 | 0x410000B8, 0x410000BC }; | ||
1178 | |||
1179 | #define CRB_NIU_XG_PAUSE_CTL_P0 0x1 | ||
1180 | #define CRB_NIU_XG_PAUSE_CTL_P1 0x8 | ||
1181 | |||
1182 | #define qla82xx_get_temp_val(x) ((x) >> 16) | ||
1183 | #define qla82xx_get_temp_state(x) ((x) & 0xffff) | ||
1184 | #define qla82xx_encode_temp(val, state) (((val) << 16) | (state)) | ||
1185 | |||
1186 | /* | ||
1187 | * Temperature control. | ||
1188 | */ | ||
1189 | enum { | ||
1190 | QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */ | ||
1191 | QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */ | ||
1192 | QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */ | ||
1193 | }; | ||
1194 | #endif | 925 | #endif |
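
The temperature helpers removed in this hunk pack a reading and a state into one 32-bit word: the value lives in the upper 16 bits and the state (normal/warn/panic) in the lower 16, presumably matching whatever the register behind CRB_TEMP_STATE reports. A self-contained illustration of the encode/decode round trip:

    #include <stdint.h>
    #include <stdio.h>

    #define qla82xx_get_temp_val(x)         ((x) >> 16)
    #define qla82xx_get_temp_state(x)       ((x) & 0xffff)
    #define qla82xx_encode_temp(val, state) (((val) << 16) | (state))

    enum {
            QLA82XX_TEMP_NORMAL = 0x1,      /* normal operating range */
            QLA82XX_TEMP_WARN,              /* temperature getting high */
            QLA82XX_TEMP_PANIC              /* hardware has shut down */
    };

    int main(void)
    {
            uint32_t temp = qla82xx_encode_temp(85, QLA82XX_TEMP_WARN);

            printf("value=%u state=%u\n",
                   (unsigned)qla82xx_get_temp_val(temp),
                   (unsigned)qla82xx_get_temp_state(temp));
            return 0;
    }
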
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 10d23f8b703..1e69527f1e4 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -13,13 +13,12 @@ | |||
13 | #include <linux/mutex.h> | 13 | #include <linux/mutex.h> |
14 | #include <linux/kobject.h> | 14 | #include <linux/kobject.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | |||
16 | #include <scsi/scsi_tcq.h> | 17 | #include <scsi/scsi_tcq.h> |
17 | #include <scsi/scsicam.h> | 18 | #include <scsi/scsicam.h> |
18 | #include <scsi/scsi_transport.h> | 19 | #include <scsi/scsi_transport.h> |
19 | #include <scsi/scsi_transport_fc.h> | 20 | #include <scsi/scsi_transport_fc.h> |
20 | 21 | ||
21 | #include "qla_target.h" | ||
22 | |||
23 | /* | 22 | /* |
24 | * Driver version | 23 | * Driver version |
25 | */ | 24 | */ |
@@ -41,12 +40,6 @@ static struct kmem_cache *ctx_cachep; | |||
41 | */ | 40 | */ |
42 | int ql_errlev = ql_log_all; | 41 | int ql_errlev = ql_log_all; |
43 | 42 | ||
44 | static int ql2xenableclass2; | ||
45 | module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR); | ||
46 | MODULE_PARM_DESC(ql2xenableclass2, | ||
47 | "Specify if Class 2 operations are supported from the very " | ||
48 | "beginning. Default is 0 - class 2 not supported."); | ||
49 | |||
50 | int ql2xlogintimeout = 20; | 43 | int ql2xlogintimeout = 20; |
51 | module_param(ql2xlogintimeout, int, S_IRUGO); | 44 | module_param(ql2xlogintimeout, int, S_IRUGO); |
52 | MODULE_PARM_DESC(ql2xlogintimeout, | 45 | MODULE_PARM_DESC(ql2xlogintimeout, |
@@ -89,12 +82,7 @@ MODULE_PARM_DESC(ql2xextended_error_logging, | |||
89 | "\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n" | 82 | "\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n" |
90 | "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n" | 83 | "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n" |
91 | "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n" | 84 | "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n" |
92 | "\t\t0x00008000 - Verbose. 0x00004000 - Target.\n" | ||
93 | "\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n" | ||
94 | "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n" | 85 | "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n" |
95 | "\t\t0x1e400000 - Preferred value for capturing essential " | ||
96 | "debug information (equivalent to old " | ||
97 | "ql2xextended_error_logging=1).\n" | ||
98 | "\t\tDo LOGICAL OR of the value to enable more than one level"); | 86 | "\t\tDo LOGICAL OR of the value to enable more than one level"); |
99 | 87 | ||
100 | int ql2xshiftctondsd = 6; | 88 | int ql2xshiftctondsd = 6; |
@@ -115,11 +103,11 @@ MODULE_PARM_DESC(ql2xfdmienable, | |||
115 | static int ql2xmaxqdepth = MAX_Q_DEPTH; | 103 | static int ql2xmaxqdepth = MAX_Q_DEPTH; |
116 | module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR); | 104 | module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR); |
117 | MODULE_PARM_DESC(ql2xmaxqdepth, | 105 | MODULE_PARM_DESC(ql2xmaxqdepth, |
118 | "Maximum queue depth to set for each LUN. " | 106 | "Maximum queue depth to report for target devices."); |
119 | "Default is 32."); | ||
120 | 107 | ||
121 | int ql2xenabledif = 2; | 108 | /* Do not change the value of this after module load */ |
122 | module_param(ql2xenabledif, int, S_IRUGO); | 109 | int ql2xenabledif = 0; |
110 | module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR); | ||
123 | MODULE_PARM_DESC(ql2xenabledif, | 111 | MODULE_PARM_DESC(ql2xenabledif, |
124 | " Enable T10-CRC-DIF " | 112 | " Enable T10-CRC-DIF " |
125 | " Default is 0 - No DIF Support. 1 - Enable it" | 113 | " Default is 0 - No DIF Support. 1 - Enable it" |
@@ -155,7 +143,7 @@ MODULE_PARM_DESC(ql2xmultique_tag, | |||
155 | "Set it to 1 to turn on the cpu affinity."); | 143 | "Set it to 1 to turn on the cpu affinity."); |
156 | 144 | ||
157 | int ql2xfwloadbin; | 145 | int ql2xfwloadbin; |
158 | module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR); | 146 | module_param(ql2xfwloadbin, int, S_IRUGO); |
159 | MODULE_PARM_DESC(ql2xfwloadbin, | 147 | MODULE_PARM_DESC(ql2xfwloadbin, |
160 | "Option to specify location from which to load ISP firmware:.\n" | 148 | "Option to specify location from which to load ISP firmware:.\n" |
161 | " 2 -- load firmware via the request_firmware() (hotplug).\n" | 149 | " 2 -- load firmware via the request_firmware() (hotplug).\n" |
@@ -170,11 +158,11 @@ MODULE_PARM_DESC(ql2xetsenable, | |||
170 | "Default is 0 - skip ETS enablement."); | 158 | "Default is 0 - skip ETS enablement."); |
171 | 159 | ||
172 | int ql2xdbwr = 1; | 160 | int ql2xdbwr = 1; |
173 | module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR); | 161 | module_param(ql2xdbwr, int, S_IRUGO); |
174 | MODULE_PARM_DESC(ql2xdbwr, | 162 | MODULE_PARM_DESC(ql2xdbwr, |
175 | "Option to specify scheme for request queue posting.\n" | 163 | "Option to specify scheme for request queue posting.\n" |
176 | " 0 -- Regular doorbell.\n" | 164 | " 0 -- Regular doorbell.\n" |
177 | " 1 -- CAMRAM doorbell (faster).\n"); | 165 | " 1 -- CAMRAM doorbell (faster).\n"); |
178 | 166 | ||
179 | int ql2xtargetreset = 1; | 167 | int ql2xtargetreset = 1; |
180 | module_param(ql2xtargetreset, int, S_IRUGO); | 168 | module_param(ql2xtargetreset, int, S_IRUGO); |
@@ -195,11 +183,11 @@ MODULE_PARM_DESC(ql2xasynctmfenable, | |||
195 | "Default is 0 - Issue TM IOCBs via mailbox mechanism."); | 183 | "Default is 0 - Issue TM IOCBs via mailbox mechanism."); |
196 | 184 | ||
197 | int ql2xdontresethba; | 185 | int ql2xdontresethba; |
198 | module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR); | 186 | module_param(ql2xdontresethba, int, S_IRUGO); |
199 | MODULE_PARM_DESC(ql2xdontresethba, | 187 | MODULE_PARM_DESC(ql2xdontresethba, |
200 | "Option to specify reset behaviour.\n" | 188 | "Option to specify reset behaviour.\n" |
201 | " 0 (Default) -- Reset on failure.\n" | 189 | " 0 (Default) -- Reset on failure.\n" |
202 | " 1 -- Do not reset on failure.\n"); | 190 | " 1 -- Do not reset on failure.\n"); |
203 | 191 | ||
204 | uint ql2xmaxlun = MAX_LUNS; | 192 | uint ql2xmaxlun = MAX_LUNS; |
205 | module_param(ql2xmaxlun, uint, S_IRUGO); | 193 | module_param(ql2xmaxlun, uint, S_IRUGO); |
@@ -207,19 +195,6 @@ MODULE_PARM_DESC(ql2xmaxlun, | |||
207 | "Defines the maximum LU number to register with the SCSI " | 195 | "Defines the maximum LU number to register with the SCSI " |
208 | "midlayer. Default is 65535."); | 196 | "midlayer. Default is 65535."); |
209 | 197 | ||
210 | int ql2xmdcapmask = 0x1F; | ||
211 | module_param(ql2xmdcapmask, int, S_IRUGO); | ||
212 | MODULE_PARM_DESC(ql2xmdcapmask, | ||
213 | "Set the Minidump driver capture mask level. " | ||
214 | "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F."); | ||
215 | |||
216 | int ql2xmdenable = 1; | ||
217 | module_param(ql2xmdenable, int, S_IRUGO); | ||
218 | MODULE_PARM_DESC(ql2xmdenable, | ||
219 | "Enable/disable MiniDump. " | ||
220 | "0 - MiniDump disabled. " | ||
221 | "1 (Default) - MiniDump enabled."); | ||
222 | |||
223 | /* | 198 | /* |
224 | * SCSI host template entry points | 199 | * SCSI host template entry points |
225 | */ | 200 | */ |
@@ -264,8 +239,6 @@ struct scsi_host_template qla2xxx_driver_template = { | |||
264 | 239 | ||
265 | .max_sectors = 0xFFFF, | 240 | .max_sectors = 0xFFFF, |
266 | .shost_attrs = qla2x00_host_attrs, | 241 | .shost_attrs = qla2x00_host_attrs, |
267 | |||
268 | .supported_mode = MODE_INITIATOR, | ||
269 | }; | 242 | }; |
270 | 243 | ||
271 | static struct scsi_transport_template *qla2xxx_transport_template = NULL; | 244 | static struct scsi_transport_template *qla2xxx_transport_template = NULL; |
@@ -315,10 +288,10 @@ static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t, | |||
315 | struct req_que **, struct rsp_que **); | 288 | struct req_que **, struct rsp_que **); |
316 | static void qla2x00_free_fw_dump(struct qla_hw_data *); | 289 | static void qla2x00_free_fw_dump(struct qla_hw_data *); |
317 | static void qla2x00_mem_free(struct qla_hw_data *); | 290 | static void qla2x00_mem_free(struct qla_hw_data *); |
291 | static void qla2x00_sp_free_dma(srb_t *); | ||
318 | 292 | ||
319 | /* -------------------------------------------------------------------------- */ | 293 | /* -------------------------------------------------------------------------- */ |
320 | static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, | 294 | static int qla2x00_alloc_queues(struct qla_hw_data *ha) |
321 | struct rsp_que *rsp) | ||
322 | { | 295 | { |
323 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); | 296 | scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); |
324 | ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues, | 297 | ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues, |
@@ -336,12 +309,6 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, | |||
336 | "Unable to allocate memory for response queue ptrs.\n"); | 309 | "Unable to allocate memory for response queue ptrs.\n"); |
337 | goto fail_rsp_map; | 310 | goto fail_rsp_map; |
338 | } | 311 | } |
339 | /* | ||
340 | * Make sure we record at least the request and response queue zero in | ||
341 | * case we need to free them if part of the probe fails. | ||
342 | */ | ||
343 | ha->rsp_q_map[0] = rsp; | ||
344 | ha->req_q_map[0] = req; | ||
345 | set_bit(0, ha->rsp_qid_map); | 312 | set_bit(0, ha->rsp_qid_map); |
346 | set_bit(0, ha->req_qid_map); | 313 | set_bit(0, ha->req_qid_map); |
347 | return 1; | 314 | return 1; |
@@ -443,7 +410,6 @@ fail2: | |||
443 | qla25xx_delete_queues(vha); | 410 | qla25xx_delete_queues(vha); |
444 | destroy_workqueue(ha->wq); | 411 | destroy_workqueue(ha->wq); |
445 | ha->wq = NULL; | 412 | ha->wq = NULL; |
446 | vha->req = ha->req_q_map[0]; | ||
447 | fail: | 413 | fail: |
448 | ha->mqenable = 0; | 414 | ha->mqenable = 0; |
449 | kfree(ha->req_q_map); | 415 | kfree(ha->req_q_map); |
@@ -484,32 +450,24 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str) | |||
484 | uint32_t pci_bus; | 450 | uint32_t pci_bus; |
485 | int pcie_reg; | 451 | int pcie_reg; |
486 | 452 | ||
487 | pcie_reg = pci_pcie_cap(ha->pdev); | 453 | pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); |
488 | if (pcie_reg) { | 454 | if (pcie_reg) { |
489 | char lwstr[6]; | 455 | char lwstr[6]; |
490 | uint16_t pcie_lstat, lspeed, lwidth; | 456 | uint16_t pcie_lstat, lspeed, lwidth; |
491 | 457 | ||
492 | pcie_reg += PCI_EXP_LNKCAP; | 458 | pcie_reg += 0x12; |
493 | pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat); | 459 | pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat); |
494 | lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3); | 460 | lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3); |
495 | lwidth = (pcie_lstat & | 461 | lwidth = (pcie_lstat & |
496 | (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4; | 462 | (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4; |
497 | 463 | ||
498 | strcpy(str, "PCIe ("); | 464 | strcpy(str, "PCIe ("); |
499 | switch (lspeed) { | 465 | if (lspeed == 1) |
500 | case 1: | ||
501 | strcat(str, "2.5GT/s "); | 466 | strcat(str, "2.5GT/s "); |
502 | break; | 467 | else if (lspeed == 2) |
503 | case 2: | ||
504 | strcat(str, "5.0GT/s "); | 468 | strcat(str, "5.0GT/s "); |
505 | break; | 469 | else |
506 | case 3: | ||
507 | strcat(str, "8.0GT/s "); | ||
508 | break; | ||
509 | default: | ||
510 | strcat(str, "<unknown> "); | 470 | strcat(str, "<unknown> "); |
511 | break; | ||
512 | } | ||
513 | snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth); | 471 | snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth); |
514 | strcat(str, lwstr); | 472 | strcat(str, lwstr); |
515 | 473 | ||
@@ -584,75 +542,28 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str) | |||
584 | return str; | 542 | return str; |
585 | } | 543 | } |
586 | 544 | ||
587 | void | 545 | static inline srb_t * |
588 | qla2x00_sp_free_dma(void *vha, void *ptr) | 546 | qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport, |
547 | struct scsi_cmnd *cmd) | ||
589 | { | 548 | { |
590 | srb_t *sp = (srb_t *)ptr; | 549 | srb_t *sp; |
591 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); | 550 | struct qla_hw_data *ha = vha->hw; |
592 | struct qla_hw_data *ha = sp->fcport->vha->hw; | ||
593 | void *ctx = GET_CMD_CTX_SP(sp); | ||
594 | |||
595 | if (sp->flags & SRB_DMA_VALID) { | ||
596 | scsi_dma_unmap(cmd); | ||
597 | sp->flags &= ~SRB_DMA_VALID; | ||
598 | } | ||
599 | |||
600 | if (sp->flags & SRB_CRC_PROT_DMA_VALID) { | ||
601 | dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), | ||
602 | scsi_prot_sg_count(cmd), cmd->sc_data_direction); | ||
603 | sp->flags &= ~SRB_CRC_PROT_DMA_VALID; | ||
604 | } | ||
605 | |||
606 | if (sp->flags & SRB_CRC_CTX_DSD_VALID) { | ||
607 | /* List assured to be having elements */ | ||
608 | qla2x00_clean_dsd_pool(ha, sp); | ||
609 | sp->flags &= ~SRB_CRC_CTX_DSD_VALID; | ||
610 | } | ||
611 | |||
612 | if (sp->flags & SRB_CRC_CTX_DMA_VALID) { | ||
613 | dma_pool_free(ha->dl_dma_pool, ctx, | ||
614 | ((struct crc_context *)ctx)->crc_ctx_dma); | ||
615 | sp->flags &= ~SRB_CRC_CTX_DMA_VALID; | ||
616 | } | ||
617 | 551 | ||
618 | if (sp->flags & SRB_FCP_CMND_DMA_VALID) { | 552 | sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); |
619 | struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx; | 553 | if (!sp) { |
620 | 554 | ql_log(ql_log_warn, vha, 0x3006, | |
621 | dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, | 555 | "Memory allocation failed for sp.\n"); |
622 | ctx1->fcp_cmnd_dma); | 556 | return sp; |
623 | list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); | ||
624 | ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt; | ||
625 | ha->gbl_dsd_avail += ctx1->dsd_use_cnt; | ||
626 | mempool_free(ctx1, ha->ctx_mempool); | ||
627 | ctx1 = NULL; | ||
628 | } | 557 | } |
629 | 558 | ||
630 | CMD_SP(cmd) = NULL; | 559 | atomic_set(&sp->ref_count, 1); |
631 | mempool_free(sp, ha->srb_mempool); | 560 | sp->fcport = fcport; |
632 | } | 561 | sp->cmd = cmd; |
633 | 562 | sp->flags = 0; | |
634 | static void | 563 | CMD_SP(cmd) = (void *)sp; |
635 | qla2x00_sp_compl(void *data, void *ptr, int res) | 564 | sp->ctx = NULL; |
636 | { | ||
637 | struct qla_hw_data *ha = (struct qla_hw_data *)data; | ||
638 | srb_t *sp = (srb_t *)ptr; | ||
639 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); | ||
640 | |||
641 | cmd->result = res; | ||
642 | |||
643 | if (atomic_read(&sp->ref_count) == 0) { | ||
644 | ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015, | ||
645 | "SP reference-count to ZERO -- sp=%p cmd=%p.\n", | ||
646 | sp, GET_CMD_SP(sp)); | ||
647 | if (ql2xextended_error_logging & ql_dbg_io) | ||
648 | BUG(); | ||
649 | return; | ||
650 | } | ||
651 | if (!atomic_dec_and_test(&sp->ref_count)) | ||
652 | return; | ||
653 | 565 | ||
654 | qla2x00_sp_free_dma(ha, sp); | 566 | return sp; |
655 | cmd->scsi_done(cmd); | ||
656 | } | 567 | } |
657 | 568 | ||
658 | static int | 569 | static int |
@@ -668,12 +579,12 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) | |||
668 | 579 | ||
669 | if (ha->flags.eeh_busy) { | 580 | if (ha->flags.eeh_busy) { |
670 | if (ha->flags.pci_channel_io_perm_failure) { | 581 | if (ha->flags.pci_channel_io_perm_failure) { |
671 | ql_dbg(ql_dbg_aer, vha, 0x9010, | 582 | ql_dbg(ql_dbg_io, vha, 0x3001, |
672 | "PCI Channel IO permanent failure, exiting " | 583 | "PCI Channel IO permanent failure, exiting " |
673 | "cmd=%p.\n", cmd); | 584 | "cmd=%p.\n", cmd); |
674 | cmd->result = DID_NO_CONNECT << 16; | 585 | cmd->result = DID_NO_CONNECT << 16; |
675 | } else { | 586 | } else { |
676 | ql_dbg(ql_dbg_aer, vha, 0x9011, | 587 | ql_dbg(ql_dbg_io, vha, 0x3002, |
677 | "EEH_Busy, Requeuing the cmd=%p.\n", cmd); | 588 | "EEH_Busy, Requeuing the cmd=%p.\n", cmd); |
678 | cmd->result = DID_REQUEUE << 16; | 589 | cmd->result = DID_REQUEUE << 16; |
679 | } | 590 | } |
@@ -683,7 +594,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) | |||
683 | rval = fc_remote_port_chkready(rport); | 594 | rval = fc_remote_port_chkready(rport); |
684 | if (rval) { | 595 | if (rval) { |
685 | cmd->result = rval; | 596 | cmd->result = rval; |
686 | ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003, | 597 | ql_dbg(ql_dbg_io, vha, 0x3003, |
687 | "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", | 598 | "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", |
688 | cmd, rval); | 599 | cmd, rval); |
689 | goto qc24_fail_command; | 600 | goto qc24_fail_command; |
@@ -697,12 +608,6 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) | |||
697 | cmd->result = DID_NO_CONNECT << 16; | 608 | cmd->result = DID_NO_CONNECT << 16; |
698 | goto qc24_fail_command; | 609 | goto qc24_fail_command; |
699 | } | 610 | } |
700 | |||
701 | if (!fcport) { | ||
702 | cmd->result = DID_NO_CONNECT << 16; | ||
703 | goto qc24_fail_command; | ||
704 | } | ||
705 | |||
706 | if (atomic_read(&fcport->state) != FCS_ONLINE) { | 611 | if (atomic_read(&fcport->state) != FCS_ONLINE) { |
707 | if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || | 612 | if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || |
708 | atomic_read(&base_vha->loop_state) == LOOP_DEAD) { | 613 | atomic_read(&base_vha->loop_state) == LOOP_DEAD) { |
@@ -716,20 +621,13 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) | |||
716 | goto qc24_target_busy; | 621 | goto qc24_target_busy; |
717 | } | 622 | } |
718 | 623 | ||
719 | sp = qla2x00_get_sp(base_vha, fcport, GFP_ATOMIC); | 624 | sp = qla2x00_get_new_sp(base_vha, fcport, cmd); |
720 | if (!sp) | 625 | if (!sp) |
721 | goto qc24_host_busy; | 626 | goto qc24_host_busy; |
722 | 627 | ||
723 | sp->u.scmd.cmd = cmd; | ||
724 | sp->type = SRB_SCSI_CMD; | ||
725 | atomic_set(&sp->ref_count, 1); | ||
726 | CMD_SP(cmd) = (void *)sp; | ||
727 | sp->free = qla2x00_sp_free_dma; | ||
728 | sp->done = qla2x00_sp_compl; | ||
729 | |||
730 | rval = ha->isp_ops->start_scsi(sp); | 628 | rval = ha->isp_ops->start_scsi(sp); |
731 | if (rval != QLA_SUCCESS) { | 629 | if (rval != QLA_SUCCESS) { |
732 | ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013, | 630 | ql_dbg(ql_dbg_io, vha, 0x3013, |
733 | "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); | 631 | "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); |
734 | goto qc24_host_busy_free_sp; | 632 | goto qc24_host_busy_free_sp; |
735 | } | 633 | } |
@@ -737,7 +635,8 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) | |||
737 | return 0; | 635 | return 0; |
738 | 636 | ||
739 | qc24_host_busy_free_sp: | 637 | qc24_host_busy_free_sp: |
740 | qla2x00_sp_free_dma(ha, sp); | 638 | qla2x00_sp_free_dma(sp); |
639 | mempool_free(sp, ha->srb_mempool); | ||
741 | 640 | ||
742 | qc24_host_busy: | 641 | qc24_host_busy: |
743 | return SCSI_MLQUEUE_HOST_BUSY; | 642 | return SCSI_MLQUEUE_HOST_BUSY; |
@@ -902,6 +801,49 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha) | |||
902 | return return_status; | 801 | return return_status; |
903 | } | 802 | } |
904 | 803 | ||
804 | /* | ||
805 | * qla2x00_wait_for_loop_ready | ||
806 | * Wait up to MAX_LOOP_TIMEOUT (5 min) for the loop | ||
807 | * to reach the LOOP_READY state. | ||
808 | * Input: | ||
809 | * ha - pointer to host adapter structure | ||
810 | * | ||
811 | * Note: | ||
812 | * Does context switching; release any SPIN_LOCK | ||
813 | * held before calling this routine. | ||
814 | * | ||
815 | * | ||
816 | * Return: | ||
817 | * Success (LOOP_READY) : 0 | ||
818 | * Failed (LOOP_NOT_READY) : 1 | ||
819 | */ | ||
820 | static inline int | ||
821 | qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha) | ||
822 | { | ||
823 | int return_status = QLA_SUCCESS; | ||
824 | unsigned long loop_timeout ; | ||
825 | struct qla_hw_data *ha = vha->hw; | ||
826 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); | ||
827 | |||
828 | /* wait at most 5 min for the loop to be ready */ | ||
829 | loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ); | ||
830 | |||
831 | while ((!atomic_read(&base_vha->loop_down_timer) && | ||
832 | atomic_read(&base_vha->loop_state) == LOOP_DOWN) || | ||
833 | atomic_read(&base_vha->loop_state) != LOOP_READY) { | ||
834 | if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) { | ||
835 | return_status = QLA_FUNCTION_FAILED; | ||
836 | break; | ||
837 | } | ||
838 | msleep(1000); | ||
839 | if (time_after_eq(jiffies, loop_timeout)) { | ||
840 | return_status = QLA_FUNCTION_FAILED; | ||
841 | break; | ||
842 | } | ||
843 | } | ||
844 | return (return_status); | ||
845 | } | ||
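
qla2x00_wait_for_loop_ready(), reinstated by this patch, is a bounded poll: sleep one second per pass, bail out early if the loop goes dead, and give up after MAX_LOOP_TIMEOUT. A userspace model of the same shape, with the hardware state read stubbed out (the state names and read_loop_state() are illustrative only):

    #include <time.h>
    #include <unistd.h>

    enum loop_state { LOOP_DOWN, LOOP_DEAD, LOOP_READY };   /* illustrative */
    #define MAX_LOOP_TIMEOUT (5 * 60)                       /* seconds */

    static enum loop_state read_loop_state(void)
    {
            return LOOP_READY;      /* placeholder for the adapter's loop state */
    }

    static int wait_for_loop_ready(void)
    {
            time_t deadline = time(NULL) + MAX_LOOP_TIMEOUT;

            while (read_loop_state() != LOOP_READY) {
                    if (read_loop_state() == LOOP_DEAD)
                            return -1;      /* loop died: give up early */
                    sleep(1);
                    if (time(NULL) >= deadline)
                            return -1;      /* timed out */
            }
            return 0;
    }
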
846 | |||
905 | static void | 847 | static void |
906 | sp_get(struct srb *sp) | 848 | sp_get(struct srb *sp) |
907 | { | 849 | { |
@@ -934,10 +876,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) | |||
934 | int wait = 0; | 876 | int wait = 0; |
935 | struct qla_hw_data *ha = vha->hw; | 877 | struct qla_hw_data *ha = vha->hw; |
936 | 878 | ||
879 | ql_dbg(ql_dbg_taskm, vha, 0x8000, | ||
880 | "Entered %s for cmd=%p.\n", __func__, cmd); | ||
937 | if (!CMD_SP(cmd)) | 881 | if (!CMD_SP(cmd)) |
938 | return SUCCESS; | 882 | return SUCCESS; |
939 | 883 | ||
940 | ret = fc_block_scsi_eh(cmd); | 884 | ret = fc_block_scsi_eh(cmd); |
885 | ql_dbg(ql_dbg_taskm, vha, 0x8001, | ||
886 | "Return value of fc_block_scsi_eh=%d.\n", ret); | ||
941 | if (ret != 0) | 887 | if (ret != 0) |
942 | return ret; | 888 | return ret; |
943 | ret = SUCCESS; | 889 | ret = SUCCESS; |
@@ -953,25 +899,23 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) | |||
953 | } | 899 | } |
954 | 900 | ||
955 | ql_dbg(ql_dbg_taskm, vha, 0x8002, | 901 | ql_dbg(ql_dbg_taskm, vha, 0x8002, |
956 | "Aborting from RISC nexus=%ld:%d:%d sp=%p cmd=%p\n", | 902 | "Aborting sp=%p cmd=%p from RISC ", sp, cmd); |
957 | vha->host_no, id, lun, sp, cmd); | ||
958 | 903 | ||
959 | /* Get a reference to the sp and drop the lock.*/ | 904 | /* Get a reference to the sp and drop the lock.*/ |
960 | sp_get(sp); | 905 | sp_get(sp); |
961 | 906 | ||
962 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 907 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
963 | if (ha->isp_ops->abort_command(sp)) { | 908 | if (ha->isp_ops->abort_command(sp)) { |
964 | ret = FAILED; | ||
965 | ql_dbg(ql_dbg_taskm, vha, 0x8003, | 909 | ql_dbg(ql_dbg_taskm, vha, 0x8003, |
966 | "Abort command mbx failed cmd=%p.\n", cmd); | 910 | "Abort command mbx failed for cmd=%p.\n", cmd); |
967 | } else { | 911 | } else { |
968 | ql_dbg(ql_dbg_taskm, vha, 0x8004, | 912 | ql_dbg(ql_dbg_taskm, vha, 0x8004, |
969 | "Abort command mbx success cmd=%p.\n", cmd); | 913 | "Abort command mbx success.\n"); |
970 | wait = 1; | 914 | wait = 1; |
971 | } | 915 | } |
972 | 916 | ||
973 | spin_lock_irqsave(&ha->hardware_lock, flags); | 917 | spin_lock_irqsave(&ha->hardware_lock, flags); |
974 | sp->done(ha, sp, 0); | 918 | qla2x00_sp_compl(ha, sp); |
975 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 919 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
976 | 920 | ||
977 | /* Did the command return during mailbox execution? */ | 921 | /* Did the command return during mailbox execution? */ |
@@ -982,14 +926,13 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) | |||
982 | if (wait) { | 926 | if (wait) { |
983 | if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) { | 927 | if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) { |
984 | ql_log(ql_log_warn, vha, 0x8006, | 928 | ql_log(ql_log_warn, vha, 0x8006, |
985 | "Abort handler timed out cmd=%p.\n", cmd); | 929 | "Abort handler timed out for cmd=%p.\n", cmd); |
986 | ret = FAILED; | 930 | ret = FAILED; |
987 | } | 931 | } |
988 | } | 932 | } |
989 | 933 | ||
990 | ql_log(ql_log_info, vha, 0x801c, | 934 | ql_log(ql_log_info, vha, 0x801c, |
991 | "Abort command issued nexus=%ld:%d:%d -- %d %x.\n", | 935 | "Abort command issued -- %d %x.\n", wait, ret); |
992 | vha->host_no, id, lun, wait, ret); | ||
993 | 936 | ||
994 | return ret; | 937 | return ret; |
995 | } | 938 | } |
@@ -1003,7 +946,6 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, | |||
1003 | struct qla_hw_data *ha = vha->hw; | 946 | struct qla_hw_data *ha = vha->hw; |
1004 | struct req_que *req; | 947 | struct req_que *req; |
1005 | srb_t *sp; | 948 | srb_t *sp; |
1006 | struct scsi_cmnd *cmd; | ||
1007 | 949 | ||
1008 | status = QLA_SUCCESS; | 950 | status = QLA_SUCCESS; |
1009 | 951 | ||
@@ -1014,29 +956,28 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, | |||
1014 | sp = req->outstanding_cmds[cnt]; | 956 | sp = req->outstanding_cmds[cnt]; |
1015 | if (!sp) | 957 | if (!sp) |
1016 | continue; | 958 | continue; |
1017 | if (sp->type != SRB_SCSI_CMD) | 959 | if ((sp->ctx) && !IS_PROT_IO(sp)) |
1018 | continue; | 960 | continue; |
1019 | if (vha->vp_idx != sp->fcport->vha->vp_idx) | 961 | if (vha->vp_idx != sp->fcport->vha->vp_idx) |
1020 | continue; | 962 | continue; |
1021 | match = 0; | 963 | match = 0; |
1022 | cmd = GET_CMD_SP(sp); | ||
1023 | switch (type) { | 964 | switch (type) { |
1024 | case WAIT_HOST: | 965 | case WAIT_HOST: |
1025 | match = 1; | 966 | match = 1; |
1026 | break; | 967 | break; |
1027 | case WAIT_TARGET: | 968 | case WAIT_TARGET: |
1028 | match = cmd->device->id == t; | 969 | match = sp->cmd->device->id == t; |
1029 | break; | 970 | break; |
1030 | case WAIT_LUN: | 971 | case WAIT_LUN: |
1031 | match = (cmd->device->id == t && | 972 | match = (sp->cmd->device->id == t && |
1032 | cmd->device->lun == l); | 973 | sp->cmd->device->lun == l); |
1033 | break; | 974 | break; |
1034 | } | 975 | } |
1035 | if (!match) | 976 | if (!match) |
1036 | continue; | 977 | continue; |
1037 | 978 | ||
1038 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 979 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
1039 | status = qla2x00_eh_wait_on_command(cmd); | 980 | status = qla2x00_eh_wait_on_command(sp->cmd); |
1040 | spin_lock_irqsave(&ha->hardware_lock, flags); | 981 | spin_lock_irqsave(&ha->hardware_lock, flags); |
1041 | } | 982 | } |
1042 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 983 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
@@ -1060,15 +1001,19 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, | |||
1060 | int err; | 1001 | int err; |
1061 | 1002 | ||
1062 | if (!fcport) { | 1003 | if (!fcport) { |
1004 | ql_log(ql_log_warn, vha, 0x8007, | ||
1005 | "fcport is NULL.\n"); | ||
1063 | return FAILED; | 1006 | return FAILED; |
1064 | } | 1007 | } |
1065 | 1008 | ||
1066 | err = fc_block_scsi_eh(cmd); | 1009 | err = fc_block_scsi_eh(cmd); |
1010 | ql_dbg(ql_dbg_taskm, vha, 0x8008, | ||
1011 | "fc_block_scsi_eh ret=%d.\n", err); | ||
1067 | if (err != 0) | 1012 | if (err != 0) |
1068 | return err; | 1013 | return err; |
1069 | 1014 | ||
1070 | ql_log(ql_log_info, vha, 0x8009, | 1015 | ql_log(ql_log_info, vha, 0x8009, |
1071 | "%s RESET ISSUED nexus=%ld:%d:%d cmd=%p.\n", name, vha->host_no, | 1016 | "%s RESET ISSUED for id %d lun %d cmd=%p.\n", name, |
1072 | cmd->device->id, cmd->device->lun, cmd); | 1017 | cmd->device->id, cmd->device->lun, cmd); |
1073 | 1018 | ||
1074 | err = 0; | 1019 | err = 0; |
@@ -1077,6 +1022,12 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, | |||
1077 | "Wait for hba online failed for cmd=%p.\n", cmd); | 1022 | "Wait for hba online failed for cmd=%p.\n", cmd); |
1078 | goto eh_reset_failed; | 1023 | goto eh_reset_failed; |
1079 | } | 1024 | } |
1025 | err = 1; | ||
1026 | if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) { | ||
1027 | ql_log(ql_log_warn, vha, 0x800b, | ||
1028 | "Wait for loop ready failed for cmd=%p.\n", cmd); | ||
1029 | goto eh_reset_failed; | ||
1030 | } | ||
1080 | err = 2; | 1031 | err = 2; |
1081 | if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1) | 1032 | if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1) |
1082 | != QLA_SUCCESS) { | 1033 | != QLA_SUCCESS) { |
@@ -1088,21 +1039,20 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, | |||
1088 | if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, | 1039 | if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, |
1089 | cmd->device->lun, type) != QLA_SUCCESS) { | 1040 | cmd->device->lun, type) != QLA_SUCCESS) { |
1090 | ql_log(ql_log_warn, vha, 0x800d, | 1041 | ql_log(ql_log_warn, vha, 0x800d, |
1091 | "wait for pending cmds failed for cmd=%p.\n", cmd); | 1042 | "wait for peding cmds failed for cmd=%p.\n", cmd); |
1092 | goto eh_reset_failed; | 1043 | goto eh_reset_failed; |
1093 | } | 1044 | } |
1094 | 1045 | ||
1095 | ql_log(ql_log_info, vha, 0x800e, | 1046 | ql_log(ql_log_info, vha, 0x800e, |
1096 | "%s RESET SUCCEEDED nexus:%ld:%d:%d cmd=%p.\n", name, | 1047 | "%s RESET SUCCEEDED for id %d lun %d cmd=%p.\n", name, |
1097 | vha->host_no, cmd->device->id, cmd->device->lun, cmd); | 1048 | cmd->device->id, cmd->device->lun, cmd); |
1098 | 1049 | ||
1099 | return SUCCESS; | 1050 | return SUCCESS; |
1100 | 1051 | ||
1101 | eh_reset_failed: | 1052 | eh_reset_failed: |
1102 | ql_log(ql_log_info, vha, 0x800f, | 1053 | ql_log(ql_log_info, vha, 0x800f, |
1103 | "%s RESET FAILED: %s nexus=%ld:%d:%d cmd=%p.\n", name, | 1054 | "%s RESET FAILED: %s for id %d lun %d cmd=%p.\n", name, |
1104 | reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun, | 1055 | reset_errors[err], cmd->device->id, cmd->device->lun); |
1105 | cmd); | ||
1106 | return FAILED; | 1056 | return FAILED; |
1107 | } | 1057 | } |
1108 | 1058 | ||
@@ -1153,16 +1103,20 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) | |||
1153 | lun = cmd->device->lun; | 1103 | lun = cmd->device->lun; |
1154 | 1104 | ||
1155 | if (!fcport) { | 1105 | if (!fcport) { |
1106 | ql_log(ql_log_warn, vha, 0x8010, | ||
1107 | "fcport is NULL.\n"); | ||
1156 | return ret; | 1108 | return ret; |
1157 | } | 1109 | } |
1158 | 1110 | ||
1159 | ret = fc_block_scsi_eh(cmd); | 1111 | ret = fc_block_scsi_eh(cmd); |
1112 | ql_dbg(ql_dbg_taskm, vha, 0x8011, | ||
1113 | "fc_block_scsi_eh ret=%d.\n", ret); | ||
1160 | if (ret != 0) | 1114 | if (ret != 0) |
1161 | return ret; | 1115 | return ret; |
1162 | ret = FAILED; | 1116 | ret = FAILED; |
1163 | 1117 | ||
1164 | ql_log(ql_log_info, vha, 0x8012, | 1118 | ql_log(ql_log_info, vha, 0x8012, |
1165 | "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun); | 1119 | "BUS RESET ISSUED for id %d lun %d.\n", id, lun); |
1166 | 1120 | ||
1167 | if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { | 1121 | if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { |
1168 | ql_log(ql_log_fatal, vha, 0x8013, | 1122 | ql_log(ql_log_fatal, vha, 0x8013, |
@@ -1170,9 +1124,10 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) | |||
1170 | goto eh_bus_reset_done; | 1124 | goto eh_bus_reset_done; |
1171 | } | 1125 | } |
1172 | 1126 | ||
1173 | if (qla2x00_loop_reset(vha) == QLA_SUCCESS) | 1127 | if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) { |
1174 | ret = SUCCESS; | 1128 | if (qla2x00_loop_reset(vha) == QLA_SUCCESS) |
1175 | 1129 | ret = SUCCESS; | |
1130 | } | ||
1176 | if (ret == FAILED) | 1131 | if (ret == FAILED) |
1177 | goto eh_bus_reset_done; | 1132 | goto eh_bus_reset_done; |
1178 | 1133 | ||
@@ -1186,8 +1141,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) | |||
1186 | 1141 | ||
1187 | eh_bus_reset_done: | 1142 | eh_bus_reset_done: |
1188 | ql_log(ql_log_warn, vha, 0x802b, | 1143 | ql_log(ql_log_warn, vha, 0x802b, |
1189 | "BUS RESET %s nexus=%ld:%d:%d.\n", | 1144 | "BUS RESET %s.\n", (ret == FAILED) ? "FAILED" : "SUCCEDED"); |
1190 | (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun); | ||
1191 | 1145 | ||
1192 | return ret; | 1146 | return ret; |
1193 | } | 1147 | } |
@@ -1211,6 +1165,7 @@ static int | |||
1211 | qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) | 1165 | qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) |
1212 | { | 1166 | { |
1213 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); | 1167 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); |
1168 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; | ||
1214 | struct qla_hw_data *ha = vha->hw; | 1169 | struct qla_hw_data *ha = vha->hw; |
1215 | int ret = FAILED; | 1170 | int ret = FAILED; |
1216 | unsigned int id, lun; | 1171 | unsigned int id, lun; |
@@ -1219,12 +1174,34 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
1219 | id = cmd->device->id; | 1174 | id = cmd->device->id; |
1220 | lun = cmd->device->lun; | 1175 | lun = cmd->device->lun; |
1221 | 1176 | ||
1177 | if (!fcport) { | ||
1178 | ql_log(ql_log_warn, vha, 0x8016, | ||
1179 | "fcport is NULL.\n"); | ||
1180 | return ret; | ||
1181 | } | ||
1182 | |||
1183 | ret = fc_block_scsi_eh(cmd); | ||
1184 | ql_dbg(ql_dbg_taskm, vha, 0x8017, | ||
1185 | "fc_block_scsi_eh ret=%d.\n", ret); | ||
1186 | if (ret != 0) | ||
1187 | return ret; | ||
1188 | ret = FAILED; | ||
1189 | |||
1222 | ql_log(ql_log_info, vha, 0x8018, | 1190 | ql_log(ql_log_info, vha, 0x8018, |
1223 | "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun); | 1191 | "ADAPTER RESET ISSUED for id %d lun %d.\n", id, lun); |
1224 | 1192 | ||
1225 | if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS) | 1193 | if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS) |
1226 | goto eh_host_reset_lock; | 1194 | goto eh_host_reset_lock; |
1227 | 1195 | ||
1196 | /* | ||
1197 | * FIXME: the dpc thread may be active and processing a | ||
1198 | * loop_resync, so wait a while for it to complete before | ||
1199 | * issuing the big hammer. Otherwise it may cause I/O | ||
1200 | * failures, as the big hammer marks the devices as lost, | ||
1201 | * kicking off the port_down_timer while the dpc thread is | ||
1202 | * stuck waiting for the mailbox to complete. | ||
1203 | */ | ||
1204 | qla2x00_wait_for_loop_ready(vha); | ||
1228 | if (vha != base_vha) { | 1205 | if (vha != base_vha) { |
1229 | if (qla2x00_vp_abort_isp(vha)) | 1206 | if (qla2x00_vp_abort_isp(vha)) |
1230 | goto eh_host_reset_lock; | 1207 | goto eh_host_reset_lock; |
@@ -1261,9 +1238,8 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
1261 | ret = SUCCESS; | 1238 | ret = SUCCESS; |
1262 | 1239 | ||
1263 | eh_host_reset_lock: | 1240 | eh_host_reset_lock: |
1264 | ql_log(ql_log_info, vha, 0x8017, | 1241 | qla_printk(KERN_INFO, ha, "%s: reset %s.\n", __func__, |
1265 | "ADAPTER RESET %s nexus=%ld:%d:%d.\n", | 1242 | (ret == FAILED) ? "failed" : "succeeded"); |
1266 | (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun); | ||
1267 | 1243 | ||
1268 | return ret; | 1244 | return ret; |
1269 | } | 1245 | } |
@@ -1299,7 +1275,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) | |||
1299 | } | 1275 | } |
1300 | } | 1276 | } |
1301 | 1277 | ||
1302 | if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) { | 1278 | if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) { |
1303 | ret = qla2x00_full_login_lip(vha); | 1279 | ret = qla2x00_full_login_lip(vha); |
1304 | if (ret != QLA_SUCCESS) { | 1280 | if (ret != QLA_SUCCESS) { |
1305 | ql_dbg(ql_dbg_taskm, vha, 0x802d, | 1281 | ql_dbg(ql_dbg_taskm, vha, 0x802d, |
@@ -1308,13 +1284,16 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) | |||
1308 | atomic_set(&vha->loop_state, LOOP_DOWN); | 1284 | atomic_set(&vha->loop_state, LOOP_DOWN); |
1309 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); | 1285 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); |
1310 | qla2x00_mark_all_devices_lost(vha, 0); | 1286 | qla2x00_mark_all_devices_lost(vha, 0); |
1287 | qla2x00_wait_for_loop_ready(vha); | ||
1311 | } | 1288 | } |
1312 | 1289 | ||
1313 | if (ha->flags.enable_lip_reset) { | 1290 | if (ha->flags.enable_lip_reset) { |
1314 | ret = qla2x00_lip_reset(vha); | 1291 | ret = qla2x00_lip_reset(vha); |
1315 | if (ret != QLA_SUCCESS) | 1292 | if (ret != QLA_SUCCESS) { |
1316 | ql_dbg(ql_dbg_taskm, vha, 0x802e, | 1293 | ql_dbg(ql_dbg_taskm, vha, 0x802e, |
1317 | "lip_reset failed (%d).\n", ret); | 1294 | "lip_reset failed (%d).\n", ret); |
1295 | } else | ||
1296 | qla2x00_wait_for_loop_ready(vha); | ||
1318 | } | 1297 | } |
1319 | 1298 | ||
1320 | /* Issue marker command only when we are going to start the I/O */ | 1299 | /* Issue marker command only when we are going to start the I/O */ |
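The loop-reset hunk above reintroduces a "reset, then wait for the loop to settle" pattern: after a failed full-login LIP the devices are marked lost and the loop is waited on, and after a successful LIP reset the wait happens before the marker re-enables I/O. A rough stand-alone illustration of that sequencing, with invented helper names throughout, is:

/* Illustrative sequencing only: every step is a stub. The point is that the
 * post-patch code waits for the loop after each reset primitive before the
 * marker that re-enables I/O is issued. */
#include <stdbool.h>
#include <stdio.h>

static bool full_login_lip(void) { return true; }  /* true: LIP succeeded  */
static bool lip_reset(void)      { return true; }
static void mark_devices_lost(void) { puts("devices marked lost"); }
static void wait_for_loop(void)     { puts("waiting for loop");    }
static void issue_marker(void)      { puts("marker: restart I/O"); }

static void do_loop_reset(bool enable_full_login, bool enable_lip_reset)
{
	if (enable_full_login && !full_login_lip()) {
		mark_devices_lost();
		wait_for_loop();        /* settle before trying anything else */
	}

	if (enable_lip_reset && lip_reset())
		wait_for_loop();        /* settle before restarting I/O       */

	issue_marker();
}

int main(void)
{
	do_loop_reset(true, true);
	return 0;
}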
@@ -1329,6 +1308,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | |||
1329 | int que, cnt; | 1308 | int que, cnt; |
1330 | unsigned long flags; | 1309 | unsigned long flags; |
1331 | srb_t *sp; | 1310 | srb_t *sp; |
1311 | struct srb_ctx *ctx; | ||
1332 | struct qla_hw_data *ha = vha->hw; | 1312 | struct qla_hw_data *ha = vha->hw; |
1333 | struct req_que *req; | 1313 | struct req_que *req; |
1334 | 1314 | ||
@@ -1341,7 +1321,31 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | |||
1341 | sp = req->outstanding_cmds[cnt]; | 1321 | sp = req->outstanding_cmds[cnt]; |
1342 | if (sp) { | 1322 | if (sp) { |
1343 | req->outstanding_cmds[cnt] = NULL; | 1323 | req->outstanding_cmds[cnt] = NULL; |
1344 | sp->done(vha, sp, res); | 1324 | if (!sp->ctx || |
1325 | (sp->flags & SRB_FCP_CMND_DMA_VALID) || | ||
1326 | IS_PROT_IO(sp)) { | ||
1327 | sp->cmd->result = res; | ||
1328 | qla2x00_sp_compl(ha, sp); | ||
1329 | } else { | ||
1330 | ctx = sp->ctx; | ||
1331 | if (ctx->type == SRB_ELS_CMD_RPT || | ||
1332 | ctx->type == SRB_ELS_CMD_HST || | ||
1333 | ctx->type == SRB_CT_CMD) { | ||
1334 | struct fc_bsg_job *bsg_job = | ||
1335 | ctx->u.bsg_job; | ||
1336 | if (bsg_job->request->msgcode | ||
1337 | == FC_BSG_HST_CT) | ||
1338 | kfree(sp->fcport); | ||
1339 | bsg_job->req->errors = 0; | ||
1340 | bsg_job->reply->result = res; | ||
1341 | bsg_job->job_done(bsg_job); | ||
1342 | kfree(sp->ctx); | ||
1343 | mempool_free(sp, | ||
1344 | ha->srb_mempool); | ||
1345 | } else { | ||
1346 | ctx->u.iocb_cmd->free(sp); | ||
1347 | } | ||
1348 | } | ||
1345 | } | 1349 | } |
1346 | } | 1350 | } |
1347 | } | 1351 | } |
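The abort-all-commands hunk swaps a single sp->done() completion callback (left) for an explicit dispatch on the SRB context type (right): BSG-backed ELS/CT jobs are completed through their job_done handler and freed by hand, while other context-carrying SRBs use their per-IOCB free routine. The compact model below uses invented types to show that dispatch shape only; it is not the driver's data structure.

/* Tagged-dispatch model of the restored abort path. Types and names are
 * illustrative; the real driver switches on sp->ctx->type. */
#include <stdio.h>

enum srb_kind { SRB_SCSI, SRB_BSG, SRB_IOCB };

struct srb {
	enum srb_kind kind;
	void (*iocb_free)(struct srb *);   /* used only for SRB_IOCB */
};

static void complete_scsi(struct srb *sp, int res)
{
	(void)sp;
	printf("scsi command completed with result %d\n", res);
}

static void complete_bsg(struct srb *sp, int res)
{
	(void)sp;
	printf("bsg job_done called with result %d, srb freed\n", res);
}

static void free_iocb(struct srb *sp)
{
	(void)sp;
	puts("iocb-specific free routine");
}

static void abort_one(struct srb *sp, int res)
{
	switch (sp->kind) {
	case SRB_SCSI:
		complete_scsi(sp, res);    /* normal SCSI completion path */
		break;
	case SRB_BSG:
		complete_bsg(sp, res);     /* ELS/CT pass-through jobs    */
		break;
	case SRB_IOCB:
		sp->iocb_free(sp);         /* everything else             */
		break;
	}
}

int main(void)
{
	struct srb a = { SRB_SCSI, NULL };
	struct srb b = { SRB_BSG,  NULL };
	struct srb c = { SRB_IOCB, free_iocb };

	abort_one(&a, 0x10000);
	abort_one(&b, 0x10000);
	abort_one(&c, 0x10000);
	return 0;
}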
@@ -1367,9 +1371,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev) | |||
1367 | scsi_qla_host_t *vha = shost_priv(sdev->host); | 1371 | scsi_qla_host_t *vha = shost_priv(sdev->host); |
1368 | struct req_que *req = vha->req; | 1372 | struct req_que *req = vha->req; |
1369 | 1373 | ||
1370 | if (IS_T10_PI_CAPABLE(vha->hw)) | ||
1371 | blk_queue_update_dma_alignment(sdev->request_queue, 0x7); | ||
1372 | |||
1373 | if (sdev->tagged_supported) | 1374 | if (sdev->tagged_supported) |
1374 | scsi_activate_tcq(sdev, req->max_q_depth); | 1375 | scsi_activate_tcq(sdev, req->max_q_depth); |
1375 | else | 1376 | else |
@@ -1391,8 +1392,10 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth) | |||
1391 | return; | 1392 | return; |
1392 | 1393 | ||
1393 | ql_dbg(ql_dbg_io, fcport->vha, 0x3029, | 1394 | ql_dbg(ql_dbg_io, fcport->vha, 0x3029, |
1394 | "Queue depth adjusted-down to %d for nexus=%ld:%d:%d.\n", | 1395 | "Queue depth adjusted-down " |
1395 | sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun); | 1396 | "to %d for scsi(%ld:%d:%d:%d).\n", |
1397 | sdev->queue_depth, fcport->vha->host_no, | ||
1398 | sdev->channel, sdev->id, sdev->lun); | ||
1396 | } | 1399 | } |
1397 | 1400 | ||
1398 | static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth) | 1401 | static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth) |
@@ -1414,8 +1417,10 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth) | |||
1414 | scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth); | 1417 | scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth); |
1415 | 1418 | ||
1416 | ql_dbg(ql_dbg_io, vha, 0x302a, | 1419 | ql_dbg(ql_dbg_io, vha, 0x302a, |
1417 | "Queue depth adjusted-up to %d for nexus=%ld:%d:%d.\n", | 1420 | "Queue depth adjusted-up to %d for " |
1418 | sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun); | 1421 | "scsi(%ld:%d:%d:%d).\n", |
1422 | sdev->queue_depth, fcport->vha->host_no, | ||
1423 | sdev->channel, sdev->id, sdev->lun); | ||
1419 | } | 1424 | } |
1420 | 1425 | ||
1421 | static int | 1426 | static int |
@@ -1539,205 +1544,6 @@ qla24xx_disable_intrs(struct qla_hw_data *ha) | |||
1539 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1544 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
1540 | } | 1545 | } |
1541 | 1546 | ||
1542 | static int | ||
1543 | qla2x00_iospace_config(struct qla_hw_data *ha) | ||
1544 | { | ||
1545 | resource_size_t pio; | ||
1546 | uint16_t msix; | ||
1547 | int cpus; | ||
1548 | |||
1549 | if (pci_request_selected_regions(ha->pdev, ha->bars, | ||
1550 | QLA2XXX_DRIVER_NAME)) { | ||
1551 | ql_log_pci(ql_log_fatal, ha->pdev, 0x0011, | ||
1552 | "Failed to reserve PIO/MMIO regions (%s), aborting.\n", | ||
1553 | pci_name(ha->pdev)); | ||
1554 | goto iospace_error_exit; | ||
1555 | } | ||
1556 | if (!(ha->bars & 1)) | ||
1557 | goto skip_pio; | ||
1558 | |||
1559 | /* We only need PIO for Flash operations on ISP2312 v2 chips. */ | ||
1560 | pio = pci_resource_start(ha->pdev, 0); | ||
1561 | if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { | ||
1562 | if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { | ||
1563 | ql_log_pci(ql_log_warn, ha->pdev, 0x0012, | ||
1564 | "Invalid pci I/O region size (%s).\n", | ||
1565 | pci_name(ha->pdev)); | ||
1566 | pio = 0; | ||
1567 | } | ||
1568 | } else { | ||
1569 | ql_log_pci(ql_log_warn, ha->pdev, 0x0013, | ||
1570 | "Region #0 no a PIO resource (%s).\n", | ||
1571 | pci_name(ha->pdev)); | ||
1572 | pio = 0; | ||
1573 | } | ||
1574 | ha->pio_address = pio; | ||
1575 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014, | ||
1576 | "PIO address=%llu.\n", | ||
1577 | (unsigned long long)ha->pio_address); | ||
1578 | |||
1579 | skip_pio: | ||
1580 | /* Use MMIO operations for all accesses. */ | ||
1581 | if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) { | ||
1582 | ql_log_pci(ql_log_fatal, ha->pdev, 0x0015, | ||
1583 | "Region #1 not an MMIO resource (%s), aborting.\n", | ||
1584 | pci_name(ha->pdev)); | ||
1585 | goto iospace_error_exit; | ||
1586 | } | ||
1587 | if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) { | ||
1588 | ql_log_pci(ql_log_fatal, ha->pdev, 0x0016, | ||
1589 | "Invalid PCI mem region size (%s), aborting.\n", | ||
1590 | pci_name(ha->pdev)); | ||
1591 | goto iospace_error_exit; | ||
1592 | } | ||
1593 | |||
1594 | ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN); | ||
1595 | if (!ha->iobase) { | ||
1596 | ql_log_pci(ql_log_fatal, ha->pdev, 0x0017, | ||
1597 | "Cannot remap MMIO (%s), aborting.\n", | ||
1598 | pci_name(ha->pdev)); | ||
1599 | goto iospace_error_exit; | ||
1600 | } | ||
1601 | |||
1602 | /* Determine queue resources */ | ||
1603 | ha->max_req_queues = ha->max_rsp_queues = 1; | ||
1604 | if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) || | ||
1605 | (ql2xmaxqueues > 1 && ql2xmultique_tag) || | ||
1606 | (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) | ||
1607 | goto mqiobase_exit; | ||
1608 | |||
1609 | ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), | ||
1610 | pci_resource_len(ha->pdev, 3)); | ||
1611 | if (ha->mqiobase) { | ||
1612 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018, | ||
1613 | "MQIO Base=%p.\n", ha->mqiobase); | ||
1614 | /* Read MSIX vector size of the board */ | ||
1615 | pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); | ||
1616 | ha->msix_count = msix; | ||
1617 | /* Max queues are bounded by available msix vectors */ | ||
1618 | /* queue 0 uses two msix vectors */ | ||
1619 | if (ql2xmultique_tag) { | ||
1620 | cpus = num_online_cpus(); | ||
1621 | ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ? | ||
1622 | (cpus + 1) : (ha->msix_count - 1); | ||
1623 | ha->max_req_queues = 2; | ||
1624 | } else if (ql2xmaxqueues > 1) { | ||
1625 | ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ? | ||
1626 | QLA_MQ_SIZE : ql2xmaxqueues; | ||
1627 | ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008, | ||
1628 | "QoS mode set, max no of request queues:%d.\n", | ||
1629 | ha->max_req_queues); | ||
1630 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019, | ||
1631 | "QoS mode set, max no of request queues:%d.\n", | ||
1632 | ha->max_req_queues); | ||
1633 | } | ||
1634 | ql_log_pci(ql_log_info, ha->pdev, 0x001a, | ||
1635 | "MSI-X vector count: %d.\n", msix); | ||
1636 | } else | ||
1637 | ql_log_pci(ql_log_info, ha->pdev, 0x001b, | ||
1638 | "BAR 3 not enabled.\n"); | ||
1639 | |||
1640 | mqiobase_exit: | ||
1641 | ha->msix_count = ha->max_rsp_queues + 1; | ||
1642 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c, | ||
1643 | "MSIX Count:%d.\n", ha->msix_count); | ||
1644 | return (0); | ||
1645 | |||
1646 | iospace_error_exit: | ||
1647 | return (-ENOMEM); | ||
1648 | } | ||
1649 | |||
1650 | |||
1651 | static int | ||
1652 | qla83xx_iospace_config(struct qla_hw_data *ha) | ||
1653 | { | ||
1654 | uint16_t msix; | ||
1655 | int cpus; | ||
1656 | |||
1657 | if (pci_request_selected_regions(ha->pdev, ha->bars, | ||
1658 | QLA2XXX_DRIVER_NAME)) { | ||
1659 | ql_log_pci(ql_log_fatal, ha->pdev, 0x0117, | ||
1660 | "Failed to reserve PIO/MMIO regions (%s), aborting.\n", | ||
1661 | pci_name(ha->pdev)); | ||
1662 | |||
1663 | goto iospace_error_exit; | ||
1664 | } | ||
1665 | |||
1666 | /* Use MMIO operations for all accesses. */ | ||
1667 | if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { | ||
1668 | ql_log_pci(ql_log_warn, ha->pdev, 0x0118, | ||
1669 | "Invalid pci I/O region size (%s).\n", | ||
1670 | pci_name(ha->pdev)); | ||
1671 | goto iospace_error_exit; | ||
1672 | } | ||
1673 | if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { | ||
1674 | ql_log_pci(ql_log_warn, ha->pdev, 0x0119, | ||
1675 | "Invalid PCI mem region size (%s), aborting\n", | ||
1676 | pci_name(ha->pdev)); | ||
1677 | goto iospace_error_exit; | ||
1678 | } | ||
1679 | |||
1680 | ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN); | ||
1681 | if (!ha->iobase) { | ||
1682 | ql_log_pci(ql_log_fatal, ha->pdev, 0x011a, | ||
1683 | "Cannot remap MMIO (%s), aborting.\n", | ||
1684 | pci_name(ha->pdev)); | ||
1685 | goto iospace_error_exit; | ||
1686 | } | ||
1687 | |||
1688 | /* 64bit PCI BAR - BAR2 will correspond to region 4 */ | ||
1689 | /* 83XX 26XX always use MQ type access for queues | ||
1690 | * - mbar 2, a.k.a region 4 */ | ||
1691 | ha->max_req_queues = ha->max_rsp_queues = 1; | ||
1692 | ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4), | ||
1693 | pci_resource_len(ha->pdev, 4)); | ||
1694 | |||
1695 | if (!ha->mqiobase) { | ||
1696 | ql_log_pci(ql_log_fatal, ha->pdev, 0x011d, | ||
1697 | "BAR2/region4 not enabled\n"); | ||
1698 | goto mqiobase_exit; | ||
1699 | } | ||
1700 | |||
1701 | ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2), | ||
1702 | pci_resource_len(ha->pdev, 2)); | ||
1703 | if (ha->msixbase) { | ||
1704 | /* Read MSIX vector size of the board */ | ||
1705 | pci_read_config_word(ha->pdev, | ||
1706 | QLA_83XX_PCI_MSIX_CONTROL, &msix); | ||
1707 | ha->msix_count = msix; | ||
1708 | /* Max queues are bounded by available msix vectors */ | ||
1709 | /* queue 0 uses two msix vectors */ | ||
1710 | if (ql2xmultique_tag) { | ||
1711 | cpus = num_online_cpus(); | ||
1712 | ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ? | ||
1713 | (cpus + 1) : (ha->msix_count - 1); | ||
1714 | ha->max_req_queues = 2; | ||
1715 | } else if (ql2xmaxqueues > 1) { | ||
1716 | ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ? | ||
1717 | QLA_MQ_SIZE : ql2xmaxqueues; | ||
1718 | ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c, | ||
1719 | "QoS mode set, max no of request queues:%d.\n", | ||
1720 | ha->max_req_queues); | ||
1721 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, | ||
1722 | "QoS mode set, max no of request queues:%d.\n", | ||
1723 | ha->max_req_queues); | ||
1724 | } | ||
1725 | ql_log_pci(ql_log_info, ha->pdev, 0x011c, | ||
1726 | "MSI-X vector count: %d.\n", msix); | ||
1727 | } else | ||
1728 | ql_log_pci(ql_log_info, ha->pdev, 0x011e, | ||
1729 | "BAR 1 not enabled.\n"); | ||
1730 | |||
1731 | mqiobase_exit: | ||
1732 | ha->msix_count = ha->max_rsp_queues + 1; | ||
1733 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f, | ||
1734 | "MSIX Count:%d.\n", ha->msix_count); | ||
1735 | return 0; | ||
1736 | |||
1737 | iospace_error_exit: | ||
1738 | return -ENOMEM; | ||
1739 | } | ||
1740 | |||
1741 | static struct isp_operations qla2100_isp_ops = { | 1547 | static struct isp_operations qla2100_isp_ops = { |
1742 | .pci_config = qla2100_pci_config, | 1548 | .pci_config = qla2100_pci_config, |
1743 | .reset_chip = qla2x00_reset_chip, | 1549 | .reset_chip = qla2x00_reset_chip, |
@@ -1772,7 +1578,6 @@ static struct isp_operations qla2100_isp_ops = { | |||
1772 | .get_flash_version = qla2x00_get_flash_version, | 1578 | .get_flash_version = qla2x00_get_flash_version, |
1773 | .start_scsi = qla2x00_start_scsi, | 1579 | .start_scsi = qla2x00_start_scsi, |
1774 | .abort_isp = qla2x00_abort_isp, | 1580 | .abort_isp = qla2x00_abort_isp, |
1775 | .iospace_config = qla2x00_iospace_config, | ||
1776 | }; | 1581 | }; |
1777 | 1582 | ||
1778 | static struct isp_operations qla2300_isp_ops = { | 1583 | static struct isp_operations qla2300_isp_ops = { |
@@ -1809,7 +1614,6 @@ static struct isp_operations qla2300_isp_ops = { | |||
1809 | .get_flash_version = qla2x00_get_flash_version, | 1614 | .get_flash_version = qla2x00_get_flash_version, |
1810 | .start_scsi = qla2x00_start_scsi, | 1615 | .start_scsi = qla2x00_start_scsi, |
1811 | .abort_isp = qla2x00_abort_isp, | 1616 | .abort_isp = qla2x00_abort_isp, |
1812 | .iospace_config = qla2x00_iospace_config, | ||
1813 | }; | 1617 | }; |
1814 | 1618 | ||
1815 | static struct isp_operations qla24xx_isp_ops = { | 1619 | static struct isp_operations qla24xx_isp_ops = { |
@@ -1846,7 +1650,6 @@ static struct isp_operations qla24xx_isp_ops = { | |||
1846 | .get_flash_version = qla24xx_get_flash_version, | 1650 | .get_flash_version = qla24xx_get_flash_version, |
1847 | .start_scsi = qla24xx_start_scsi, | 1651 | .start_scsi = qla24xx_start_scsi, |
1848 | .abort_isp = qla2x00_abort_isp, | 1652 | .abort_isp = qla2x00_abort_isp, |
1849 | .iospace_config = qla2x00_iospace_config, | ||
1850 | }; | 1653 | }; |
1851 | 1654 | ||
1852 | static struct isp_operations qla25xx_isp_ops = { | 1655 | static struct isp_operations qla25xx_isp_ops = { |
@@ -1883,7 +1686,6 @@ static struct isp_operations qla25xx_isp_ops = { | |||
1883 | .get_flash_version = qla24xx_get_flash_version, | 1686 | .get_flash_version = qla24xx_get_flash_version, |
1884 | .start_scsi = qla24xx_dif_start_scsi, | 1687 | .start_scsi = qla24xx_dif_start_scsi, |
1885 | .abort_isp = qla2x00_abort_isp, | 1688 | .abort_isp = qla2x00_abort_isp, |
1886 | .iospace_config = qla2x00_iospace_config, | ||
1887 | }; | 1689 | }; |
1888 | 1690 | ||
1889 | static struct isp_operations qla81xx_isp_ops = { | 1691 | static struct isp_operations qla81xx_isp_ops = { |
@@ -1914,13 +1716,12 @@ static struct isp_operations qla81xx_isp_ops = { | |||
1914 | .fw_dump = qla81xx_fw_dump, | 1716 | .fw_dump = qla81xx_fw_dump, |
1915 | .beacon_on = qla24xx_beacon_on, | 1717 | .beacon_on = qla24xx_beacon_on, |
1916 | .beacon_off = qla24xx_beacon_off, | 1718 | .beacon_off = qla24xx_beacon_off, |
1917 | .beacon_blink = qla83xx_beacon_blink, | 1719 | .beacon_blink = qla24xx_beacon_blink, |
1918 | .read_optrom = qla25xx_read_optrom_data, | 1720 | .read_optrom = qla25xx_read_optrom_data, |
1919 | .write_optrom = qla24xx_write_optrom_data, | 1721 | .write_optrom = qla24xx_write_optrom_data, |
1920 | .get_flash_version = qla24xx_get_flash_version, | 1722 | .get_flash_version = qla24xx_get_flash_version, |
1921 | .start_scsi = qla24xx_dif_start_scsi, | 1723 | .start_scsi = qla24xx_dif_start_scsi, |
1922 | .abort_isp = qla2x00_abort_isp, | 1724 | .abort_isp = qla2x00_abort_isp, |
1923 | .iospace_config = qla2x00_iospace_config, | ||
1924 | }; | 1725 | }; |
1925 | 1726 | ||
1926 | static struct isp_operations qla82xx_isp_ops = { | 1727 | static struct isp_operations qla82xx_isp_ops = { |
@@ -1932,7 +1733,7 @@ static struct isp_operations qla82xx_isp_ops = { | |||
1932 | .nvram_config = qla81xx_nvram_config, | 1733 | .nvram_config = qla81xx_nvram_config, |
1933 | .update_fw_options = qla24xx_update_fw_options, | 1734 | .update_fw_options = qla24xx_update_fw_options, |
1934 | .load_risc = qla82xx_load_risc, | 1735 | .load_risc = qla82xx_load_risc, |
1935 | .pci_info_str = qla24xx_pci_info_str, | 1736 | .pci_info_str = qla82xx_pci_info_str, |
1936 | .fw_version_str = qla24xx_fw_version_str, | 1737 | .fw_version_str = qla24xx_fw_version_str, |
1937 | .intr_handler = qla82xx_intr_handler, | 1738 | .intr_handler = qla82xx_intr_handler, |
1938 | .enable_intrs = qla82xx_enable_intrs, | 1739 | .enable_intrs = qla82xx_enable_intrs, |
@@ -1949,52 +1750,14 @@ static struct isp_operations qla82xx_isp_ops = { | |||
1949 | .read_nvram = qla24xx_read_nvram_data, | 1750 | .read_nvram = qla24xx_read_nvram_data, |
1950 | .write_nvram = qla24xx_write_nvram_data, | 1751 | .write_nvram = qla24xx_write_nvram_data, |
1951 | .fw_dump = qla24xx_fw_dump, | 1752 | .fw_dump = qla24xx_fw_dump, |
1952 | .beacon_on = qla82xx_beacon_on, | 1753 | .beacon_on = qla24xx_beacon_on, |
1953 | .beacon_off = qla82xx_beacon_off, | 1754 | .beacon_off = qla24xx_beacon_off, |
1954 | .beacon_blink = NULL, | 1755 | .beacon_blink = qla24xx_beacon_blink, |
1955 | .read_optrom = qla82xx_read_optrom_data, | 1756 | .read_optrom = qla82xx_read_optrom_data, |
1956 | .write_optrom = qla82xx_write_optrom_data, | 1757 | .write_optrom = qla82xx_write_optrom_data, |
1957 | .get_flash_version = qla24xx_get_flash_version, | 1758 | .get_flash_version = qla24xx_get_flash_version, |
1958 | .start_scsi = qla82xx_start_scsi, | 1759 | .start_scsi = qla82xx_start_scsi, |
1959 | .abort_isp = qla82xx_abort_isp, | 1760 | .abort_isp = qla82xx_abort_isp, |
1960 | .iospace_config = qla82xx_iospace_config, | ||
1961 | }; | ||
1962 | |||
1963 | static struct isp_operations qla83xx_isp_ops = { | ||
1964 | .pci_config = qla25xx_pci_config, | ||
1965 | .reset_chip = qla24xx_reset_chip, | ||
1966 | .chip_diag = qla24xx_chip_diag, | ||
1967 | .config_rings = qla24xx_config_rings, | ||
1968 | .reset_adapter = qla24xx_reset_adapter, | ||
1969 | .nvram_config = qla81xx_nvram_config, | ||
1970 | .update_fw_options = qla81xx_update_fw_options, | ||
1971 | .load_risc = qla81xx_load_risc, | ||
1972 | .pci_info_str = qla24xx_pci_info_str, | ||
1973 | .fw_version_str = qla24xx_fw_version_str, | ||
1974 | .intr_handler = qla24xx_intr_handler, | ||
1975 | .enable_intrs = qla24xx_enable_intrs, | ||
1976 | .disable_intrs = qla24xx_disable_intrs, | ||
1977 | .abort_command = qla24xx_abort_command, | ||
1978 | .target_reset = qla24xx_abort_target, | ||
1979 | .lun_reset = qla24xx_lun_reset, | ||
1980 | .fabric_login = qla24xx_login_fabric, | ||
1981 | .fabric_logout = qla24xx_fabric_logout, | ||
1982 | .calc_req_entries = NULL, | ||
1983 | .build_iocbs = NULL, | ||
1984 | .prep_ms_iocb = qla24xx_prep_ms_iocb, | ||
1985 | .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, | ||
1986 | .read_nvram = NULL, | ||
1987 | .write_nvram = NULL, | ||
1988 | .fw_dump = qla83xx_fw_dump, | ||
1989 | .beacon_on = qla24xx_beacon_on, | ||
1990 | .beacon_off = qla24xx_beacon_off, | ||
1991 | .beacon_blink = qla83xx_beacon_blink, | ||
1992 | .read_optrom = qla25xx_read_optrom_data, | ||
1993 | .write_optrom = qla24xx_write_optrom_data, | ||
1994 | .get_flash_version = qla24xx_get_flash_version, | ||
1995 | .start_scsi = qla24xx_dif_start_scsi, | ||
1996 | .abort_isp = qla2x00_abort_isp, | ||
1997 | .iospace_config = qla83xx_iospace_config, | ||
1998 | }; | 1761 | }; |
1999 | 1762 | ||
2000 | static inline void | 1763 | static inline void |
@@ -2091,22 +1854,6 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) | |||
2091 | /* Initialize 82XX ISP flags */ | 1854 | /* Initialize 82XX ISP flags */ |
2092 | qla82xx_init_flags(ha); | 1855 | qla82xx_init_flags(ha); |
2093 | break; | 1856 | break; |
2094 | case PCI_DEVICE_ID_QLOGIC_ISP2031: | ||
2095 | ha->device_type |= DT_ISP2031; | ||
2096 | ha->device_type |= DT_ZIO_SUPPORTED; | ||
2097 | ha->device_type |= DT_FWI2; | ||
2098 | ha->device_type |= DT_IIDMA; | ||
2099 | ha->device_type |= DT_T10_PI; | ||
2100 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; | ||
2101 | break; | ||
2102 | case PCI_DEVICE_ID_QLOGIC_ISP8031: | ||
2103 | ha->device_type |= DT_ISP8031; | ||
2104 | ha->device_type |= DT_ZIO_SUPPORTED; | ||
2105 | ha->device_type |= DT_FWI2; | ||
2106 | ha->device_type |= DT_IIDMA; | ||
2107 | ha->device_type |= DT_T10_PI; | ||
2108 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; | ||
2109 | break; | ||
2110 | } | 1857 | } |
2111 | 1858 | ||
2112 | if (IS_QLA82XX(ha)) | 1859 | if (IS_QLA82XX(ha)) |
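The hunk above drops the ISP2031/ISP8031 entries from qla2x00_set_isp_flags(), which builds ha->device_type by OR-ing capability bits per PCI device ID. The pattern itself is plain bit accumulation in a switch; the sketch below uses placeholder bit names and device IDs, not the driver's DT_* definitions.

/* Capability-bit accumulation per device ID, as a stand-alone sketch. */
#include <stdint.h>
#include <stdio.h>

#define CAP_FWI2  (1u << 0)   /* firmware interface 2       */
#define CAP_ZIO   (1u << 1)   /* zero-interrupt operation   */
#define CAP_IIDMA (1u << 2)   /* intelligent interleave DMA */
#define CAP_T10PI (1u << 3)   /* T10 protection information */

static uint32_t set_isp_flags(uint16_t device_id)
{
	uint32_t caps = 0;

	switch (device_id) {
	case 0x2532:                       /* hypothetical 25xx-class part */
		caps |= CAP_FWI2 | CAP_ZIO | CAP_IIDMA;
		break;
	case 0x2031:                       /* hypothetical 83xx-class part */
		caps |= CAP_FWI2 | CAP_ZIO | CAP_IIDMA | CAP_T10PI;
		break;
	default:
		break;                     /* legacy parts: no extra caps  */
	}
	return caps;
}

int main(void)
{
	printf("caps=0x%x\n", set_isp_flags(0x2031));
	return 0;
}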
@@ -2120,10 +1867,121 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) | |||
2120 | else | 1867 | else |
2121 | ha->flags.port0 = 0; | 1868 | ha->flags.port0 = 0; |
2122 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b, | 1869 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b, |
2123 | "device_type=0x%x port=%d fw_srisc_address=0x%x.\n", | 1870 | "device_type=0x%x port=%d fw_srisc_address=%p.\n", |
2124 | ha->device_type, ha->flags.port0, ha->fw_srisc_address); | 1871 | ha->device_type, ha->flags.port0, ha->fw_srisc_address); |
2125 | } | 1872 | } |
2126 | 1873 | ||
1874 | static int | ||
1875 | qla2x00_iospace_config(struct qla_hw_data *ha) | ||
1876 | { | ||
1877 | resource_size_t pio; | ||
1878 | uint16_t msix; | ||
1879 | int cpus; | ||
1880 | |||
1881 | if (IS_QLA82XX(ha)) | ||
1882 | return qla82xx_iospace_config(ha); | ||
1883 | |||
1884 | if (pci_request_selected_regions(ha->pdev, ha->bars, | ||
1885 | QLA2XXX_DRIVER_NAME)) { | ||
1886 | ql_log_pci(ql_log_fatal, ha->pdev, 0x0011, | ||
1887 | "Failed to reserve PIO/MMIO regions (%s), aborting.\n", | ||
1888 | pci_name(ha->pdev)); | ||
1889 | goto iospace_error_exit; | ||
1890 | } | ||
1891 | if (!(ha->bars & 1)) | ||
1892 | goto skip_pio; | ||
1893 | |||
1894 | /* We only need PIO for Flash operations on ISP2312 v2 chips. */ | ||
1895 | pio = pci_resource_start(ha->pdev, 0); | ||
1896 | if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { | ||
1897 | if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { | ||
1898 | ql_log_pci(ql_log_warn, ha->pdev, 0x0012, | ||
1899 | "Invalid pci I/O region size (%s).\n", | ||
1900 | pci_name(ha->pdev)); | ||
1901 | pio = 0; | ||
1902 | } | ||
1903 | } else { | ||
1904 | ql_log_pci(ql_log_warn, ha->pdev, 0x0013, | ||
1905 | "Region #0 no a PIO resource (%s).\n", | ||
1906 | pci_name(ha->pdev)); | ||
1907 | pio = 0; | ||
1908 | } | ||
1909 | ha->pio_address = pio; | ||
1910 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014, | ||
1911 | "PIO address=%p.\n", | ||
1912 | ha->pio_address); | ||
1913 | |||
1914 | skip_pio: | ||
1915 | /* Use MMIO operations for all accesses. */ | ||
1916 | if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) { | ||
1917 | ql_log_pci(ql_log_fatal, ha->pdev, 0x0015, | ||
1918 | "Region #1 not an MMIO resource (%s), aborting.\n", | ||
1919 | pci_name(ha->pdev)); | ||
1920 | goto iospace_error_exit; | ||
1921 | } | ||
1922 | if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) { | ||
1923 | ql_log_pci(ql_log_fatal, ha->pdev, 0x0016, | ||
1924 | "Invalid PCI mem region size (%s), aborting.\n", | ||
1925 | pci_name(ha->pdev)); | ||
1926 | goto iospace_error_exit; | ||
1927 | } | ||
1928 | |||
1929 | ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN); | ||
1930 | if (!ha->iobase) { | ||
1931 | ql_log_pci(ql_log_fatal, ha->pdev, 0x0017, | ||
1932 | "Cannot remap MMIO (%s), aborting.\n", | ||
1933 | pci_name(ha->pdev)); | ||
1934 | goto iospace_error_exit; | ||
1935 | } | ||
1936 | |||
1937 | /* Determine queue resources */ | ||
1938 | ha->max_req_queues = ha->max_rsp_queues = 1; | ||
1939 | if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) || | ||
1940 | (ql2xmaxqueues > 1 && ql2xmultique_tag) || | ||
1941 | (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) | ||
1942 | goto mqiobase_exit; | ||
1943 | |||
1944 | ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), | ||
1945 | pci_resource_len(ha->pdev, 3)); | ||
1946 | if (ha->mqiobase) { | ||
1947 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018, | ||
1948 | "MQIO Base=%p.\n", ha->mqiobase); | ||
1949 | /* Read MSIX vector size of the board */ | ||
1950 | pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); | ||
1951 | ha->msix_count = msix; | ||
1952 | /* Max queues are bounded by available msix vectors */ | ||
1953 | /* queue 0 uses two msix vectors */ | ||
1954 | if (ql2xmultique_tag) { | ||
1955 | cpus = num_online_cpus(); | ||
1956 | ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ? | ||
1957 | (cpus + 1) : (ha->msix_count - 1); | ||
1958 | ha->max_req_queues = 2; | ||
1959 | } else if (ql2xmaxqueues > 1) { | ||
1960 | ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ? | ||
1961 | QLA_MQ_SIZE : ql2xmaxqueues; | ||
1962 | ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008, | ||
1963 | "QoS mode set, max no of request queues:%d.\n", | ||
1964 | ha->max_req_queues); | ||
1965 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019, | ||
1966 | "QoS mode set, max no of request queues:%d.\n", | ||
1967 | ha->max_req_queues); | ||
1968 | } | ||
1969 | ql_log_pci(ql_log_info, ha->pdev, 0x001a, | ||
1970 | "MSI-X vector count: %d.\n", msix); | ||
1971 | } else | ||
1972 | ql_log_pci(ql_log_info, ha->pdev, 0x001b, | ||
1973 | "BAR 3 not enabled.\n"); | ||
1974 | |||
1975 | mqiobase_exit: | ||
1976 | ha->msix_count = ha->max_rsp_queues + 1; | ||
1977 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c, | ||
1978 | "MSIX Count:%d.\n", ha->msix_count); | ||
1979 | return (0); | ||
1980 | |||
1981 | iospace_error_exit: | ||
1982 | return (-ENOMEM); | ||
1983 | } | ||
1984 | |||
2127 | static void | 1985 | static void |
2128 | qla2xxx_scan_start(struct Scsi_Host *shost) | 1986 | qla2xxx_scan_start(struct Scsi_Host *shost) |
2129 | { | 1987 | { |
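The queue sizing inside the restored qla2x00_iospace_config() bounds the response-queue count by the MSI-X vectors the board advertises (queue 0 consumes two vectors) and by the number of online CPUs. The arithmetic can be checked in isolation; msix_count and cpus below are plain parameters standing in for the values the driver reads from PCI config space and num_online_cpus().

/* The queue-count bound used above, extracted into a pure function. */
#include <stdio.h>

static int max_rsp_queues(int msix_count, int cpus)
{
	/* queue 0 needs two vectors, so at most msix_count - 1 response
	 * queues can exist; never exceed cpus + 1 in total. */
	return (msix_count - 1 > cpus) ? cpus + 1 : msix_count - 1;
}

int main(void)
{
	printf("16 vectors, 8 cpus -> %d rsp queues\n", max_rsp_queues(16, 8));
	printf(" 4 vectors, 8 cpus -> %d rsp queues\n", max_rsp_queues(4, 8));
	return 0;
}

The driver then sets msix_count back to max_rsp_queues + 1, which is the number of vectors it will actually request.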
@@ -2154,7 +2012,7 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) | |||
2154 | /* | 2012 | /* |
2155 | * PCI driver interface | 2013 | * PCI driver interface |
2156 | */ | 2014 | */ |
2157 | static int | 2015 | static int __devinit |
2158 | qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | 2016 | qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) |
2159 | { | 2017 | { |
2160 | int ret = -ENODEV; | 2018 | int ret = -ENODEV; |
@@ -2162,9 +2020,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2162 | scsi_qla_host_t *base_vha = NULL; | 2020 | scsi_qla_host_t *base_vha = NULL; |
2163 | struct qla_hw_data *ha; | 2021 | struct qla_hw_data *ha; |
2164 | char pci_info[30]; | 2022 | char pci_info[30]; |
2165 | char fw_str[30], wq_name[30]; | 2023 | char fw_str[30]; |
2166 | struct scsi_host_template *sht; | 2024 | struct scsi_host_template *sht; |
2167 | int bars, mem_only = 0; | 2025 | int bars, max_id, mem_only = 0; |
2168 | uint16_t req_length = 0, rsp_length = 0; | 2026 | uint16_t req_length = 0, rsp_length = 0; |
2169 | struct req_que *req = NULL; | 2027 | struct req_que *req = NULL; |
2170 | struct rsp_que *rsp = NULL; | 2028 | struct rsp_que *rsp = NULL; |
@@ -2178,9 +2036,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2178 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || | 2036 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || |
2179 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 || | 2037 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 || |
2180 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 || | 2038 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 || |
2181 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 || | 2039 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) { |
2182 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 || | ||
2183 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031) { | ||
2184 | bars = pci_select_bars(pdev, IORESOURCE_MEM); | 2040 | bars = pci_select_bars(pdev, IORESOURCE_MEM); |
2185 | mem_only = 1; | 2041 | mem_only = 1; |
2186 | ql_dbg_pci(ql_dbg_init, pdev, 0x0007, | 2042 | ql_dbg_pci(ql_dbg_init, pdev, 0x0007, |
@@ -2209,31 +2065,38 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2209 | ql_dbg_pci(ql_dbg_init, pdev, 0x000a, | 2065 | ql_dbg_pci(ql_dbg_init, pdev, 0x000a, |
2210 | "Memory allocated for ha=%p.\n", ha); | 2066 | "Memory allocated for ha=%p.\n", ha); |
2211 | ha->pdev = pdev; | 2067 | ha->pdev = pdev; |
2212 | ha->tgt.enable_class_2 = ql2xenableclass2; | ||
2213 | 2068 | ||
2214 | /* Clear our data area */ | 2069 | /* Clear our data area */ |
2215 | ha->bars = bars; | 2070 | ha->bars = bars; |
2216 | ha->mem_only = mem_only; | 2071 | ha->mem_only = mem_only; |
2217 | spin_lock_init(&ha->hardware_lock); | 2072 | spin_lock_init(&ha->hardware_lock); |
2218 | spin_lock_init(&ha->vport_slock); | 2073 | spin_lock_init(&ha->vport_slock); |
2219 | mutex_init(&ha->selflogin_lock); | ||
2220 | 2074 | ||
2221 | /* Set ISP-type information. */ | 2075 | /* Set ISP-type information. */ |
2222 | qla2x00_set_isp_flags(ha); | 2076 | qla2x00_set_isp_flags(ha); |
2223 | 2077 | ||
2224 | /* Set EEH reset type to fundamental if required by hba */ | 2078 | /* Set EEH reset type to fundamental if required by hba */ |
2225 | if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) || | 2079 | if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) { |
2226 | IS_QLA83XX(ha)) | ||
2227 | pdev->needs_freset = 1; | 2080 | pdev->needs_freset = 1; |
2081 | } | ||
2228 | 2082 | ||
2083 | /* Configure PCI I/O space */ | ||
2084 | ret = qla2x00_iospace_config(ha); | ||
2085 | if (ret) | ||
2086 | goto probe_hw_failed; | ||
2087 | |||
2088 | ql_log_pci(ql_log_info, pdev, 0x001d, | ||
2089 | "Found an ISP%04X irq %d iobase 0x%p.\n", | ||
2090 | pdev->device, pdev->irq, ha->iobase); | ||
2229 | ha->prev_topology = 0; | 2091 | ha->prev_topology = 0; |
2230 | ha->init_cb_size = sizeof(init_cb_t); | 2092 | ha->init_cb_size = sizeof(init_cb_t); |
2231 | ha->link_data_rate = PORT_SPEED_UNKNOWN; | 2093 | ha->link_data_rate = PORT_SPEED_UNKNOWN; |
2232 | ha->optrom_size = OPTROM_SIZE_2300; | 2094 | ha->optrom_size = OPTROM_SIZE_2300; |
2233 | 2095 | ||
2234 | /* Assign ISP specific operations. */ | 2096 | /* Assign ISP specific operations. */ |
2097 | max_id = MAX_TARGETS_2200; | ||
2235 | if (IS_QLA2100(ha)) { | 2098 | if (IS_QLA2100(ha)) { |
2236 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; | 2099 | max_id = MAX_TARGETS_2100; |
2237 | ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; | 2100 | ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; |
2238 | req_length = REQUEST_ENTRY_CNT_2100; | 2101 | req_length = REQUEST_ENTRY_CNT_2100; |
2239 | rsp_length = RESPONSE_ENTRY_CNT_2100; | 2102 | rsp_length = RESPONSE_ENTRY_CNT_2100; |
@@ -2245,8 +2108,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2245 | ha->nvram_data_off = ~0; | 2108 | ha->nvram_data_off = ~0; |
2246 | ha->isp_ops = &qla2100_isp_ops; | 2109 | ha->isp_ops = &qla2100_isp_ops; |
2247 | } else if (IS_QLA2200(ha)) { | 2110 | } else if (IS_QLA2200(ha)) { |
2248 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; | 2111 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
2249 | ha->mbx_count = MAILBOX_REGISTER_COUNT_2200; | ||
2250 | req_length = REQUEST_ENTRY_CNT_2200; | 2112 | req_length = REQUEST_ENTRY_CNT_2200; |
2251 | rsp_length = RESPONSE_ENTRY_CNT_2100; | 2113 | rsp_length = RESPONSE_ENTRY_CNT_2100; |
2252 | ha->max_loop_id = SNS_LAST_LOOP_ID_2100; | 2114 | ha->max_loop_id = SNS_LAST_LOOP_ID_2100; |
@@ -2257,7 +2119,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2257 | ha->nvram_data_off = ~0; | 2119 | ha->nvram_data_off = ~0; |
2258 | ha->isp_ops = &qla2100_isp_ops; | 2120 | ha->isp_ops = &qla2100_isp_ops; |
2259 | } else if (IS_QLA23XX(ha)) { | 2121 | } else if (IS_QLA23XX(ha)) { |
2260 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; | ||
2261 | ha->mbx_count = MAILBOX_REGISTER_COUNT; | 2122 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
2262 | req_length = REQUEST_ENTRY_CNT_2200; | 2123 | req_length = REQUEST_ENTRY_CNT_2200; |
2263 | rsp_length = RESPONSE_ENTRY_CNT_2300; | 2124 | rsp_length = RESPONSE_ENTRY_CNT_2300; |
@@ -2271,11 +2132,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2271 | ha->nvram_data_off = ~0; | 2132 | ha->nvram_data_off = ~0; |
2272 | ha->isp_ops = &qla2300_isp_ops; | 2133 | ha->isp_ops = &qla2300_isp_ops; |
2273 | } else if (IS_QLA24XX_TYPE(ha)) { | 2134 | } else if (IS_QLA24XX_TYPE(ha)) { |
2274 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; | ||
2275 | ha->mbx_count = MAILBOX_REGISTER_COUNT; | 2135 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
2276 | req_length = REQUEST_ENTRY_CNT_24XX; | 2136 | req_length = REQUEST_ENTRY_CNT_24XX; |
2277 | rsp_length = RESPONSE_ENTRY_CNT_2300; | 2137 | rsp_length = RESPONSE_ENTRY_CNT_2300; |
2278 | ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; | ||
2279 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; | 2138 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; |
2280 | ha->init_cb_size = sizeof(struct mid_init_cb_24xx); | 2139 | ha->init_cb_size = sizeof(struct mid_init_cb_24xx); |
2281 | ha->gid_list_info_size = 8; | 2140 | ha->gid_list_info_size = 8; |
@@ -2287,11 +2146,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2287 | ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; | 2146 | ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; |
2288 | ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; | 2147 | ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; |
2289 | } else if (IS_QLA25XX(ha)) { | 2148 | } else if (IS_QLA25XX(ha)) { |
2290 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; | ||
2291 | ha->mbx_count = MAILBOX_REGISTER_COUNT; | 2149 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
2292 | req_length = REQUEST_ENTRY_CNT_24XX; | 2150 | req_length = REQUEST_ENTRY_CNT_24XX; |
2293 | rsp_length = RESPONSE_ENTRY_CNT_2300; | 2151 | rsp_length = RESPONSE_ENTRY_CNT_2300; |
2294 | ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; | ||
2295 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; | 2152 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; |
2296 | ha->init_cb_size = sizeof(struct mid_init_cb_24xx); | 2153 | ha->init_cb_size = sizeof(struct mid_init_cb_24xx); |
2297 | ha->gid_list_info_size = 8; | 2154 | ha->gid_list_info_size = 8; |
@@ -2303,7 +2160,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2303 | ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; | 2160 | ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; |
2304 | ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; | 2161 | ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; |
2305 | } else if (IS_QLA81XX(ha)) { | 2162 | } else if (IS_QLA81XX(ha)) { |
2306 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; | ||
2307 | ha->mbx_count = MAILBOX_REGISTER_COUNT; | 2163 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
2308 | req_length = REQUEST_ENTRY_CNT_24XX; | 2164 | req_length = REQUEST_ENTRY_CNT_24XX; |
2309 | rsp_length = RESPONSE_ENTRY_CNT_2300; | 2165 | rsp_length = RESPONSE_ENTRY_CNT_2300; |
@@ -2318,7 +2174,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2318 | ha->nvram_conf_off = ~0; | 2174 | ha->nvram_conf_off = ~0; |
2319 | ha->nvram_data_off = ~0; | 2175 | ha->nvram_data_off = ~0; |
2320 | } else if (IS_QLA82XX(ha)) { | 2176 | } else if (IS_QLA82XX(ha)) { |
2321 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; | ||
2322 | ha->mbx_count = MAILBOX_REGISTER_COUNT; | 2177 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
2323 | req_length = REQUEST_ENTRY_CNT_82XX; | 2178 | req_length = REQUEST_ENTRY_CNT_82XX; |
2324 | rsp_length = RESPONSE_ENTRY_CNT_82XX; | 2179 | rsp_length = RESPONSE_ENTRY_CNT_82XX; |
@@ -2332,46 +2187,19 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2332 | ha->flash_data_off = FARX_ACCESS_FLASH_DATA; | 2187 | ha->flash_data_off = FARX_ACCESS_FLASH_DATA; |
2333 | ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; | 2188 | ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; |
2334 | ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; | 2189 | ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; |
2335 | } else if (IS_QLA83XX(ha)) { | ||
2336 | ha->portnum = PCI_FUNC(ha->pdev->devfn); | ||
2337 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; | ||
2338 | ha->mbx_count = MAILBOX_REGISTER_COUNT; | ||
2339 | req_length = REQUEST_ENTRY_CNT_24XX; | ||
2340 | rsp_length = RESPONSE_ENTRY_CNT_2300; | ||
2341 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; | ||
2342 | ha->init_cb_size = sizeof(struct mid_init_cb_81xx); | ||
2343 | ha->gid_list_info_size = 8; | ||
2344 | ha->optrom_size = OPTROM_SIZE_83XX; | ||
2345 | ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; | ||
2346 | ha->isp_ops = &qla83xx_isp_ops; | ||
2347 | ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; | ||
2348 | ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; | ||
2349 | ha->nvram_conf_off = ~0; | ||
2350 | ha->nvram_data_off = ~0; | ||
2351 | } | 2190 | } |
2352 | |||
2353 | ql_dbg_pci(ql_dbg_init, pdev, 0x001e, | 2191 | ql_dbg_pci(ql_dbg_init, pdev, 0x001e, |
2354 | "mbx_count=%d, req_length=%d, " | 2192 | "mbx_count=%d, req_length=%d, " |
2355 | "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, " | 2193 | "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, " |
2356 | "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, " | 2194 | "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, .\n", |
2357 | "max_fibre_devices=%d.\n", | ||
2358 | ha->mbx_count, req_length, rsp_length, ha->max_loop_id, | 2195 | ha->mbx_count, req_length, rsp_length, ha->max_loop_id, |
2359 | ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size, | 2196 | ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size, |
2360 | ha->nvram_npiv_size, ha->max_fibre_devices); | 2197 | ha->nvram_npiv_size); |
2361 | ql_dbg_pci(ql_dbg_init, pdev, 0x001f, | 2198 | ql_dbg_pci(ql_dbg_init, pdev, 0x001f, |
2362 | "isp_ops=%p, flash_conf_off=%d, " | 2199 | "isp_ops=%p, flash_conf_off=%d, " |
2363 | "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", | 2200 | "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", |
2364 | ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, | 2201 | ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, |
2365 | ha->nvram_conf_off, ha->nvram_data_off); | 2202 | ha->nvram_conf_off, ha->nvram_data_off); |
2366 | |||
2367 | /* Configure PCI I/O space */ | ||
2368 | ret = ha->isp_ops->iospace_config(ha); | ||
2369 | if (ret) | ||
2370 | goto iospace_config_failed; | ||
2371 | |||
2372 | ql_log_pci(ql_log_info, pdev, 0x001d, | ||
2373 | "Found an ISP%04X irq %d iobase 0x%p.\n", | ||
2374 | pdev->device, pdev->irq, ha->iobase); | ||
2375 | mutex_init(&ha->vport_lock); | 2203 | mutex_init(&ha->vport_lock); |
2376 | init_completion(&ha->mbx_cmd_comp); | 2204 | init_completion(&ha->mbx_cmd_comp); |
2377 | complete(&ha->mbx_cmd_comp); | 2205 | complete(&ha->mbx_cmd_comp); |
@@ -2431,7 +2259,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2431 | "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", | 2259 | "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", |
2432 | host->can_queue, base_vha->req, | 2260 | host->can_queue, base_vha->req, |
2433 | base_vha->mgmt_svr_loop_id, host->sg_tablesize); | 2261 | base_vha->mgmt_svr_loop_id, host->sg_tablesize); |
2434 | host->max_id = ha->max_fibre_devices; | 2262 | host->max_id = max_id; |
2263 | host->this_id = 255; | ||
2435 | host->cmd_per_lun = 3; | 2264 | host->cmd_per_lun = 3; |
2436 | host->unique_id = host->host_no; | 2265 | host->unique_id = host->host_no; |
2437 | if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) | 2266 | if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) |
@@ -2446,22 +2275,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2446 | ql_dbg(ql_dbg_init, base_vha, 0x0033, | 2275 | ql_dbg(ql_dbg_init, base_vha, 0x0033, |
2447 | "max_id=%d this_id=%d " | 2276 | "max_id=%d this_id=%d " |
2448 | "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d " | 2277 | "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d " |
2449 | "max_lun=%d transportt=%p, vendor_id=%llu.\n", host->max_id, | 2278 | "max_lun=%d transportt=%p, vendor_id=%d.\n", host->max_id, |
2450 | host->this_id, host->cmd_per_lun, host->unique_id, | 2279 | host->this_id, host->cmd_per_lun, host->unique_id, |
2451 | host->max_cmd_len, host->max_channel, host->max_lun, | 2280 | host->max_cmd_len, host->max_channel, host->max_lun, |
2452 | host->transportt, sht->vendor_id); | 2281 | host->transportt, sht->vendor_id); |
2453 | 2282 | ||
2454 | que_init: | ||
2455 | /* Alloc arrays of request and response ring ptrs */ | ||
2456 | if (!qla2x00_alloc_queues(ha, req, rsp)) { | ||
2457 | ql_log(ql_log_fatal, base_vha, 0x003d, | ||
2458 | "Failed to allocate memory for queue pointers..." | ||
2459 | "aborting.\n"); | ||
2460 | goto probe_init_failed; | ||
2461 | } | ||
2462 | |||
2463 | qlt_probe_one_stage1(base_vha, ha); | ||
2464 | |||
2465 | /* Set up the irqs */ | 2283 | /* Set up the irqs */ |
2466 | ret = qla2x00_request_irqs(ha, rsp); | 2284 | ret = qla2x00_request_irqs(ha, rsp); |
2467 | if (ret) | 2285 | if (ret) |
@@ -2469,16 +2287,26 @@ que_init: | |||
2469 | 2287 | ||
2470 | pci_save_state(pdev); | 2288 | pci_save_state(pdev); |
2471 | 2289 | ||
2472 | /* Assign back pointers */ | 2290 | /* Alloc arrays of request and response ring ptrs */ |
2291 | que_init: | ||
2292 | if (!qla2x00_alloc_queues(ha)) { | ||
2293 | ql_log(ql_log_fatal, base_vha, 0x003d, | ||
2294 | "Failed to allocate memory for queue pointers.. aborting.\n"); | ||
2295 | goto probe_init_failed; | ||
2296 | } | ||
2297 | |||
2298 | ha->rsp_q_map[0] = rsp; | ||
2299 | ha->req_q_map[0] = req; | ||
2473 | rsp->req = req; | 2300 | rsp->req = req; |
2474 | req->rsp = rsp; | 2301 | req->rsp = rsp; |
2475 | 2302 | set_bit(0, ha->req_qid_map); | |
2303 | set_bit(0, ha->rsp_qid_map); | ||
2476 | /* FWI2-capable only. */ | 2304 | /* FWI2-capable only. */ |
2477 | req->req_q_in = &ha->iobase->isp24.req_q_in; | 2305 | req->req_q_in = &ha->iobase->isp24.req_q_in; |
2478 | req->req_q_out = &ha->iobase->isp24.req_q_out; | 2306 | req->req_q_out = &ha->iobase->isp24.req_q_out; |
2479 | rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in; | 2307 | rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in; |
2480 | rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out; | 2308 | rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out; |
2481 | if (ha->mqenable || IS_QLA83XX(ha)) { | 2309 | if (ha->mqenable) { |
2482 | req->req_q_in = &ha->mqiobase->isp25mq.req_q_in; | 2310 | req->req_q_in = &ha->mqiobase->isp25mq.req_q_in; |
2483 | req->req_q_out = &ha->mqiobase->isp25mq.req_q_out; | 2311 | req->req_q_out = &ha->mqiobase->isp25mq.req_q_out; |
2484 | rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in; | 2312 | rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in; |
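The probe hunk above moves the queue allocation after IRQ setup and makes the probe path wire queue 0 up by hand: map slot 0 points at the allocated request/response rings, the two rings cross-reference each other, and the qid bitmaps record that slot 0 is in use. The structures below are invented stand-ins for req_que/rsp_que, shown only to make that bookkeeping explicit.

/* Book-keeping sketch: slot 0 of each map is claimed, the rings are
 * cross-linked, and a flag array remembers which queue IDs are live. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_QUEUES 8

struct ring {
	struct ring *peer;               /* req <-> rsp back pointer */
};

struct hw_data {
	struct ring *req_map[MAX_QUEUES];
	struct ring *rsp_map[MAX_QUEUES];
	bool req_qid_used[MAX_QUEUES];
	bool rsp_qid_used[MAX_QUEUES];
};

static void wire_queue0(struct hw_data *ha, struct ring *req, struct ring *rsp)
{
	ha->req_map[0] = req;
	ha->rsp_map[0] = rsp;
	req->peer = rsp;
	rsp->peer = req;
	ha->req_qid_used[0] = true;
	ha->rsp_qid_used[0] = true;
}

int main(void)
{
	struct hw_data ha = { 0 };
	struct ring req = { NULL }, rsp = { NULL };

	wire_queue0(&ha, &req, &rsp);
	printf("queue 0 wired: %d\n", ha.req_qid_used[0] && ha.rsp_qid_used[0]);
	return 0;
}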
@@ -2514,7 +2342,7 @@ que_init: | |||
2514 | if (IS_QLA82XX(ha)) { | 2342 | if (IS_QLA82XX(ha)) { |
2515 | qla82xx_idc_lock(ha); | 2343 | qla82xx_idc_lock(ha); |
2516 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, | 2344 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, |
2517 | QLA8XXX_DEV_FAILED); | 2345 | QLA82XX_DEV_FAILED); |
2518 | qla82xx_idc_unlock(ha); | 2346 | qla82xx_idc_unlock(ha); |
2519 | ql_log(ql_log_fatal, base_vha, 0x00d7, | 2347 | ql_log(ql_log_fatal, base_vha, 0x00d7, |
2520 | "HW State: FAILED.\n"); | 2348 | "HW State: FAILED.\n"); |
@@ -2549,28 +2377,6 @@ que_init: | |||
2549 | ql_dbg(ql_dbg_init, base_vha, 0x00ee, | 2377 | ql_dbg(ql_dbg_init, base_vha, 0x00ee, |
2550 | "DPC thread started successfully.\n"); | 2378 | "DPC thread started successfully.\n"); |
2551 | 2379 | ||
2552 | /* | ||
2553 | * If we're not coming up in initiator mode, we might sit for | ||
2554 | * a while without waking up the dpc thread, which leads to a | ||
2555 | * stuck process warning. So just kick the dpc once here and | ||
2556 | * let the kthread start (and go back to sleep in qla2x00_do_dpc). | ||
2557 | */ | ||
2558 | qla2xxx_wake_dpc(base_vha); | ||
2559 | |||
2560 | if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { | ||
2561 | sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no); | ||
2562 | ha->dpc_lp_wq = create_singlethread_workqueue(wq_name); | ||
2563 | INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen); | ||
2564 | |||
2565 | sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no); | ||
2566 | ha->dpc_hp_wq = create_singlethread_workqueue(wq_name); | ||
2567 | INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work); | ||
2568 | INIT_WORK(&ha->idc_state_handler, | ||
2569 | qla83xx_idc_state_handler_work); | ||
2570 | INIT_WORK(&ha->nic_core_unrecoverable, | ||
2571 | qla83xx_nic_core_unrecoverable_work); | ||
2572 | } | ||
2573 | |||
2574 | skip_dpc: | 2380 | skip_dpc: |
2575 | list_add_tail(&base_vha->list, &ha->vp_list); | 2381 | list_add_tail(&base_vha->list, &ha->vp_list); |
2576 | base_vha->host->irq = ha->pdev->irq; | 2382 | base_vha->host->irq = ha->pdev->irq; |
@@ -2586,7 +2392,7 @@ skip_dpc: | |||
2586 | 2392 | ||
2587 | if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { | 2393 | if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { |
2588 | if (ha->fw_attributes & BIT_4) { | 2394 | if (ha->fw_attributes & BIT_4) { |
2589 | int prot = 0, guard; | 2395 | int prot = 0; |
2590 | base_vha->flags.difdix_supported = 1; | 2396 | base_vha->flags.difdix_supported = 1; |
2591 | ql_dbg(ql_dbg_init, base_vha, 0x00f1, | 2397 | ql_dbg(ql_dbg_init, base_vha, 0x00f1, |
2592 | "Registering for DIF/DIX type 1 and 3 protection.\n"); | 2398 | "Registering for DIF/DIX type 1 and 3 protection.\n"); |
@@ -2599,14 +2405,7 @@ skip_dpc: | |||
2599 | | SHOST_DIX_TYPE1_PROTECTION | 2405 | | SHOST_DIX_TYPE1_PROTECTION |
2600 | | SHOST_DIX_TYPE2_PROTECTION | 2406 | | SHOST_DIX_TYPE2_PROTECTION |
2601 | | SHOST_DIX_TYPE3_PROTECTION); | 2407 | | SHOST_DIX_TYPE3_PROTECTION); |
2602 | 2408 | scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC); | |
2603 | guard = SHOST_DIX_GUARD_CRC; | ||
2604 | |||
2605 | if (IS_PI_IPGUARD_CAPABLE(ha) && | ||
2606 | (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha))) | ||
2607 | guard |= SHOST_DIX_GUARD_IP; | ||
2608 | |||
2609 | scsi_host_set_guard(host, guard); | ||
2610 | } else | 2409 | } else |
2611 | base_vha->flags.difdix_supported = 0; | 2410 | base_vha->flags.difdix_supported = 0; |
2612 | } | 2411 | } |
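The DIF/DIX hunk replaces a guard mask built conditionally (left: CRC always, IP checksum guard added when the HBA is capable and the module parameter asks for it) with a fixed CRC-only call (right). The mask construction is just conditional bit OR-ing; the sketch below simplifies the capability test to two inputs, and the GUARD_* flags are illustrative, not the SCSI layer's SHOST_DIX_GUARD_* constants.

/* Guard-mask selection sketch. */
#include <stdio.h>

#define GUARD_CRC (1u << 0)
#define GUARD_IP  (1u << 1)

static unsigned pick_guard(int hba_ip_capable, int enabledif_level)
{
	unsigned guard = GUARD_CRC;            /* CRC guard is always offered */

	if (hba_ip_capable && enabledif_level > 1)
		guard |= GUARD_IP;             /* add IP checksum guard       */

	return guard;
}

int main(void)
{
	printf("guard mask = 0x%x\n", pick_guard(1, 2));
	return 0;
}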
@@ -2623,11 +2422,7 @@ skip_dpc: | |||
2623 | ql_dbg(ql_dbg_init, base_vha, 0x00f2, | 2422 | ql_dbg(ql_dbg_init, base_vha, 0x00f2, |
2624 | "Init done and hba is online.\n"); | 2423 | "Init done and hba is online.\n"); |
2625 | 2424 | ||
2626 | if (qla_ini_mode_enabled(base_vha)) | 2425 | scsi_scan_host(host); |
2627 | scsi_scan_host(host); | ||
2628 | else | ||
2629 | ql_dbg(ql_dbg_init, base_vha, 0x0122, | ||
2630 | "skipping scsi_scan_host() for non-initiator port\n"); | ||
2631 | 2426 | ||
2632 | qla2x00_alloc_sysfs_attr(base_vha); | 2427 | qla2x00_alloc_sysfs_attr(base_vha); |
2633 | 2428 | ||
@@ -2635,6 +2430,9 @@ skip_dpc: | |||
2635 | 2430 | ||
2636 | qla2x00_dfs_setup(base_vha); | 2431 | qla2x00_dfs_setup(base_vha); |
2637 | 2432 | ||
2433 | ql_log(ql_log_info, base_vha, 0x00fa, | ||
2434 | "QLogic Fibre Channed HBA Driver: %s.\n", | ||
2435 | qla2x00_version_str); | ||
2638 | ql_log(ql_log_info, base_vha, 0x00fb, | 2436 | ql_log(ql_log_info, base_vha, 0x00fb, |
2639 | "QLogic %s - %s.\n", | 2437 | "QLogic %s - %s.\n", |
2640 | ha->model_number, ha->model_desc ? ha->model_desc : ""); | 2438 | ha->model_number, ha->model_desc ? ha->model_desc : ""); |
@@ -2645,17 +2443,11 @@ skip_dpc: | |||
2645 | base_vha->host_no, | 2443 | base_vha->host_no, |
2646 | ha->isp_ops->fw_version_str(base_vha, fw_str)); | 2444 | ha->isp_ops->fw_version_str(base_vha, fw_str)); |
2647 | 2445 | ||
2648 | qlt_add_target(ha, base_vha); | ||
2649 | |||
2650 | return 0; | 2446 | return 0; |
2651 | 2447 | ||
2652 | probe_init_failed: | 2448 | probe_init_failed: |
2653 | qla2x00_free_req_que(ha, req); | 2449 | qla2x00_free_req_que(ha, req); |
2654 | ha->req_q_map[0] = NULL; | ||
2655 | clear_bit(0, ha->req_qid_map); | ||
2656 | qla2x00_free_rsp_que(ha, rsp); | 2450 | qla2x00_free_rsp_que(ha, rsp); |
2657 | ha->rsp_q_map[0] = NULL; | ||
2658 | clear_bit(0, ha->rsp_qid_map); | ||
2659 | ha->max_req_queues = ha->max_rsp_queues = 0; | 2451 | ha->max_req_queues = ha->max_rsp_queues = 0; |
2660 | 2452 | ||
2661 | probe_failed: | 2453 | probe_failed: |
@@ -2678,11 +2470,7 @@ probe_hw_failed: | |||
2678 | qla82xx_idc_lock(ha); | 2470 | qla82xx_idc_lock(ha); |
2679 | qla82xx_clear_drv_active(ha); | 2471 | qla82xx_clear_drv_active(ha); |
2680 | qla82xx_idc_unlock(ha); | 2472 | qla82xx_idc_unlock(ha); |
2681 | } | 2473 | iounmap((device_reg_t __iomem *)ha->nx_pcibase); |
2682 | iospace_config_failed: | ||
2683 | if (IS_QLA82XX(ha)) { | ||
2684 | if (!ha->nx_pcibase) | ||
2685 | iounmap((device_reg_t __iomem *)ha->nx_pcibase); | ||
2686 | if (!ql2xdbwr) | 2474 | if (!ql2xdbwr) |
2687 | iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr); | 2475 | iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr); |
2688 | } else { | 2476 | } else { |
@@ -2699,22 +2487,6 @@ probe_out: | |||
2699 | } | 2487 | } |
2700 | 2488 | ||
2701 | static void | 2489 | static void |
2702 | qla2x00_stop_dpc_thread(scsi_qla_host_t *vha) | ||
2703 | { | ||
2704 | struct qla_hw_data *ha = vha->hw; | ||
2705 | struct task_struct *t = ha->dpc_thread; | ||
2706 | |||
2707 | if (ha->dpc_thread == NULL) | ||
2708 | return; | ||
2709 | /* | ||
2710 | * qla2xxx_wake_dpc checks for ->dpc_thread | ||
2711 | * so we need to zero it out. | ||
2712 | */ | ||
2713 | ha->dpc_thread = NULL; | ||
2714 | kthread_stop(t); | ||
2715 | } | ||
2716 | |||
2717 | static void | ||
2718 | qla2x00_shutdown(struct pci_dev *pdev) | 2490 | qla2x00_shutdown(struct pci_dev *pdev) |
2719 | { | 2491 | { |
2720 | scsi_qla_host_t *vha; | 2492 | scsi_qla_host_t *vha; |
@@ -2757,19 +2529,9 @@ qla2x00_remove_one(struct pci_dev *pdev) | |||
2757 | struct qla_hw_data *ha; | 2529 | struct qla_hw_data *ha; |
2758 | unsigned long flags; | 2530 | unsigned long flags; |
2759 | 2531 | ||
2760 | /* | ||
2761 | * If the PCI device is disabled that means that probe failed and any | ||
2762 | * resources should be have cleaned up on probe exit. | ||
2763 | */ | ||
2764 | if (!atomic_read(&pdev->enable_cnt)) | ||
2765 | return; | ||
2766 | |||
2767 | base_vha = pci_get_drvdata(pdev); | 2532 | base_vha = pci_get_drvdata(pdev); |
2768 | ha = base_vha->hw; | 2533 | ha = base_vha->hw; |
2769 | 2534 | ||
2770 | ha->flags.host_shutting_down = 1; | ||
2771 | |||
2772 | set_bit(UNLOADING, &base_vha->dpc_flags); | ||
2773 | mutex_lock(&ha->vport_lock); | 2535 | mutex_lock(&ha->vport_lock); |
2774 | while (ha->cur_vport_count) { | 2536 | while (ha->cur_vport_count) { |
2775 | struct Scsi_Host *scsi_host; | 2537 | struct Scsi_Host *scsi_host; |
@@ -2791,13 +2553,7 @@ qla2x00_remove_one(struct pci_dev *pdev) | |||
2791 | } | 2553 | } |
2792 | mutex_unlock(&ha->vport_lock); | 2554 | mutex_unlock(&ha->vport_lock); |
2793 | 2555 | ||
2794 | if (IS_QLA8031(ha)) { | 2556 | set_bit(UNLOADING, &base_vha->dpc_flags); |
2795 | ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, | ||
2796 | "Clearing fcoe driver presence.\n"); | ||
2797 | if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) | ||
2798 | ql_dbg(ql_dbg_p3p, base_vha, 0xb079, | ||
2799 | "Error while clearing DRV-Presence.\n"); | ||
2800 | } | ||
2801 | 2557 | ||
2802 | qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); | 2558 | qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); |
2803 | 2559 | ||
@@ -2818,21 +2574,6 @@ qla2x00_remove_one(struct pci_dev *pdev) | |||
2818 | ha->wq = NULL; | 2574 | ha->wq = NULL; |
2819 | } | 2575 | } |
2820 | 2576 | ||
2821 | /* Cancel all work and destroy DPC workqueues */ | ||
2822 | if (ha->dpc_lp_wq) { | ||
2823 | cancel_work_sync(&ha->idc_aen); | ||
2824 | destroy_workqueue(ha->dpc_lp_wq); | ||
2825 | ha->dpc_lp_wq = NULL; | ||
2826 | } | ||
2827 | |||
2828 | if (ha->dpc_hp_wq) { | ||
2829 | cancel_work_sync(&ha->nic_core_reset); | ||
2830 | cancel_work_sync(&ha->idc_state_handler); | ||
2831 | cancel_work_sync(&ha->nic_core_unrecoverable); | ||
2832 | destroy_workqueue(ha->dpc_hp_wq); | ||
2833 | ha->dpc_hp_wq = NULL; | ||
2834 | } | ||
2835 | |||
2836 | /* Kill the kernel thread for this host */ | 2577 | /* Kill the kernel thread for this host */ |
2837 | if (ha->dpc_thread) { | 2578 | if (ha->dpc_thread) { |
2838 | struct task_struct *t = ha->dpc_thread; | 2579 | struct task_struct *t = ha->dpc_thread; |
@@ -2844,7 +2585,6 @@ qla2x00_remove_one(struct pci_dev *pdev) | |||
2844 | ha->dpc_thread = NULL; | 2585 | ha->dpc_thread = NULL; |
2845 | kthread_stop(t); | 2586 | kthread_stop(t); |
2846 | } | 2587 | } |
2847 | qlt_remove_target(ha, base_vha); | ||
2848 | 2588 | ||
2849 | qla2x00_free_sysfs_attr(base_vha); | 2589 | qla2x00_free_sysfs_attr(base_vha); |
2850 | 2590 | ||
@@ -2870,9 +2610,6 @@ qla2x00_remove_one(struct pci_dev *pdev) | |||
2870 | 2610 | ||
2871 | if (ha->mqiobase) | 2611 | if (ha->mqiobase) |
2872 | iounmap(ha->mqiobase); | 2612 | iounmap(ha->mqiobase); |
2873 | |||
2874 | if (IS_QLA83XX(ha) && ha->msixbase) | ||
2875 | iounmap(ha->msixbase); | ||
2876 | } | 2613 | } |
2877 | 2614 | ||
2878 | pci_release_selected_regions(ha->pdev, ha->bars); | 2615 | pci_release_selected_regions(ha->pdev, ha->bars); |
@@ -2896,9 +2633,20 @@ qla2x00_free_device(scsi_qla_host_t *vha) | |||
2896 | if (vha->timer_active) | 2633 | if (vha->timer_active) |
2897 | qla2x00_stop_timer(vha); | 2634 | qla2x00_stop_timer(vha); |
2898 | 2635 | ||
2899 | qla2x00_stop_dpc_thread(vha); | 2636 | /* Kill the kernel thread for this host */ |
2637 | if (ha->dpc_thread) { | ||
2638 | struct task_struct *t = ha->dpc_thread; | ||
2639 | |||
2640 | /* | ||
2641 | * qla2xxx_wake_dpc checks for ->dpc_thread | ||
2642 | * so we need to zero it out. | ||
2643 | */ | ||
2644 | ha->dpc_thread = NULL; | ||
2645 | kthread_stop(t); | ||
2646 | } | ||
2900 | 2647 | ||
2901 | qla25xx_delete_queues(vha); | 2648 | qla25xx_delete_queues(vha); |
2649 | |||
2902 | if (ha->flags.fce_enabled) | 2650 | if (ha->flags.fce_enabled) |
2903 | qla2x00_disable_fce_trace(vha, NULL, NULL); | 2651 | qla2x00_disable_fce_trace(vha, NULL, NULL); |
2904 | 2652 | ||
@@ -2922,8 +2670,6 @@ qla2x00_free_device(scsi_qla_host_t *vha) | |||
2922 | 2670 | ||
2923 | qla2x00_mem_free(ha); | 2671 | qla2x00_mem_free(ha); |
2924 | 2672 | ||
2925 | qla82xx_md_free(vha); | ||
2926 | |||
2927 | qla2x00_free_queues(ha); | 2673 | qla2x00_free_queues(ha); |
2928 | } | 2674 | } |
2929 | 2675 | ||
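Both versions stop the DPC kernel thread by first clearing ha->dpc_thread, because the wake-up path tests that pointer, and only then calling kthread_stop(). A small, hypothetical helper showing the same ordering:

#include <linux/kthread.h>

static void example_stop_thread(struct task_struct **threadp)
{
	struct task_struct *t = *threadp;

	if (!t)
		return;
	*threadp = NULL;	/* wakers test this pointer, so clear it first */
	kthread_stop(t);	/* blocks until the thread exits */
}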
@@ -2933,7 +2679,6 @@ void qla2x00_free_fcports(struct scsi_qla_host *vha) | |||
2933 | 2679 | ||
2934 | list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) { | 2680 | list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) { |
2935 | list_del(&fcport->list); | 2681 | list_del(&fcport->list); |
2936 | qla2x00_clear_loop_id(fcport); | ||
2937 | kfree(fcport); | 2682 | kfree(fcport); |
2938 | fcport = NULL; | 2683 | fcport = NULL; |
2939 | } | 2684 | } |
@@ -2958,10 +2703,8 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
2958 | spin_unlock_irqrestore(vha->host->host_lock, flags); | 2703 | spin_unlock_irqrestore(vha->host->host_lock, flags); |
2959 | set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); | 2704 | set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); |
2960 | qla2xxx_wake_dpc(base_vha); | 2705 | qla2xxx_wake_dpc(base_vha); |
2961 | } else { | 2706 | } else |
2962 | fc_remote_port_delete(rport); | 2707 | fc_remote_port_delete(rport); |
2963 | qlt_fc_port_deleted(vha, fcport); | ||
2964 | } | ||
2965 | } | 2708 | } |
2966 | 2709 | ||
2967 | /* | 2710 | /* |
@@ -2977,7 +2720,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, | |||
2977 | int do_login, int defer) | 2720 | int do_login, int defer) |
2978 | { | 2721 | { |
2979 | if (atomic_read(&fcport->state) == FCS_ONLINE && | 2722 | if (atomic_read(&fcport->state) == FCS_ONLINE && |
2980 | vha->vp_idx == fcport->vha->vp_idx) { | 2723 | vha->vp_idx == fcport->vp_idx) { |
2981 | qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); | 2724 | qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); |
2982 | qla2x00_schedule_rport_del(vha, fcport, defer); | 2725 | qla2x00_schedule_rport_del(vha, fcport, defer); |
2983 | } | 2726 | } |
@@ -3026,7 +2769,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer) | |||
3026 | fc_port_t *fcport; | 2769 | fc_port_t *fcport; |
3027 | 2770 | ||
3028 | list_for_each_entry(fcport, &vha->vp_fcports, list) { | 2771 | list_for_each_entry(fcport, &vha->vp_fcports, list) { |
3029 | if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx) | 2772 | if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx) |
3030 | continue; | 2773 | continue; |
3031 | 2774 | ||
3032 | /* | 2775 | /* |
@@ -3039,7 +2782,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer) | |||
3039 | qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); | 2782 | qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); |
3040 | if (defer) | 2783 | if (defer) |
3041 | qla2x00_schedule_rport_del(vha, fcport, defer); | 2784 | qla2x00_schedule_rport_del(vha, fcport, defer); |
3042 | else if (vha->vp_idx == fcport->vha->vp_idx) | 2785 | else if (vha->vp_idx == fcport->vp_idx) |
3043 | qla2x00_schedule_rport_del(vha, fcport, defer); | 2786 | qla2x00_schedule_rport_del(vha, fcport, defer); |
3044 | } | 2787 | } |
3045 | } | 2788 | } |
@@ -3064,13 +2807,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, | |||
3064 | if (!ha->init_cb) | 2807 | if (!ha->init_cb) |
3065 | goto fail; | 2808 | goto fail; |
3066 | 2809 | ||
3067 | if (qlt_mem_alloc(ha) < 0) | 2810 | ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE, |
3068 | goto fail_free_init_cb; | 2811 | &ha->gid_list_dma, GFP_KERNEL); |
3069 | |||
3070 | ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, | ||
3071 | qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL); | ||
3072 | if (!ha->gid_list) | 2812 | if (!ha->gid_list) |
3073 | goto fail_free_tgt_mem; | 2813 | goto fail_free_init_cb; |
3074 | 2814 | ||
3075 | ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); | 2815 | ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); |
3076 | if (!ha->srb_mempool) | 2816 | if (!ha->srb_mempool) |
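Both sides of the hunk above pair a dma_alloc_coherent() of the GID list with a later dma_free_coherent() of the same size and handle; only the size expression differs between them. A stripped-down sketch of that pairing, with GID_EXAMPLE_SIZE standing in for whichever size macro or helper applies:

#include <linux/dma-mapping.h>

#define GID_EXAMPLE_SIZE 4096	/* placeholder for the real GID list size */

static void *example_alloc_gid(struct device *dev, dma_addr_t *dma)
{
	/* coherent buffer visible to both CPU and HBA */
	return dma_alloc_coherent(dev, GID_EXAMPLE_SIZE, dma, GFP_KERNEL);
}

static void example_free_gid(struct device *dev, void *cpu, dma_addr_t dma)
{
	/* must match the allocation size and DMA handle exactly */
	dma_free_coherent(dev, GID_EXAMPLE_SIZE, cpu, dma);
}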
@@ -3139,7 +2879,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, | |||
3139 | if (!ha->sns_cmd) | 2879 | if (!ha->sns_cmd) |
3140 | goto fail_dma_pool; | 2880 | goto fail_dma_pool; |
3141 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026, | 2881 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026, |
3142 | "sns_cmd: %p.\n", ha->sns_cmd); | 2882 | "sns_cmd.\n", ha->sns_cmd); |
3143 | } else { | 2883 | } else { |
3144 | /* Get consistent memory allocated for MS IOCB */ | 2884 | /* Get consistent memory allocated for MS IOCB */ |
3145 | ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, | 2885 | ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, |
@@ -3209,7 +2949,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, | |||
3209 | ha->npiv_info = NULL; | 2949 | ha->npiv_info = NULL; |
3210 | 2950 | ||
3211 | /* Get consistent memory allocated for EX-INIT-CB. */ | 2951 | /* Get consistent memory allocated for EX-INIT-CB. */ |
3212 | if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) { | 2952 | if (IS_QLA8XXX_TYPE(ha)) { |
3213 | ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, | 2953 | ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, |
3214 | &ha->ex_init_cb_dma); | 2954 | &ha->ex_init_cb_dma); |
3215 | if (!ha->ex_init_cb) | 2955 | if (!ha->ex_init_cb) |
@@ -3231,18 +2971,6 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, | |||
3231 | } | 2971 | } |
3232 | 2972 | ||
3233 | INIT_LIST_HEAD(&ha->vp_list); | 2973 | INIT_LIST_HEAD(&ha->vp_list); |
3234 | |||
3235 | /* Allocate memory for our loop_id bitmap */ | ||
3236 | ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long), | ||
3237 | GFP_KERNEL); | ||
3238 | if (!ha->loop_id_map) | ||
3239 | goto fail_async_pd; | ||
3240 | else { | ||
3241 | qla2x00_set_reserved_loop_ids(ha); | ||
3242 | ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, | ||
3243 | "loop_id_map=%p. \n", ha->loop_id_map); | ||
3244 | } | ||
3245 | |||
3246 | return 1; | 2974 | return 1; |
3247 | 2975 | ||
3248 | fail_async_pd: | 2976 | fail_async_pd: |
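The removed lines above allocated a loop-ID bitmap sized with BITS_TO_LONGS() and then reserved some IDs up front. A rough analogue of that allocation plus a claim helper; MAP_BITS and the function names are invented for the example:

#include <linux/slab.h>
#include <linux/bitops.h>

#define MAP_BITS 2048	/* illustrative stand-in for LOOPID_MAP_SIZE */

static unsigned long *example_alloc_id_map(void)
{
	/* one long per BITS_PER_LONG ids, zero-initialised */
	return kcalloc(BITS_TO_LONGS(MAP_BITS), sizeof(long), GFP_KERNEL);
}

static int example_claim_id(unsigned long *map)
{
	int id = find_first_zero_bit(map, MAP_BITS);

	if (id >= MAP_BITS)
		return -1;	/* no free loop id */
	set_bit(id, map);
	return id;
}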
@@ -3295,13 +3023,10 @@ fail_free_srb_mempool: | |||
3295 | mempool_destroy(ha->srb_mempool); | 3023 | mempool_destroy(ha->srb_mempool); |
3296 | ha->srb_mempool = NULL; | 3024 | ha->srb_mempool = NULL; |
3297 | fail_free_gid_list: | 3025 | fail_free_gid_list: |
3298 | dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), | 3026 | dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, |
3299 | ha->gid_list, | ||
3300 | ha->gid_list_dma); | 3027 | ha->gid_list_dma); |
3301 | ha->gid_list = NULL; | 3028 | ha->gid_list = NULL; |
3302 | ha->gid_list_dma = 0; | 3029 | ha->gid_list_dma = 0; |
3303 | fail_free_tgt_mem: | ||
3304 | qlt_mem_free(ha); | ||
3305 | fail_free_init_cb: | 3030 | fail_free_init_cb: |
3306 | dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, | 3031 | dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, |
3307 | ha->init_cb_dma); | 3032 | ha->init_cb_dma); |
@@ -3354,10 +3079,6 @@ qla2x00_mem_free(struct qla_hw_data *ha) | |||
3354 | { | 3079 | { |
3355 | qla2x00_free_fw_dump(ha); | 3080 | qla2x00_free_fw_dump(ha); |
3356 | 3081 | ||
3357 | if (ha->mctp_dump) | ||
3358 | dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump, | ||
3359 | ha->mctp_dump_dma); | ||
3360 | |||
3361 | if (ha->srb_mempool) | 3082 | if (ha->srb_mempool) |
3362 | mempool_destroy(ha->srb_mempool); | 3083 | mempool_destroy(ha->srb_mempool); |
3363 | 3084 | ||
@@ -3380,6 +3101,9 @@ qla2x00_mem_free(struct qla_hw_data *ha) | |||
3380 | if (ha->sfp_data) | 3101 | if (ha->sfp_data) |
3381 | dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma); | 3102 | dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma); |
3382 | 3103 | ||
3104 | if (ha->edc_data) | ||
3105 | dma_pool_free(ha->s_dma_pool, ha->edc_data, ha->edc_data_dma); | ||
3106 | |||
3383 | if (ha->ms_iocb) | 3107 | if (ha->ms_iocb) |
3384 | dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); | 3108 | dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); |
3385 | 3109 | ||
@@ -3394,8 +3118,8 @@ qla2x00_mem_free(struct qla_hw_data *ha) | |||
3394 | dma_pool_destroy(ha->s_dma_pool); | 3118 | dma_pool_destroy(ha->s_dma_pool); |
3395 | 3119 | ||
3396 | if (ha->gid_list) | 3120 | if (ha->gid_list) |
3397 | dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), | 3121 | dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, |
3398 | ha->gid_list, ha->gid_list_dma); | 3122 | ha->gid_list_dma); |
3399 | 3123 | ||
3400 | if (IS_QLA82XX(ha)) { | 3124 | if (IS_QLA82XX(ha)) { |
3401 | if (!list_empty(&ha->gbl_dsd_list)) { | 3125 | if (!list_empty(&ha->gbl_dsd_list)) { |
@@ -3421,16 +3145,12 @@ qla2x00_mem_free(struct qla_hw_data *ha) | |||
3421 | if (ha->ctx_mempool) | 3145 | if (ha->ctx_mempool) |
3422 | mempool_destroy(ha->ctx_mempool); | 3146 | mempool_destroy(ha->ctx_mempool); |
3423 | 3147 | ||
3424 | qlt_mem_free(ha); | ||
3425 | |||
3426 | if (ha->init_cb) | 3148 | if (ha->init_cb) |
3427 | dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, | 3149 | dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, |
3428 | ha->init_cb, ha->init_cb_dma); | 3150 | ha->init_cb, ha->init_cb_dma); |
3429 | vfree(ha->optrom_buffer); | 3151 | vfree(ha->optrom_buffer); |
3430 | kfree(ha->nvram); | 3152 | kfree(ha->nvram); |
3431 | kfree(ha->npiv_info); | 3153 | kfree(ha->npiv_info); |
3432 | kfree(ha->swl); | ||
3433 | kfree(ha->loop_id_map); | ||
3434 | 3154 | ||
3435 | ha->srb_mempool = NULL; | 3155 | ha->srb_mempool = NULL; |
3436 | ha->ctx_mempool = NULL; | 3156 | ha->ctx_mempool = NULL; |
@@ -3453,10 +3173,6 @@ qla2x00_mem_free(struct qla_hw_data *ha) | |||
3453 | 3173 | ||
3454 | ha->gid_list = NULL; | 3174 | ha->gid_list = NULL; |
3455 | ha->gid_list_dma = 0; | 3175 | ha->gid_list_dma = 0; |
3456 | |||
3457 | ha->tgt.atio_ring = NULL; | ||
3458 | ha->tgt.atio_dma = 0; | ||
3459 | ha->tgt.tgt_vp_map = NULL; | ||
3460 | } | 3176 | } |
3461 | 3177 | ||
3462 | struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, | 3178 | struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, |
@@ -3723,24 +3439,9 @@ void qla2x00_relogin(struct scsi_qla_host *vha) | |||
3723 | continue; | 3439 | continue; |
3724 | /* Attempt a retry. */ | 3440 | /* Attempt a retry. */ |
3725 | status = 1; | 3441 | status = 1; |
3726 | } else { | 3442 | } else |
3727 | status = qla2x00_fabric_login(vha, | 3443 | status = qla2x00_fabric_login(vha, |
3728 | fcport, &next_loopid); | 3444 | fcport, &next_loopid); |
3729 | if (status == QLA_SUCCESS) { | ||
3730 | int status2; | ||
3731 | uint8_t opts; | ||
3732 | |||
3733 | opts = 0; | ||
3734 | if (fcport->flags & | ||
3735 | FCF_FCP2_DEVICE) | ||
3736 | opts |= BIT_1; | ||
3737 | status2 = | ||
3738 | qla2x00_get_port_database( | ||
3739 | vha, fcport, opts); | ||
3740 | if (status2 != QLA_SUCCESS) | ||
3741 | status = 1; | ||
3742 | } | ||
3743 | } | ||
3744 | } else | 3445 | } else |
3745 | status = qla2x00_local_device_login(vha, | 3446 | status = qla2x00_local_device_login(vha, |
3746 | fcport); | 3447 | fcport); |
@@ -3765,651 +3466,13 @@ void qla2x00_relogin(struct scsi_qla_host *vha) | |||
3765 | } | 3466 | } |
3766 | 3467 | ||
3767 | if (fcport->login_retry == 0 && status != QLA_SUCCESS) | 3468 | if (fcport->login_retry == 0 && status != QLA_SUCCESS) |
3768 | qla2x00_clear_loop_id(fcport); | 3469 | fcport->loop_id = FC_NO_LOOP_ID; |
3769 | } | 3470 | } |
3770 | if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) | 3471 | if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) |
3771 | break; | 3472 | break; |
3772 | } | 3473 | } |
3773 | } | 3474 | } |
3774 | 3475 | ||
3775 | /* Schedule work on any of the dpc-workqueues */ | ||
3776 | void | ||
3777 | qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code) | ||
3778 | { | ||
3779 | struct qla_hw_data *ha = base_vha->hw; | ||
3780 | |||
3781 | switch (work_code) { | ||
3782 | case MBA_IDC_AEN: /* 0x8200 */ | ||
3783 | if (ha->dpc_lp_wq) | ||
3784 | queue_work(ha->dpc_lp_wq, &ha->idc_aen); | ||
3785 | break; | ||
3786 | |||
3787 | case QLA83XX_NIC_CORE_RESET: /* 0x1 */ | ||
3788 | if (!ha->flags.nic_core_reset_hdlr_active) { | ||
3789 | if (ha->dpc_hp_wq) | ||
3790 | queue_work(ha->dpc_hp_wq, &ha->nic_core_reset); | ||
3791 | } else | ||
3792 | ql_dbg(ql_dbg_p3p, base_vha, 0xb05e, | ||
3793 | "NIC Core reset is already active. Skip " | ||
3794 | "scheduling it again.\n"); | ||
3795 | break; | ||
3796 | case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */ | ||
3797 | if (ha->dpc_hp_wq) | ||
3798 | queue_work(ha->dpc_hp_wq, &ha->idc_state_handler); | ||
3799 | break; | ||
3800 | case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */ | ||
3801 | if (ha->dpc_hp_wq) | ||
3802 | queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable); | ||
3803 | break; | ||
3804 | default: | ||
3805 | ql_log(ql_log_warn, base_vha, 0xb05f, | ||
3806 | "Unknow work-code=0x%x.\n", work_code); | ||
3807 | } | ||
3808 | |||
3809 | return; | ||
3810 | } | ||
3811 | |||
3812 | /* Work: Perform NIC Core Unrecoverable state handling */ | ||
3813 | void | ||
3814 | qla83xx_nic_core_unrecoverable_work(struct work_struct *work) | ||
3815 | { | ||
3816 | struct qla_hw_data *ha = | ||
3817 | container_of(work, struct qla_hw_data, nic_core_unrecoverable); | ||
3818 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); | ||
3819 | uint32_t dev_state = 0; | ||
3820 | |||
3821 | qla83xx_idc_lock(base_vha, 0); | ||
3822 | qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); | ||
3823 | qla83xx_reset_ownership(base_vha); | ||
3824 | if (ha->flags.nic_core_reset_owner) { | ||
3825 | ha->flags.nic_core_reset_owner = 0; | ||
3826 | qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, | ||
3827 | QLA8XXX_DEV_FAILED); | ||
3828 | ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n"); | ||
3829 | qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); | ||
3830 | } | ||
3831 | qla83xx_idc_unlock(base_vha, 0); | ||
3832 | } | ||
3833 | |||
3834 | /* Work: Execute IDC state handler */ | ||
3835 | void | ||
3836 | qla83xx_idc_state_handler_work(struct work_struct *work) | ||
3837 | { | ||
3838 | struct qla_hw_data *ha = | ||
3839 | container_of(work, struct qla_hw_data, idc_state_handler); | ||
3840 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); | ||
3841 | uint32_t dev_state = 0; | ||
3842 | |||
3843 | qla83xx_idc_lock(base_vha, 0); | ||
3844 | qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); | ||
3845 | if (dev_state == QLA8XXX_DEV_FAILED || | ||
3846 | dev_state == QLA8XXX_DEV_NEED_QUIESCENT) | ||
3847 | qla83xx_idc_state_handler(base_vha); | ||
3848 | qla83xx_idc_unlock(base_vha, 0); | ||
3849 | } | ||
3850 | |||
3851 | static int | ||
3852 | qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha) | ||
3853 | { | ||
3854 | int rval = QLA_SUCCESS; | ||
3855 | unsigned long heart_beat_wait = jiffies + (1 * HZ); | ||
3856 | uint32_t heart_beat_counter1, heart_beat_counter2; | ||
3857 | |||
3858 | do { | ||
3859 | if (time_after(jiffies, heart_beat_wait)) { | ||
3860 | ql_dbg(ql_dbg_p3p, base_vha, 0xb07c, | ||
3861 | "Nic Core f/w is not alive.\n"); | ||
3862 | rval = QLA_FUNCTION_FAILED; | ||
3863 | break; | ||
3864 | } | ||
3865 | |||
3866 | qla83xx_idc_lock(base_vha, 0); | ||
3867 | qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, | ||
3868 | &heart_beat_counter1); | ||
3869 | qla83xx_idc_unlock(base_vha, 0); | ||
3870 | msleep(100); | ||
3871 | qla83xx_idc_lock(base_vha, 0); | ||
3872 | qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, | ||
3873 | &heart_beat_counter2); | ||
3874 | qla83xx_idc_unlock(base_vha, 0); | ||
3875 | } while (heart_beat_counter1 == heart_beat_counter2); | ||
3876 | |||
3877 | return rval; | ||
3878 | } | ||
3879 | |||
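qla83xx_check_nic_core_fw_alive() above decides liveness by sampling a firmware heartbeat counter twice, 100 ms apart, inside a roughly one-second deadline, and declares the firmware dead if the counter never moves. A self-contained sketch of that probe, with read_heartbeat() standing in for the register read done under the IDC lock:

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/types.h>

static bool example_fw_alive(u32 (*read_heartbeat)(void))
{
	unsigned long deadline = jiffies + HZ;	/* give it about one second */
	u32 a, b;

	do {
		if (time_after(jiffies, deadline))
			return false;		/* counter never advanced */
		a = read_heartbeat();
		msleep(100);
		b = read_heartbeat();
	} while (a == b);

	return true;				/* firmware is ticking */
}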
3880 | /* Work: Perform NIC Core Reset handling */ | ||
3881 | void | ||
3882 | qla83xx_nic_core_reset_work(struct work_struct *work) | ||
3883 | { | ||
3884 | struct qla_hw_data *ha = | ||
3885 | container_of(work, struct qla_hw_data, nic_core_reset); | ||
3886 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); | ||
3887 | uint32_t dev_state = 0; | ||
3888 | |||
3889 | if (IS_QLA2031(ha)) { | ||
3890 | if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS) | ||
3891 | ql_log(ql_log_warn, base_vha, 0xb081, | ||
3892 | "Failed to dump mctp\n"); | ||
3893 | return; | ||
3894 | } | ||
3895 | |||
3896 | if (!ha->flags.nic_core_reset_hdlr_active) { | ||
3897 | if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) { | ||
3898 | qla83xx_idc_lock(base_vha, 0); | ||
3899 | qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, | ||
3900 | &dev_state); | ||
3901 | qla83xx_idc_unlock(base_vha, 0); | ||
3902 | if (dev_state != QLA8XXX_DEV_NEED_RESET) { | ||
3903 | ql_dbg(ql_dbg_p3p, base_vha, 0xb07a, | ||
3904 | "Nic Core f/w is alive.\n"); | ||
3905 | return; | ||
3906 | } | ||
3907 | } | ||
3908 | |||
3909 | ha->flags.nic_core_reset_hdlr_active = 1; | ||
3910 | if (qla83xx_nic_core_reset(base_vha)) { | ||
3911 | /* NIC Core reset failed. */ | ||
3912 | ql_dbg(ql_dbg_p3p, base_vha, 0xb061, | ||
3913 | "NIC Core reset failed.\n"); | ||
3914 | } | ||
3915 | ha->flags.nic_core_reset_hdlr_active = 0; | ||
3916 | } | ||
3917 | } | ||
3918 | |||
3919 | /* Work: Handle 8200 IDC aens */ | ||
3920 | void | ||
3921 | qla83xx_service_idc_aen(struct work_struct *work) | ||
3922 | { | ||
3923 | struct qla_hw_data *ha = | ||
3924 | container_of(work, struct qla_hw_data, idc_aen); | ||
3925 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); | ||
3926 | uint32_t dev_state, idc_control; | ||
3927 | |||
3928 | qla83xx_idc_lock(base_vha, 0); | ||
3929 | qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); | ||
3930 | qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control); | ||
3931 | qla83xx_idc_unlock(base_vha, 0); | ||
3932 | if (dev_state == QLA8XXX_DEV_NEED_RESET) { | ||
3933 | if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) { | ||
3934 | ql_dbg(ql_dbg_p3p, base_vha, 0xb062, | ||
3935 | "Application requested NIC Core Reset.\n"); | ||
3936 | qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); | ||
3937 | } else if (qla83xx_check_nic_core_fw_alive(base_vha) == | ||
3938 | QLA_SUCCESS) { | ||
3939 | ql_dbg(ql_dbg_p3p, base_vha, 0xb07b, | ||
3940 | "Other protocol driver requested NIC Core Reset.\n"); | ||
3941 | qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); | ||
3942 | } | ||
3943 | } else if (dev_state == QLA8XXX_DEV_FAILED || | ||
3944 | dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { | ||
3945 | qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); | ||
3946 | } | ||
3947 | } | ||
3948 | |||
3949 | static void | ||
3950 | qla83xx_wait_logic(void) | ||
3951 | { | ||
3952 | int i; | ||
3953 | |||
3954 | /* Yield CPU */ | ||
3955 | if (!in_interrupt()) { | ||
3956 | /* | ||
3957 | * Wait about 200ms before retrying again. | ||
3958 | * This controls the number of retries for single | ||
3959 | * lock operation. | ||
3960 | */ | ||
3961 | msleep(100); | ||
3962 | schedule(); | ||
3963 | } else { | ||
3964 | for (i = 0; i < 20; i++) | ||
3965 | cpu_relax(); /* This is a nop instruction on i386 */ | ||
3966 | } | ||
3967 | } | ||
3968 | |||
3969 | static int | ||
3970 | qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha) | ||
3971 | { | ||
3972 | int rval; | ||
3973 | uint32_t data; | ||
3974 | uint32_t idc_lck_rcvry_stage_mask = 0x3; | ||
3975 | uint32_t idc_lck_rcvry_owner_mask = 0x3c; | ||
3976 | struct qla_hw_data *ha = base_vha->hw; | ||
3977 | |||
3978 | rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data); | ||
3979 | if (rval) | ||
3980 | return rval; | ||
3981 | |||
3982 | if ((data & idc_lck_rcvry_stage_mask) > 0) { | ||
3983 | return QLA_SUCCESS; | ||
3984 | } else { | ||
3985 | data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2); | ||
3986 | rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, | ||
3987 | data); | ||
3988 | if (rval) | ||
3989 | return rval; | ||
3990 | |||
3991 | msleep(200); | ||
3992 | |||
3993 | rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, | ||
3994 | &data); | ||
3995 | if (rval) | ||
3996 | return rval; | ||
3997 | |||
3998 | if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) { | ||
3999 | data &= (IDC_LOCK_RECOVERY_STAGE2 | | ||
4000 | ~(idc_lck_rcvry_stage_mask)); | ||
4001 | rval = qla83xx_wr_reg(base_vha, | ||
4002 | QLA83XX_IDC_LOCK_RECOVERY, data); | ||
4003 | if (rval) | ||
4004 | return rval; | ||
4005 | |||
4006 | /* Forcefully perform IDC UnLock */ | ||
4007 | rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, | ||
4008 | &data); | ||
4009 | if (rval) | ||
4010 | return rval; | ||
4011 | /* Clear lock-id by setting 0xff */ | ||
4012 | rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, | ||
4013 | 0xff); | ||
4014 | if (rval) | ||
4015 | return rval; | ||
4016 | /* Clear lock-recovery by setting 0x0 */ | ||
4017 | rval = qla83xx_wr_reg(base_vha, | ||
4018 | QLA83XX_IDC_LOCK_RECOVERY, 0x0); | ||
4019 | if (rval) | ||
4020 | return rval; | ||
4021 | } else | ||
4022 | return QLA_SUCCESS; | ||
4023 | } | ||
4024 | |||
4025 | return rval; | ||
4026 | } | ||
4027 | |||
4028 | static int | ||
4029 | qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha) | ||
4030 | { | ||
4031 | int rval = QLA_SUCCESS; | ||
4032 | uint32_t o_drv_lockid, n_drv_lockid; | ||
4033 | unsigned long lock_recovery_timeout; | ||
4034 | |||
4035 | lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT; | ||
4036 | retry_lockid: | ||
4037 | rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid); | ||
4038 | if (rval) | ||
4039 | goto exit; | ||
4040 | |||
4041 | /* MAX wait time before forcing IDC Lock recovery = 2 secs */ | ||
4042 | if (time_after_eq(jiffies, lock_recovery_timeout)) { | ||
4043 | if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS) | ||
4044 | return QLA_SUCCESS; | ||
4045 | else | ||
4046 | return QLA_FUNCTION_FAILED; | ||
4047 | } | ||
4048 | |||
4049 | rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid); | ||
4050 | if (rval) | ||
4051 | goto exit; | ||
4052 | |||
4053 | if (o_drv_lockid == n_drv_lockid) { | ||
4054 | qla83xx_wait_logic(); | ||
4055 | goto retry_lockid; | ||
4056 | } else | ||
4057 | return QLA_SUCCESS; | ||
4058 | |||
4059 | exit: | ||
4060 | return rval; | ||
4061 | } | ||
4062 | |||
4063 | void | ||
4064 | qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id) | ||
4065 | { | ||
4066 | uint16_t options = (requester_id << 15) | BIT_6; | ||
4067 | uint32_t data; | ||
4068 | struct qla_hw_data *ha = base_vha->hw; | ||
4069 | |||
4070 | /* IDC-lock implementation using driver-lock/lock-id remote registers */ | ||
4071 | retry_lock: | ||
4072 | if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data) | ||
4073 | == QLA_SUCCESS) { | ||
4074 | if (data) { | ||
4075 | /* Setting lock-id to our function-number */ | ||
4076 | qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, | ||
4077 | ha->portnum); | ||
4078 | } else { | ||
4079 | ql_dbg(ql_dbg_p3p, base_vha, 0xb063, | ||
4080 | "Failed to acquire IDC lock. retrying...\n"); | ||
4081 | |||
4082 | /* Retry/Perform IDC-Lock recovery */ | ||
4083 | if (qla83xx_idc_lock_recovery(base_vha) | ||
4084 | == QLA_SUCCESS) { | ||
4085 | qla83xx_wait_logic(); | ||
4086 | goto retry_lock; | ||
4087 | } else | ||
4088 | ql_log(ql_log_warn, base_vha, 0xb075, | ||
4089 | "IDC Lock recovery FAILED.\n"); | ||
4090 | } | ||
4091 | |||
4092 | } | ||
4093 | |||
4094 | return; | ||
4095 | |||
4096 | /* XXX: IDC-lock implementation using access-control mbx */ | ||
4097 | retry_lock2: | ||
4098 | if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) { | ||
4099 | ql_dbg(ql_dbg_p3p, base_vha, 0xb072, | ||
4100 | "Failed to acquire IDC lock. retrying...\n"); | ||
4101 | /* Retry/Perform IDC-Lock recovery */ | ||
4102 | if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) { | ||
4103 | qla83xx_wait_logic(); | ||
4104 | goto retry_lock2; | ||
4105 | } else | ||
4106 | ql_log(ql_log_warn, base_vha, 0xb076, | ||
4107 | "IDC Lock recovery FAILED.\n"); | ||
4108 | } | ||
4109 | |||
4110 | return; | ||
4111 | } | ||
4112 | |||
4113 | void | ||
4114 | qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id) | ||
4115 | { | ||
4116 | uint16_t options = (requester_id << 15) | BIT_7, retry; | ||
4117 | uint32_t data; | ||
4118 | struct qla_hw_data *ha = base_vha->hw; | ||
4119 | |||
4120 | /* IDC-unlock implementation using driver-unlock/lock-id | ||
4121 | * remote registers | ||
4122 | */ | ||
4123 | retry = 0; | ||
4124 | retry_unlock: | ||
4125 | if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data) | ||
4126 | == QLA_SUCCESS) { | ||
4127 | if (data == ha->portnum) { | ||
4128 | qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data); | ||
4129 | /* Clearing lock-id by setting 0xff */ | ||
4130 | qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff); | ||
4131 | } else if (retry < 10) { | ||
4132 | /* SV: XXX: IDC unlock retrying needed here? */ | ||
4133 | |||
4134 | /* Retry for IDC-unlock */ | ||
4135 | qla83xx_wait_logic(); | ||
4136 | retry++; | ||
4137 | ql_dbg(ql_dbg_p3p, base_vha, 0xb064, | ||
4138 | "Failed to release IDC lock, retyring=%d\n", retry); | ||
4139 | goto retry_unlock; | ||
4140 | } | ||
4141 | } else if (retry < 10) { | ||
4142 | /* Retry for IDC-unlock */ | ||
4143 | qla83xx_wait_logic(); | ||
4144 | retry++; | ||
4145 | ql_dbg(ql_dbg_p3p, base_vha, 0xb065, | ||
4146 | "Failed to read drv-lockid, retyring=%d\n", retry); | ||
4147 | goto retry_unlock; | ||
4148 | } | ||
4149 | |||
4150 | return; | ||
4151 | |||
4152 | /* XXX: IDC-unlock implementation using access-control mbx */ | ||
4153 | retry = 0; | ||
4154 | retry_unlock2: | ||
4155 | if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) { | ||
4156 | if (retry < 10) { | ||
4157 | /* Retry for IDC-unlock */ | ||
4158 | qla83xx_wait_logic(); | ||
4159 | retry++; | ||
4160 | ql_dbg(ql_dbg_p3p, base_vha, 0xb066, | ||
4161 | "Failed to release IDC lock, retyring=%d\n", retry); | ||
4162 | goto retry_unlock2; | ||
4163 | } | ||
4164 | } | ||
4165 | |||
4166 | return; | ||
4167 | } | ||
4168 | |||
4169 | int | ||
4170 | __qla83xx_set_drv_presence(scsi_qla_host_t *vha) | ||
4171 | { | ||
4172 | int rval = QLA_SUCCESS; | ||
4173 | struct qla_hw_data *ha = vha->hw; | ||
4174 | uint32_t drv_presence; | ||
4175 | |||
4176 | rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); | ||
4177 | if (rval == QLA_SUCCESS) { | ||
4178 | drv_presence |= (1 << ha->portnum); | ||
4179 | rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, | ||
4180 | drv_presence); | ||
4181 | } | ||
4182 | |||
4183 | return rval; | ||
4184 | } | ||
4185 | |||
4186 | int | ||
4187 | qla83xx_set_drv_presence(scsi_qla_host_t *vha) | ||
4188 | { | ||
4189 | int rval = QLA_SUCCESS; | ||
4190 | |||
4191 | qla83xx_idc_lock(vha, 0); | ||
4192 | rval = __qla83xx_set_drv_presence(vha); | ||
4193 | qla83xx_idc_unlock(vha, 0); | ||
4194 | |||
4195 | return rval; | ||
4196 | } | ||
4197 | |||
4198 | int | ||
4199 | __qla83xx_clear_drv_presence(scsi_qla_host_t *vha) | ||
4200 | { | ||
4201 | int rval = QLA_SUCCESS; | ||
4202 | struct qla_hw_data *ha = vha->hw; | ||
4203 | uint32_t drv_presence; | ||
4204 | |||
4205 | rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); | ||
4206 | if (rval == QLA_SUCCESS) { | ||
4207 | drv_presence &= ~(1 << ha->portnum); | ||
4208 | rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, | ||
4209 | drv_presence); | ||
4210 | } | ||
4211 | |||
4212 | return rval; | ||
4213 | } | ||
4214 | |||
4215 | int | ||
4216 | qla83xx_clear_drv_presence(scsi_qla_host_t *vha) | ||
4217 | { | ||
4218 | int rval = QLA_SUCCESS; | ||
4219 | |||
4220 | qla83xx_idc_lock(vha, 0); | ||
4221 | rval = __qla83xx_clear_drv_presence(vha); | ||
4222 | qla83xx_idc_unlock(vha, 0); | ||
4223 | |||
4224 | return rval; | ||
4225 | } | ||
4226 | |||
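The __qla83xx_set_drv_presence()/__qla83xx_clear_drv_presence() pair above is a read-modify-write of one presence bit per port, performed with the IDC lock held and written back to the shared register afterwards. The bit arithmetic alone, with illustrative names:

#include <linux/types.h>

/* set this function's presence bit; caller re-writes the register */
static u32 example_set_presence(u32 drv_presence, unsigned int portnum)
{
	return drv_presence | (1u << portnum);
}

/* clear it again on teardown or failure */
static u32 example_clear_presence(u32 drv_presence, unsigned int portnum)
{
	return drv_presence & ~(1u << portnum);
}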
4227 | static void | ||
4228 | qla83xx_need_reset_handler(scsi_qla_host_t *vha) | ||
4229 | { | ||
4230 | struct qla_hw_data *ha = vha->hw; | ||
4231 | uint32_t drv_ack, drv_presence; | ||
4232 | unsigned long ack_timeout; | ||
4233 | |||
4234 | /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */ | ||
4235 | ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); | ||
4236 | while (1) { | ||
4237 | qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); | ||
4238 | qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); | ||
4239 | if ((drv_ack & drv_presence) == drv_presence) | ||
4240 | break; | ||
4241 | |||
4242 | if (time_after_eq(jiffies, ack_timeout)) { | ||
4243 | ql_log(ql_log_warn, vha, 0xb067, | ||
4244 | "RESET ACK TIMEOUT! drv_presence=0x%x " | ||
4245 | "drv_ack=0x%x\n", drv_presence, drv_ack); | ||
4246 | /* | ||
4247 | * The function(s) which did not ack in time are forced | ||
4248 | * to withdraw any further participation in the IDC | ||
4249 | * reset. | ||
4250 | */ | ||
4251 | if (drv_ack != drv_presence) | ||
4252 | qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, | ||
4253 | drv_ack); | ||
4254 | break; | ||
4255 | } | ||
4256 | |||
4257 | qla83xx_idc_unlock(vha, 0); | ||
4258 | msleep(1000); | ||
4259 | qla83xx_idc_lock(vha, 0); | ||
4260 | } | ||
4261 | |||
4262 | qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD); | ||
4263 | ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n"); | ||
4264 | } | ||
4265 | |||
4266 | static int | ||
4267 | qla83xx_device_bootstrap(scsi_qla_host_t *vha) | ||
4268 | { | ||
4269 | int rval = QLA_SUCCESS; | ||
4270 | uint32_t idc_control; | ||
4271 | |||
4272 | qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING); | ||
4273 | ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n"); | ||
4274 | |||
4275 | /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */ | ||
4276 | __qla83xx_get_idc_control(vha, &idc_control); | ||
4277 | idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET; | ||
4278 | __qla83xx_set_idc_control(vha, 0); | ||
4279 | |||
4280 | qla83xx_idc_unlock(vha, 0); | ||
4281 | rval = qla83xx_restart_nic_firmware(vha); | ||
4282 | qla83xx_idc_lock(vha, 0); | ||
4283 | |||
4284 | if (rval != QLA_SUCCESS) { | ||
4285 | ql_log(ql_log_fatal, vha, 0xb06a, | ||
4286 | "Failed to restart NIC f/w.\n"); | ||
4287 | qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED); | ||
4288 | ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n"); | ||
4289 | } else { | ||
4290 | ql_dbg(ql_dbg_p3p, vha, 0xb06c, | ||
4291 | "Success in restarting nic f/w.\n"); | ||
4292 | qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY); | ||
4293 | ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n"); | ||
4294 | } | ||
4295 | |||
4296 | return rval; | ||
4297 | } | ||
4298 | |||
4299 | /* Assumes idc_lock always held on entry */ | ||
4300 | int | ||
4301 | qla83xx_idc_state_handler(scsi_qla_host_t *base_vha) | ||
4302 | { | ||
4303 | struct qla_hw_data *ha = base_vha->hw; | ||
4304 | int rval = QLA_SUCCESS; | ||
4305 | unsigned long dev_init_timeout; | ||
4306 | uint32_t dev_state; | ||
4307 | |||
4308 | /* Wait for MAX-INIT-TIMEOUT for the device to go ready */ | ||
4309 | dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); | ||
4310 | |||
4311 | while (1) { | ||
4312 | |||
4313 | if (time_after_eq(jiffies, dev_init_timeout)) { | ||
4314 | ql_log(ql_log_warn, base_vha, 0xb06e, | ||
4315 | "Initialization TIMEOUT!\n"); | ||
4316 | /* Init timeout. Disable further NIC Core | ||
4317 | * communication. | ||
4318 | */ | ||
4319 | qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, | ||
4320 | QLA8XXX_DEV_FAILED); | ||
4321 | ql_log(ql_log_info, base_vha, 0xb06f, | ||
4322 | "HW State: FAILED.\n"); | ||
4323 | } | ||
4324 | |||
4325 | qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); | ||
4326 | switch (dev_state) { | ||
4327 | case QLA8XXX_DEV_READY: | ||
4328 | if (ha->flags.nic_core_reset_owner) | ||
4329 | qla83xx_idc_audit(base_vha, | ||
4330 | IDC_AUDIT_COMPLETION); | ||
4331 | ha->flags.nic_core_reset_owner = 0; | ||
4332 | ql_dbg(ql_dbg_p3p, base_vha, 0xb070, | ||
4333 | "Reset_owner reset by 0x%x.\n", | ||
4334 | ha->portnum); | ||
4335 | goto exit; | ||
4336 | case QLA8XXX_DEV_COLD: | ||
4337 | if (ha->flags.nic_core_reset_owner) | ||
4338 | rval = qla83xx_device_bootstrap(base_vha); | ||
4339 | else { | ||
4340 | /* Wait for AEN to change device-state */ | ||
4341 | qla83xx_idc_unlock(base_vha, 0); | ||
4342 | msleep(1000); | ||
4343 | qla83xx_idc_lock(base_vha, 0); | ||
4344 | } | ||
4345 | break; | ||
4346 | case QLA8XXX_DEV_INITIALIZING: | ||
4347 | /* Wait for AEN to change device-state */ | ||
4348 | qla83xx_idc_unlock(base_vha, 0); | ||
4349 | msleep(1000); | ||
4350 | qla83xx_idc_lock(base_vha, 0); | ||
4351 | break; | ||
4352 | case QLA8XXX_DEV_NEED_RESET: | ||
4353 | if (!ql2xdontresethba && ha->flags.nic_core_reset_owner) | ||
4354 | qla83xx_need_reset_handler(base_vha); | ||
4355 | else { | ||
4356 | /* Wait for AEN to change device-state */ | ||
4357 | qla83xx_idc_unlock(base_vha, 0); | ||
4358 | msleep(1000); | ||
4359 | qla83xx_idc_lock(base_vha, 0); | ||
4360 | } | ||
4361 | /* reset timeout value after need reset handler */ | ||
4362 | dev_init_timeout = jiffies + | ||
4363 | (ha->fcoe_dev_init_timeout * HZ); | ||
4364 | break; | ||
4365 | case QLA8XXX_DEV_NEED_QUIESCENT: | ||
4366 | /* XXX: DEBUG for now */ | ||
4367 | qla83xx_idc_unlock(base_vha, 0); | ||
4368 | msleep(1000); | ||
4369 | qla83xx_idc_lock(base_vha, 0); | ||
4370 | break; | ||
4371 | case QLA8XXX_DEV_QUIESCENT: | ||
4372 | /* XXX: DEBUG for now */ | ||
4373 | if (ha->flags.quiesce_owner) | ||
4374 | goto exit; | ||
4375 | |||
4376 | qla83xx_idc_unlock(base_vha, 0); | ||
4377 | msleep(1000); | ||
4378 | qla83xx_idc_lock(base_vha, 0); | ||
4379 | dev_init_timeout = jiffies + | ||
4380 | (ha->fcoe_dev_init_timeout * HZ); | ||
4381 | break; | ||
4382 | case QLA8XXX_DEV_FAILED: | ||
4383 | if (ha->flags.nic_core_reset_owner) | ||
4384 | qla83xx_idc_audit(base_vha, | ||
4385 | IDC_AUDIT_COMPLETION); | ||
4386 | ha->flags.nic_core_reset_owner = 0; | ||
4387 | __qla83xx_clear_drv_presence(base_vha); | ||
4388 | qla83xx_idc_unlock(base_vha, 0); | ||
4389 | qla8xxx_dev_failed_handler(base_vha); | ||
4390 | rval = QLA_FUNCTION_FAILED; | ||
4391 | qla83xx_idc_lock(base_vha, 0); | ||
4392 | goto exit; | ||
4393 | case QLA8XXX_BAD_VALUE: | ||
4394 | qla83xx_idc_unlock(base_vha, 0); | ||
4395 | msleep(1000); | ||
4396 | qla83xx_idc_lock(base_vha, 0); | ||
4397 | break; | ||
4398 | default: | ||
4399 | ql_log(ql_log_warn, base_vha, 0xb071, | ||
4400 | "Unknow Device State: %x.\n", dev_state); | ||
4401 | qla83xx_idc_unlock(base_vha, 0); | ||
4402 | qla8xxx_dev_failed_handler(base_vha); | ||
4403 | rval = QLA_FUNCTION_FAILED; | ||
4404 | qla83xx_idc_lock(base_vha, 0); | ||
4405 | goto exit; | ||
4406 | } | ||
4407 | } | ||
4408 | |||
4409 | exit: | ||
4410 | return rval; | ||
4411 | } | ||
4412 | |||
4413 | /************************************************************************** | 3476 | /************************************************************************** |
4414 | * qla2x00_do_dpc | 3477 | * qla2x00_do_dpc |
4415 | * This kernel thread is a task that is scheduled by the interrupt handler | 3478 | * This kernel thread is a task that is scheduled by the interrupt handler |
@@ -4443,20 +3506,27 @@ qla2x00_do_dpc(void *data) | |||
4443 | schedule(); | 3506 | schedule(); |
4444 | __set_current_state(TASK_RUNNING); | 3507 | __set_current_state(TASK_RUNNING); |
4445 | 3508 | ||
4446 | if (!base_vha->flags.init_done || ha->flags.mbox_busy) | 3509 | ql_dbg(ql_dbg_dpc, base_vha, 0x4001, |
4447 | goto end_loop; | 3510 | "DPC handler waking up.\n"); |
3511 | ql_dbg(ql_dbg_dpc, base_vha, 0x4002, | ||
3512 | "dpc_flags=0x%lx.\n", base_vha->dpc_flags); | ||
3513 | |||
3514 | /* Initialization not yet finished. Don't do anything yet. */ | ||
3515 | if (!base_vha->flags.init_done) | ||
3516 | continue; | ||
4448 | 3517 | ||
4449 | if (ha->flags.eeh_busy) { | 3518 | if (ha->flags.eeh_busy) { |
4450 | ql_dbg(ql_dbg_dpc, base_vha, 0x4003, | 3519 | ql_dbg(ql_dbg_dpc, base_vha, 0x4003, |
4451 | "eeh_busy=%d.\n", ha->flags.eeh_busy); | 3520 | "eeh_busy=%d.\n", ha->flags.eeh_busy); |
4452 | goto end_loop; | 3521 | continue; |
4453 | } | 3522 | } |
4454 | 3523 | ||
4455 | ha->dpc_active = 1; | 3524 | ha->dpc_active = 1; |
4456 | 3525 | ||
4457 | ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001, | 3526 | if (ha->flags.mbox_busy) { |
4458 | "DPC handler waking up, dpc_flags=0x%lx.\n", | 3527 | ha->dpc_active = 0; |
4459 | base_vha->dpc_flags); | 3528 | continue; |
3529 | } | ||
4460 | 3530 | ||
4461 | qla2x00_do_work(base_vha); | 3531 | qla2x00_do_work(base_vha); |
4462 | 3532 | ||
@@ -4465,7 +3535,7 @@ qla2x00_do_dpc(void *data) | |||
4465 | &base_vha->dpc_flags)) { | 3535 | &base_vha->dpc_flags)) { |
4466 | qla82xx_idc_lock(ha); | 3536 | qla82xx_idc_lock(ha); |
4467 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, | 3537 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, |
4468 | QLA8XXX_DEV_FAILED); | 3538 | QLA82XX_DEV_FAILED); |
4469 | qla82xx_idc_unlock(ha); | 3539 | qla82xx_idc_unlock(ha); |
4470 | ql_log(ql_log_info, base_vha, 0x4004, | 3540 | ql_log(ql_log_info, base_vha, 0x4004, |
4471 | "HW State: FAILED.\n"); | 3541 | "HW State: FAILED.\n"); |
@@ -4517,39 +3587,22 @@ qla2x00_do_dpc(void *data) | |||
4517 | "ISP abort end.\n"); | 3587 | "ISP abort end.\n"); |
4518 | } | 3588 | } |
4519 | 3589 | ||
4520 | if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, | 3590 | if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) { |
4521 | &base_vha->dpc_flags)) { | ||
4522 | qla2x00_update_fcports(base_vha); | 3591 | qla2x00_update_fcports(base_vha); |
4523 | } | 3592 | clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); |
4524 | |||
4525 | if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) { | ||
4526 | int ret; | ||
4527 | ret = qla2x00_send_change_request(base_vha, 0x3, 0); | ||
4528 | if (ret != QLA_SUCCESS) | ||
4529 | ql_log(ql_log_warn, base_vha, 0x121, | ||
4530 | "Failed to enable receiving of RSCN " | ||
4531 | "requests: 0x%x.\n", ret); | ||
4532 | clear_bit(SCR_PENDING, &base_vha->dpc_flags); | ||
4533 | } | 3593 | } |
4534 | 3594 | ||
4535 | if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { | 3595 | if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { |
4536 | ql_dbg(ql_dbg_dpc, base_vha, 0x4009, | 3596 | ql_dbg(ql_dbg_dpc, base_vha, 0x4009, |
4537 | "Quiescence mode scheduled.\n"); | 3597 | "Quiescence mode scheduled.\n"); |
4538 | if (IS_QLA82XX(ha)) { | 3598 | qla82xx_device_state_handler(base_vha); |
4539 | qla82xx_device_state_handler(base_vha); | 3599 | clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags); |
4540 | clear_bit(ISP_QUIESCE_NEEDED, | 3600 | if (!ha->flags.quiesce_owner) { |
4541 | &base_vha->dpc_flags); | 3601 | qla2x00_perform_loop_resync(base_vha); |
4542 | if (!ha->flags.quiesce_owner) { | 3602 | |
4543 | qla2x00_perform_loop_resync(base_vha); | 3603 | qla82xx_idc_lock(ha); |
4544 | 3604 | qla82xx_clear_qsnt_ready(base_vha); | |
4545 | qla82xx_idc_lock(ha); | 3605 | qla82xx_idc_unlock(ha); |
4546 | qla82xx_clear_qsnt_ready(base_vha); | ||
4547 | qla82xx_idc_unlock(ha); | ||
4548 | } | ||
4549 | } else { | ||
4550 | clear_bit(ISP_QUIESCE_NEEDED, | ||
4551 | &base_vha->dpc_flags); | ||
4552 | qla2x00_quiesce_io(base_vha); | ||
4553 | } | 3606 | } |
4554 | ql_dbg(ql_dbg_dpc, base_vha, 0x400a, | 3607 | ql_dbg(ql_dbg_dpc, base_vha, 0x400a, |
4555 | "Quiescence mode end.\n"); | 3608 | "Quiescence mode end.\n"); |
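The two sides of the hunk above differ in how FCPORT_UPDATE_NEEDED is consumed: the removed code uses test_and_clear_bit() before doing the work, so a request set while the update runs stays pending for the next pass, whereas clearing the bit after the work can drop such a request. A minimal sketch of the atomic form; the bit number and callback are made up:

#include <linux/bitops.h>

#define EXAMPLE_FCPORT_UPDATE_NEEDED 0	/* bit number, illustrative */

static void example_handle_flags(unsigned long *dpc_flags,
				 void (*update_fcports)(void))
{
	/* atomically consume the request so a racing setter is not lost */
	if (test_and_clear_bit(EXAMPLE_FCPORT_UPDATE_NEEDED, dpc_flags))
		update_fcports();
}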
@@ -4615,7 +3668,6 @@ qla2x00_do_dpc(void *data) | |||
4615 | qla2x00_do_dpc_all_vps(base_vha); | 3668 | qla2x00_do_dpc_all_vps(base_vha); |
4616 | 3669 | ||
4617 | ha->dpc_active = 0; | 3670 | ha->dpc_active = 0; |
4618 | end_loop: | ||
4619 | set_current_state(TASK_INTERRUPTIBLE); | 3671 | set_current_state(TASK_INTERRUPTIBLE); |
4620 | } /* End of while(1) */ | 3672 | } /* End of while(1) */ |
4621 | __set_current_state(TASK_RUNNING); | 3673 | __set_current_state(TASK_RUNNING); |
@@ -4670,6 +3722,76 @@ qla2x00_rst_aen(scsi_qla_host_t *vha) | |||
4670 | } | 3722 | } |
4671 | } | 3723 | } |
4672 | 3724 | ||
3725 | static void | ||
3726 | qla2x00_sp_free_dma(srb_t *sp) | ||
3727 | { | ||
3728 | struct scsi_cmnd *cmd = sp->cmd; | ||
3729 | struct qla_hw_data *ha = sp->fcport->vha->hw; | ||
3730 | |||
3731 | if (sp->flags & SRB_DMA_VALID) { | ||
3732 | scsi_dma_unmap(cmd); | ||
3733 | sp->flags &= ~SRB_DMA_VALID; | ||
3734 | } | ||
3735 | |||
3736 | if (sp->flags & SRB_CRC_PROT_DMA_VALID) { | ||
3737 | dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), | ||
3738 | scsi_prot_sg_count(cmd), cmd->sc_data_direction); | ||
3739 | sp->flags &= ~SRB_CRC_PROT_DMA_VALID; | ||
3740 | } | ||
3741 | |||
3742 | if (sp->flags & SRB_CRC_CTX_DSD_VALID) { | ||
3743 | /* List assured to be having elements */ | ||
3744 | qla2x00_clean_dsd_pool(ha, sp); | ||
3745 | sp->flags &= ~SRB_CRC_CTX_DSD_VALID; | ||
3746 | } | ||
3747 | |||
3748 | if (sp->flags & SRB_CRC_CTX_DMA_VALID) { | ||
3749 | dma_pool_free(ha->dl_dma_pool, sp->ctx, | ||
3750 | ((struct crc_context *)sp->ctx)->crc_ctx_dma); | ||
3751 | sp->flags &= ~SRB_CRC_CTX_DMA_VALID; | ||
3752 | } | ||
3753 | |||
3754 | CMD_SP(cmd) = NULL; | ||
3755 | } | ||
3756 | |||
3757 | static void | ||
3758 | qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp) | ||
3759 | { | ||
3760 | struct scsi_cmnd *cmd = sp->cmd; | ||
3761 | |||
3762 | qla2x00_sp_free_dma(sp); | ||
3763 | |||
3764 | if (sp->flags & SRB_FCP_CMND_DMA_VALID) { | ||
3765 | struct ct6_dsd *ctx = sp->ctx; | ||
3766 | dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, | ||
3767 | ctx->fcp_cmnd_dma); | ||
3768 | list_splice(&ctx->dsd_list, &ha->gbl_dsd_list); | ||
3769 | ha->gbl_dsd_inuse -= ctx->dsd_use_cnt; | ||
3770 | ha->gbl_dsd_avail += ctx->dsd_use_cnt; | ||
3771 | mempool_free(sp->ctx, ha->ctx_mempool); | ||
3772 | sp->ctx = NULL; | ||
3773 | } | ||
3774 | |||
3775 | mempool_free(sp, ha->srb_mempool); | ||
3776 | cmd->scsi_done(cmd); | ||
3777 | } | ||
3778 | |||
3779 | void | ||
3780 | qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp) | ||
3781 | { | ||
3782 | if (atomic_read(&sp->ref_count) == 0) { | ||
3783 | ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015, | ||
3784 | "SP reference-count to ZERO -- sp=%p cmd=%p.\n", | ||
3785 | sp, sp->cmd); | ||
3786 | if (ql2xextended_error_logging & ql_dbg_io) | ||
3787 | BUG(); | ||
3788 | return; | ||
3789 | } | ||
3790 | if (!atomic_dec_and_test(&sp->ref_count)) | ||
3791 | return; | ||
3792 | qla2x00_sp_final_compl(ha, sp); | ||
3793 | } | ||
3794 | |||
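qla2x00_sp_compl() above only runs the final completion when the last reference is dropped, and warns if the count is already zero. A hand-rolled sketch of that drop-last-reference pattern; the struct and helper names are invented, and the real final completion frees DMA state and calls ->scsi_done():

#include <linux/atomic.h>
#include <linux/bug.h>

struct example_sp {
	atomic_t ref_count;
};

static void example_final_compl(struct example_sp *sp)
{
	/* free DMA resources, return sp to its mempool, complete the command */
}

static void example_sp_put(struct example_sp *sp)
{
	if (WARN_ON(atomic_read(&sp->ref_count) == 0))
		return;				/* already freed: bail out */
	if (!atomic_dec_and_test(&sp->ref_count))
		return;				/* other holders remain */
	example_final_compl(sp);		/* last reference dropped */
}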
4673 | /************************************************************************** | 3795 | /************************************************************************** |
4674 | * qla2x00_timer | 3796 | * qla2x00_timer |
4675 | * | 3797 | * |
@@ -4740,7 +3862,7 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
4740 | sp = req->outstanding_cmds[index]; | 3862 | sp = req->outstanding_cmds[index]; |
4741 | if (!sp) | 3863 | if (!sp) |
4742 | continue; | 3864 | continue; |
4743 | if (sp->type != SRB_SCSI_CMD) | 3865 | if (sp->ctx && !IS_PROT_IO(sp)) |
4744 | continue; | 3866 | continue; |
4745 | sfcp = sp->fcport; | 3867 | sfcp = sp->fcport; |
4746 | if (!(sfcp->flags & FCF_FCP2_DEVICE)) | 3868 | if (!(sfcp->flags & FCF_FCP2_DEVICE)) |
@@ -4781,11 +3903,8 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
4781 | 3903 | ||
4782 | /* Check if beacon LED needs to be blinked for physical host only */ | 3904 | /* Check if beacon LED needs to be blinked for physical host only */ |
4783 | if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { | 3905 | if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { |
4784 | /* There is no beacon_blink function for ISP82xx */ | 3906 | set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); |
4785 | if (!IS_QLA82XX(ha)) { | 3907 | start_dpc++; |
4786 | set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); | ||
4787 | start_dpc++; | ||
4788 | } | ||
4789 | } | 3908 | } |
4790 | 3909 | ||
4791 | /* Process any deferred work. */ | 3910 | /* Process any deferred work. */ |
@@ -4829,7 +3948,7 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
4829 | 3948 | ||
4830 | /* Firmware interface routines. */ | 3949 | /* Firmware interface routines. */ |
4831 | 3950 | ||
4832 | #define FW_BLOBS 10 | 3951 | #define FW_BLOBS 8 |
4833 | #define FW_ISP21XX 0 | 3952 | #define FW_ISP21XX 0 |
4834 | #define FW_ISP22XX 1 | 3953 | #define FW_ISP22XX 1 |
4835 | #define FW_ISP2300 2 | 3954 | #define FW_ISP2300 2 |
@@ -4838,8 +3957,6 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
4838 | #define FW_ISP25XX 5 | 3957 | #define FW_ISP25XX 5 |
4839 | #define FW_ISP81XX 6 | 3958 | #define FW_ISP81XX 6 |
4840 | #define FW_ISP82XX 7 | 3959 | #define FW_ISP82XX 7 |
4841 | #define FW_ISP2031 8 | ||
4842 | #define FW_ISP8031 9 | ||
4843 | 3960 | ||
4844 | #define FW_FILE_ISP21XX "ql2100_fw.bin" | 3961 | #define FW_FILE_ISP21XX "ql2100_fw.bin" |
4845 | #define FW_FILE_ISP22XX "ql2200_fw.bin" | 3962 | #define FW_FILE_ISP22XX "ql2200_fw.bin" |
@@ -4849,8 +3966,6 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
4849 | #define FW_FILE_ISP25XX "ql2500_fw.bin" | 3966 | #define FW_FILE_ISP25XX "ql2500_fw.bin" |
4850 | #define FW_FILE_ISP81XX "ql8100_fw.bin" | 3967 | #define FW_FILE_ISP81XX "ql8100_fw.bin" |
4851 | #define FW_FILE_ISP82XX "ql8200_fw.bin" | 3968 | #define FW_FILE_ISP82XX "ql8200_fw.bin" |
4852 | #define FW_FILE_ISP2031 "ql2600_fw.bin" | ||
4853 | #define FW_FILE_ISP8031 "ql8300_fw.bin" | ||
4854 | 3969 | ||
4855 | static DEFINE_MUTEX(qla_fw_lock); | 3970 | static DEFINE_MUTEX(qla_fw_lock); |
4856 | 3971 | ||
@@ -4863,8 +3978,6 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = { | |||
4863 | { .name = FW_FILE_ISP25XX, }, | 3978 | { .name = FW_FILE_ISP25XX, }, |
4864 | { .name = FW_FILE_ISP81XX, }, | 3979 | { .name = FW_FILE_ISP81XX, }, |
4865 | { .name = FW_FILE_ISP82XX, }, | 3980 | { .name = FW_FILE_ISP82XX, }, |
4866 | { .name = FW_FILE_ISP2031, }, | ||
4867 | { .name = FW_FILE_ISP8031, }, | ||
4868 | }; | 3981 | }; |
4869 | 3982 | ||
4870 | struct fw_blob * | 3983 | struct fw_blob * |
@@ -4873,6 +3986,7 @@ qla2x00_request_firmware(scsi_qla_host_t *vha) | |||
4873 | struct qla_hw_data *ha = vha->hw; | 3986 | struct qla_hw_data *ha = vha->hw; |
4874 | struct fw_blob *blob; | 3987 | struct fw_blob *blob; |
4875 | 3988 | ||
3989 | blob = NULL; | ||
4876 | if (IS_QLA2100(ha)) { | 3990 | if (IS_QLA2100(ha)) { |
4877 | blob = &qla_fw_blobs[FW_ISP21XX]; | 3991 | blob = &qla_fw_blobs[FW_ISP21XX]; |
4878 | } else if (IS_QLA2200(ha)) { | 3992 | } else if (IS_QLA2200(ha)) { |
@@ -4889,12 +4003,6 @@ qla2x00_request_firmware(scsi_qla_host_t *vha) | |||
4889 | blob = &qla_fw_blobs[FW_ISP81XX]; | 4003 | blob = &qla_fw_blobs[FW_ISP81XX]; |
4890 | } else if (IS_QLA82XX(ha)) { | 4004 | } else if (IS_QLA82XX(ha)) { |
4891 | blob = &qla_fw_blobs[FW_ISP82XX]; | 4005 | blob = &qla_fw_blobs[FW_ISP82XX]; |
4892 | } else if (IS_QLA2031(ha)) { | ||
4893 | blob = &qla_fw_blobs[FW_ISP2031]; | ||
4894 | } else if (IS_QLA8031(ha)) { | ||
4895 | blob = &qla_fw_blobs[FW_ISP8031]; | ||
4896 | } else { | ||
4897 | return NULL; | ||
4898 | } | 4006 | } |
4899 | 4007 | ||
4900 | mutex_lock(&qla_fw_lock); | 4008 | mutex_lock(&qla_fw_lock); |
@@ -4921,7 +4029,8 @@ qla2x00_release_firmware(void) | |||
4921 | 4029 | ||
4922 | mutex_lock(&qla_fw_lock); | 4030 | mutex_lock(&qla_fw_lock); |
4923 | for (idx = 0; idx < FW_BLOBS; idx++) | 4031 | for (idx = 0; idx < FW_BLOBS; idx++) |
4924 | release_firmware(qla_fw_blobs[idx].fw); | 4032 | if (qla_fw_blobs[idx].fw) |
4033 | release_firmware(qla_fw_blobs[idx].fw); | ||
4925 | mutex_unlock(&qla_fw_lock); | 4034 | mutex_unlock(&qla_fw_lock); |
4926 | } | 4035 | } |
4927 | 4036 | ||
@@ -4943,8 +4052,13 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) | |||
4943 | /* For ISP82XX complete any pending mailbox cmd */ | 4052 | /* For ISP82XX complete any pending mailbox cmd */ |
4944 | if (IS_QLA82XX(ha)) { | 4053 | if (IS_QLA82XX(ha)) { |
4945 | ha->flags.isp82xx_fw_hung = 1; | 4054 | ha->flags.isp82xx_fw_hung = 1; |
4946 | ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n"); | 4055 | if (ha->flags.mbox_busy) { |
4947 | qla82xx_clear_pending_mbx(vha); | 4056 | ha->flags.mbox_int = 1; |
4057 | ql_dbg(ql_dbg_aer, vha, 0x9001, | ||
4058 | "Due to pci channel io frozen, doing premature " | ||
4059 | "completion of mbx command.\n"); | ||
4060 | complete(&ha->mbx_intr_comp); | ||
4061 | } | ||
4948 | } | 4062 | } |
4949 | qla2x00_free_irqs(vha); | 4063 | qla2x00_free_irqs(vha); |
4950 | pci_disable_device(pdev); | 4064 | pci_disable_device(pdev); |
@@ -4999,8 +4113,7 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) | |||
4999 | return PCI_ERS_RESULT_RECOVERED; | 4113 | return PCI_ERS_RESULT_RECOVERED; |
5000 | } | 4114 | } |
5001 | 4115 | ||
5002 | static uint32_t | 4116 | uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha) |
5003 | qla82xx_error_recovery(scsi_qla_host_t *base_vha) | ||
5004 | { | 4117 | { |
5005 | uint32_t rval = QLA_FUNCTION_FAILED; | 4118 | uint32_t rval = QLA_FUNCTION_FAILED; |
5006 | uint32_t drv_active = 0; | 4119 | uint32_t drv_active = 0; |
@@ -5050,7 +4163,7 @@ qla82xx_error_recovery(scsi_qla_host_t *base_vha) | |||
5050 | qla82xx_idc_lock(ha); | 4163 | qla82xx_idc_lock(ha); |
5051 | 4164 | ||
5052 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, | 4165 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, |
5053 | QLA8XXX_DEV_INITIALIZING); | 4166 | QLA82XX_DEV_INITIALIZING); |
5054 | 4167 | ||
5055 | qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, | 4168 | qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, |
5056 | QLA82XX_IDC_VERSION); | 4169 | QLA82XX_IDC_VERSION); |
@@ -5074,12 +4187,12 @@ qla82xx_error_recovery(scsi_qla_host_t *base_vha) | |||
5074 | "HW State: FAILED.\n"); | 4187 | "HW State: FAILED.\n"); |
5075 | qla82xx_clear_drv_active(ha); | 4188 | qla82xx_clear_drv_active(ha); |
5076 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, | 4189 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, |
5077 | QLA8XXX_DEV_FAILED); | 4190 | QLA82XX_DEV_FAILED); |
5078 | } else { | 4191 | } else { |
5079 | ql_log(ql_log_info, base_vha, 0x900c, | 4192 | ql_log(ql_log_info, base_vha, 0x900c, |
5080 | "HW State: READY.\n"); | 4193 | "HW State: READY.\n"); |
5081 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, | 4194 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, |
5082 | QLA8XXX_DEV_READY); | 4195 | QLA82XX_DEV_READY); |
5083 | qla82xx_idc_unlock(ha); | 4196 | qla82xx_idc_unlock(ha); |
5084 | ha->flags.isp82xx_fw_hung = 0; | 4197 | ha->flags.isp82xx_fw_hung = 0; |
5085 | rval = qla82xx_restart_isp(base_vha); | 4198 | rval = qla82xx_restart_isp(base_vha); |
@@ -5094,7 +4207,7 @@ qla82xx_error_recovery(scsi_qla_host_t *base_vha) | |||
5094 | "This devfn is not reset owner = 0x%x.\n", | 4207 | "This devfn is not reset owner = 0x%x.\n", |
5095 | ha->pdev->devfn); | 4208 | ha->pdev->devfn); |
5096 | if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == | 4209 | if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == |
5097 | QLA8XXX_DEV_READY)) { | 4210 | QLA82XX_DEV_READY)) { |
5098 | ha->flags.isp82xx_fw_hung = 0; | 4211 | ha->flags.isp82xx_fw_hung = 0; |
5099 | rval = qla82xx_restart_isp(base_vha); | 4212 | rval = qla82xx_restart_isp(base_vha); |
5100 | qla82xx_idc_lock(ha); | 4213 | qla82xx_idc_lock(ha); |
@@ -5195,7 +4308,7 @@ qla2xxx_pci_resume(struct pci_dev *pdev) | |||
5195 | ha->flags.eeh_busy = 0; | 4308 | ha->flags.eeh_busy = 0; |
5196 | } | 4309 | } |
5197 | 4310 | ||
5198 | static const struct pci_error_handlers qla2xxx_err_handler = { | 4311 | static struct pci_error_handlers qla2xxx_err_handler = { |
5199 | .error_detected = qla2xxx_pci_error_detected, | 4312 | .error_detected = qla2xxx_pci_error_detected, |
5200 | .mmio_enabled = qla2xxx_pci_mmio_enabled, | 4313 | .mmio_enabled = qla2xxx_pci_mmio_enabled, |
5201 | .slot_reset = qla2xxx_pci_slot_reset, | 4314 | .slot_reset = qla2xxx_pci_slot_reset, |
@@ -5216,10 +4329,8 @@ static struct pci_device_id qla2xxx_pci_tbl[] = { | |||
5216 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, | 4329 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, |
5217 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, | 4330 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, |
5218 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, | 4331 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, |
5219 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) }, | ||
5220 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) }, | 4332 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) }, |
5221 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, | 4333 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, |
5222 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) }, | ||
5223 | { 0 }, | 4334 | { 0 }, |
5224 | }; | 4335 | }; |
5225 | MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); | 4336 | MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); |
@@ -5258,21 +4369,6 @@ qla2x00_module_init(void) | |||
5258 | return -ENOMEM; | 4369 | return -ENOMEM; |
5259 | } | 4370 | } |
5260 | 4371 | ||
5261 | /* Initialize target kmem_cache and mem_pools */ | ||
5262 | ret = qlt_init(); | ||
5263 | if (ret < 0) { | ||
5264 | kmem_cache_destroy(srb_cachep); | ||
5265 | return ret; | ||
5266 | } else if (ret > 0) { | ||
5267 | /* | ||
5268 | * If initiator mode is explicitly disabled by qlt_init(), | ||
5269 | * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from | ||
5270 | * performing scsi_scan_target() during LOOP UP event. | ||
5271 | */ | ||
5272 | qla2xxx_transport_functions.disable_target_scan = 1; | ||
5273 | qla2xxx_transport_vport_functions.disable_target_scan = 1; | ||
5274 | } | ||
5275 | |||
5276 | /* Derive version string. */ | 4372 | /* Derive version string. */ |
5277 | strcpy(qla2x00_version_str, QLA2XXX_VERSION); | 4373 | strcpy(qla2x00_version_str, QLA2XXX_VERSION); |
5278 | if (ql2xextended_error_logging) | 4374 | if (ql2xextended_error_logging) |
@@ -5284,7 +4380,6 @@ qla2x00_module_init(void) | |||
5284 | kmem_cache_destroy(srb_cachep); | 4380 | kmem_cache_destroy(srb_cachep); |
5285 | ql_log(ql_log_fatal, NULL, 0x0002, | 4381 | ql_log(ql_log_fatal, NULL, 0x0002, |
5286 | "fc_attach_transport failed...Failing load!.\n"); | 4382 | "fc_attach_transport failed...Failing load!.\n"); |
5287 | qlt_exit(); | ||
5288 | return -ENODEV; | 4383 | return -ENODEV; |
5289 | } | 4384 | } |
5290 | 4385 | ||
@@ -5298,7 +4393,6 @@ qla2x00_module_init(void) | |||
5298 | fc_attach_transport(&qla2xxx_transport_vport_functions); | 4393 | fc_attach_transport(&qla2xxx_transport_vport_functions); |
5299 | if (!qla2xxx_transport_vport_template) { | 4394 | if (!qla2xxx_transport_vport_template) { |
5300 | kmem_cache_destroy(srb_cachep); | 4395 | kmem_cache_destroy(srb_cachep); |
5301 | qlt_exit(); | ||
5302 | fc_release_transport(qla2xxx_transport_template); | 4396 | fc_release_transport(qla2xxx_transport_template); |
5303 | ql_log(ql_log_fatal, NULL, 0x0004, | 4397 | ql_log(ql_log_fatal, NULL, 0x0004, |
5304 | "fc_attach_transport vport failed...Failing load!.\n"); | 4398 | "fc_attach_transport vport failed...Failing load!.\n"); |
@@ -5310,7 +4404,6 @@ qla2x00_module_init(void) | |||
5310 | ret = pci_register_driver(&qla2xxx_pci_driver); | 4404 | ret = pci_register_driver(&qla2xxx_pci_driver); |
5311 | if (ret) { | 4405 | if (ret) { |
5312 | kmem_cache_destroy(srb_cachep); | 4406 | kmem_cache_destroy(srb_cachep); |
5313 | qlt_exit(); | ||
5314 | fc_release_transport(qla2xxx_transport_template); | 4407 | fc_release_transport(qla2xxx_transport_template); |
5315 | fc_release_transport(qla2xxx_transport_vport_template); | 4408 | fc_release_transport(qla2xxx_transport_vport_template); |
5316 | ql_log(ql_log_fatal, NULL, 0x0006, | 4409 | ql_log(ql_log_fatal, NULL, 0x0006, |
@@ -5330,7 +4423,6 @@ qla2x00_module_exit(void) | |||
5330 | pci_unregister_driver(&qla2xxx_pci_driver); | 4423 | pci_unregister_driver(&qla2xxx_pci_driver); |
5331 | qla2x00_release_firmware(); | 4424 | qla2x00_release_firmware(); |
5332 | kmem_cache_destroy(srb_cachep); | 4425 | kmem_cache_destroy(srb_cachep); |
5333 | qlt_exit(); | ||
5334 | if (ctx_cachep) | 4426 | if (ctx_cachep) |
5335 | kmem_cache_destroy(ctx_cachep); | 4427 | kmem_cache_destroy(ctx_cachep); |
5336 | fc_release_transport(qla2xxx_transport_template); | 4428 | fc_release_transport(qla2xxx_transport_template); |
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h index 892a81e457b..d70f0300898 100644 --- a/drivers/scsi/qla2xxx/qla_settings.h +++ b/drivers/scsi/qla2xxx/qla_settings.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 32fdc2a66dd..eff13563c82 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -568,9 +568,6 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) | |||
568 | else if (IS_QLA82XX(ha)) { | 568 | else if (IS_QLA82XX(ha)) { |
569 | *start = FA_FLASH_LAYOUT_ADDR_82; | 569 | *start = FA_FLASH_LAYOUT_ADDR_82; |
570 | goto end; | 570 | goto end; |
571 | } else if (IS_QLA83XX(ha)) { | ||
572 | *start = FA_FLASH_LAYOUT_ADDR_83; | ||
573 | goto end; | ||
574 | } | 571 | } |
575 | /* Begin with first PCI expansion ROM header. */ | 572 | /* Begin with first PCI expansion ROM header. */ |
576 | buf = (uint8_t *)req->ring; | 573 | buf = (uint8_t *)req->ring; |
@@ -724,22 +721,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) | |||
724 | le32_to_cpu(region->size)); | 721 | le32_to_cpu(region->size)); |
725 | 722 | ||
726 | switch (le32_to_cpu(region->code) & 0xff) { | 723 | switch (le32_to_cpu(region->code) & 0xff) { |
727 | case FLT_REG_FCOE_FW: | ||
728 | if (!IS_QLA8031(ha)) | ||
729 | break; | ||
730 | ha->flt_region_fw = start; | ||
731 | break; | ||
732 | case FLT_REG_FW: | 724 | case FLT_REG_FW: |
733 | if (IS_QLA8031(ha)) | ||
734 | break; | ||
735 | ha->flt_region_fw = start; | 725 | ha->flt_region_fw = start; |
736 | break; | 726 | break; |
737 | case FLT_REG_BOOT_CODE: | 727 | case FLT_REG_BOOT_CODE: |
738 | ha->flt_region_boot = start; | 728 | ha->flt_region_boot = start; |
739 | break; | 729 | break; |
740 | case FLT_REG_VPD_0: | 730 | case FLT_REG_VPD_0: |
741 | if (IS_QLA8031(ha)) | ||
742 | break; | ||
743 | ha->flt_region_vpd_nvram = start; | 731 | ha->flt_region_vpd_nvram = start; |
744 | if (IS_QLA82XX(ha)) | 732 | if (IS_QLA82XX(ha)) |
745 | break; | 733 | break; |
@@ -747,20 +735,16 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) | |||
747 | ha->flt_region_vpd = start; | 735 | ha->flt_region_vpd = start; |
748 | break; | 736 | break; |
749 | case FLT_REG_VPD_1: | 737 | case FLT_REG_VPD_1: |
750 | if (IS_QLA82XX(ha) || IS_QLA8031(ha)) | 738 | if (IS_QLA82XX(ha)) |
751 | break; | 739 | break; |
752 | if (!ha->flags.port0) | 740 | if (!ha->flags.port0) |
753 | ha->flt_region_vpd = start; | 741 | ha->flt_region_vpd = start; |
754 | break; | 742 | break; |
755 | case FLT_REG_NVRAM_0: | 743 | case FLT_REG_NVRAM_0: |
756 | if (IS_QLA8031(ha)) | ||
757 | break; | ||
758 | if (ha->flags.port0) | 744 | if (ha->flags.port0) |
759 | ha->flt_region_nvram = start; | 745 | ha->flt_region_nvram = start; |
760 | break; | 746 | break; |
761 | case FLT_REG_NVRAM_1: | 747 | case FLT_REG_NVRAM_1: |
762 | if (IS_QLA8031(ha)) | ||
763 | break; | ||
764 | if (!ha->flags.port0) | 748 | if (!ha->flags.port0) |
765 | ha->flt_region_nvram = start; | 749 | ha->flt_region_nvram = start; |
766 | break; | 750 | break; |
@@ -801,31 +785,6 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) | |||
801 | case FLT_REG_VPD_82XX: | 785 | case FLT_REG_VPD_82XX: |
802 | ha->flt_region_vpd = start; | 786 | ha->flt_region_vpd = start; |
803 | break; | 787 | break; |
804 | case FLT_REG_FCOE_VPD_0: | ||
805 | if (!IS_QLA8031(ha)) | ||
806 | break; | ||
807 | ha->flt_region_vpd_nvram = start; | ||
808 | if (ha->flags.port0) | ||
809 | ha->flt_region_vpd = start; | ||
810 | break; | ||
811 | case FLT_REG_FCOE_VPD_1: | ||
812 | if (!IS_QLA8031(ha)) | ||
813 | break; | ||
814 | if (!ha->flags.port0) | ||
815 | ha->flt_region_vpd = start; | ||
816 | break; | ||
817 | case FLT_REG_FCOE_NVRAM_0: | ||
818 | if (!IS_QLA8031(ha)) | ||
819 | break; | ||
820 | if (ha->flags.port0) | ||
821 | ha->flt_region_nvram = start; | ||
822 | break; | ||
823 | case FLT_REG_FCOE_NVRAM_1: | ||
824 | if (!IS_QLA8031(ha)) | ||
825 | break; | ||
826 | if (!ha->flags.port0) | ||
827 | ha->flt_region_nvram = start; | ||
828 | break; | ||
829 | } | 788 | } |
830 | } | 789 | } |
831 | goto done; | 790 | goto done; |
@@ -845,12 +804,15 @@ no_flash_data: | |||
845 | def_npiv_conf0[def] : def_npiv_conf1[def]; | 804 | def_npiv_conf0[def] : def_npiv_conf1[def]; |
846 | done: | 805 | done: |
847 | ql_dbg(ql_dbg_init, vha, 0x004a, | 806 | ql_dbg(ql_dbg_init, vha, 0x004a, |
848 | "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x nvram=0x%x " | 807 | "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x.\n", |
849 | "fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n", | 808 | loc, ha->flt_region_boot, |
850 | loc, ha->flt_region_boot, ha->flt_region_fw, | 809 | ha->flt_region_fw, ha->flt_region_vpd_nvram, |
851 | ha->flt_region_vpd_nvram, ha->flt_region_vpd, ha->flt_region_nvram, | 810 | ha->flt_region_vpd); |
852 | ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_npiv_conf, | 811 | ql_dbg(ql_dbg_init, vha, 0x004b, |
853 | ha->flt_region_fcp_prio); | 812 | "nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n", |
813 | ha->flt_region_nvram, | ||
814 | ha->flt_region_fdt, ha->flt_region_flt, | ||
815 | ha->flt_region_npiv_conf, ha->flt_region_fcp_prio); | ||
854 | } | 816 | } |
855 | 817 | ||
856 | static void | 818 | static void |
@@ -942,9 +904,8 @@ no_flash_data: | |||
942 | } | 904 | } |
943 | done: | 905 | done: |
944 | ql_dbg(ql_dbg_init, vha, 0x004d, | 906 | ql_dbg(ql_dbg_init, vha, 0x004d, |
945 | "FDT[%s]: (0x%x/0x%x) erase=0x%x " | 907 | "FDT[%x]: (0x%x/0x%x) erase=0x%x " |
946 | "pr=%x wrtd=0x%x blk=0x%x.\n", | 908 | "pr=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid, |
947 | loc, mid, fid, | ||
948 | ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, | 909 | ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, |
949 | ha->fdt_wrt_disable, ha->fdt_block_size); | 910 | ha->fdt_wrt_disable, ha->fdt_block_size); |
950 | 911 | ||
@@ -966,16 +927,16 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha) | |||
966 | QLA82XX_IDC_PARAM_ADDR , 8); | 927 | QLA82XX_IDC_PARAM_ADDR , 8); |
967 | 928 | ||
968 | if (*wptr == __constant_cpu_to_le32(0xffffffff)) { | 929 | if (*wptr == __constant_cpu_to_le32(0xffffffff)) { |
969 | ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT; | 930 | ha->nx_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT; |
970 | ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT; | 931 | ha->nx_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT; |
971 | } else { | 932 | } else { |
972 | ha->fcoe_dev_init_timeout = le32_to_cpu(*wptr++); | 933 | ha->nx_dev_init_timeout = le32_to_cpu(*wptr++); |
973 | ha->fcoe_reset_timeout = le32_to_cpu(*wptr); | 934 | ha->nx_reset_timeout = le32_to_cpu(*wptr); |
974 | } | 935 | } |
975 | ql_dbg(ql_dbg_init, vha, 0x004e, | 936 | ql_dbg(ql_dbg_init, vha, 0x004e, |
976 | "fcoe_dev_init_timeout=%d " | 937 | "nx_dev_init_timeout=%d " |
977 | "fcoe_reset_timeout=%d.\n", ha->fcoe_dev_init_timeout, | 938 | "nx_reset_timeout=%d.\n", ha->nx_dev_init_timeout, |
978 | ha->fcoe_reset_timeout); | 939 | ha->nx_reset_timeout); |
979 | return; | 940 | return; |
980 | } | 941 | } |
981 | 942 | ||
@@ -986,8 +947,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha) | |||
986 | uint32_t flt_addr; | 947 | uint32_t flt_addr; |
987 | struct qla_hw_data *ha = vha->hw; | 948 | struct qla_hw_data *ha = vha->hw; |
988 | 949 | ||
989 | if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && | 950 | if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha)) |
990 | !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) | ||
991 | return QLA_SUCCESS; | 951 | return QLA_SUCCESS; |
992 | 952 | ||
993 | ret = qla2xxx_find_flt_start(vha, &flt_addr); | 953 | ret = qla2xxx_find_flt_start(vha, &flt_addr); |
@@ -1013,11 +973,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) | |||
1013 | struct qla_npiv_entry *entry; | 973 | struct qla_npiv_entry *entry; |
1014 | struct qla_hw_data *ha = vha->hw; | 974 | struct qla_hw_data *ha = vha->hw; |
1015 | 975 | ||
1016 | if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && | 976 | if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha)) |
1017 | !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) | ||
1018 | return; | ||
1019 | |||
1020 | if (ha->flags.nic_core_reset_hdlr_active) | ||
1021 | return; | 977 | return; |
1022 | 978 | ||
1023 | ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, | 979 | ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, |
@@ -1187,8 +1143,8 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, | |||
1187 | struct qla_hw_data *ha = vha->hw; | 1143 | struct qla_hw_data *ha = vha->hw; |
1188 | 1144 | ||
1189 | /* Prepare burst-capable write on supported ISPs. */ | 1145 | /* Prepare burst-capable write on supported ISPs. */ |
1190 | if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) && | 1146 | if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && !(faddr & 0xfff) && |
1191 | !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) { | 1147 | dwords > OPTROM_BURST_DWORDS) { |
1192 | optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, | 1148 | optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, |
1193 | &optrom_dma, GFP_KERNEL); | 1149 | &optrom_dma, GFP_KERNEL); |
1194 | if (!optrom) { | 1150 | if (!optrom) { |
@@ -1662,100 +1618,6 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha) | |||
1662 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1618 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
1663 | } | 1619 | } |
1664 | 1620 | ||
1665 | static uint32_t | ||
1666 | qla83xx_select_led_port(struct qla_hw_data *ha) | ||
1667 | { | ||
1668 | uint32_t led_select_value = 0; | ||
1669 | |||
1670 | if (!IS_QLA83XX(ha)) | ||
1671 | goto out; | ||
1672 | |||
1673 | if (ha->flags.port0) | ||
1674 | led_select_value = QLA83XX_LED_PORT0; | ||
1675 | else | ||
1676 | led_select_value = QLA83XX_LED_PORT1; | ||
1677 | |||
1678 | out: | ||
1679 | return led_select_value; | ||
1680 | } | ||
1681 | |||
1682 | void | ||
1683 | qla83xx_beacon_blink(struct scsi_qla_host *vha) | ||
1684 | { | ||
1685 | uint32_t led_select_value; | ||
1686 | struct qla_hw_data *ha = vha->hw; | ||
1687 | uint16_t led_cfg[6]; | ||
1688 | uint16_t orig_led_cfg[6]; | ||
1689 | uint32_t led_10_value, led_43_value; | ||
1690 | |||
1691 | if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha)) | ||
1692 | return; | ||
1693 | |||
1694 | if (!ha->beacon_blink_led) | ||
1695 | return; | ||
1696 | |||
1697 | if (IS_QLA2031(ha)) { | ||
1698 | led_select_value = qla83xx_select_led_port(ha); | ||
1699 | |||
1700 | qla83xx_wr_reg(vha, led_select_value, 0x40002000); | ||
1701 | qla83xx_wr_reg(vha, led_select_value + 4, 0x40002000); | ||
1702 | msleep(1000); | ||
1703 | qla83xx_wr_reg(vha, led_select_value, 0x40004000); | ||
1704 | qla83xx_wr_reg(vha, led_select_value + 4, 0x40004000); | ||
1705 | } else if (IS_QLA8031(ha)) { | ||
1706 | led_select_value = qla83xx_select_led_port(ha); | ||
1707 | |||
1708 | qla83xx_rd_reg(vha, led_select_value, &led_10_value); | ||
1709 | qla83xx_rd_reg(vha, led_select_value + 0x10, &led_43_value); | ||
1710 | qla83xx_wr_reg(vha, led_select_value, 0x01f44000); | ||
1711 | msleep(500); | ||
1712 | qla83xx_wr_reg(vha, led_select_value, 0x400001f4); | ||
1713 | msleep(1000); | ||
1714 | qla83xx_wr_reg(vha, led_select_value, led_10_value); | ||
1715 | qla83xx_wr_reg(vha, led_select_value + 0x10, led_43_value); | ||
1716 | } else if (IS_QLA81XX(ha)) { | ||
1717 | int rval; | ||
1718 | |||
1719 | /* Save Current */ | ||
1720 | rval = qla81xx_get_led_config(vha, orig_led_cfg); | ||
1721 | /* Do the blink */ | ||
1722 | if (rval == QLA_SUCCESS) { | ||
1723 | if (IS_QLA81XX(ha)) { | ||
1724 | led_cfg[0] = 0x4000; | ||
1725 | led_cfg[1] = 0x2000; | ||
1726 | led_cfg[2] = 0; | ||
1727 | led_cfg[3] = 0; | ||
1728 | led_cfg[4] = 0; | ||
1729 | led_cfg[5] = 0; | ||
1730 | } else { | ||
1731 | led_cfg[0] = 0x4000; | ||
1732 | led_cfg[1] = 0x4000; | ||
1733 | led_cfg[2] = 0x4000; | ||
1734 | led_cfg[3] = 0x2000; | ||
1735 | led_cfg[4] = 0; | ||
1736 | led_cfg[5] = 0x2000; | ||
1737 | } | ||
1738 | rval = qla81xx_set_led_config(vha, led_cfg); | ||
1739 | msleep(1000); | ||
1740 | if (IS_QLA81XX(ha)) { | ||
1741 | led_cfg[0] = 0x4000; | ||
1742 | led_cfg[1] = 0x2000; | ||
1743 | led_cfg[2] = 0; | ||
1744 | } else { | ||
1745 | led_cfg[0] = 0x4000; | ||
1746 | led_cfg[1] = 0x2000; | ||
1747 | led_cfg[2] = 0x4000; | ||
1748 | led_cfg[3] = 0x4000; | ||
1749 | led_cfg[4] = 0; | ||
1750 | led_cfg[5] = 0x2000; | ||
1751 | } | ||
1752 | rval = qla81xx_set_led_config(vha, led_cfg); | ||
1753 | } | ||
1754 | /* On exit, restore original (presumes no status change) */ | ||
1755 | qla81xx_set_led_config(vha, orig_led_cfg); | ||
1756 | } | ||
1757 | } | ||
1758 | |||
1759 | int | 1621 | int |
1760 | qla24xx_beacon_on(struct scsi_qla_host *vha) | 1622 | qla24xx_beacon_on(struct scsi_qla_host *vha) |
1761 | { | 1623 | { |
@@ -1767,9 +1629,6 @@ qla24xx_beacon_on(struct scsi_qla_host *vha) | |||
1767 | if (IS_QLA82XX(ha)) | 1629 | if (IS_QLA82XX(ha)) |
1768 | return QLA_SUCCESS; | 1630 | return QLA_SUCCESS; |
1769 | 1631 | ||
1770 | if (IS_QLA8031(ha) || IS_QLA81XX(ha)) | ||
1771 | goto skip_gpio; /* let blink handle it */ | ||
1772 | |||
1773 | if (ha->beacon_blink_led == 0) { | 1632 | if (ha->beacon_blink_led == 0) { |
1774 | /* Enable firmware for update */ | 1633 | /* Enable firmware for update */ |
1775 | ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL; | 1634 | ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL; |
@@ -1784,9 +1643,6 @@ qla24xx_beacon_on(struct scsi_qla_host *vha) | |||
1784 | return QLA_FUNCTION_FAILED; | 1643 | return QLA_FUNCTION_FAILED; |
1785 | } | 1644 | } |
1786 | 1645 | ||
1787 | if (IS_QLA2031(ha)) | ||
1788 | goto skip_gpio; | ||
1789 | |||
1790 | spin_lock_irqsave(&ha->hardware_lock, flags); | 1646 | spin_lock_irqsave(&ha->hardware_lock, flags); |
1791 | gpio_data = RD_REG_DWORD(®->gpiod); | 1647 | gpio_data = RD_REG_DWORD(®->gpiod); |
1792 | 1648 | ||
@@ -1801,7 +1657,6 @@ qla24xx_beacon_on(struct scsi_qla_host *vha) | |||
1801 | /* So all colors blink together. */ | 1657 | /* So all colors blink together. */ |
1802 | ha->beacon_color_state = 0; | 1658 | ha->beacon_color_state = 0; |
1803 | 1659 | ||
1804 | skip_gpio: | ||
1805 | /* Let the per HBA timer kick off the blinking process. */ | 1660 | /* Let the per HBA timer kick off the blinking process. */ |
1806 | ha->beacon_blink_led = 1; | 1661 | ha->beacon_blink_led = 1; |
1807 | 1662 | ||
@@ -1820,13 +1675,6 @@ qla24xx_beacon_off(struct scsi_qla_host *vha) | |||
1820 | return QLA_SUCCESS; | 1675 | return QLA_SUCCESS; |
1821 | 1676 | ||
1822 | ha->beacon_blink_led = 0; | 1677 | ha->beacon_blink_led = 0; |
1823 | |||
1824 | if (IS_QLA2031(ha)) | ||
1825 | goto set_fw_options; | ||
1826 | |||
1827 | if (IS_QLA8031(ha) || IS_QLA81XX(ha)) | ||
1828 | return QLA_SUCCESS; | ||
1829 | |||
1830 | ha->beacon_color_state = QLA_LED_ALL_ON; | 1678 | ha->beacon_color_state = QLA_LED_ALL_ON; |
1831 | 1679 | ||
1832 | ha->isp_ops->beacon_blink(vha); /* Will flip to all off. */ | 1680 | ha->isp_ops->beacon_blink(vha); /* Will flip to all off. */ |
@@ -1841,7 +1689,6 @@ qla24xx_beacon_off(struct scsi_qla_host *vha) | |||
1841 | RD_REG_DWORD(®->gpiod); | 1689 | RD_REG_DWORD(®->gpiod); |
1842 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1690 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
1843 | 1691 | ||
1844 | set_fw_options: | ||
1845 | ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL; | 1692 | ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL; |
1846 | 1693 | ||
1847 | if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { | 1694 | if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c deleted file mode 100644 index 80f4b849e2b..00000000000 --- a/drivers/scsi/qla2xxx/qla_target.c +++ /dev/null | |||
@@ -1,4979 +0,0 @@ | |||
1 | /* | ||
2 | * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx | ||
3 | * | ||
4 | * based on qla2x00t.c code: | ||
5 | * | ||
6 | * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net> | ||
7 | * Copyright (C) 2004 - 2005 Leonid Stoljar | ||
8 | * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us> | ||
9 | * Copyright (C) 2006 - 2010 ID7 Ltd. | ||
10 | * | ||
11 | * Forward port and refactoring to modern qla2xxx and target/configfs | ||
12 | * | ||
13 | * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org> | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or | ||
16 | * modify it under the terms of the GNU General Public License | ||
17 | * as published by the Free Software Foundation, version 2 | ||
18 | * of the License. | ||
19 | * | ||
20 | * This program is distributed in the hope that it will be useful, | ||
21 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
22 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
23 | * GNU General Public License for more details. | ||
24 | */ | ||
25 | |||
26 | #include <linux/module.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <linux/blkdev.h> | ||
30 | #include <linux/interrupt.h> | ||
31 | #include <linux/pci.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/list.h> | ||
34 | #include <linux/workqueue.h> | ||
35 | #include <asm/unaligned.h> | ||
36 | #include <scsi/scsi.h> | ||
37 | #include <scsi/scsi_host.h> | ||
38 | #include <scsi/scsi_tcq.h> | ||
39 | #include <target/target_core_base.h> | ||
40 | #include <target/target_core_fabric.h> | ||
41 | |||
42 | #include "qla_def.h" | ||
43 | #include "qla_target.h" | ||
44 | |||
45 | static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED; | ||
46 | module_param(qlini_mode, charp, S_IRUGO); | ||
47 | MODULE_PARM_DESC(qlini_mode, | ||
48 | "Determines when initiator mode will be enabled. Possible values: " | ||
49 | "\"exclusive\" - initiator mode will be enabled on load, " | ||
50 | "disabled on enabling target mode and then on disabling target mode " | ||
51 | "enabled back; " | ||
52 | "\"disabled\" - initiator mode will never be enabled; " | ||
53 | "\"enabled\" (default) - initiator mode will always stay enabled."); | ||
54 | |||
55 | static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; | ||
56 | |||
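For reference, the qlini_mode string declared above is mapped onto ql2x_ini_mode by a parser further down in this file (not shown in this excerpt). A minimal sketch of that mapping, assuming the QLA2XXX_INI_MODE_* / QLA2XXX_INI_MODE_STR_* constants from qla_target.h and an illustrative helper name:

static bool qlt_parse_ini_mode_sketch(void)
{
	/* Compare the module parameter against the known mode strings. */
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else
		return false;	/* unrecognized value; keep the compiled-in default */

	return true;
}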
57 | /* | ||
58 | * From scsi/fc/fc_fcp.h | ||
59 | */ | ||
60 | enum fcp_resp_rsp_codes { | ||
61 | FCP_TMF_CMPL = 0, | ||
62 | FCP_DATA_LEN_INVALID = 1, | ||
63 | FCP_CMND_FIELDS_INVALID = 2, | ||
64 | FCP_DATA_PARAM_MISMATCH = 3, | ||
65 | FCP_TMF_REJECTED = 4, | ||
66 | FCP_TMF_FAILED = 5, | ||
67 | FCP_TMF_INVALID_LUN = 9, | ||
68 | }; | ||
69 | |||
70 | /* | ||
71 | * fc_pri_ta from scsi/fc/fc_fcp.h | ||
72 | */ | ||
73 | #define FCP_PTA_SIMPLE 0 /* simple task attribute */ | ||
74 | #define FCP_PTA_HEADQ 1 /* head of queue task attribute */ | ||
75 | #define FCP_PTA_ORDERED 2 /* ordered task attribute */ | ||
76 | #define FCP_PTA_ACA 4 /* auto. contingent allegiance */ | ||
77 | #define FCP_PTA_MASK 7 /* mask for task attribute field */ | ||
78 | #define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */ | ||
79 | #define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */ | ||
80 | |||
81 | /* | ||
82 | * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which | ||
83 | * must be called under HW lock and could unlock/lock it inside. | ||
84 | * It isn't an issue, since in the current implementation, at the time when | ||
85 | * those functions are called: | ||
86 | * | ||
87 | * - Either context is IRQ and only IRQ handler can modify HW data, | ||
88 | * including rings related fields, | ||
89 | * | ||
90 | * - Or access to target mode variables from struct qla_tgt doesn't | ||
91 | * cross those functions' boundaries, except tgt_stop, which is | ||
92 | * additionally protected by irq_cmd_count. | ||
93 | */ | ||
94 | /* Predefs for callbacks handed to qla2xxx LLD */ | ||
95 | static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha, | ||
96 | struct atio_from_isp *pkt); | ||
97 | static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt); | ||
98 | static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, | ||
99 | int fn, void *iocb, int flags); | ||
100 | static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd | ||
101 | *cmd, struct atio_from_isp *atio, int ha_locked); | ||
102 | static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, | ||
103 | struct qla_tgt_srr_imm *imm, int ha_lock); | ||
104 | /* | ||
105 | * Global Variables | ||
106 | */ | ||
107 | static struct kmem_cache *qla_tgt_cmd_cachep; | ||
108 | static struct kmem_cache *qla_tgt_mgmt_cmd_cachep; | ||
109 | static mempool_t *qla_tgt_mgmt_cmd_mempool; | ||
110 | static struct workqueue_struct *qla_tgt_wq; | ||
111 | static DEFINE_MUTEX(qla_tgt_mutex); | ||
112 | static LIST_HEAD(qla_tgt_glist); | ||
113 | |||
114 | /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */ | ||
115 | static struct qla_tgt_sess *qlt_find_sess_by_port_name( | ||
116 | struct qla_tgt *tgt, | ||
117 | const uint8_t *port_name) | ||
118 | { | ||
119 | struct qla_tgt_sess *sess; | ||
120 | |||
121 | list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) { | ||
122 | if (!memcmp(sess->port_name, port_name, WWN_SIZE)) | ||
123 | return sess; | ||
124 | } | ||
125 | |||
126 | return NULL; | ||
127 | } | ||
128 | |||
129 | /* Might release hw lock, then reacquire!! */ | ||
130 | static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked) | ||
131 | { | ||
132 | /* Send marker if required */ | ||
133 | if (unlikely(vha->marker_needed != 0)) { | ||
134 | int rc = qla2x00_issue_marker(vha, vha_locked); | ||
135 | if (rc != QLA_SUCCESS) { | ||
136 | ql_dbg(ql_dbg_tgt, vha, 0xe03d, | ||
137 | "qla_target(%d): issue_marker() failed\n", | ||
138 | vha->vp_idx); | ||
139 | } | ||
140 | return rc; | ||
141 | } | ||
142 | return QLA_SUCCESS; | ||
143 | } | ||
144 | |||
145 | static inline | ||
146 | struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, | ||
147 | uint8_t *d_id) | ||
148 | { | ||
149 | struct qla_hw_data *ha = vha->hw; | ||
150 | uint8_t vp_idx; | ||
151 | |||
152 | if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0])) | ||
153 | return NULL; | ||
154 | |||
155 | if (vha->d_id.b.al_pa == d_id[2]) | ||
156 | return vha; | ||
157 | |||
158 | BUG_ON(ha->tgt.tgt_vp_map == NULL); | ||
159 | vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx; | ||
160 | if (likely(test_bit(vp_idx, ha->vp_idx_map))) | ||
161 | return ha->tgt.tgt_vp_map[vp_idx].vha; | ||
162 | |||
163 | return NULL; | ||
164 | } | ||
165 | |||
166 | static inline | ||
167 | struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha, | ||
168 | uint16_t vp_idx) | ||
169 | { | ||
170 | struct qla_hw_data *ha = vha->hw; | ||
171 | |||
172 | if (vha->vp_idx == vp_idx) | ||
173 | return vha; | ||
174 | |||
175 | BUG_ON(ha->tgt.tgt_vp_map == NULL); | ||
176 | if (likely(test_bit(vp_idx, ha->vp_idx_map))) | ||
177 | return ha->tgt.tgt_vp_map[vp_idx].vha; | ||
178 | |||
179 | return NULL; | ||
180 | } | ||
181 | |||
182 | void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, | ||
183 | struct atio_from_isp *atio) | ||
184 | { | ||
185 | switch (atio->u.raw.entry_type) { | ||
186 | case ATIO_TYPE7: | ||
187 | { | ||
188 | struct scsi_qla_host *host = qlt_find_host_by_d_id(vha, | ||
189 | atio->u.isp24.fcp_hdr.d_id); | ||
190 | if (unlikely(NULL == host)) { | ||
191 | ql_dbg(ql_dbg_tgt, vha, 0xe03e, | ||
192 | "qla_target(%d): Received ATIO_TYPE7 " | ||
193 | "with unknown d_id %x:%x:%x\n", vha->vp_idx, | ||
194 | atio->u.isp24.fcp_hdr.d_id[0], | ||
195 | atio->u.isp24.fcp_hdr.d_id[1], | ||
196 | atio->u.isp24.fcp_hdr.d_id[2]); | ||
197 | break; | ||
198 | } | ||
199 | qlt_24xx_atio_pkt(host, atio); | ||
200 | break; | ||
201 | } | ||
202 | |||
203 | case IMMED_NOTIFY_TYPE: | ||
204 | { | ||
205 | struct scsi_qla_host *host = vha; | ||
206 | struct imm_ntfy_from_isp *entry = | ||
207 | (struct imm_ntfy_from_isp *)atio; | ||
208 | |||
209 | if ((entry->u.isp24.vp_index != 0xFF) && | ||
210 | (entry->u.isp24.nport_handle != 0xFFFF)) { | ||
211 | host = qlt_find_host_by_vp_idx(vha, | ||
212 | entry->u.isp24.vp_index); | ||
213 | if (unlikely(!host)) { | ||
214 | ql_dbg(ql_dbg_tgt, vha, 0xe03f, | ||
215 | "qla_target(%d): Received " | ||
216 | "ATIO (IMMED_NOTIFY_TYPE) " | ||
217 | "with unknown vp_index %d\n", | ||
218 | vha->vp_idx, entry->u.isp24.vp_index); | ||
219 | break; | ||
220 | } | ||
221 | } | ||
222 | qlt_24xx_atio_pkt(host, atio); | ||
223 | break; | ||
224 | } | ||
225 | |||
226 | default: | ||
227 | ql_dbg(ql_dbg_tgt, vha, 0xe040, | ||
228 | "qla_target(%d): Received unknown ATIO atio " | ||
229 | "type %x\n", vha->vp_idx, atio->u.raw.entry_type); | ||
230 | break; | ||
231 | } | ||
232 | |||
233 | return; | ||
234 | } | ||
235 | |||
236 | void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) | ||
237 | { | ||
238 | switch (pkt->entry_type) { | ||
239 | case CTIO_TYPE7: | ||
240 | { | ||
241 | struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; | ||
242 | struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, | ||
243 | entry->vp_index); | ||
244 | if (unlikely(!host)) { | ||
245 | ql_dbg(ql_dbg_tgt, vha, 0xe041, | ||
246 | "qla_target(%d): Response pkt (CTIO_TYPE7) " | ||
247 | "received, with unknown vp_index %d\n", | ||
248 | vha->vp_idx, entry->vp_index); | ||
249 | break; | ||
250 | } | ||
251 | qlt_response_pkt(host, pkt); | ||
252 | break; | ||
253 | } | ||
254 | |||
255 | case IMMED_NOTIFY_TYPE: | ||
256 | { | ||
257 | struct scsi_qla_host *host = vha; | ||
258 | struct imm_ntfy_from_isp *entry = | ||
259 | (struct imm_ntfy_from_isp *)pkt; | ||
260 | |||
261 | host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index); | ||
262 | if (unlikely(!host)) { | ||
263 | ql_dbg(ql_dbg_tgt, vha, 0xe042, | ||
264 | "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) " | ||
265 | "received, with unknown vp_index %d\n", | ||
266 | vha->vp_idx, entry->u.isp24.vp_index); | ||
267 | break; | ||
268 | } | ||
269 | qlt_response_pkt(host, pkt); | ||
270 | break; | ||
271 | } | ||
272 | |||
273 | case NOTIFY_ACK_TYPE: | ||
274 | { | ||
275 | struct scsi_qla_host *host = vha; | ||
276 | struct nack_to_isp *entry = (struct nack_to_isp *)pkt; | ||
277 | |||
278 | if (0xFF != entry->u.isp24.vp_index) { | ||
279 | host = qlt_find_host_by_vp_idx(vha, | ||
280 | entry->u.isp24.vp_index); | ||
281 | if (unlikely(!host)) { | ||
282 | ql_dbg(ql_dbg_tgt, vha, 0xe043, | ||
283 | "qla_target(%d): Response " | ||
284 | "pkt (NOTIFY_ACK_TYPE) " | ||
285 | "received, with unknown " | ||
286 | "vp_index %d\n", vha->vp_idx, | ||
287 | entry->u.isp24.vp_index); | ||
288 | break; | ||
289 | } | ||
290 | } | ||
291 | qlt_response_pkt(host, pkt); | ||
292 | break; | ||
293 | } | ||
294 | |||
295 | case ABTS_RECV_24XX: | ||
296 | { | ||
297 | struct abts_recv_from_24xx *entry = | ||
298 | (struct abts_recv_from_24xx *)pkt; | ||
299 | struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, | ||
300 | entry->vp_index); | ||
301 | if (unlikely(!host)) { | ||
302 | ql_dbg(ql_dbg_tgt, vha, 0xe044, | ||
303 | "qla_target(%d): Response pkt " | ||
304 | "(ABTS_RECV_24XX) received, with unknown " | ||
305 | "vp_index %d\n", vha->vp_idx, entry->vp_index); | ||
306 | break; | ||
307 | } | ||
308 | qlt_response_pkt(host, pkt); | ||
309 | break; | ||
310 | } | ||
311 | |||
312 | case ABTS_RESP_24XX: | ||
313 | { | ||
314 | struct abts_resp_to_24xx *entry = | ||
315 | (struct abts_resp_to_24xx *)pkt; | ||
316 | struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, | ||
317 | entry->vp_index); | ||
318 | if (unlikely(!host)) { | ||
319 | ql_dbg(ql_dbg_tgt, vha, 0xe045, | ||
320 | "qla_target(%d): Response pkt " | ||
321 | "(ABTS_RECV_24XX) received, with unknown " | ||
322 | "vp_index %d\n", vha->vp_idx, entry->vp_index); | ||
323 | break; | ||
324 | } | ||
325 | qlt_response_pkt(host, pkt); | ||
326 | break; | ||
327 | } | ||
328 | |||
329 | default: | ||
330 | qlt_response_pkt(vha, pkt); | ||
331 | break; | ||
332 | } | ||
333 | |||
334 | } | ||
335 | |||
336 | static void qlt_free_session_done(struct work_struct *work) | ||
337 | { | ||
338 | struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess, | ||
339 | free_work); | ||
340 | struct qla_tgt *tgt = sess->tgt; | ||
341 | struct scsi_qla_host *vha = sess->vha; | ||
342 | struct qla_hw_data *ha = vha->hw; | ||
343 | |||
344 | BUG_ON(!tgt); | ||
345 | /* | ||
346 | * Release the target session for FC Nexus from fabric module code. | ||
347 | */ | ||
348 | if (sess->se_sess != NULL) | ||
349 | ha->tgt.tgt_ops->free_session(sess); | ||
350 | |||
351 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, | ||
352 | "Unregistration of sess %p finished\n", sess); | ||
353 | |||
354 | kfree(sess); | ||
355 | /* | ||
356 | * We need to protect against a race where tgt is freed before or | ||
357 | * inside wake_up(). | ||
358 | */ | ||
359 | tgt->sess_count--; | ||
360 | if (tgt->sess_count == 0) | ||
361 | wake_up_all(&tgt->waitQ); | ||
362 | } | ||
363 | |||
364 | /* ha->hardware_lock supposed to be held on entry */ | ||
365 | void qlt_unreg_sess(struct qla_tgt_sess *sess) | ||
366 | { | ||
367 | struct scsi_qla_host *vha = sess->vha; | ||
368 | |||
369 | vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); | ||
370 | |||
371 | list_del(&sess->sess_list_entry); | ||
372 | if (sess->deleted) | ||
373 | list_del(&sess->del_list_entry); | ||
374 | |||
375 | INIT_WORK(&sess->free_work, qlt_free_session_done); | ||
376 | schedule_work(&sess->free_work); | ||
377 | } | ||
378 | EXPORT_SYMBOL(qlt_unreg_sess); | ||
379 | |||
380 | /* ha->hardware_lock supposed to be held on entry */ | ||
381 | static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) | ||
382 | { | ||
383 | struct qla_hw_data *ha = vha->hw; | ||
384 | struct qla_tgt_sess *sess = NULL; | ||
385 | uint32_t unpacked_lun, lun = 0; | ||
386 | uint16_t loop_id; | ||
387 | int res = 0; | ||
388 | struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; | ||
389 | struct atio_from_isp *a = (struct atio_from_isp *)iocb; | ||
390 | |||
391 | loop_id = le16_to_cpu(n->u.isp24.nport_handle); | ||
392 | if (loop_id == 0xFFFF) { | ||
393 | #if 0 /* FIXME: Re-enable Global event handling.. */ | ||
394 | /* Global event */ | ||
395 | atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count); | ||
396 | qlt_clear_tgt_db(ha->tgt.qla_tgt, 1); | ||
397 | if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { | ||
398 | sess = list_entry(ha->tgt.qla_tgt->sess_list.next, | ||
399 | typeof(*sess), sess_list_entry); | ||
400 | switch (mcmd) { | ||
401 | case QLA_TGT_NEXUS_LOSS_SESS: | ||
402 | mcmd = QLA_TGT_NEXUS_LOSS; | ||
403 | break; | ||
404 | case QLA_TGT_ABORT_ALL_SESS: | ||
405 | mcmd = QLA_TGT_ABORT_ALL; | ||
406 | break; | ||
407 | case QLA_TGT_NEXUS_LOSS: | ||
408 | case QLA_TGT_ABORT_ALL: | ||
409 | break; | ||
410 | default: | ||
411 | ql_dbg(ql_dbg_tgt, vha, 0xe046, | ||
412 | "qla_target(%d): Not allowed " | ||
413 | "command %x in %s", vha->vp_idx, | ||
414 | mcmd, __func__); | ||
415 | sess = NULL; | ||
416 | break; | ||
417 | } | ||
418 | } else | ||
419 | sess = NULL; | ||
420 | #endif | ||
421 | } else { | ||
422 | sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); | ||
423 | } | ||
424 | |||
425 | ql_dbg(ql_dbg_tgt, vha, 0xe000, | ||
426 | "Using sess for qla_tgt_reset: %p\n", sess); | ||
427 | if (!sess) { | ||
428 | res = -ESRCH; | ||
429 | return res; | ||
430 | } | ||
431 | |||
432 | ql_dbg(ql_dbg_tgt, vha, 0xe047, | ||
433 | "scsi(%ld): resetting (session %p from port " | ||
434 | "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, " | ||
435 | "mcmd %x, loop_id %d)\n", vha->host_no, sess, | ||
436 | sess->port_name[0], sess->port_name[1], | ||
437 | sess->port_name[2], sess->port_name[3], | ||
438 | sess->port_name[4], sess->port_name[5], | ||
439 | sess->port_name[6], sess->port_name[7], | ||
440 | mcmd, loop_id); | ||
441 | |||
442 | lun = a->u.isp24.fcp_cmnd.lun; | ||
443 | unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); | ||
444 | |||
445 | return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd, | ||
446 | iocb, QLA24XX_MGMT_SEND_NACK); | ||
447 | } | ||
448 | |||
449 | /* ha->hardware_lock supposed to be held on entry */ | ||
450 | static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, | ||
451 | bool immediate) | ||
452 | { | ||
453 | struct qla_tgt *tgt = sess->tgt; | ||
454 | uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5; | ||
455 | |||
456 | if (sess->deleted) | ||
457 | return; | ||
458 | |||
459 | ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, | ||
460 | "Scheduling sess %p for deletion\n", sess); | ||
461 | list_add_tail(&sess->del_list_entry, &tgt->del_sess_list); | ||
462 | sess->deleted = 1; | ||
463 | |||
464 | if (immediate) | ||
465 | dev_loss_tmo = 0; | ||
466 | |||
467 | sess->expires = jiffies + dev_loss_tmo * HZ; | ||
468 | |||
469 | ql_dbg(ql_dbg_tgt, sess->vha, 0xe048, | ||
470 | "qla_target(%d): session for port %02x:%02x:%02x:" | ||
471 | "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for " | ||
472 | "deletion in %u secs (expires: %lu) immed: %d\n", | ||
473 | sess->vha->vp_idx, | ||
474 | sess->port_name[0], sess->port_name[1], | ||
475 | sess->port_name[2], sess->port_name[3], | ||
476 | sess->port_name[4], sess->port_name[5], | ||
477 | sess->port_name[6], sess->port_name[7], | ||
478 | sess->loop_id, dev_loss_tmo, sess->expires, immediate); | ||
479 | |||
480 | if (immediate) | ||
481 | schedule_delayed_work(&tgt->sess_del_work, 0); | ||
482 | else | ||
483 | schedule_delayed_work(&tgt->sess_del_work, | ||
484 | jiffies - sess->expires); | ||
485 | } | ||
486 | |||
487 | /* ha->hardware_lock supposed to be held on entry */ | ||
488 | static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only) | ||
489 | { | ||
490 | struct qla_tgt_sess *sess; | ||
491 | |||
492 | list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) | ||
493 | qlt_schedule_sess_for_deletion(sess, true); | ||
494 | |||
495 | /* At this point tgt could be already dead */ | ||
496 | } | ||
497 | |||
498 | static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id, | ||
499 | uint16_t *loop_id) | ||
500 | { | ||
501 | struct qla_hw_data *ha = vha->hw; | ||
502 | dma_addr_t gid_list_dma; | ||
503 | struct gid_list_info *gid_list; | ||
504 | char *id_iter; | ||
505 | int res, rc, i; | ||
506 | uint16_t entries; | ||
507 | |||
508 | gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), | ||
509 | &gid_list_dma, GFP_KERNEL); | ||
510 | if (!gid_list) { | ||
511 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044, | ||
512 | "qla_target(%d): DMA Alloc failed of %u\n", | ||
513 | vha->vp_idx, qla2x00_gid_list_size(ha)); | ||
514 | return -ENOMEM; | ||
515 | } | ||
516 | |||
517 | /* Get list of logged in devices */ | ||
518 | rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries); | ||
519 | if (rc != QLA_SUCCESS) { | ||
520 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045, | ||
521 | "qla_target(%d): get_id_list() failed: %x\n", | ||
522 | vha->vp_idx, rc); | ||
523 | res = -1; | ||
524 | goto out_free_id_list; | ||
525 | } | ||
526 | |||
527 | id_iter = (char *)gid_list; | ||
528 | res = -1; | ||
529 | for (i = 0; i < entries; i++) { | ||
530 | struct gid_list_info *gid = (struct gid_list_info *)id_iter; | ||
531 | if ((gid->al_pa == s_id[2]) && | ||
532 | (gid->area == s_id[1]) && | ||
533 | (gid->domain == s_id[0])) { | ||
534 | *loop_id = le16_to_cpu(gid->loop_id); | ||
535 | res = 0; | ||
536 | break; | ||
537 | } | ||
538 | id_iter += ha->gid_list_info_size; | ||
539 | } | ||
540 | |||
541 | out_free_id_list: | ||
542 | dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), | ||
543 | gid_list, gid_list_dma); | ||
544 | return res; | ||
545 | } | ||
546 | |||
547 | static bool qlt_check_fcport_exist(struct scsi_qla_host *vha, | ||
548 | struct qla_tgt_sess *sess) | ||
549 | { | ||
550 | struct qla_hw_data *ha = vha->hw; | ||
551 | struct qla_port_24xx_data *pmap24; | ||
552 | bool res, found = false; | ||
553 | int rc, i; | ||
554 | uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */ | ||
555 | uint16_t entries; | ||
556 | void *pmap; | ||
557 | int pmap_len; | ||
558 | fc_port_t *fcport; | ||
559 | int global_resets; | ||
560 | unsigned long flags; | ||
561 | |||
562 | retry: | ||
563 | global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count); | ||
564 | |||
565 | rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len); | ||
566 | if (rc != QLA_SUCCESS) { | ||
567 | res = false; | ||
568 | goto out; | ||
569 | } | ||
570 | |||
571 | pmap24 = pmap; | ||
572 | entries = pmap_len/sizeof(*pmap24); | ||
573 | |||
574 | for (i = 0; i < entries; ++i) { | ||
575 | if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) { | ||
576 | loop_id = le16_to_cpu(pmap24[i].loop_id); | ||
577 | found = true; | ||
578 | break; | ||
579 | } | ||
580 | } | ||
581 | |||
582 | kfree(pmap); | ||
583 | |||
584 | if (!found) { | ||
585 | res = false; | ||
586 | goto out; | ||
587 | } | ||
588 | |||
589 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046, | ||
590 | "qlt_check_fcport_exist(): loop_id %d", loop_id); | ||
591 | |||
592 | fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); | ||
593 | if (fcport == NULL) { | ||
594 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047, | ||
595 | "qla_target(%d): Allocation of tmp FC port failed", | ||
596 | vha->vp_idx); | ||
597 | res = false; | ||
598 | goto out; | ||
599 | } | ||
600 | |||
601 | fcport->loop_id = loop_id; | ||
602 | |||
603 | rc = qla2x00_get_port_database(vha, fcport, 0); | ||
604 | if (rc != QLA_SUCCESS) { | ||
605 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048, | ||
606 | "qla_target(%d): Failed to retrieve fcport " | ||
607 | "information -- get_port_database() returned %x " | ||
608 | "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); | ||
609 | res = false; | ||
610 | goto out_free_fcport; | ||
611 | } | ||
612 | |||
613 | if (global_resets != | ||
614 | atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) { | ||
615 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002, | ||
616 | "qla_target(%d): global reset during session discovery" | ||
617 | " (counter was %d, new %d), retrying", | ||
618 | vha->vp_idx, global_resets, | ||
619 | atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)); | ||
620 | goto retry; | ||
621 | } | ||
622 | |||
623 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003, | ||
624 | "Updating sess %p s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, " | ||
625 | "loop_id %d", sess, sess->s_id.b.domain, sess->s_id.b.al_pa, | ||
626 | sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain, | ||
627 | fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id); | ||
628 | |||
629 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
630 | ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, | ||
631 | (fcport->flags & FCF_CONF_COMP_SUPPORTED)); | ||
632 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
633 | |||
634 | res = true; | ||
635 | |||
636 | out_free_fcport: | ||
637 | kfree(fcport); | ||
638 | |||
639 | out: | ||
640 | return res; | ||
641 | } | ||
642 | |||
643 | /* ha->hardware_lock supposed to be held on entry */ | ||
644 | static void qlt_undelete_sess(struct qla_tgt_sess *sess) | ||
645 | { | ||
646 | BUG_ON(!sess->deleted); | ||
647 | |||
648 | list_del(&sess->del_list_entry); | ||
649 | sess->deleted = 0; | ||
650 | } | ||
651 | |||
652 | static void qlt_del_sess_work_fn(struct delayed_work *work) | ||
653 | { | ||
654 | struct qla_tgt *tgt = container_of(work, struct qla_tgt, | ||
655 | sess_del_work); | ||
656 | struct scsi_qla_host *vha = tgt->vha; | ||
657 | struct qla_hw_data *ha = vha->hw; | ||
658 | struct qla_tgt_sess *sess; | ||
659 | unsigned long flags; | ||
660 | |||
661 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
662 | while (!list_empty(&tgt->del_sess_list)) { | ||
663 | sess = list_entry(tgt->del_sess_list.next, typeof(*sess), | ||
664 | del_list_entry); | ||
665 | if (time_after_eq(jiffies, sess->expires)) { | ||
666 | bool cancel; | ||
667 | |||
668 | qlt_undelete_sess(sess); | ||
669 | |||
670 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
671 | cancel = qlt_check_fcport_exist(vha, sess); | ||
672 | |||
673 | if (cancel) { | ||
674 | if (sess->deleted) { | ||
675 | /* | ||
676 | * sess was again deleted while we were | ||
677 | * discovering it | ||
678 | */ | ||
679 | spin_lock_irqsave(&ha->hardware_lock, | ||
680 | flags); | ||
681 | continue; | ||
682 | } | ||
683 | |||
684 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049, | ||
685 | "qla_target(%d): cancel deletion of " | ||
686 | "session for port %02x:%02x:%02x:%02x:%02x:" | ||
687 | "%02x:%02x:%02x (loop ID %d), because " | ||
688 | " it isn't deleted by firmware", | ||
689 | vha->vp_idx, sess->port_name[0], | ||
690 | sess->port_name[1], sess->port_name[2], | ||
691 | sess->port_name[3], sess->port_name[4], | ||
692 | sess->port_name[5], sess->port_name[6], | ||
693 | sess->port_name[7], sess->loop_id); | ||
694 | } else { | ||
695 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, | ||
696 | "Timeout: sess %p about to be deleted\n", | ||
697 | sess); | ||
698 | ha->tgt.tgt_ops->shutdown_sess(sess); | ||
699 | ha->tgt.tgt_ops->put_sess(sess); | ||
700 | } | ||
701 | |||
702 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
703 | } else { | ||
704 | schedule_delayed_work(&tgt->sess_del_work, | ||
705 | jiffies - sess->expires); | ||
706 | break; | ||
707 | } | ||
708 | } | ||
709 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
710 | } | ||
711 | |||
712 | /* | ||
713 | * Adds an extra ref to allow dropping the hw lock after adding sess to the list. | ||
714 | * Caller must put it. | ||
715 | */ | ||
716 | static struct qla_tgt_sess *qlt_create_sess( | ||
717 | struct scsi_qla_host *vha, | ||
718 | fc_port_t *fcport, | ||
719 | bool local) | ||
720 | { | ||
721 | struct qla_hw_data *ha = vha->hw; | ||
722 | struct qla_tgt_sess *sess; | ||
723 | unsigned long flags; | ||
724 | unsigned char be_sid[3]; | ||
725 | |||
726 | /* Check to avoid double sessions */ | ||
727 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
728 | list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list, | ||
729 | sess_list_entry) { | ||
730 | if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) { | ||
731 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005, | ||
732 | "Double sess %p found (s_id %x:%x:%x, " | ||
733 | "loop_id %d), updating to d_id %x:%x:%x, " | ||
734 | "loop_id %d", sess, sess->s_id.b.domain, | ||
735 | sess->s_id.b.al_pa, sess->s_id.b.area, | ||
736 | sess->loop_id, fcport->d_id.b.domain, | ||
737 | fcport->d_id.b.al_pa, fcport->d_id.b.area, | ||
738 | fcport->loop_id); | ||
739 | |||
740 | if (sess->deleted) | ||
741 | qlt_undelete_sess(sess); | ||
742 | |||
743 | kref_get(&sess->se_sess->sess_kref); | ||
744 | ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, | ||
745 | (fcport->flags & FCF_CONF_COMP_SUPPORTED)); | ||
746 | |||
747 | if (sess->local && !local) | ||
748 | sess->local = 0; | ||
749 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
750 | |||
751 | return sess; | ||
752 | } | ||
753 | } | ||
754 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
755 | |||
756 | sess = kzalloc(sizeof(*sess), GFP_KERNEL); | ||
757 | if (!sess) { | ||
758 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a, | ||
759 | "qla_target(%u): session allocation failed, " | ||
760 | "all commands from port %02x:%02x:%02x:%02x:" | ||
761 | "%02x:%02x:%02x:%02x will be refused", vha->vp_idx, | ||
762 | fcport->port_name[0], fcport->port_name[1], | ||
763 | fcport->port_name[2], fcport->port_name[3], | ||
764 | fcport->port_name[4], fcport->port_name[5], | ||
765 | fcport->port_name[6], fcport->port_name[7]); | ||
766 | |||
767 | return NULL; | ||
768 | } | ||
769 | sess->tgt = ha->tgt.qla_tgt; | ||
770 | sess->vha = vha; | ||
771 | sess->s_id = fcport->d_id; | ||
772 | sess->loop_id = fcport->loop_id; | ||
773 | sess->local = local; | ||
774 | |||
775 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, | ||
776 | "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", | ||
777 | sess, ha->tgt.qla_tgt); | ||
778 | |||
779 | be_sid[0] = sess->s_id.b.domain; | ||
780 | be_sid[1] = sess->s_id.b.area; | ||
781 | be_sid[2] = sess->s_id.b.al_pa; | ||
782 | /* | ||
783 | * Determine if this fc_port->port_name is allowed to access | ||
784 | * target mode using explicit NodeACLs+MappedLUNs, or using | ||
785 | * TPG demo mode. If this is successful a target mode FC nexus | ||
786 | * is created. | ||
787 | */ | ||
788 | if (ha->tgt.tgt_ops->check_initiator_node_acl(vha, | ||
789 | &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) { | ||
790 | kfree(sess); | ||
791 | return NULL; | ||
792 | } | ||
793 | /* | ||
794 | * Take an extra reference to ->sess_kref here to handle qla_tgt_sess | ||
795 | * access across ->hardware_lock reacquire. | ||
796 | */ | ||
797 | kref_get(&sess->se_sess->sess_kref); | ||
798 | |||
799 | sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED); | ||
800 | BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name)); | ||
801 | memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name)); | ||
802 | |||
803 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
804 | list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list); | ||
805 | ha->tgt.qla_tgt->sess_count++; | ||
806 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
807 | |||
808 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, | ||
809 | "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:" | ||
810 | "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed" | ||
811 | " completion %ssupported) added\n", | ||
812 | vha->vp_idx, local ? "local " : "", fcport->port_name[0], | ||
813 | fcport->port_name[1], fcport->port_name[2], fcport->port_name[3], | ||
814 | fcport->port_name[4], fcport->port_name[5], fcport->port_name[6], | ||
815 | fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain, | ||
816 | sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ? | ||
817 | "" : "not "); | ||
818 | |||
819 | return sess; | ||
820 | } | ||
821 | |||
822 | /* | ||
823 | * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() | ||
824 | */ | ||
825 | void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) | ||
826 | { | ||
827 | struct qla_hw_data *ha = vha->hw; | ||
828 | struct qla_tgt *tgt = ha->tgt.qla_tgt; | ||
829 | struct qla_tgt_sess *sess; | ||
830 | unsigned long flags; | ||
831 | |||
832 | if (!vha->hw->tgt.tgt_ops) | ||
833 | return; | ||
834 | |||
835 | if (!tgt || (fcport->port_type != FCT_INITIATOR)) | ||
836 | return; | ||
837 | |||
838 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
839 | if (tgt->tgt_stop) { | ||
840 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
841 | return; | ||
842 | } | ||
843 | sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); | ||
844 | if (!sess) { | ||
845 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
846 | |||
847 | mutex_lock(&ha->tgt.tgt_mutex); | ||
848 | sess = qlt_create_sess(vha, fcport, false); | ||
849 | mutex_unlock(&ha->tgt.tgt_mutex); | ||
850 | |||
851 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
852 | } else { | ||
853 | kref_get(&sess->se_sess->sess_kref); | ||
854 | |||
855 | if (sess->deleted) { | ||
856 | qlt_undelete_sess(sess); | ||
857 | |||
858 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, | ||
859 | "qla_target(%u): %ssession for port %02x:" | ||
860 | "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) " | ||
861 | "reappeared\n", vha->vp_idx, sess->local ? "local " | ||
862 | : "", sess->port_name[0], sess->port_name[1], | ||
863 | sess->port_name[2], sess->port_name[3], | ||
864 | sess->port_name[4], sess->port_name[5], | ||
865 | sess->port_name[6], sess->port_name[7], | ||
866 | sess->loop_id); | ||
867 | |||
868 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, | ||
869 | "Reappeared sess %p\n", sess); | ||
870 | } | ||
871 | ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, | ||
872 | (fcport->flags & FCF_CONF_COMP_SUPPORTED)); | ||
873 | } | ||
874 | |||
875 | if (sess && sess->local) { | ||
876 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, | ||
877 | "qla_target(%u): local session for " | ||
878 | "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " | ||
879 | "(loop ID %d) became global\n", vha->vp_idx, | ||
880 | fcport->port_name[0], fcport->port_name[1], | ||
881 | fcport->port_name[2], fcport->port_name[3], | ||
882 | fcport->port_name[4], fcport->port_name[5], | ||
883 | fcport->port_name[6], fcport->port_name[7], | ||
884 | sess->loop_id); | ||
885 | sess->local = 0; | ||
886 | } | ||
887 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
888 | |||
889 | ha->tgt.tgt_ops->put_sess(sess); | ||
890 | } | ||
891 | |||
892 | void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) | ||
893 | { | ||
894 | struct qla_hw_data *ha = vha->hw; | ||
895 | struct qla_tgt *tgt = ha->tgt.qla_tgt; | ||
896 | struct qla_tgt_sess *sess; | ||
897 | unsigned long flags; | ||
898 | |||
899 | if (!vha->hw->tgt.tgt_ops) | ||
900 | return; | ||
901 | |||
902 | if (!tgt || (fcport->port_type != FCT_INITIATOR)) | ||
903 | return; | ||
904 | |||
905 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
906 | if (tgt->tgt_stop) { | ||
907 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
908 | return; | ||
909 | } | ||
910 | sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); | ||
911 | if (!sess) { | ||
912 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
913 | return; | ||
914 | } | ||
915 | |||
916 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); | ||
917 | |||
918 | sess->local = 1; | ||
919 | qlt_schedule_sess_for_deletion(sess, false); | ||
920 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
921 | } | ||
922 | |||
923 | static inline int test_tgt_sess_count(struct qla_tgt *tgt) | ||
924 | { | ||
925 | struct qla_hw_data *ha = tgt->ha; | ||
926 | unsigned long flags; | ||
927 | int res; | ||
928 | /* | ||
929 | * We need to protect against a race where tgt is freed before or | ||
930 | * inside wake_up(). | ||
931 | */ | ||
932 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
933 | ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002, | ||
934 | "tgt %p, empty(sess_list)=%d sess_count=%d\n", | ||
935 | tgt, list_empty(&tgt->sess_list), tgt->sess_count); | ||
936 | res = (tgt->sess_count == 0); | ||
937 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
938 | |||
939 | return res; | ||
940 | } | ||
941 | |||
942 | /* Called by tcm_qla2xxx configfs code */ | ||
943 | void qlt_stop_phase1(struct qla_tgt *tgt) | ||
944 | { | ||
945 | struct scsi_qla_host *vha = tgt->vha; | ||
946 | struct qla_hw_data *ha = tgt->ha; | ||
947 | unsigned long flags; | ||
948 | |||
949 | if (tgt->tgt_stop || tgt->tgt_stopped) { | ||
950 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e, | ||
951 | "Already in tgt->tgt_stop or tgt_stopped state\n"); | ||
952 | dump_stack(); | ||
953 | return; | ||
954 | } | ||
955 | |||
956 | ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n", | ||
957 | vha->host_no, vha); | ||
958 | /* | ||
959 | * Mutex needed to sync with qla_tgt_fc_port_[added,deleted]. | ||
960 | * Lock is needed, because we still can get an incoming packet. | ||
961 | */ | ||
962 | mutex_lock(&ha->tgt.tgt_mutex); | ||
963 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
964 | tgt->tgt_stop = 1; | ||
965 | qlt_clear_tgt_db(tgt, true); | ||
966 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
967 | mutex_unlock(&ha->tgt.tgt_mutex); | ||
968 | |||
969 | flush_delayed_work(&tgt->sess_del_work); | ||
970 | |||
971 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009, | ||
972 | "Waiting for sess works (tgt %p)", tgt); | ||
973 | spin_lock_irqsave(&tgt->sess_work_lock, flags); | ||
974 | while (!list_empty(&tgt->sess_works_list)) { | ||
975 | spin_unlock_irqrestore(&tgt->sess_work_lock, flags); | ||
976 | flush_scheduled_work(); | ||
977 | spin_lock_irqsave(&tgt->sess_work_lock, flags); | ||
978 | } | ||
979 | spin_unlock_irqrestore(&tgt->sess_work_lock, flags); | ||
980 | |||
981 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a, | ||
982 | "Waiting for tgt %p: list_empty(sess_list)=%d " | ||
983 | "sess_count=%d\n", tgt, list_empty(&tgt->sess_list), | ||
984 | tgt->sess_count); | ||
985 | |||
986 | wait_event(tgt->waitQ, test_tgt_sess_count(tgt)); | ||
987 | |||
988 | /* Big hammer */ | ||
989 | if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha)) | ||
990 | qlt_disable_vha(vha); | ||
991 | |||
992 | /* Wait for sessions to clear out (just in case) */ | ||
993 | wait_event(tgt->waitQ, test_tgt_sess_count(tgt)); | ||
994 | } | ||
995 | EXPORT_SYMBOL(qlt_stop_phase1); | ||
996 | |||
997 | /* Called by tcm_qla2xxx configfs code */ | ||
998 | void qlt_stop_phase2(struct qla_tgt *tgt) | ||
999 | { | ||
1000 | struct qla_hw_data *ha = tgt->ha; | ||
1001 | unsigned long flags; | ||
1002 | |||
1003 | if (tgt->tgt_stopped) { | ||
1004 | ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f, | ||
1005 | "Already in tgt->tgt_stopped state\n"); | ||
1006 | dump_stack(); | ||
1007 | return; | ||
1008 | } | ||
1009 | |||
1010 | ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b, | ||
1011 | "Waiting for %d IRQ commands to complete (tgt %p)", | ||
1012 | tgt->irq_cmd_count, tgt); | ||
1013 | |||
1014 | mutex_lock(&ha->tgt.tgt_mutex); | ||
1015 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
1016 | while (tgt->irq_cmd_count != 0) { | ||
1017 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
1018 | udelay(2); | ||
1019 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
1020 | } | ||
1021 | tgt->tgt_stop = 0; | ||
1022 | tgt->tgt_stopped = 1; | ||
1023 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
1024 | mutex_unlock(&ha->tgt.tgt_mutex); | ||
1025 | |||
1026 | ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished", | ||
1027 | tgt); | ||
1028 | } | ||
1029 | EXPORT_SYMBOL(qlt_stop_phase2); | ||
1030 | |||
1031 | /* Called from qlt_remove_target() -> qla2x00_remove_one() */ | ||
1032 | static void qlt_release(struct qla_tgt *tgt) | ||
1033 | { | ||
1034 | struct qla_hw_data *ha = tgt->ha; | ||
1035 | |||
1036 | if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped) | ||
1037 | qlt_stop_phase2(tgt); | ||
1038 | |||
1039 | ha->tgt.qla_tgt = NULL; | ||
1040 | |||
1041 | ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d, | ||
1042 | "Release of tgt %p finished\n", tgt); | ||
1043 | |||
1044 | kfree(tgt); | ||
1045 | } | ||
1046 | |||
1047 | /* ha->hardware_lock supposed to be held on entry */ | ||
1048 | static int qlt_sched_sess_work(struct qla_tgt *tgt, int type, | ||
1049 | const void *param, unsigned int param_size) | ||
1050 | { | ||
1051 | struct qla_tgt_sess_work_param *prm; | ||
1052 | unsigned long flags; | ||
1053 | |||
1054 | prm = kzalloc(sizeof(*prm), GFP_ATOMIC); | ||
1055 | if (!prm) { | ||
1056 | ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050, | ||
1057 | "qla_target(%d): Unable to create session " | ||
1058 | "work, command will be refused", 0); | ||
1059 | return -ENOMEM; | ||
1060 | } | ||
1061 | |||
1062 | ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e, | ||
1063 | "Scheduling work (type %d, prm %p)" | ||
1064 | " to find session for param %p (size %d, tgt %p)\n", | ||
1065 | type, prm, param, param_size, tgt); | ||
1066 | |||
1067 | prm->type = type; | ||
1068 | memcpy(&prm->tm_iocb, param, param_size); | ||
1069 | |||
1070 | spin_lock_irqsave(&tgt->sess_work_lock, flags); | ||
1071 | list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list); | ||
1072 | spin_unlock_irqrestore(&tgt->sess_work_lock, flags); | ||
1073 | |||
1074 | schedule_work(&tgt->sess_work); | ||
1075 | |||
1076 | return 0; | ||
1077 | } | ||
1078 | |||
1079 | /* | ||
1080 | * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire | ||
1081 | */ | ||
1082 | static void qlt_send_notify_ack(struct scsi_qla_host *vha, | ||
1083 | struct imm_ntfy_from_isp *ntfy, | ||
1084 | uint32_t add_flags, uint16_t resp_code, int resp_code_valid, | ||
1085 | uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan) | ||
1086 | { | ||
1087 | struct qla_hw_data *ha = vha->hw; | ||
1088 | request_t *pkt; | ||
1089 | struct nack_to_isp *nack; | ||
1090 | |||
1091 | ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha); | ||
1092 | |||
1093 | /* Send marker if required */ | ||
1094 | if (qlt_issue_marker(vha, 1) != QLA_SUCCESS) | ||
1095 | return; | ||
1096 | |||
1097 | pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); | ||
1098 | if (!pkt) { | ||
1099 | ql_dbg(ql_dbg_tgt, vha, 0xe049, | ||
1100 | "qla_target(%d): %s failed: unable to allocate " | ||
1101 | "request packet\n", vha->vp_idx, __func__); | ||
1102 | return; | ||
1103 | } | ||
1104 | |||
1105 | if (ha->tgt.qla_tgt != NULL) | ||
1106 | ha->tgt.qla_tgt->notify_ack_expected++; | ||
1107 | |||
1108 | pkt->entry_type = NOTIFY_ACK_TYPE; | ||
1109 | pkt->entry_count = 1; | ||
1110 | |||
1111 | nack = (struct nack_to_isp *)pkt; | ||
1112 | nack->ox_id = ntfy->ox_id; | ||
1113 | |||
1114 | nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; | ||
1115 | if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { | ||
1116 | nack->u.isp24.flags = ntfy->u.isp24.flags & | ||
1117 | __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); | ||
1118 | } | ||
1119 | nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; | ||
1120 | nack->u.isp24.status = ntfy->u.isp24.status; | ||
1121 | nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; | ||
1122 | nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; | ||
1123 | nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; | ||
1124 | nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; | ||
1125 | nack->u.isp24.srr_flags = cpu_to_le16(srr_flags); | ||
1126 | nack->u.isp24.srr_reject_code = srr_reject_code; | ||
1127 | nack->u.isp24.srr_reject_code_expl = srr_explan; | ||
1128 | nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; | ||
1129 | |||
1130 | ql_dbg(ql_dbg_tgt, vha, 0xe005, | ||
1131 | "qla_target(%d): Sending 24xx Notify Ack %d\n", | ||
1132 | vha->vp_idx, nack->u.isp24.status); | ||
1133 | |||
1134 | qla2x00_start_iocbs(vha, vha->req); | ||
1135 | } | ||
1136 | |||
1137 | /* | ||
1138 | * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire | ||
1139 | */ | ||
1140 | static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha, | ||
1141 | struct abts_recv_from_24xx *abts, uint32_t status, | ||
1142 | bool ids_reversed) | ||
1143 | { | ||
1144 | struct qla_hw_data *ha = vha->hw; | ||
1145 | struct abts_resp_to_24xx *resp; | ||
1146 | uint32_t f_ctl; | ||
1147 | uint8_t *p; | ||
1148 | |||
1149 | ql_dbg(ql_dbg_tgt, vha, 0xe006, | ||
1150 | "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n", | ||
1151 | ha, abts, status); | ||
1152 | |||
1153 | /* Send marker if required */ | ||
1154 | if (qlt_issue_marker(vha, 1) != QLA_SUCCESS) | ||
1155 | return; | ||
1156 | |||
1157 | resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); | ||
1158 | if (!resp) { | ||
1159 | ql_dbg(ql_dbg_tgt, vha, 0xe04a, | ||
1160 | "qla_target(%d): %s failed: unable to allocate " | ||
1161 | "request packet", vha->vp_idx, __func__); | ||
1162 | return; | ||
1163 | } | ||
1164 | |||
1165 | resp->entry_type = ABTS_RESP_24XX; | ||
1166 | resp->entry_count = 1; | ||
1167 | resp->nport_handle = abts->nport_handle; | ||
1168 | resp->vp_index = vha->vp_idx; | ||
1169 | resp->sof_type = abts->sof_type; | ||
1170 | resp->exchange_address = abts->exchange_address; | ||
1171 | resp->fcp_hdr_le = abts->fcp_hdr_le; | ||
1172 | f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP | | ||
1173 | F_CTL_LAST_SEQ | F_CTL_END_SEQ | | ||
1174 | F_CTL_SEQ_INITIATIVE); | ||
1175 | p = (uint8_t *)&f_ctl; | ||
1176 | resp->fcp_hdr_le.f_ctl[0] = *p++; | ||
1177 | resp->fcp_hdr_le.f_ctl[1] = *p++; | ||
1178 | resp->fcp_hdr_le.f_ctl[2] = *p; | ||
1179 | if (ids_reversed) { | ||
1180 | resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0]; | ||
1181 | resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1]; | ||
1182 | resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2]; | ||
1183 | resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0]; | ||
1184 | resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1]; | ||
1185 | resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2]; | ||
1186 | } else { | ||
1187 | resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0]; | ||
1188 | resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1]; | ||
1189 | resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2]; | ||
1190 | resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0]; | ||
1191 | resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1]; | ||
1192 | resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2]; | ||
1193 | } | ||
1194 | resp->exchange_addr_to_abort = abts->exchange_addr_to_abort; | ||
1195 | if (status == FCP_TMF_CMPL) { | ||
1196 | resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC; | ||
1197 | resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID; | ||
1198 | resp->payload.ba_acct.low_seq_cnt = 0x0000; | ||
1199 | resp->payload.ba_acct.high_seq_cnt = 0xFFFF; | ||
1200 | resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id; | ||
1201 | resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id; | ||
1202 | } else { | ||
1203 | resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT; | ||
1204 | resp->payload.ba_rjt.reason_code = | ||
1205 | BA_RJT_REASON_CODE_UNABLE_TO_PERFORM; | ||
1206 | /* Other bytes are zero */ | ||
1207 | } | ||
1208 | |||
1209 | ha->tgt.qla_tgt->abts_resp_expected++; | ||
1210 | |||
1211 | qla2x00_start_iocbs(vha, vha->req); | ||
1212 | } | ||
1213 | |||
1214 | /* | ||
1215 | * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire | ||
1216 | */ | ||
1217 | static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, | ||
1218 | struct abts_resp_from_24xx_fw *entry) | ||
1219 | { | ||
1220 | struct ctio7_to_24xx *ctio; | ||
1221 | |||
1222 | ql_dbg(ql_dbg_tgt, vha, 0xe007, | ||
1223 | "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw); | ||
1224 | /* Send marker if required */ | ||
1225 | if (qlt_issue_marker(vha, 1) != QLA_SUCCESS) | ||
1226 | return; | ||
1227 | |||
1228 | ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); | ||
1229 | if (ctio == NULL) { | ||
1230 | ql_dbg(ql_dbg_tgt, vha, 0xe04b, | ||
1231 | "qla_target(%d): %s failed: unable to allocate " | ||
1232 | "request packet\n", vha->vp_idx, __func__); | ||
1233 | return; | ||
1234 | } | ||
1235 | |||
1236 | /* | ||
1237 | * On entry this is the firmware's response to the ABTS response | ||
1238 | * that we generated, so its ID fields are reversed. | ||
1239 | */ | ||
1240 | |||
1241 | ctio->entry_type = CTIO_TYPE7; | ||
1242 | ctio->entry_count = 1; | ||
1243 | ctio->nport_handle = entry->nport_handle; | ||
1244 | ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; | ||
1245 | ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); | ||
1246 | ctio->vp_index = vha->vp_idx; | ||
1247 | ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0]; | ||
1248 | ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1]; | ||
1249 | ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2]; | ||
1250 | ctio->exchange_addr = entry->exchange_addr_to_abort; | ||
1251 | ctio->u.status1.flags = | ||
1252 | __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | | ||
1253 | CTIO7_FLAGS_TERMINATE); | ||
1254 | ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id; | ||
1255 | |||
1256 | qla2x00_start_iocbs(vha, vha->req); | ||
1257 | |||
1258 | qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry, | ||
1259 | FCP_TMF_CMPL, true); | ||
1260 | } | ||
1261 | |||
1262 | /* ha->hardware_lock supposed to be held on entry */ | ||
1263 | static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, | ||
1264 | struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess) | ||
1265 | { | ||
1266 | struct qla_hw_data *ha = vha->hw; | ||
1267 | struct se_session *se_sess = sess->se_sess; | ||
1268 | struct qla_tgt_mgmt_cmd *mcmd; | ||
1269 | struct se_cmd *se_cmd; | ||
1270 | u32 lun = 0; | ||
1271 | int rc; | ||
1272 | bool found_lun = false; | ||
1273 | |||
1274 | spin_lock(&se_sess->sess_cmd_lock); | ||
1275 | list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { | ||
1276 | struct qla_tgt_cmd *cmd = | ||
1277 | container_of(se_cmd, struct qla_tgt_cmd, se_cmd); | ||
1278 | if (cmd->tag == abts->exchange_addr_to_abort) { | ||
1279 | lun = cmd->unpacked_lun; | ||
1280 | found_lun = true; | ||
1281 | break; | ||
1282 | } | ||
1283 | } | ||
1284 | spin_unlock(&se_sess->sess_cmd_lock); | ||
1285 | |||
1286 | if (!found_lun) | ||
1287 | return -ENOENT; | ||
1288 | |||
1289 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, | ||
1290 | "qla_target(%d): task abort (tag=%d)\n", | ||
1291 | vha->vp_idx, abts->exchange_addr_to_abort); | ||
1292 | |||
1293 | mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); | ||
1294 | if (mcmd == NULL) { | ||
1295 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051, | ||
1296 | "qla_target(%d): %s: Allocation of ABORT cmd failed", | ||
1297 | vha->vp_idx, __func__); | ||
1298 | return -ENOMEM; | ||
1299 | } | ||
1300 | memset(mcmd, 0, sizeof(*mcmd)); | ||
1301 | |||
1302 | mcmd->sess = sess; | ||
1303 | memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); | ||
1304 | |||
1305 | rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK, | ||
1306 | abts->exchange_addr_to_abort); | ||
1307 | if (rc != 0) { | ||
1308 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052, | ||
1309 | "qla_target(%d): tgt_ops->handle_tmr()" | ||
1310 | " failed: %d", vha->vp_idx, rc); | ||
1311 | mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); | ||
1312 | return -EFAULT; | ||
1313 | } | ||
1314 | |||
1315 | return 0; | ||
1316 | } | ||
1317 | |||
1318 | /* | ||
1319 | * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire | ||
1320 | */ | ||
1321 | static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, | ||
1322 | struct abts_recv_from_24xx *abts) | ||
1323 | { | ||
1324 | struct qla_hw_data *ha = vha->hw; | ||
1325 | struct qla_tgt_sess *sess; | ||
1326 | uint32_t tag = abts->exchange_addr_to_abort; | ||
1327 | uint8_t s_id[3]; | ||
1328 | int rc; | ||
1329 | |||
1330 | if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) { | ||
1331 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053, | ||
1332 | "qla_target(%d): ABTS: Abort Sequence not " | ||
1333 | "supported\n", vha->vp_idx); | ||
1334 | qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); | ||
1335 | return; | ||
1336 | } | ||
1337 | |||
1338 | if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) { | ||
1339 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010, | ||
1340 | "qla_target(%d): ABTS: Unknown Exchange " | ||
1341 | "Address received\n", vha->vp_idx); | ||
1342 | qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); | ||
1343 | return; | ||
1344 | } | ||
1345 | |||
1346 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011, | ||
1347 | "qla_target(%d): task abort (s_id=%x:%x:%x, " | ||
1348 | "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2], | ||
1349 | abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag, | ||
1350 | le32_to_cpu(abts->fcp_hdr_le.parameter)); | ||
1351 | |||
1352 | s_id[0] = abts->fcp_hdr_le.s_id[2]; | ||
1353 | s_id[1] = abts->fcp_hdr_le.s_id[1]; | ||
1354 | s_id[2] = abts->fcp_hdr_le.s_id[0]; | ||
1355 | |||
1356 | sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); | ||
1357 | if (!sess) { | ||
1358 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012, | ||
1359 | "qla_target(%d): task abort for non-existent session\n", | ||
1360 | vha->vp_idx); | ||
1361 | rc = qlt_sched_sess_work(ha->tgt.qla_tgt, | ||
1362 | QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts)); | ||
1363 | if (rc != 0) { | ||
1364 | qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, | ||
1365 | false); | ||
1366 | } | ||
1367 | return; | ||
1368 | } | ||
1369 | |||
1370 | rc = __qlt_24xx_handle_abts(vha, abts, sess); | ||
1371 | if (rc != 0) { | ||
1372 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, | ||
1373 | "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n", | ||
1374 | vha->vp_idx, rc); | ||
1375 | qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); | ||
1376 | return; | ||
1377 | } | ||
1378 | } | ||
1379 | |||
1380 | /* | ||
1381 | * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire | ||
1382 | */ | ||
1383 | static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, | ||
1384 | struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code) | ||
1385 | { | ||
1386 | struct atio_from_isp *atio = &mcmd->orig_iocb.atio; | ||
1387 | struct ctio7_to_24xx *ctio; | ||
1388 | |||
1389 | ql_dbg(ql_dbg_tgt, ha, 0xe008, | ||
1390 | "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n", | ||
1391 | ha, atio, resp_code); | ||
1392 | |||
1393 | /* Send marker if required */ | ||
1394 | if (qlt_issue_marker(ha, 1) != QLA_SUCCESS) | ||
1395 | return; | ||
1396 | |||
1397 | ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL); | ||
1398 | if (ctio == NULL) { | ||
1399 | ql_dbg(ql_dbg_tgt, ha, 0xe04c, | ||
1400 | "qla_target(%d): %s failed: unable to allocate " | ||
1401 | "request packet\n", ha->vp_idx, __func__); | ||
1402 | return; | ||
1403 | } | ||
1404 | |||
1405 | ctio->entry_type = CTIO_TYPE7; | ||
1406 | ctio->entry_count = 1; | ||
1407 | ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; | ||
1408 | ctio->nport_handle = mcmd->sess->loop_id; | ||
1409 | ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); | ||
1410 | ctio->vp_index = ha->vp_idx; | ||
1411 | ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; | ||
1412 | ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; | ||
1413 | ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; | ||
1414 | ctio->exchange_addr = atio->u.isp24.exchange_addr; | ||
1415 | ctio->u.status1.flags = (atio->u.isp24.attr << 9) | | ||
1416 | __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | | ||
1417 | CTIO7_FLAGS_SEND_STATUS); | ||
1418 | ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); | ||
1419 | ctio->u.status1.scsi_status = | ||
1420 | __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); | ||
1421 | ctio->u.status1.response_len = __constant_cpu_to_le16(8); | ||
1422 | ctio->u.status1.sense_data[0] = resp_code; | ||
1423 | |||
1424 | qla2x00_start_iocbs(ha, ha->req); | ||
1425 | } | ||
1426 | |||
1427 | void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) | ||
1428 | { | ||
1429 | mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); | ||
1430 | } | ||
1431 | EXPORT_SYMBOL(qlt_free_mcmd); | ||
1432 | |||
1433 | /* callback from target fabric module code */ | ||
1434 | void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) | ||
1435 | { | ||
1436 | struct scsi_qla_host *vha = mcmd->sess->vha; | ||
1437 | struct qla_hw_data *ha = vha->hw; | ||
1438 | unsigned long flags; | ||
1439 | |||
1440 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013, | ||
1441 | "TM response mcmd (%p) status %#x state %#x", | ||
1442 | mcmd, mcmd->fc_tm_rsp, mcmd->flags); | ||
1443 | |||
1444 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
1445 | if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) | ||
1446 | qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy, | ||
1447 | 0, 0, 0, 0, 0, 0); | ||
1448 | else { | ||
1449 | if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK) | ||
1450 | qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts, | ||
1451 | mcmd->fc_tm_rsp, false); | ||
1452 | else | ||
1453 | qlt_24xx_send_task_mgmt_ctio(vha, mcmd, | ||
1454 | mcmd->fc_tm_rsp); | ||
1455 | } | ||
1456 | /* | ||
1457 | * Call ->free_mcmd() so that it can queue_work() and invoke | ||
1458 | * target_put_sess_cmd() to drop cmd_kref to 1. The final | ||
1459 | * target_put_sess_cmd() call will be made from TFO->check_stop_free() | ||
1460 | * -> tcm_qla2xxx_check_stop_free() to release the TMR-associated se_cmd | ||
1461 | * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() -> | ||
1462 | * qlt_xmit_tm_rsp() returns here. | ||
1463 | */ | ||
1464 | ha->tgt.tgt_ops->free_mcmd(mcmd); | ||
1465 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
1466 | } | ||
1467 | EXPORT_SYMBOL(qlt_xmit_tm_rsp); | ||
1468 | |||
1469 | /* No locks */ | ||
1470 | static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) | ||
1471 | { | ||
1472 | struct qla_tgt_cmd *cmd = prm->cmd; | ||
1473 | |||
1474 | BUG_ON(cmd->sg_cnt == 0); | ||
1475 | |||
1476 | prm->sg = (struct scatterlist *)cmd->sg; | ||
1477 | prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg, | ||
1478 | cmd->sg_cnt, cmd->dma_data_direction); | ||
1479 | if (unlikely(prm->seg_cnt == 0)) | ||
1480 | goto out_err; | ||
1481 | |||
1482 | prm->cmd->sg_mapped = 1; | ||
1483 | |||
1484 | /* | ||
1485 | * If there are more sg entries than the command IOCB can carry, | ||
1486 | * we need to allocate continuation entries | ||
1487 | */ | ||
1488 | if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) | ||
1489 | prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - | ||
1490 | prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont); | ||
1491 | |||
1492 | ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n", | ||
1493 | prm->seg_cnt, prm->req_cnt); | ||
1494 | return 0; | ||
1495 | |||
1496 | out_err: | ||
1497 | ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d, | ||
1498 | "qla_target(%d): PCI mapping failed: sg_cnt=%d", | ||
1499 | 0, prm->cmd->sg_cnt); | ||
1500 | return -1; | ||
1501 | } | ||
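/*
 * A minimal sketch of the request-entry arithmetic above, with hypothetical
 * capacities standing in for tgt->datasegs_per_cmd and datasegs_per_cont:
 * the command IOCB itself carries up to segs_per_cmd data segments, and each
 * extra continuation IOCB carries up to segs_per_cont more, so the leftover
 * segments are covered with a round-up division.
 */
static unsigned int toy_req_cnt(unsigned int seg_cnt,
	unsigned int segs_per_cmd, unsigned int segs_per_cont)
{
	unsigned int req_cnt = 1;	/* the command IOCB itself */

	if (seg_cnt > segs_per_cmd)	/* leftover segments need continuations */
		req_cnt += (seg_cnt - segs_per_cmd + segs_per_cont - 1) /
			   segs_per_cont;

	return req_cnt;
}

/* e.g. seg_cnt=7, segs_per_cmd=1, segs_per_cont=5 -> 1 + ceil(6/5) = 3 IOCBs */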
1502 | |||
1503 | static inline void qlt_unmap_sg(struct scsi_qla_host *vha, | ||
1504 | struct qla_tgt_cmd *cmd) | ||
1505 | { | ||
1506 | struct qla_hw_data *ha = vha->hw; | ||
1507 | |||
1508 | BUG_ON(!cmd->sg_mapped); | ||
1509 | pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); | ||
1510 | cmd->sg_mapped = 0; | ||
1511 | } | ||
1512 | |||
1513 | static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, | ||
1514 | uint32_t req_cnt) | ||
1515 | { | ||
1516 | struct qla_hw_data *ha = vha->hw; | ||
1517 | device_reg_t __iomem *reg = ha->iobase; | ||
1518 | uint32_t cnt; | ||
1519 | |||
1520 | if (vha->req->cnt < (req_cnt + 2)) { | ||
1521 | cnt = (uint16_t)RD_REG_DWORD(®->isp24.req_q_out); | ||
1522 | |||
1523 | ql_dbg(ql_dbg_tgt, vha, 0xe00a, | ||
1524 | "Request ring circled: cnt=%d, vha->req->ring_index=%d, " | ||
1525 | "vha->req->cnt=%d, req_cnt=%d\n", cnt, | ||
1526 | vha->req->ring_index, vha->req->cnt, req_cnt); | ||
1527 | if (vha->req->ring_index < cnt) | ||
1528 | vha->req->cnt = cnt - vha->req->ring_index; | ||
1529 | else | ||
1530 | vha->req->cnt = vha->req->length - | ||
1531 | (vha->req->ring_index - cnt); | ||
1532 | } | ||
1533 | |||
1534 | if (unlikely(vha->req->cnt < (req_cnt + 2))) { | ||
1535 | ql_dbg(ql_dbg_tgt, vha, 0xe00b, | ||
1536 | "qla_target(%d): There is no room in the " | ||
1537 | "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, " | ||
1538 | "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index, | ||
1539 | vha->req->cnt, req_cnt); | ||
1540 | return -EAGAIN; | ||
1541 | } | ||
1542 | vha->req->cnt -= req_cnt; | ||
1543 | |||
1544 | return 0; | ||
1545 | } | ||
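/*
 * A standalone sketch of the free-count refresh above, assuming a simplified
 * ring (the fields below are hypothetical stand-ins for the request-queue
 * members the driver uses): when the driver's producer index is behind the
 * hardware's consumer index, the free space is the gap between them;
 * otherwise it is what remains to the end of the ring plus the consumed part.
 */
struct toy_ring {
	unsigned int ring_index;	/* next entry the driver will write */
	unsigned int length;		/* total number of entries in the ring */
	unsigned int cnt;		/* cached count of free entries */
};

static void toy_ring_refresh_free(struct toy_ring *r, unsigned int hw_out)
{
	if (r->ring_index < hw_out)
		r->cnt = hw_out - r->ring_index;
	else
		r->cnt = r->length - (r->ring_index - hw_out);
}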
1546 | |||
1547 | /* | ||
1548 | * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire | ||
1549 | */ | ||
1550 | static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha) | ||
1551 | { | ||
1552 | /* Adjust ring index. */ | ||
1553 | vha->req->ring_index++; | ||
1554 | if (vha->req->ring_index == vha->req->length) { | ||
1555 | vha->req->ring_index = 0; | ||
1556 | vha->req->ring_ptr = vha->req->ring; | ||
1557 | } else { | ||
1558 | vha->req->ring_ptr++; | ||
1559 | } | ||
1560 | return (cont_entry_t *)vha->req->ring_ptr; | ||
1561 | } | ||
1562 | |||
1563 | /* ha->hardware_lock supposed to be held on entry */ | ||
1564 | static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha) | ||
1565 | { | ||
1566 | struct qla_hw_data *ha = vha->hw; | ||
1567 | uint32_t h; | ||
1568 | |||
1569 | h = ha->tgt.current_handle; | ||
1570 | /* always increment cmd handle */ | ||
1571 | do { | ||
1572 | ++h; | ||
1573 | if (h > MAX_OUTSTANDING_COMMANDS) | ||
1574 | h = 1; /* 0 is QLA_TGT_NULL_HANDLE */ | ||
1575 | if (h == ha->tgt.current_handle) { | ||
1576 | ql_dbg(ql_dbg_tgt, vha, 0xe04e, | ||
1577 | "qla_target(%d): Ran out of " | ||
1578 | "empty cmd slots in ha %p\n", vha->vp_idx, ha); | ||
1579 | h = QLA_TGT_NULL_HANDLE; | ||
1580 | break; | ||
1581 | } | ||
1582 | } while ((h == QLA_TGT_NULL_HANDLE) || | ||
1583 | (h == QLA_TGT_SKIP_HANDLE) || | ||
1584 | (ha->tgt.cmds[h-1] != NULL)); | ||
1585 | |||
1586 | if (h != QLA_TGT_NULL_HANDLE) | ||
1587 | ha->tgt.current_handle = h; | ||
1588 | |||
1589 | return h; | ||
1590 | } | ||
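/*
 * A reduced sketch of the circular handle search above, using a plain array
 * as a stand-in for ha->tgt.cmds[] and a hypothetical table size; handle 0
 * stays reserved as the "null" handle, and the scan is bounded so it simply
 * reports failure once every slot has been tried (the driver achieves the
 * same by comparing against the handle it started from).
 */
#define TOY_MAX_HANDLES		256
#define TOY_NULL_HANDLE		0

static void *toy_cmds[TOY_MAX_HANDLES];
static unsigned int toy_current_handle;

static unsigned int toy_make_handle(void)
{
	unsigned int h = toy_current_handle;
	unsigned int tries;

	for (tries = 0; tries < TOY_MAX_HANDLES; tries++) {
		if (++h > TOY_MAX_HANDLES)
			h = 1;			/* wrap; 0 is never handed out */
		if (toy_cmds[h - 1] == NULL) {	/* free slot found */
			toy_current_handle = h;
			return h;
		}
	}
	return TOY_NULL_HANDLE;			/* every slot is in use */
}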
1591 | |||
1592 | /* ha->hardware_lock supposed to be held on entry */ | ||
1593 | static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, | ||
1594 | struct scsi_qla_host *vha) | ||
1595 | { | ||
1596 | uint32_t h; | ||
1597 | struct ctio7_to_24xx *pkt; | ||
1598 | struct qla_hw_data *ha = vha->hw; | ||
1599 | struct atio_from_isp *atio = &prm->cmd->atio; | ||
1600 | |||
1601 | pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr; | ||
1602 | prm->pkt = pkt; | ||
1603 | memset(pkt, 0, sizeof(*pkt)); | ||
1604 | |||
1605 | pkt->entry_type = CTIO_TYPE7; | ||
1606 | pkt->entry_count = (uint8_t)prm->req_cnt; | ||
1607 | pkt->vp_index = vha->vp_idx; | ||
1608 | |||
1609 | h = qlt_make_handle(vha); | ||
1610 | if (unlikely(h == QLA_TGT_NULL_HANDLE)) { | ||
1611 | /* | ||
1612 | * CTIO type 7 from the firmware doesn't provide a way to | ||
1613 | * know the initiator's LOOP ID, hence we can't find | ||
1614 | * the session and, therefore, the command. | ||
1615 | */ | ||
1616 | return -EAGAIN; | ||
1617 | } else | ||
1618 | ha->tgt.cmds[h-1] = prm->cmd; | ||
1619 | |||
1620 | pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; | ||
1621 | pkt->nport_handle = prm->cmd->loop_id; | ||
1622 | pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); | ||
1623 | pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; | ||
1624 | pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; | ||
1625 | pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; | ||
1626 | pkt->exchange_addr = atio->u.isp24.exchange_addr; | ||
1627 | pkt->u.status0.flags |= (atio->u.isp24.attr << 9); | ||
1628 | pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); | ||
1629 | pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); | ||
1630 | |||
1631 | ql_dbg(ql_dbg_tgt, vha, 0xe00c, | ||
1632 | "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n", | ||
1633 | vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, | ||
1634 | le16_to_cpu(pkt->u.status0.ox_id)); | ||
1635 | return 0; | ||
1636 | } | ||
1637 | |||
1638 | /* | ||
1639 | * ha->hardware_lock supposed to be held on entry. We have already made sure | ||
1640 | * that there are enough request entries available that we need not drop it. | ||
1641 | */ | ||
1642 | static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm, | ||
1643 | struct scsi_qla_host *vha) | ||
1644 | { | ||
1645 | int cnt; | ||
1646 | uint32_t *dword_ptr; | ||
1647 | int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr; | ||
1648 | |||
1649 | /* Build continuation packets */ | ||
1650 | while (prm->seg_cnt > 0) { | ||
1651 | cont_a64_entry_t *cont_pkt64 = | ||
1652 | (cont_a64_entry_t *)qlt_get_req_pkt(vha); | ||
1653 | |||
1654 | /* | ||
1655 | * Make sure that none of cont_pkt64's | ||
1656 | * 64-bit specific fields are used for | ||
1657 | * 32-bit addressing. Cast to | ||
1658 | * (cont_entry_t *) for that. | ||
1659 | */ | ||
1660 | |||
1661 | memset(cont_pkt64, 0, sizeof(*cont_pkt64)); | ||
1662 | |||
1663 | cont_pkt64->entry_count = 1; | ||
1664 | cont_pkt64->sys_define = 0; | ||
1665 | |||
1666 | if (enable_64bit_addressing) { | ||
1667 | cont_pkt64->entry_type = CONTINUE_A64_TYPE; | ||
1668 | dword_ptr = | ||
1669 | (uint32_t *)&cont_pkt64->dseg_0_address; | ||
1670 | } else { | ||
1671 | cont_pkt64->entry_type = CONTINUE_TYPE; | ||
1672 | dword_ptr = | ||
1673 | (uint32_t *)&((cont_entry_t *) | ||
1674 | cont_pkt64)->dseg_0_address; | ||
1675 | } | ||
1676 | |||
1677 | /* Load continuation entry data segments */ | ||
1678 | for (cnt = 0; | ||
1679 | cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt; | ||
1680 | cnt++, prm->seg_cnt--) { | ||
1681 | *dword_ptr++ = | ||
1682 | cpu_to_le32(pci_dma_lo32 | ||
1683 | (sg_dma_address(prm->sg))); | ||
1684 | if (enable_64bit_addressing) { | ||
1685 | *dword_ptr++ = | ||
1686 | cpu_to_le32(pci_dma_hi32 | ||
1687 | (sg_dma_address | ||
1688 | (prm->sg))); | ||
1689 | } | ||
1690 | *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); | ||
1691 | |||
1692 | ql_dbg(ql_dbg_tgt, vha, 0xe00d, | ||
1693 | "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n", | ||
1694 | (long long unsigned int) | ||
1695 | pci_dma_hi32(sg_dma_address(prm->sg)), | ||
1696 | (long long unsigned int) | ||
1697 | pci_dma_lo32(sg_dma_address(prm->sg)), | ||
1698 | (int)sg_dma_len(prm->sg)); | ||
1699 | |||
1700 | prm->sg = sg_next(prm->sg); | ||
1701 | } | ||
1702 | } | ||
1703 | } | ||
1704 | |||
1705 | /* | ||
1706 | * ha->hardware_lock supposed to be held on entry. We have already made sure | ||
1707 | * that there are enough request entries available that we need not drop it. | ||
1708 | */ | ||
1709 | static void qlt_load_data_segments(struct qla_tgt_prm *prm, | ||
1710 | struct scsi_qla_host *vha) | ||
1711 | { | ||
1712 | int cnt; | ||
1713 | uint32_t *dword_ptr; | ||
1714 | int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr; | ||
1715 | struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt; | ||
1716 | |||
1717 | ql_dbg(ql_dbg_tgt, vha, 0xe00e, | ||
1718 | "iocb->scsi_status=%x, iocb->flags=%x\n", | ||
1719 | le16_to_cpu(pkt24->u.status0.scsi_status), | ||
1720 | le16_to_cpu(pkt24->u.status0.flags)); | ||
1721 | |||
1722 | pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen); | ||
1723 | |||
1724 | /* Setup packet address segment pointer */ | ||
1725 | dword_ptr = pkt24->u.status0.dseg_0_address; | ||
1726 | |||
1727 | /* Set total data segment count */ | ||
1728 | if (prm->seg_cnt) | ||
1729 | pkt24->dseg_count = cpu_to_le16(prm->seg_cnt); | ||
1730 | |||
1731 | if (prm->seg_cnt == 0) { | ||
1732 | /* No data transfer */ | ||
1733 | *dword_ptr++ = 0; | ||
1734 | *dword_ptr = 0; | ||
1735 | return; | ||
1736 | } | ||
1737 | |||
1738 | /* If scatter gather */ | ||
1739 | ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments..."); | ||
1740 | |||
1741 | /* Load command entry data segments */ | ||
1742 | for (cnt = 0; | ||
1743 | (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt; | ||
1744 | cnt++, prm->seg_cnt--) { | ||
1745 | *dword_ptr++ = | ||
1746 | cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg))); | ||
1747 | if (enable_64bit_addressing) { | ||
1748 | *dword_ptr++ = | ||
1749 | cpu_to_le32(pci_dma_hi32( | ||
1750 | sg_dma_address(prm->sg))); | ||
1751 | } | ||
1752 | *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); | ||
1753 | |||
1754 | ql_dbg(ql_dbg_tgt, vha, 0xe010, | ||
1755 | "S/G Segment phys_addr=%llx:%llx, len=%d\n", | ||
1756 | (long long unsigned int)pci_dma_hi32(sg_dma_address( | ||
1757 | prm->sg)), | ||
1758 | (long long unsigned int)pci_dma_lo32(sg_dma_address( | ||
1759 | prm->sg)), | ||
1760 | (int)sg_dma_len(prm->sg)); | ||
1761 | |||
1762 | prm->sg = sg_next(prm->sg); | ||
1763 | } | ||
1764 | |||
1765 | qlt_load_cont_data_segments(prm, vha); | ||
1766 | } | ||
1767 | |||
1768 | static inline int qlt_has_data(struct qla_tgt_cmd *cmd) | ||
1769 | { | ||
1770 | return cmd->bufflen > 0; | ||
1771 | } | ||
1772 | |||
1773 | /* | ||
1774 | * Called without ha->hardware_lock held | ||
1775 | */ | ||
1776 | static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, | ||
1777 | struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status, | ||
1778 | uint32_t *full_req_cnt) | ||
1779 | { | ||
1780 | struct qla_tgt *tgt = cmd->tgt; | ||
1781 | struct scsi_qla_host *vha = tgt->vha; | ||
1782 | struct qla_hw_data *ha = vha->hw; | ||
1783 | struct se_cmd *se_cmd = &cmd->se_cmd; | ||
1784 | |||
1785 | if (unlikely(cmd->aborted)) { | ||
1786 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, | ||
1787 | "qla_target(%d): terminating exchange " | ||
1788 | "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd, | ||
1789 | se_cmd, cmd->tag); | ||
1790 | |||
1791 | cmd->state = QLA_TGT_STATE_ABORTED; | ||
1792 | |||
1793 | qlt_send_term_exchange(vha, cmd, &cmd->atio, 0); | ||
1794 | |||
1795 | /* !! At this point cmd could be already freed !! */ | ||
1796 | return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED; | ||
1797 | } | ||
1798 | |||
1799 | ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n", | ||
1800 | vha->vp_idx, cmd->tag); | ||
1801 | |||
1802 | prm->cmd = cmd; | ||
1803 | prm->tgt = tgt; | ||
1804 | prm->rq_result = scsi_status; | ||
1805 | prm->sense_buffer = &cmd->sense_buffer[0]; | ||
1806 | prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER; | ||
1807 | prm->sg = NULL; | ||
1808 | prm->seg_cnt = -1; | ||
1809 | prm->req_cnt = 1; | ||
1810 | prm->add_status_pkt = 0; | ||
1811 | |||
1812 | ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n", | ||
1813 | prm->rq_result, xmit_type); | ||
1814 | |||
1815 | /* Send marker if required */ | ||
1816 | if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) | ||
1817 | return -EFAULT; | ||
1818 | |||
1819 | ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx); | ||
1820 | |||
1821 | if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) { | ||
1822 | if (qlt_pci_map_calc_cnt(prm) != 0) | ||
1823 | return -EAGAIN; | ||
1824 | } | ||
1825 | |||
1826 | *full_req_cnt = prm->req_cnt; | ||
1827 | |||
1828 | if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { | ||
1829 | prm->residual = se_cmd->residual_count; | ||
1830 | ql_dbg(ql_dbg_tgt, vha, 0xe014, | ||
1831 | "Residual underflow: %d (tag %d, " | ||
1832 | "op %x, bufflen %d, rq_result %x)\n", prm->residual, | ||
1833 | cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, | ||
1834 | cmd->bufflen, prm->rq_result); | ||
1835 | prm->rq_result |= SS_RESIDUAL_UNDER; | ||
1836 | } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { | ||
1837 | prm->residual = se_cmd->residual_count; | ||
1838 | ql_dbg(ql_dbg_tgt, vha, 0xe015, | ||
1839 | "Residual overflow: %d (tag %d, " | ||
1840 | "op %x, bufflen %d, rq_result %x)\n", prm->residual, | ||
1841 | cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, | ||
1842 | cmd->bufflen, prm->rq_result); | ||
1843 | prm->rq_result |= SS_RESIDUAL_OVER; | ||
1844 | } | ||
1845 | |||
1846 | if (xmit_type & QLA_TGT_XMIT_STATUS) { | ||
1847 | /* | ||
1848 | * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be | ||
1849 | * ignored in *xmit_response() below | ||
1850 | */ | ||
1851 | if (qlt_has_data(cmd)) { | ||
1852 | if (QLA_TGT_SENSE_VALID(prm->sense_buffer) || | ||
1853 | (IS_FWI2_CAPABLE(ha) && | ||
1854 | (prm->rq_result != 0))) { | ||
1855 | prm->add_status_pkt = 1; | ||
1856 | (*full_req_cnt)++; | ||
1857 | } | ||
1858 | } | ||
1859 | } | ||
1860 | |||
1861 | ql_dbg(ql_dbg_tgt, vha, 0xe016, | ||
1862 | "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n", | ||
1863 | prm->req_cnt, *full_req_cnt, prm->add_status_pkt); | ||
1864 | |||
1865 | return 0; | ||
1866 | } | ||
1867 | |||
1868 | static inline int qlt_need_explicit_conf(struct qla_hw_data *ha, | ||
1869 | struct qla_tgt_cmd *cmd, int sending_sense) | ||
1870 | { | ||
1871 | if (ha->tgt.enable_class_2) | ||
1872 | return 0; | ||
1873 | |||
1874 | if (sending_sense) | ||
1875 | return cmd->conf_compl_supported; | ||
1876 | else | ||
1877 | return ha->tgt.enable_explicit_conf && | ||
1878 | cmd->conf_compl_supported; | ||
1879 | } | ||
1880 | |||
1881 | #ifdef CONFIG_QLA_TGT_DEBUG_SRR | ||
1882 | /* | ||
1883 | * Original taken from the XFS code | ||
1884 | */ | ||
1885 | static unsigned long qlt_srr_random(void) | ||
1886 | { | ||
1887 | static int Inited; | ||
1888 | static unsigned long RandomValue; | ||
1889 | static DEFINE_SPINLOCK(lock); | ||
1890 | /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */ | ||
1891 | register long rv; | ||
1892 | register long lo; | ||
1893 | register long hi; | ||
1894 | unsigned long flags; | ||
1895 | |||
1896 | spin_lock_irqsave(&lock, flags); | ||
1897 | if (!Inited) { | ||
1898 | RandomValue = jiffies; | ||
1899 | Inited = 1; | ||
1900 | } | ||
1901 | rv = RandomValue; | ||
1902 | hi = rv / 127773; | ||
1903 | lo = rv % 127773; | ||
1904 | rv = 16807 * lo - 2836 * hi; | ||
1905 | if (rv <= 0) | ||
1906 | rv += 2147483647; | ||
1907 | RandomValue = rv; | ||
1908 | spin_unlock_irqrestore(&lock, flags); | ||
1909 | return rv; | ||
1910 | } | ||
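/*
 * The debug-only generator above is the classic Park-Miller "minimal
 * standard" PRNG, x' = 16807 * x mod (2^31 - 1), evaluated with Schrage's
 * factorization so the intermediate products stay within a signed 32-bit
 * range: 127773 = (2^31 - 1) / 16807 and 2836 = (2^31 - 1) % 16807. A
 * lock-free sketch of the same recurrence, taking the state from the caller
 * instead of a jiffies-seeded static, could look like this:
 */
static long toy_park_miller(long *state)
{
	long hi = *state / 127773;
	long lo = *state % 127773;
	long x  = 16807 * lo - 2836 * hi;

	if (x <= 0)
		x += 2147483647;	/* fold back into 1 .. 2^31 - 2 */
	*state = x;
	return x;
}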
1911 | |||
1912 | static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) | ||
1913 | { | ||
1914 | #if 0 /* This is not a real lost status packet, so it won't lead to an SRR */ | ||
1915 | if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200) | ||
1916 | == 50) { | ||
1917 | *xmit_type &= ~QLA_TGT_XMIT_STATUS; | ||
1918 | ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015, | ||
1919 | "Dropping cmd %p (tag %d) status", cmd, cmd->tag); | ||
1920 | } | ||
1921 | #endif | ||
1922 | /* | ||
1923 | * It's currently not possible to simulate SRRs for FCP_WRITE without | ||
1924 | * a physical link layer failure, so don't even try here. | ||
1925 | */ | ||
1926 | if (cmd->dma_data_direction != DMA_FROM_DEVICE) | ||
1927 | return; | ||
1928 | |||
1929 | if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) && | ||
1930 | ((qlt_srr_random() % 100) == 20)) { | ||
1931 | int i, leave = 0; | ||
1932 | unsigned int tot_len = 0; | ||
1933 | |||
1934 | while (leave == 0) | ||
1935 | leave = qlt_srr_random() % cmd->sg_cnt; | ||
1936 | |||
1937 | for (i = 0; i < leave; i++) | ||
1938 | tot_len += cmd->sg[i].length; | ||
1939 | |||
1940 | ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016, | ||
1941 | "Cutting cmd %p (tag %d) buffer" | ||
1942 | " tail to len %d, sg_cnt %d (cmd->bufflen %d," | ||
1943 | " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave, | ||
1944 | cmd->bufflen, cmd->sg_cnt); | ||
1945 | |||
1946 | cmd->bufflen = tot_len; | ||
1947 | cmd->sg_cnt = leave; | ||
1948 | } | ||
1949 | |||
1950 | if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) { | ||
1951 | unsigned int offset = qlt_srr_random() % cmd->bufflen; | ||
1952 | |||
1953 | ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017, | ||
1954 | "Cutting cmd %p (tag %d) buffer head " | ||
1955 | "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset, | ||
1956 | cmd->bufflen); | ||
1957 | if (offset == 0) | ||
1958 | *xmit_type &= ~QLA_TGT_XMIT_DATA; | ||
1959 | else if (qlt_set_data_offset(cmd, offset)) { | ||
1960 | ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018, | ||
1961 | "qlt_set_data_offset() failed (tag %d)", cmd->tag); | ||
1962 | } | ||
1963 | } | ||
1964 | } | ||
1965 | #else | ||
1966 | static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) | ||
1967 | {} | ||
1968 | #endif | ||
1969 | |||
1970 | static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, | ||
1971 | struct qla_tgt_prm *prm) | ||
1972 | { | ||
1973 | prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len, | ||
1974 | (uint32_t)sizeof(ctio->u.status1.sense_data)); | ||
1975 | ctio->u.status0.flags |= | ||
1976 | __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS); | ||
1977 | if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) { | ||
1978 | ctio->u.status0.flags |= __constant_cpu_to_le16( | ||
1979 | CTIO7_FLAGS_EXPLICIT_CONFORM | | ||
1980 | CTIO7_FLAGS_CONFORM_REQ); | ||
1981 | } | ||
1982 | ctio->u.status0.residual = cpu_to_le32(prm->residual); | ||
1983 | ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result); | ||
1984 | if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) { | ||
1985 | int i; | ||
1986 | |||
1987 | if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) { | ||
1988 | if (prm->cmd->se_cmd.scsi_status != 0) { | ||
1989 | ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017, | ||
1990 | "Skipping EXPLICIT_CONFORM and " | ||
1991 | "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ " | ||
1992 | "non GOOD status\n"); | ||
1993 | goto skip_explict_conf; | ||
1994 | } | ||
1995 | ctio->u.status1.flags |= __constant_cpu_to_le16( | ||
1996 | CTIO7_FLAGS_EXPLICIT_CONFORM | | ||
1997 | CTIO7_FLAGS_CONFORM_REQ); | ||
1998 | } | ||
1999 | skip_explict_conf: | ||
2000 | ctio->u.status1.flags &= | ||
2001 | ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); | ||
2002 | ctio->u.status1.flags |= | ||
2003 | __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); | ||
2004 | ctio->u.status1.scsi_status |= | ||
2005 | __constant_cpu_to_le16(SS_SENSE_LEN_VALID); | ||
2006 | ctio->u.status1.sense_length = | ||
2007 | cpu_to_le16(prm->sense_buffer_len); | ||
2008 | for (i = 0; i < prm->sense_buffer_len/4; i++) | ||
2009 | ((uint32_t *)ctio->u.status1.sense_data)[i] = | ||
2010 | cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); | ||
2011 | #if 0 | ||
2012 | if (unlikely((prm->sense_buffer_len % 4) != 0)) { | ||
2013 | static int q; | ||
2014 | if (q < 10) { | ||
2015 | ql_dbg(ql_dbg_tgt, vha, 0xe04f, | ||
2016 | "qla_target(%d): %d bytes of sense " | ||
2017 | "lost", prm->tgt->ha->vp_idx, | ||
2018 | prm->sense_buffer_len % 4); | ||
2019 | q++; | ||
2020 | } | ||
2021 | } | ||
2022 | #endif | ||
2023 | } else { | ||
2024 | ctio->u.status1.flags &= | ||
2025 | ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); | ||
2026 | ctio->u.status1.flags |= | ||
2027 | __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); | ||
2028 | ctio->u.status1.sense_length = 0; | ||
2029 | memset(ctio->u.status1.sense_data, 0, | ||
2030 | sizeof(ctio->u.status1.sense_data)); | ||
2031 | } | ||
2032 | |||
2033 | /* Sense with len > 24, is it possible ??? */ | ||
2034 | } | ||
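/*
 * A hedged sketch of what the sense-data loop above does on a little-endian
 * host: each naturally aligned 4-byte group of the sense buffer is loaded as
 * a host-order word and stored back in big-endian order, i.e. the bytes are
 * swapped within every dword. The buffer and length below are hypothetical;
 * len is assumed to be a multiple of 4, as the driver's len/4 loop implies.
 */
static void toy_pack_be32(unsigned char *dst, const unsigned char *src,
	unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i += 4) {
		dst[i]     = src[i + 3];
		dst[i + 1] = src[i + 2];
		dst[i + 2] = src[i + 1];
		dst[i + 3] = src[i];	/* byte-swap each 4-byte group */
	}
}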
2035 | |||
2036 | /* | ||
2037 | * Callback to set up the response for xmit_type of QLA_TGT_XMIT_DATA and | ||
2038 | * QLA_TGT_XMIT_STATUS for >= 24xx silicon | ||
2039 | */ | ||
2040 | int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, | ||
2041 | uint8_t scsi_status) | ||
2042 | { | ||
2043 | struct scsi_qla_host *vha = cmd->vha; | ||
2044 | struct qla_hw_data *ha = vha->hw; | ||
2045 | struct ctio7_to_24xx *pkt; | ||
2046 | struct qla_tgt_prm prm; | ||
2047 | uint32_t full_req_cnt = 0; | ||
2048 | unsigned long flags = 0; | ||
2049 | int res; | ||
2050 | |||
2051 | memset(&prm, 0, sizeof(prm)); | ||
2052 | qlt_check_srr_debug(cmd, &xmit_type); | ||
2053 | |||
2054 | ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, | ||
2055 | "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, " | ||
2056 | "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ? | ||
2057 | 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction); | ||
2058 | |||
2059 | res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, | ||
2060 | &full_req_cnt); | ||
2061 | if (unlikely(res != 0)) { | ||
2062 | if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED) | ||
2063 | return 0; | ||
2064 | |||
2065 | return res; | ||
2066 | } | ||
2067 | |||
2068 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
2069 | |||
2070 | /* Does the F/W have enough free IOCB entries for this request? */ | ||
2071 | res = qlt_check_reserve_free_req(vha, full_req_cnt); | ||
2072 | if (unlikely(res)) | ||
2073 | goto out_unmap_unlock; | ||
2074 | |||
2075 | res = qlt_24xx_build_ctio_pkt(&prm, vha); | ||
2076 | if (unlikely(res != 0)) | ||
2077 | goto out_unmap_unlock; | ||
2078 | |||
2079 | |||
2080 | pkt = (struct ctio7_to_24xx *)prm.pkt; | ||
2081 | |||
2082 | if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) { | ||
2083 | pkt->u.status0.flags |= | ||
2084 | __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN | | ||
2085 | CTIO7_FLAGS_STATUS_MODE_0); | ||
2086 | |||
2087 | qlt_load_data_segments(&prm, vha); | ||
2088 | |||
2089 | if (prm.add_status_pkt == 0) { | ||
2090 | if (xmit_type & QLA_TGT_XMIT_STATUS) { | ||
2091 | pkt->u.status0.scsi_status = | ||
2092 | cpu_to_le16(prm.rq_result); | ||
2093 | pkt->u.status0.residual = | ||
2094 | cpu_to_le32(prm.residual); | ||
2095 | pkt->u.status0.flags |= __constant_cpu_to_le16( | ||
2096 | CTIO7_FLAGS_SEND_STATUS); | ||
2097 | if (qlt_need_explicit_conf(ha, cmd, 0)) { | ||
2098 | pkt->u.status0.flags |= | ||
2099 | __constant_cpu_to_le16( | ||
2100 | CTIO7_FLAGS_EXPLICIT_CONFORM | | ||
2101 | CTIO7_FLAGS_CONFORM_REQ); | ||
2102 | } | ||
2103 | } | ||
2104 | |||
2105 | } else { | ||
2106 | /* | ||
2107 | * We have already made sure that there are enough request | ||
2108 | * entries available that we will not drop the HW lock in | ||
2109 | * req_pkt(). | ||
2110 | */ | ||
2111 | struct ctio7_to_24xx *ctio = | ||
2112 | (struct ctio7_to_24xx *)qlt_get_req_pkt(vha); | ||
2113 | |||
2114 | ql_dbg(ql_dbg_tgt, vha, 0xe019, | ||
2115 | "Building additional status packet\n"); | ||
2116 | |||
2117 | memcpy(ctio, pkt, sizeof(*ctio)); | ||
2118 | ctio->entry_count = 1; | ||
2119 | ctio->dseg_count = 0; | ||
2120 | ctio->u.status1.flags &= ~__constant_cpu_to_le16( | ||
2121 | CTIO7_FLAGS_DATA_IN); | ||
2122 | |||
2123 | /* Real finish is ctio_m1's finish */ | ||
2124 | pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; | ||
2125 | pkt->u.status0.flags |= __constant_cpu_to_le16( | ||
2126 | CTIO7_FLAGS_DONT_RET_CTIO); | ||
2127 | qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, | ||
2128 | &prm); | ||
2129 | pr_debug("Status CTIO7: %p\n", ctio); | ||
2130 | } | ||
2131 | } else | ||
2132 | qlt_24xx_init_ctio_to_isp(pkt, &prm); | ||
2133 | |||
2134 | |||
2135 | cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ | ||
2136 | |||
2137 | ql_dbg(ql_dbg_tgt, vha, 0xe01a, | ||
2138 | "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n", | ||
2139 | pkt, scsi_status); | ||
2140 | |||
2141 | qla2x00_start_iocbs(vha, vha->req); | ||
2142 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
2143 | |||
2144 | return 0; | ||
2145 | |||
2146 | out_unmap_unlock: | ||
2147 | if (cmd->sg_mapped) | ||
2148 | qlt_unmap_sg(vha, cmd); | ||
2149 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
2150 | |||
2151 | return res; | ||
2152 | } | ||
2153 | EXPORT_SYMBOL(qlt_xmit_response); | ||
2154 | |||
2155 | int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) | ||
2156 | { | ||
2157 | struct ctio7_to_24xx *pkt; | ||
2158 | struct scsi_qla_host *vha = cmd->vha; | ||
2159 | struct qla_hw_data *ha = vha->hw; | ||
2160 | struct qla_tgt *tgt = cmd->tgt; | ||
2161 | struct qla_tgt_prm prm; | ||
2162 | unsigned long flags; | ||
2163 | int res = 0; | ||
2164 | |||
2165 | memset(&prm, 0, sizeof(prm)); | ||
2166 | prm.cmd = cmd; | ||
2167 | prm.tgt = tgt; | ||
2168 | prm.sg = NULL; | ||
2169 | prm.req_cnt = 1; | ||
2170 | |||
2171 | /* Send marker if required */ | ||
2172 | if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) | ||
2173 | return -EIO; | ||
2174 | |||
2175 | ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)", | ||
2176 | (int)vha->vp_idx); | ||
2177 | |||
2178 | /* Calculate number of entries and segments required */ | ||
2179 | if (qlt_pci_map_calc_cnt(&prm) != 0) | ||
2180 | return -EAGAIN; | ||
2181 | |||
2182 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
2183 | |||
2184 | /* Does the F/W have enough free IOCB entries for this request? */ | ||
2185 | res = qlt_check_reserve_free_req(vha, prm.req_cnt); | ||
2186 | if (res != 0) | ||
2187 | goto out_unlock_free_unmap; | ||
2188 | |||
2189 | res = qlt_24xx_build_ctio_pkt(&prm, vha); | ||
2190 | if (unlikely(res != 0)) | ||
2191 | goto out_unlock_free_unmap; | ||
2192 | pkt = (struct ctio7_to_24xx *)prm.pkt; | ||
2193 | pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | | ||
2194 | CTIO7_FLAGS_STATUS_MODE_0); | ||
2195 | qlt_load_data_segments(&prm, vha); | ||
2196 | |||
2197 | cmd->state = QLA_TGT_STATE_NEED_DATA; | ||
2198 | |||
2199 | qla2x00_start_iocbs(vha, vha->req); | ||
2200 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
2201 | |||
2202 | return res; | ||
2203 | |||
2204 | out_unlock_free_unmap: | ||
2205 | if (cmd->sg_mapped) | ||
2206 | qlt_unmap_sg(vha, cmd); | ||
2207 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
2208 | |||
2209 | return res; | ||
2210 | } | ||
2211 | EXPORT_SYMBOL(qlt_rdy_to_xfer); | ||
2212 | |||
2213 | /* If hardware_lock held on entry, might drop it, then reacquire */ | ||
2214 | /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ | ||
2215 | static int __qlt_send_term_exchange(struct scsi_qla_host *vha, | ||
2216 | struct qla_tgt_cmd *cmd, | ||
2217 | struct atio_from_isp *atio) | ||
2218 | { | ||
2219 | struct ctio7_to_24xx *ctio24; | ||
2220 | struct qla_hw_data *ha = vha->hw; | ||
2221 | request_t *pkt; | ||
2222 | int ret = 0; | ||
2223 | |||
2224 | ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); | ||
2225 | |||
2226 | pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); | ||
2227 | if (pkt == NULL) { | ||
2228 | ql_dbg(ql_dbg_tgt, vha, 0xe050, | ||
2229 | "qla_target(%d): %s failed: unable to allocate " | ||
2230 | "request packet\n", vha->vp_idx, __func__); | ||
2231 | return -ENOMEM; | ||
2232 | } | ||
2233 | |||
2234 | if (cmd != NULL) { | ||
2235 | if (cmd->state < QLA_TGT_STATE_PROCESSED) { | ||
2236 | ql_dbg(ql_dbg_tgt, vha, 0xe051, | ||
2237 | "qla_target(%d): Terminating cmd %p with " | ||
2238 | "incorrect state %d\n", vha->vp_idx, cmd, | ||
2239 | cmd->state); | ||
2240 | } else | ||
2241 | ret = 1; | ||
2242 | } | ||
2243 | |||
2244 | pkt->entry_count = 1; | ||
2245 | pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; | ||
2246 | |||
2247 | ctio24 = (struct ctio7_to_24xx *)pkt; | ||
2248 | ctio24->entry_type = CTIO_TYPE7; | ||
2249 | ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED; | ||
2250 | ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); | ||
2251 | ctio24->vp_index = vha->vp_idx; | ||
2252 | ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; | ||
2253 | ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; | ||
2254 | ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; | ||
2255 | ctio24->exchange_addr = atio->u.isp24.exchange_addr; | ||
2256 | ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | | ||
2257 | __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | | ||
2258 | CTIO7_FLAGS_TERMINATE); | ||
2259 | ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); | ||
2260 | |||
2261 | /* Most likely, it isn't needed */ | ||
2262 | ctio24->u.status1.residual = get_unaligned((uint32_t *) | ||
2263 | &atio->u.isp24.fcp_cmnd.add_cdb[ | ||
2264 | atio->u.isp24.fcp_cmnd.add_cdb_len]); | ||
2265 | if (ctio24->u.status1.residual != 0) | ||
2266 | ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; | ||
2267 | |||
2268 | qla2x00_start_iocbs(vha, vha->req); | ||
2269 | return ret; | ||
2270 | } | ||
2271 | |||
2272 | static void qlt_send_term_exchange(struct scsi_qla_host *vha, | ||
2273 | struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) | ||
2274 | { | ||
2275 | unsigned long flags; | ||
2276 | int rc; | ||
2277 | |||
2278 | if (qlt_issue_marker(vha, ha_locked) < 0) | ||
2279 | return; | ||
2280 | |||
2281 | if (ha_locked) { | ||
2282 | rc = __qlt_send_term_exchange(vha, cmd, atio); | ||
2283 | goto done; | ||
2284 | } | ||
2285 | spin_lock_irqsave(&vha->hw->hardware_lock, flags); | ||
2286 | rc = __qlt_send_term_exchange(vha, cmd, atio); | ||
2287 | spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); | ||
2288 | done: | ||
2289 | if (rc == 1) { | ||
2290 | if (!ha_locked && !in_interrupt()) | ||
2291 | msleep(250); /* just in case */ | ||
2292 | |||
2293 | vha->hw->tgt.tgt_ops->free_cmd(cmd); | ||
2294 | } | ||
2295 | } | ||
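/*
 * qlt_send_term_exchange() above follows a common "caller may already hold
 * the lock" split: a double-underscore helper that requires the lock, plus a
 * wrapper that takes it only when ha_locked says the caller has not. A
 * generic user-space sketch of the same shape, with a hypothetical pthread
 * mutex and a do-nothing worker:
 */
#include <pthread.h>

static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;

static int __toy_do_work(void)
{
	/* toy_lock must be held by the caller here */
	return 0;
}

static int toy_do_work(int already_locked)
{
	int rc;

	if (already_locked)
		return __toy_do_work();

	pthread_mutex_lock(&toy_lock);
	rc = __toy_do_work();
	pthread_mutex_unlock(&toy_lock);
	return rc;
}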
2296 | |||
2297 | void qlt_free_cmd(struct qla_tgt_cmd *cmd) | ||
2298 | { | ||
2299 | BUG_ON(cmd->sg_mapped); | ||
2300 | |||
2301 | if (unlikely(cmd->free_sg)) | ||
2302 | kfree(cmd->sg); | ||
2303 | kmem_cache_free(qla_tgt_cmd_cachep, cmd); | ||
2304 | } | ||
2305 | EXPORT_SYMBOL(qlt_free_cmd); | ||
2306 | |||
2307 | /* ha->hardware_lock supposed to be held on entry */ | ||
2308 | static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha, | ||
2309 | struct qla_tgt_cmd *cmd, void *ctio) | ||
2310 | { | ||
2311 | struct qla_tgt_srr_ctio *sc; | ||
2312 | struct qla_hw_data *ha = vha->hw; | ||
2313 | struct qla_tgt *tgt = ha->tgt.qla_tgt; | ||
2314 | struct qla_tgt_srr_imm *imm; | ||
2315 | |||
2316 | tgt->ctio_srr_id++; | ||
2317 | |||
2318 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, | ||
2319 | "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx); | ||
2320 | |||
2321 | if (!ctio) { | ||
2322 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055, | ||
2323 | "qla_target(%d): SRR CTIO, but ctio is NULL\n", | ||
2324 | vha->vp_idx); | ||
2325 | return -EINVAL; | ||
2326 | } | ||
2327 | |||
2328 | sc = kzalloc(sizeof(*sc), GFP_ATOMIC); | ||
2329 | if (sc != NULL) { | ||
2330 | sc->cmd = cmd; | ||
2331 | /* IRQ is already OFF */ | ||
2332 | spin_lock(&tgt->srr_lock); | ||
2333 | sc->srr_id = tgt->ctio_srr_id; | ||
2334 | list_add_tail(&sc->srr_list_entry, | ||
2335 | &tgt->srr_ctio_list); | ||
2336 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, | ||
2337 | "CTIO SRR %p added (id %d)\n", sc, sc->srr_id); | ||
2338 | if (tgt->imm_srr_id == tgt->ctio_srr_id) { | ||
2339 | int found = 0; | ||
2340 | list_for_each_entry(imm, &tgt->srr_imm_list, | ||
2341 | srr_list_entry) { | ||
2342 | if (imm->srr_id == sc->srr_id) { | ||
2343 | found = 1; | ||
2344 | break; | ||
2345 | } | ||
2346 | } | ||
2347 | if (found) { | ||
2348 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b, | ||
2349 | "Scheduling srr work\n"); | ||
2350 | schedule_work(&tgt->srr_work); | ||
2351 | } else { | ||
2352 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056, | ||
2353 | "qla_target(%d): imm_srr_id " | ||
2354 | "== ctio_srr_id (%d), but there is no " | ||
2355 | "corresponding SRR IMM, deleting CTIO " | ||
2356 | "SRR %p\n", vha->vp_idx, | ||
2357 | tgt->ctio_srr_id, sc); | ||
2358 | list_del(&sc->srr_list_entry); | ||
2359 | spin_unlock(&tgt->srr_lock); | ||
2360 | |||
2361 | kfree(sc); | ||
2362 | return -EINVAL; | ||
2363 | } | ||
2364 | } | ||
2365 | spin_unlock(&tgt->srr_lock); | ||
2366 | } else { | ||
2367 | struct qla_tgt_srr_imm *ti; | ||
2368 | |||
2369 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057, | ||
2370 | "qla_target(%d): Unable to allocate SRR CTIO entry\n", | ||
2371 | vha->vp_idx); | ||
2372 | spin_lock(&tgt->srr_lock); | ||
2373 | list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list, | ||
2374 | srr_list_entry) { | ||
2375 | if (imm->srr_id == tgt->ctio_srr_id) { | ||
2376 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c, | ||
2377 | "IMM SRR %p deleted (id %d)\n", | ||
2378 | imm, imm->srr_id); | ||
2379 | list_del(&imm->srr_list_entry); | ||
2380 | qlt_reject_free_srr_imm(vha, imm, 1); | ||
2381 | } | ||
2382 | } | ||
2383 | spin_unlock(&tgt->srr_lock); | ||
2384 | |||
2385 | return -ENOMEM; | ||
2386 | } | ||
2387 | |||
2388 | return 0; | ||
2389 | } | ||
2390 | |||
2391 | /* | ||
2392 | * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire | ||
2393 | */ | ||
2394 | static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, | ||
2395 | struct qla_tgt_cmd *cmd, uint32_t status) | ||
2396 | { | ||
2397 | int term = 0; | ||
2398 | |||
2399 | if (ctio != NULL) { | ||
2400 | struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; | ||
2401 | term = !(c->flags & | ||
2402 | __constant_cpu_to_le16(OF_TERM_EXCH)); | ||
2403 | } else | ||
2404 | term = 1; | ||
2405 | |||
2406 | if (term) | ||
2407 | qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); | ||
2408 | |||
2409 | return term; | ||
2410 | } | ||
2411 | |||
2412 | /* ha->hardware_lock supposed to be held on entry */ | ||
2413 | static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha, | ||
2414 | uint32_t handle) | ||
2415 | { | ||
2416 | struct qla_hw_data *ha = vha->hw; | ||
2417 | |||
2418 | handle--; | ||
2419 | if (ha->tgt.cmds[handle] != NULL) { | ||
2420 | struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle]; | ||
2421 | ha->tgt.cmds[handle] = NULL; | ||
2422 | return cmd; | ||
2423 | } else | ||
2424 | return NULL; | ||
2425 | } | ||
2426 | |||
2427 | /* ha->hardware_lock supposed to be held on entry */ | ||
2428 | static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, | ||
2429 | uint32_t handle, void *ctio) | ||
2430 | { | ||
2431 | struct qla_tgt_cmd *cmd = NULL; | ||
2432 | |||
2433 | /* Clear out internal marks */ | ||
2434 | handle &= ~(CTIO_COMPLETION_HANDLE_MARK | | ||
2435 | CTIO_INTERMEDIATE_HANDLE_MARK); | ||
2436 | |||
2437 | if (handle != QLA_TGT_NULL_HANDLE) { | ||
2438 | if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) { | ||
2439 | ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s", | ||
2440 | "SKIP_HANDLE CTIO\n"); | ||
2441 | return NULL; | ||
2442 | } | ||
2443 | /* handle-1 is actually used */ | ||
2444 | if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) { | ||
2445 | ql_dbg(ql_dbg_tgt, vha, 0xe052, | ||
2446 | "qla_target(%d): Wrong handle %x received\n", | ||
2447 | vha->vp_idx, handle); | ||
2448 | return NULL; | ||
2449 | } | ||
2450 | cmd = qlt_get_cmd(vha, handle); | ||
2451 | if (unlikely(cmd == NULL)) { | ||
2452 | ql_dbg(ql_dbg_tgt, vha, 0xe053, | ||
2453 | "qla_target(%d): Suspicious: unable to " | ||
2454 | "find the command with handle %x\n", vha->vp_idx, | ||
2455 | handle); | ||
2456 | return NULL; | ||
2457 | } | ||
2458 | } else if (ctio != NULL) { | ||
2459 | /* We can't get loop ID from CTIO7 */ | ||
2460 | ql_dbg(ql_dbg_tgt, vha, 0xe054, | ||
2461 | "qla_target(%d): Wrong CTIO received: QLA24xx doesn't " | ||
2462 | "support NULL handles\n", vha->vp_idx); | ||
2463 | return NULL; | ||
2464 | } | ||
2465 | |||
2466 | return cmd; | ||
2467 | } | ||
2468 | |||
2469 | /* | ||
2470 | * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire | ||
2471 | */ | ||
2472 | static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, | ||
2473 | uint32_t status, void *ctio) | ||
2474 | { | ||
2475 | struct qla_hw_data *ha = vha->hw; | ||
2476 | struct se_cmd *se_cmd; | ||
2477 | struct target_core_fabric_ops *tfo; | ||
2478 | struct qla_tgt_cmd *cmd; | ||
2479 | |||
2480 | ql_dbg(ql_dbg_tgt, vha, 0xe01e, | ||
2481 | "qla_target(%d): handle(ctio %p status %#x) <- %08x\n", | ||
2482 | vha->vp_idx, ctio, status, handle); | ||
2483 | |||
2484 | if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { | ||
2485 | /* That could happen only in case of an error/reset/abort */ | ||
2486 | if (status != CTIO_SUCCESS) { | ||
2487 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d, | ||
2488 | "Intermediate CTIO received" | ||
2489 | " (status %x)\n", status); | ||
2490 | } | ||
2491 | return; | ||
2492 | } | ||
2493 | |||
2494 | cmd = qlt_ctio_to_cmd(vha, handle, ctio); | ||
2495 | if (cmd == NULL) | ||
2496 | return; | ||
2497 | |||
2498 | se_cmd = &cmd->se_cmd; | ||
2499 | tfo = se_cmd->se_tfo; | ||
2500 | |||
2501 | if (cmd->sg_mapped) | ||
2502 | qlt_unmap_sg(vha, cmd); | ||
2503 | |||
2504 | if (unlikely(status != CTIO_SUCCESS)) { | ||
2505 | switch (status & 0xFFFF) { | ||
2506 | case CTIO_LIP_RESET: | ||
2507 | case CTIO_TARGET_RESET: | ||
2508 | case CTIO_ABORTED: | ||
2509 | case CTIO_TIMEOUT: | ||
2510 | case CTIO_INVALID_RX_ID: | ||
2511 | /* They are OK */ | ||
2512 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, | ||
2513 | "qla_target(%d): CTIO with " | ||
2514 | "status %#x received, state %x, se_cmd %p, " | ||
2515 | "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, " | ||
2516 | "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx, | ||
2517 | status, cmd->state, se_cmd); | ||
2518 | break; | ||
2519 | |||
2520 | case CTIO_PORT_LOGGED_OUT: | ||
2521 | case CTIO_PORT_UNAVAILABLE: | ||
2522 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, | ||
2523 | "qla_target(%d): CTIO with PORT LOGGED " | ||
2524 | "OUT (29) or PORT UNAVAILABLE (28) status %x " | ||
2525 | "received (state %x, se_cmd %p)\n", vha->vp_idx, | ||
2526 | status, cmd->state, se_cmd); | ||
2527 | break; | ||
2528 | |||
2529 | case CTIO_SRR_RECEIVED: | ||
2530 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a, | ||
2531 | "qla_target(%d): CTIO with SRR_RECEIVED" | ||
2532 | " status %x received (state %x, se_cmd %p)\n", | ||
2533 | vha->vp_idx, status, cmd->state, se_cmd); | ||
2534 | if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0) | ||
2535 | break; | ||
2536 | else | ||
2537 | return; | ||
2538 | |||
2539 | default: | ||
2540 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, | ||
2541 | "qla_target(%d): CTIO with error status " | ||
2542 | "0x%x received (state %x, se_cmd %p\n", | ||
2543 | vha->vp_idx, status, cmd->state, se_cmd); | ||
2544 | break; | ||
2545 | } | ||
2546 | |||
2547 | if (cmd->state != QLA_TGT_STATE_NEED_DATA) | ||
2548 | if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) | ||
2549 | return; | ||
2550 | } | ||
2551 | |||
2552 | if (cmd->state == QLA_TGT_STATE_PROCESSED) { | ||
2553 | ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd); | ||
2554 | } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { | ||
2555 | int rx_status = 0; | ||
2556 | |||
2557 | cmd->state = QLA_TGT_STATE_DATA_IN; | ||
2558 | |||
2559 | if (unlikely(status != CTIO_SUCCESS)) | ||
2560 | rx_status = -EIO; | ||
2561 | else | ||
2562 | cmd->write_data_transferred = 1; | ||
2563 | |||
2564 | ql_dbg(ql_dbg_tgt, vha, 0xe020, | ||
2565 | "Data received, context %x, rx_status %d\n", | ||
2566 | 0x0, rx_status); | ||
2567 | |||
2568 | ha->tgt.tgt_ops->handle_data(cmd); | ||
2569 | return; | ||
2570 | } else if (cmd->state == QLA_TGT_STATE_ABORTED) { | ||
2571 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, | ||
2572 | "Aborted command %p (tag %d) finished\n", cmd, cmd->tag); | ||
2573 | } else { | ||
2574 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, | ||
2575 | "qla_target(%d): A command in state (%d) should " | ||
2576 | "not return a CTIO complete\n", vha->vp_idx, cmd->state); | ||
2577 | } | ||
2578 | |||
2579 | if (unlikely(status != CTIO_SUCCESS)) { | ||
2580 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); | ||
2581 | dump_stack(); | ||
2582 | } | ||
2583 | |||
2584 | ha->tgt.tgt_ops->free_cmd(cmd); | ||
2585 | } | ||
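The branch structure in qlt_do_ctio_completion() boils down to a small state machine: a command in NEED_DATA moves to DATA_IN and is handed back to the fabric layer (whether the CTIO succeeded or not) without being freed, while PROCESSED, ABORTED and any unexpected state all end with free_cmd(). A compact sketch of that decision, with illustrative names rather than the driver's types:

#include <stdio.h>

/* Simplified mirror of the QLA_TGT_STATE_* progression used above. */
enum cmd_state { STATE_NEED_DATA, STATE_DATA_IN, STATE_PROCESSED, STATE_ABORTED };
enum ctio_action { ACTION_FREE_CMD, ACTION_HANDLE_DATA };

/* Sketch of the branch structure only; names and types are illustrative. */
static enum ctio_action on_ctio_complete(enum cmd_state *state, int ctio_ok, int *write_ok)
{
    if (*state == STATE_NEED_DATA) {   /* write data phase just finished */
        *state = STATE_DATA_IN;
        *write_ok = ctio_ok;           /* maps to rx_status / write_data_transferred */
        return ACTION_HANDLE_DATA;     /* command stays alive for the response CTIO */
    }
    /* PROCESSED, ABORTED and any unexpected state all end with freeing the cmd. */
    return ACTION_FREE_CMD;
}

int main(void)
{
    enum cmd_state s = STATE_NEED_DATA;
    int ok = 0;
    printf("%d\n", on_ctio_complete(&s, 1, &ok) == ACTION_HANDLE_DATA); /* 1 */
    s = STATE_PROCESSED;
    printf("%d\n", on_ctio_complete(&s, 1, &ok) == ACTION_FREE_CMD);    /* 1 */
    return 0;
}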
2586 | |||
2587 | /* ha->hardware_lock supposed to be held on entry */ | ||
2588 | /* called via callback from qla2xxx */ | ||
2589 | void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle) | ||
2590 | { | ||
2591 | struct qla_hw_data *ha = vha->hw; | ||
2592 | struct qla_tgt *tgt = ha->tgt.qla_tgt; | ||
2593 | |||
2594 | if (likely(tgt == NULL)) { | ||
2595 | ql_dbg(ql_dbg_tgt, vha, 0xe021, | ||
2596 | "CTIO, but target mode not enabled" | ||
2597 | " (ha %d %p handle %#x)", vha->vp_idx, ha, handle); | ||
2598 | return; | ||
2599 | } | ||
2600 | |||
2601 | tgt->irq_cmd_count++; | ||
2602 | qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL); | ||
2603 | tgt->irq_cmd_count--; | ||
2604 | } | ||
2605 | |||
2606 | static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, | ||
2607 | uint8_t task_codes) | ||
2608 | { | ||
2609 | int fcp_task_attr; | ||
2610 | |||
2611 | switch (task_codes) { | ||
2612 | case ATIO_SIMPLE_QUEUE: | ||
2613 | fcp_task_attr = MSG_SIMPLE_TAG; | ||
2614 | break; | ||
2615 | case ATIO_HEAD_OF_QUEUE: | ||
2616 | fcp_task_attr = MSG_HEAD_TAG; | ||
2617 | break; | ||
2618 | case ATIO_ORDERED_QUEUE: | ||
2619 | fcp_task_attr = MSG_ORDERED_TAG; | ||
2620 | break; | ||
2621 | case ATIO_ACA_QUEUE: | ||
2622 | fcp_task_attr = MSG_ACA_TAG; | ||
2623 | break; | ||
2624 | case ATIO_UNTAGGED: | ||
2625 | fcp_task_attr = MSG_SIMPLE_TAG; | ||
2626 | break; | ||
2627 | default: | ||
2628 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d, | ||
2629 | "qla_target: unknown task code %x, use ORDERED instead\n", | ||
2630 | task_codes); | ||
2631 | fcp_task_attr = MSG_ORDERED_TAG; | ||
2632 | break; | ||
2633 | } | ||
2634 | |||
2635 | return fcp_task_attr; | ||
2636 | } | ||
2637 | |||
2638 | static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *, | ||
2639 | uint8_t *); | ||
2640 | /* | ||
2641 | * Process context for I/O path into tcm_qla2xxx code | ||
2642 | */ | ||
2643 | static void qlt_do_work(struct work_struct *work) | ||
2644 | { | ||
2645 | struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); | ||
2646 | scsi_qla_host_t *vha = cmd->vha; | ||
2647 | struct qla_hw_data *ha = vha->hw; | ||
2648 | struct qla_tgt *tgt = ha->tgt.qla_tgt; | ||
2649 | struct qla_tgt_sess *sess = NULL; | ||
2650 | struct atio_from_isp *atio = &cmd->atio; | ||
2651 | unsigned char *cdb; | ||
2652 | unsigned long flags; | ||
2653 | uint32_t data_length; | ||
2654 | int ret, fcp_task_attr, data_dir, bidi = 0; | ||
2655 | |||
2656 | if (tgt->tgt_stop) | ||
2657 | goto out_term; | ||
2658 | |||
2659 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
2660 | sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, | ||
2661 | atio->u.isp24.fcp_hdr.s_id); | ||
2662 | /* Do kref_get() before dropping qla_hw_data->hardware_lock. */ | ||
2663 | if (sess) | ||
2664 | kref_get(&sess->se_sess->sess_kref); | ||
2665 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
2666 | |||
2667 | if (unlikely(!sess)) { | ||
2668 | uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id; | ||
2669 | |||
2670 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, | ||
2671 | "qla_target(%d): Unable to find wwn login" | ||
2672 | " (s_id %x:%x:%x), trying to create it manually\n", | ||
2673 | vha->vp_idx, s_id[0], s_id[1], s_id[2]); | ||
2674 | |||
2675 | if (atio->u.raw.entry_count > 1) { | ||
2676 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, | ||
2677 | "Dropping multy entry cmd %p\n", cmd); | ||
2678 | goto out_term; | ||
2679 | } | ||
2680 | |||
2681 | mutex_lock(&ha->tgt.tgt_mutex); | ||
2682 | sess = qlt_make_local_sess(vha, s_id); | ||
2683 | /* sess has an extra creation ref. */ | ||
2684 | mutex_unlock(&ha->tgt.tgt_mutex); | ||
2685 | |||
2686 | if (!sess) | ||
2687 | goto out_term; | ||
2688 | } | ||
2689 | |||
2690 | cmd->sess = sess; | ||
2691 | cmd->loop_id = sess->loop_id; | ||
2692 | cmd->conf_compl_supported = sess->conf_compl_supported; | ||
2693 | |||
2694 | cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; | ||
2695 | cmd->tag = atio->u.isp24.exchange_addr; | ||
2696 | cmd->unpacked_lun = scsilun_to_int( | ||
2697 | (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); | ||
2698 | |||
2699 | if (atio->u.isp24.fcp_cmnd.rddata && | ||
2700 | atio->u.isp24.fcp_cmnd.wrdata) { | ||
2701 | bidi = 1; | ||
2702 | data_dir = DMA_TO_DEVICE; | ||
2703 | } else if (atio->u.isp24.fcp_cmnd.rddata) | ||
2704 | data_dir = DMA_FROM_DEVICE; | ||
2705 | else if (atio->u.isp24.fcp_cmnd.wrdata) | ||
2706 | data_dir = DMA_TO_DEVICE; | ||
2707 | else | ||
2708 | data_dir = DMA_NONE; | ||
2709 | |||
2710 | fcp_task_attr = qlt_get_fcp_task_attr(vha, | ||
2711 | atio->u.isp24.fcp_cmnd.task_attr); | ||
2712 | data_length = be32_to_cpu(get_unaligned((uint32_t *) | ||
2713 | &atio->u.isp24.fcp_cmnd.add_cdb[ | ||
2714 | atio->u.isp24.fcp_cmnd.add_cdb_len])); | ||
2715 | |||
2716 | ql_dbg(ql_dbg_tgt, vha, 0xe022, | ||
2717 | "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n", | ||
2718 | cmd, cmd->unpacked_lun, cmd->tag); | ||
2719 | |||
2720 | ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, | ||
2721 | fcp_task_attr, data_dir, bidi); | ||
2722 | if (ret != 0) | ||
2723 | goto out_term; | ||
2724 | /* | ||
2725 | * Drop the extra session reference from qla_tgt_handle_cmd_for_atio(). | ||
2726 | */ | ||
2727 | ha->tgt.tgt_ops->put_sess(sess); | ||
2728 | return; | ||
2729 | |||
2730 | out_term: | ||
2731 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd); | ||
2732 | /* | ||
2733 | * The cmd has not been sent to the target yet, so pass NULL as the second | ||
2734 | * argument to qlt_send_term_exchange() and free the memory here. | ||
2735 | */ | ||
2736 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
2737 | qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); | ||
2738 | kmem_cache_free(qla_tgt_cmd_cachep, cmd); | ||
2739 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
2740 | if (sess) | ||
2741 | ha->tgt.tgt_ops->put_sess(sess); | ||
2742 | } | ||
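The expected transfer length is not at a fixed offset in the FCP_CMND payload: FCP_DL is a big-endian 32-bit field that immediately follows the additional CDB bytes, which is why qlt_do_work() indexes add_cdb[] by add_cdb_len before byte-swapping. A host-side sketch of the same extraction from a raw byte buffer (the 16-byte fixed CDB offset is the usual FCP layout, used here as an illustrative assumption):

#include <stdint.h>
#include <stdio.h>

/* Read a big-endian 32-bit value from a possibly unaligned address. */
static uint32_t be32_read(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* cdb points at the 16-byte fixed CDB area; add_cdb_words counts the extra
 * CDB dwords.  FCP_DL follows the additional CDB bytes immediately. */
static uint32_t fcp_data_length(const uint8_t *cdb, unsigned add_cdb_words)
{
    return be32_read(cdb + 16 + add_cdb_words * 4);
}

int main(void)
{
    uint8_t buf[32] = {0};
    buf[18] = 0x10;                            /* FCP_DL = 0x00001000 = 4096 */
    printf("%u\n", fcp_data_length(buf, 0));   /* 4096 */
    return 0;
}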
2743 | |||
2744 | /* ha->hardware_lock supposed to be held on entry */ | ||
2745 | static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, | ||
2746 | struct atio_from_isp *atio) | ||
2747 | { | ||
2748 | struct qla_hw_data *ha = vha->hw; | ||
2749 | struct qla_tgt *tgt = ha->tgt.qla_tgt; | ||
2750 | struct qla_tgt_cmd *cmd; | ||
2751 | |||
2752 | if (unlikely(tgt->tgt_stop)) { | ||
2753 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021, | ||
2754 | "New command while device %p is shutting down\n", tgt); | ||
2755 | return -EFAULT; | ||
2756 | } | ||
2757 | |||
2758 | cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC); | ||
2759 | if (!cmd) { | ||
2760 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e, | ||
2761 | "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); | ||
2762 | return -ENOMEM; | ||
2763 | } | ||
2764 | |||
2765 | INIT_LIST_HEAD(&cmd->cmd_list); | ||
2766 | |||
2767 | memcpy(&cmd->atio, atio, sizeof(*atio)); | ||
2768 | cmd->state = QLA_TGT_STATE_NEW; | ||
2769 | cmd->tgt = ha->tgt.qla_tgt; | ||
2770 | cmd->vha = vha; | ||
2771 | |||
2772 | INIT_WORK(&cmd->work, qlt_do_work); | ||
2773 | queue_work(qla_tgt_wq, &cmd->work); | ||
2774 | return 0; | ||
2775 | |||
2776 | } | ||
2777 | |||
2778 | /* ha->hardware_lock supposed to be held on entry */ | ||
2779 | static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, | ||
2780 | int fn, void *iocb, int flags) | ||
2781 | { | ||
2782 | struct scsi_qla_host *vha = sess->vha; | ||
2783 | struct qla_hw_data *ha = vha->hw; | ||
2784 | struct qla_tgt_mgmt_cmd *mcmd; | ||
2785 | int res; | ||
2786 | uint8_t tmr_func; | ||
2787 | |||
2788 | mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); | ||
2789 | if (!mcmd) { | ||
2790 | ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009, | ||
2791 | "qla_target(%d): Allocation of management " | ||
2792 | "command failed, some commands and their data could " | ||
2793 | "leak\n", vha->vp_idx); | ||
2794 | return -ENOMEM; | ||
2795 | } | ||
2796 | memset(mcmd, 0, sizeof(*mcmd)); | ||
2797 | mcmd->sess = sess; | ||
2798 | |||
2799 | if (iocb) { | ||
2800 | memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, | ||
2801 | sizeof(mcmd->orig_iocb.imm_ntfy)); | ||
2802 | } | ||
2803 | mcmd->tmr_func = fn; | ||
2804 | mcmd->flags = flags; | ||
2805 | |||
2806 | switch (fn) { | ||
2807 | case QLA_TGT_CLEAR_ACA: | ||
2808 | ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000, | ||
2809 | "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx); | ||
2810 | tmr_func = TMR_CLEAR_ACA; | ||
2811 | break; | ||
2812 | |||
2813 | case QLA_TGT_TARGET_RESET: | ||
2814 | ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001, | ||
2815 | "qla_target(%d): TARGET_RESET received\n", | ||
2816 | sess->vha->vp_idx); | ||
2817 | tmr_func = TMR_TARGET_WARM_RESET; | ||
2818 | break; | ||
2819 | |||
2820 | case QLA_TGT_LUN_RESET: | ||
2821 | ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, | ||
2822 | "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); | ||
2823 | tmr_func = TMR_LUN_RESET; | ||
2824 | break; | ||
2825 | |||
2826 | case QLA_TGT_CLEAR_TS: | ||
2827 | ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003, | ||
2828 | "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx); | ||
2829 | tmr_func = TMR_CLEAR_TASK_SET; | ||
2830 | break; | ||
2831 | |||
2832 | case QLA_TGT_ABORT_TS: | ||
2833 | ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004, | ||
2834 | "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx); | ||
2835 | tmr_func = TMR_ABORT_TASK_SET; | ||
2836 | break; | ||
2837 | #if 0 | ||
2838 | case QLA_TGT_ABORT_ALL: | ||
2839 | ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005, | ||
2840 | "qla_target(%d): Doing ABORT_ALL_TASKS\n", | ||
2841 | sess->vha->vp_idx); | ||
2842 | tmr_func = 0; | ||
2843 | break; | ||
2844 | |||
2845 | case QLA_TGT_ABORT_ALL_SESS: | ||
2846 | ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006, | ||
2847 | "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n", | ||
2848 | sess->vha->vp_idx); | ||
2849 | tmr_func = 0; | ||
2850 | break; | ||
2851 | |||
2852 | case QLA_TGT_NEXUS_LOSS_SESS: | ||
2853 | ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007, | ||
2854 | "qla_target(%d): Doing NEXUS_LOSS_SESS\n", | ||
2855 | sess->vha->vp_idx); | ||
2856 | tmr_func = 0; | ||
2857 | break; | ||
2858 | |||
2859 | case QLA_TGT_NEXUS_LOSS: | ||
2860 | ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008, | ||
2861 | "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx); | ||
2862 | tmr_func = 0; | ||
2863 | break; | ||
2864 | #endif | ||
2865 | default: | ||
2866 | ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a, | ||
2867 | "qla_target(%d): Unknown task mgmt fn 0x%x\n", | ||
2868 | sess->vha->vp_idx, fn); | ||
2869 | mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); | ||
2870 | return -ENOSYS; | ||
2871 | } | ||
2872 | |||
2873 | res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0); | ||
2874 | if (res != 0) { | ||
2875 | ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b, | ||
2876 | "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n", | ||
2877 | sess->vha->vp_idx, res); | ||
2878 | mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); | ||
2879 | return -EFAULT; | ||
2880 | } | ||
2881 | |||
2882 | return 0; | ||
2883 | } | ||
2884 | |||
2885 | /* ha->hardware_lock supposed to be held on entry */ | ||
2886 | static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) | ||
2887 | { | ||
2888 | struct atio_from_isp *a = (struct atio_from_isp *)iocb; | ||
2889 | struct qla_hw_data *ha = vha->hw; | ||
2890 | struct qla_tgt *tgt; | ||
2891 | struct qla_tgt_sess *sess; | ||
2892 | uint32_t lun, unpacked_lun; | ||
2893 | int lun_size, fn; | ||
2894 | |||
2895 | tgt = ha->tgt.qla_tgt; | ||
2896 | |||
2897 | lun = a->u.isp24.fcp_cmnd.lun; | ||
2898 | lun_size = sizeof(a->u.isp24.fcp_cmnd.lun); | ||
2899 | fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; | ||
2900 | sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, | ||
2901 | a->u.isp24.fcp_hdr.s_id); | ||
2902 | unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); | ||
2903 | |||
2904 | if (!sess) { | ||
2905 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024, | ||
2906 | "qla_target(%d): task mgmt fn 0x%x for " | ||
2907 | "non-existant session\n", vha->vp_idx, fn); | ||
2908 | return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb, | ||
2909 | sizeof(struct atio_from_isp)); | ||
2910 | } | ||
2911 | |||
2912 | return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); | ||
2913 | } | ||
2914 | |||
2915 | /* ha->hardware_lock supposed to be held on entry */ | ||
2916 | static int __qlt_abort_task(struct scsi_qla_host *vha, | ||
2917 | struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess) | ||
2918 | { | ||
2919 | struct atio_from_isp *a = (struct atio_from_isp *)iocb; | ||
2920 | struct qla_hw_data *ha = vha->hw; | ||
2921 | struct qla_tgt_mgmt_cmd *mcmd; | ||
2922 | uint32_t lun, unpacked_lun; | ||
2923 | int rc; | ||
2924 | |||
2925 | mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); | ||
2926 | if (mcmd == NULL) { | ||
2927 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f, | ||
2928 | "qla_target(%d): %s: Allocation of ABORT cmd failed\n", | ||
2929 | vha->vp_idx, __func__); | ||
2930 | return -ENOMEM; | ||
2931 | } | ||
2932 | memset(mcmd, 0, sizeof(*mcmd)); | ||
2933 | |||
2934 | mcmd->sess = sess; | ||
2935 | memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, | ||
2936 | sizeof(mcmd->orig_iocb.imm_ntfy)); | ||
2937 | |||
2938 | lun = a->u.isp24.fcp_cmnd.lun; | ||
2939 | unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); | ||
2940 | |||
2941 | rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK, | ||
2942 | le16_to_cpu(iocb->u.isp2x.seq_id)); | ||
2943 | if (rc != 0) { | ||
2944 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060, | ||
2945 | "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", | ||
2946 | vha->vp_idx, rc); | ||
2947 | mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); | ||
2948 | return -EFAULT; | ||
2949 | } | ||
2950 | |||
2951 | return 0; | ||
2952 | } | ||
2953 | |||
2954 | /* ha->hardware_lock supposed to be held on entry */ | ||
2955 | static int qlt_abort_task(struct scsi_qla_host *vha, | ||
2956 | struct imm_ntfy_from_isp *iocb) | ||
2957 | { | ||
2958 | struct qla_hw_data *ha = vha->hw; | ||
2959 | struct qla_tgt_sess *sess; | ||
2960 | int loop_id; | ||
2961 | |||
2962 | loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); | ||
2963 | |||
2964 | sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); | ||
2965 | if (sess == NULL) { | ||
2966 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, | ||
2967 | "qla_target(%d): task abort for unexisting " | ||
2968 | "session\n", vha->vp_idx); | ||
2969 | return qlt_sched_sess_work(ha->tgt.qla_tgt, | ||
2970 | QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); | ||
2971 | } | ||
2972 | |||
2973 | return __qlt_abort_task(vha, iocb, sess); | ||
2974 | } | ||
2975 | |||
2976 | /* | ||
2977 | * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire. | ||
2978 | */ | ||
2979 | static int qlt_24xx_handle_els(struct scsi_qla_host *vha, | ||
2980 | struct imm_ntfy_from_isp *iocb) | ||
2981 | { | ||
2982 | struct qla_hw_data *ha = vha->hw; | ||
2983 | int res = 0; | ||
2984 | |||
2985 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, | ||
2986 | "qla_target(%d): Port ID: 0x%02x:%02x:%02x" | ||
2987 | " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0], | ||
2988 | iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2], | ||
2989 | iocb->u.isp24.status_subcode); | ||
2990 | |||
2991 | switch (iocb->u.isp24.status_subcode) { | ||
2992 | case ELS_PLOGI: | ||
2993 | case ELS_FLOGI: | ||
2994 | case ELS_PRLI: | ||
2995 | case ELS_LOGO: | ||
2996 | case ELS_PRLO: | ||
2997 | res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); | ||
2998 | break; | ||
2999 | case ELS_PDISC: | ||
3000 | case ELS_ADISC: | ||
3001 | { | ||
3002 | struct qla_tgt *tgt = ha->tgt.qla_tgt; | ||
3003 | if (tgt->link_reinit_iocb_pending) { | ||
3004 | qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, | ||
3005 | 0, 0, 0, 0, 0, 0); | ||
3006 | tgt->link_reinit_iocb_pending = 0; | ||
3007 | } | ||
3008 | res = 1; /* send notify ack */ | ||
3009 | break; | ||
3010 | } | ||
3011 | |||
3012 | default: | ||
3013 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, | ||
3014 | "qla_target(%d): Unsupported ELS command %x " | ||
3015 | "received\n", vha->vp_idx, iocb->u.isp24.status_subcode); | ||
3016 | res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); | ||
3017 | break; | ||
3018 | } | ||
3019 | |||
3020 | return res; | ||
3021 | } | ||
3022 | |||
3023 | static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset) | ||
3024 | { | ||
3025 | struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL; | ||
3026 | size_t first_offset = 0, rem_offset = offset, tmp = 0; | ||
3027 | int i, sg_srr_cnt, bufflen = 0; | ||
3028 | |||
3029 | ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023, | ||
3030 | "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, " | ||
3031 | "cmd->sg_cnt: %u, direction: %d\n", | ||
3032 | cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); | ||
3033 | |||
3034 | /* | ||
3035 | * FIXME: Reject non-zero SRR relative offset until we can test | ||
3036 | * this code properly. | ||
3037 | */ | ||
3038 | pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset); | ||
3039 | return -1; | ||
3040 | |||
3041 | if (!cmd->sg || !cmd->sg_cnt) { | ||
3042 | ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055, | ||
3043 | "Missing cmd->sg or zero cmd->sg_cnt in" | ||
3044 | " qla_tgt_set_data_offset\n"); | ||
3045 | return -EINVAL; | ||
3046 | } | ||
3047 | /* | ||
3048 | * Walk the current cmd->sg list until we locate the new sg_srr_start | ||
3049 | */ | ||
3050 | for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) { | ||
3051 | ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024, | ||
3052 | "sg[%d]: %p page: %p, length: %d, offset: %d\n", | ||
3053 | i, sg, sg_page(sg), sg->length, sg->offset); | ||
3054 | |||
3055 | if ((sg->length + tmp) > offset) { | ||
3056 | first_offset = rem_offset; | ||
3057 | sg_srr_start = sg; | ||
3058 | ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025, | ||
3059 | "Found matching sg[%d], using %p as sg_srr_start, " | ||
3060 | "and using first_offset: %zu\n", i, sg, | ||
3061 | first_offset); | ||
3062 | break; | ||
3063 | } | ||
3064 | tmp += sg->length; | ||
3065 | rem_offset -= sg->length; | ||
3066 | } | ||
3067 | |||
3068 | if (!sg_srr_start) { | ||
3069 | ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056, | ||
3070 | "Unable to locate sg_srr_start for offset: %u\n", offset); | ||
3071 | return -EINVAL; | ||
3072 | } | ||
3073 | sg_srr_cnt = (cmd->sg_cnt - i); | ||
3074 | |||
3075 | sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL); | ||
3076 | if (!sg_srr) { | ||
3077 | ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057, | ||
3078 | "Unable to allocate sgp\n"); | ||
3079 | return -ENOMEM; | ||
3080 | } | ||
3081 | sg_init_table(sg_srr, sg_srr_cnt); | ||
3082 | sgp = &sg_srr[0]; | ||
3083 | /* | ||
3084 | * Walk the remaining list for sg_srr_start, mapping to the newly | ||
3085 | * allocated sg_srr taking first_offset into account. | ||
3086 | */ | ||
3087 | for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) { | ||
3088 | if (first_offset) { | ||
3089 | sg_set_page(sgp, sg_page(sg), | ||
3090 | (sg->length - first_offset), first_offset); | ||
3091 | first_offset = 0; | ||
3092 | } else { | ||
3093 | sg_set_page(sgp, sg_page(sg), sg->length, 0); | ||
3094 | } | ||
3095 | bufflen += sgp->length; | ||
3096 | |||
3097 | sgp = sg_next(sgp); | ||
3098 | if (!sgp) | ||
3099 | break; | ||
3100 | } | ||
3101 | |||
3102 | cmd->sg = sg_srr; | ||
3103 | cmd->sg_cnt = sg_srr_cnt; | ||
3104 | cmd->bufflen = bufflen; | ||
3105 | cmd->offset += offset; | ||
3106 | cmd->free_sg = 1; | ||
3107 | |||
3108 | ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg); | ||
3109 | ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n", | ||
3110 | cmd->sg_cnt); | ||
3111 | ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n", | ||
3112 | cmd->bufflen); | ||
3113 | ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n", | ||
3114 | cmd->offset); | ||
3115 | |||
3116 | if (cmd->sg_cnt < 0) | ||
3117 | BUG(); | ||
3118 | |||
3119 | if (cmd->bufflen < 0) | ||
3120 | BUG(); | ||
3121 | |||
3122 | return 0; | ||
3123 | } | ||
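qlt_set_data_offset() rebuilds cmd->sg so that a retransmission starts part-way into the original buffer: whole segments before the SRR relative offset are dropped, and the first kept segment is trimmed by the leftover bytes. The same bookkeeping on a flat array of segments, as a standalone sketch (struct seg and split_at_offset are illustrative, not driver types):

#include <stdio.h>
#include <stddef.h>

/* A flat stand-in for one scatterlist entry. */
struct seg { size_t len; size_t skip; };   /* skip: bytes to drop at the head */

/* Produce a view of segs[] starting 'offset' bytes in, the way the SRR code
 * rebuilds cmd->sg: whole leading segments are dropped and the first kept
 * segment carries the residual offset.  Returns the index of the first kept
 * segment, or -1 if the offset runs past the buffer. */
static int split_at_offset(struct seg *segs, int nsegs, size_t offset)
{
    for (int i = 0; i < nsegs; i++) {
        if (offset < segs[i].len) {
            segs[i].skip = offset;     /* partial first segment */
            return i;
        }
        offset -= segs[i].len;         /* drop this segment entirely */
    }
    return -1;
}

int main(void)
{
    struct seg s[3] = { {4096, 0}, {4096, 0}, {2048, 0} };
    int first = split_at_offset(s, 3, 5000);
    printf("first=%d skip=%zu\n", first, s[first].skip);  /* first=1 skip=904 */
    return 0;
}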
3124 | |||
3125 | static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd, | ||
3126 | uint32_t srr_rel_offs, int *xmit_type) | ||
3127 | { | ||
3128 | int res = 0, rel_offs; | ||
3129 | |||
3130 | rel_offs = srr_rel_offs - cmd->offset; | ||
3131 | ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d", | ||
3132 | srr_rel_offs, rel_offs); | ||
3133 | |||
3134 | *xmit_type = QLA_TGT_XMIT_ALL; | ||
3135 | |||
3136 | if (rel_offs < 0) { | ||
3137 | ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062, | ||
3138 | "qla_target(%d): SRR rel_offs (%d) < 0", | ||
3139 | cmd->vha->vp_idx, rel_offs); | ||
3140 | res = -1; | ||
3141 | } else if (rel_offs == cmd->bufflen) | ||
3142 | *xmit_type = QLA_TGT_XMIT_STATUS; | ||
3143 | else if (rel_offs > 0) | ||
3144 | res = qlt_set_data_offset(cmd, rel_offs); | ||
3145 | |||
3146 | return res; | ||
3147 | } | ||
3148 | |||
3149 | /* No locks, thread context */ | ||
3150 | static void qlt_handle_srr(struct scsi_qla_host *vha, | ||
3151 | struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm) | ||
3152 | { | ||
3153 | struct imm_ntfy_from_isp *ntfy = | ||
3154 | (struct imm_ntfy_from_isp *)&imm->imm_ntfy; | ||
3155 | struct qla_hw_data *ha = vha->hw; | ||
3156 | struct qla_tgt_cmd *cmd = sctio->cmd; | ||
3157 | struct se_cmd *se_cmd = &cmd->se_cmd; | ||
3158 | unsigned long flags; | ||
3159 | int xmit_type = 0, resp = 0; | ||
3160 | uint32_t offset; | ||
3161 | uint16_t srr_ui; | ||
3162 | |||
3163 | offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs); | ||
3164 | srr_ui = ntfy->u.isp24.srr_ui; | ||
3165 | |||
3166 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n", | ||
3167 | cmd, srr_ui); | ||
3168 | |||
3169 | switch (srr_ui) { | ||
3170 | case SRR_IU_STATUS: | ||
3171 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
3172 | qlt_send_notify_ack(vha, ntfy, | ||
3173 | 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); | ||
3174 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
3175 | xmit_type = QLA_TGT_XMIT_STATUS; | ||
3176 | resp = 1; | ||
3177 | break; | ||
3178 | case SRR_IU_DATA_IN: | ||
3179 | if (!cmd->sg || !cmd->sg_cnt) { | ||
3180 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063, | ||
3181 | "Unable to process SRR_IU_DATA_IN due to" | ||
3182 | " missing cmd->sg, state: %d\n", cmd->state); | ||
3183 | dump_stack(); | ||
3184 | goto out_reject; | ||
3185 | } | ||
3186 | if (se_cmd->scsi_status != 0) { | ||
3187 | ql_dbg(ql_dbg_tgt, vha, 0xe02a, | ||
3188 | "Rejecting SRR_IU_DATA_IN with non GOOD " | ||
3189 | "scsi_status\n"); | ||
3190 | goto out_reject; | ||
3191 | } | ||
3192 | cmd->bufflen = se_cmd->data_length; | ||
3193 | |||
3194 | if (qlt_has_data(cmd)) { | ||
3195 | if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) | ||
3196 | goto out_reject; | ||
3197 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
3198 | qlt_send_notify_ack(vha, ntfy, | ||
3199 | 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); | ||
3200 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
3201 | resp = 1; | ||
3202 | } else { | ||
3203 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064, | ||
3204 | "qla_target(%d): SRR for in data for cmd " | ||
3205 | "without them (tag %d, SCSI status %d), " | ||
3206 | "reject", vha->vp_idx, cmd->tag, | ||
3207 | cmd->se_cmd.scsi_status); | ||
3208 | goto out_reject; | ||
3209 | } | ||
3210 | break; | ||
3211 | case SRR_IU_DATA_OUT: | ||
3212 | if (!cmd->sg || !cmd->sg_cnt) { | ||
3213 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065, | ||
3214 | "Unable to process SRR_IU_DATA_OUT due to" | ||
3215 | " missing cmd->sg\n"); | ||
3216 | dump_stack(); | ||
3217 | goto out_reject; | ||
3218 | } | ||
3219 | if (se_cmd->scsi_status != 0) { | ||
3220 | ql_dbg(ql_dbg_tgt, vha, 0xe02b, | ||
3221 | "Rejecting SRR_IU_DATA_OUT" | ||
3222 | " with non GOOD scsi_status\n"); | ||
3223 | goto out_reject; | ||
3224 | } | ||
3225 | cmd->bufflen = se_cmd->data_length; | ||
3226 | |||
3227 | if (qlt_has_data(cmd)) { | ||
3228 | if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) | ||
3229 | goto out_reject; | ||
3230 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
3231 | qlt_send_notify_ack(vha, ntfy, | ||
3232 | 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); | ||
3233 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
3234 | if (xmit_type & QLA_TGT_XMIT_DATA) | ||
3235 | qlt_rdy_to_xfer(cmd); | ||
3236 | } else { | ||
3237 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, | ||
3238 | "qla_target(%d): SRR for out data for cmd " | ||
3239 | "without them (tag %d, SCSI status %d), " | ||
3240 | "reject", vha->vp_idx, cmd->tag, | ||
3241 | cmd->se_cmd.scsi_status); | ||
3242 | goto out_reject; | ||
3243 | } | ||
3244 | break; | ||
3245 | default: | ||
3246 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067, | ||
3247 | "qla_target(%d): Unknown srr_ui value %x", | ||
3248 | vha->vp_idx, srr_ui); | ||
3249 | goto out_reject; | ||
3250 | } | ||
3251 | |||
3252 | /* Transmit response in case of status and data-in cases */ | ||
3253 | if (resp) | ||
3254 | qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); | ||
3255 | |||
3256 | return; | ||
3257 | |||
3258 | out_reject: | ||
3259 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
3260 | qlt_send_notify_ack(vha, ntfy, 0, 0, 0, | ||
3261 | NOTIFY_ACK_SRR_FLAGS_REJECT, | ||
3262 | NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, | ||
3263 | NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); | ||
3264 | if (cmd->state == QLA_TGT_STATE_NEED_DATA) { | ||
3265 | cmd->state = QLA_TGT_STATE_DATA_IN; | ||
3266 | dump_stack(); | ||
3267 | } else | ||
3268 | qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); | ||
3269 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
3270 | } | ||
3271 | |||
3272 | static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha, | ||
3273 | struct qla_tgt_srr_imm *imm, int ha_locked) | ||
3274 | { | ||
3275 | struct qla_hw_data *ha = vha->hw; | ||
3276 | unsigned long flags = 0; | ||
3277 | |||
3278 | if (!ha_locked) | ||
3279 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
3280 | |||
3281 | qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0, | ||
3282 | NOTIFY_ACK_SRR_FLAGS_REJECT, | ||
3283 | NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, | ||
3284 | NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); | ||
3285 | |||
3286 | if (!ha_locked) | ||
3287 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
3288 | |||
3289 | kfree(imm); | ||
3290 | } | ||
3291 | |||
3292 | static void qlt_handle_srr_work(struct work_struct *work) | ||
3293 | { | ||
3294 | struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work); | ||
3295 | struct scsi_qla_host *vha = tgt->vha; | ||
3296 | struct qla_tgt_srr_ctio *sctio; | ||
3297 | unsigned long flags; | ||
3298 | |||
3299 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n", | ||
3300 | tgt); | ||
3301 | |||
3302 | restart: | ||
3303 | spin_lock_irqsave(&tgt->srr_lock, flags); | ||
3304 | list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) { | ||
3305 | struct qla_tgt_srr_imm *imm, *i, *ti; | ||
3306 | struct qla_tgt_cmd *cmd; | ||
3307 | struct se_cmd *se_cmd; | ||
3308 | |||
3309 | imm = NULL; | ||
3310 | list_for_each_entry_safe(i, ti, &tgt->srr_imm_list, | ||
3311 | srr_list_entry) { | ||
3312 | if (i->srr_id == sctio->srr_id) { | ||
3313 | list_del(&i->srr_list_entry); | ||
3314 | if (imm) { | ||
3315 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068, | ||
3316 | "qla_target(%d): There must be " | ||
3317 | "only one IMM SRR per CTIO SRR " | ||
3318 | "(IMM SRR %p, id %d, CTIO %p\n", | ||
3319 | vha->vp_idx, i, i->srr_id, sctio); | ||
3320 | qlt_reject_free_srr_imm(tgt->vha, i, 0); | ||
3321 | } else | ||
3322 | imm = i; | ||
3323 | } | ||
3324 | } | ||
3325 | |||
3326 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a, | ||
3327 | "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio, | ||
3328 | sctio->srr_id); | ||
3329 | |||
3330 | if (imm == NULL) { | ||
3331 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b, | ||
3332 | "Not found matching IMM for SRR CTIO (id %d)\n", | ||
3333 | sctio->srr_id); | ||
3334 | continue; | ||
3335 | } else | ||
3336 | list_del(&sctio->srr_list_entry); | ||
3337 | |||
3338 | spin_unlock_irqrestore(&tgt->srr_lock, flags); | ||
3339 | |||
3340 | cmd = sctio->cmd; | ||
3341 | /* | ||
3342 | * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow | ||
3343 | * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in() | ||
3344 | * logic. | ||
3345 | */ | ||
3346 | cmd->offset = 0; | ||
3347 | if (cmd->free_sg) { | ||
3348 | kfree(cmd->sg); | ||
3349 | cmd->sg = NULL; | ||
3350 | cmd->free_sg = 0; | ||
3351 | } | ||
3352 | se_cmd = &cmd->se_cmd; | ||
3353 | |||
3354 | cmd->sg_cnt = se_cmd->t_data_nents; | ||
3355 | cmd->sg = se_cmd->t_data_sg; | ||
3356 | |||
3357 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, | ||
3358 | "SRR cmd %p (se_cmd %p, tag %d, op %x), " | ||
3359 | "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag, | ||
3360 | se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset); | ||
3361 | |||
3362 | qlt_handle_srr(vha, sctio, imm); | ||
3363 | |||
3364 | kfree(imm); | ||
3365 | kfree(sctio); | ||
3366 | goto restart; | ||
3367 | } | ||
3368 | spin_unlock_irqrestore(&tgt->srr_lock, flags); | ||
3369 | } | ||
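qlt_handle_srr_work() cannot keep srr_lock held while it handles an SRR, so it unlinks one matching CTIO/IMM pair, drops the lock, does the work, and then jumps back to restart to rescan the list from the head instead of trusting a stale iterator. A minimal sketch of that drain-with-restart pattern, using a pthread mutex and a plain singly linked list in place of the driver's locking:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static struct node *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void process(struct node *n)          /* runs without the lock held */
{
    printf("processing srr id %d\n", n->id);
    free(n);
}

static void drain(void)
{
restart:
    pthread_mutex_lock(&lock);
    if (head) {
        struct node *n = head;
        head = n->next;                      /* unlink while still locked */
        pthread_mutex_unlock(&lock);
        process(n);                          /* may sleep, so the lock is dropped */
        goto restart;                        /* list may have changed: rescan */
    }
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    for (int i = 3; i > 0; i--) {
        struct node *n = malloc(sizeof(*n));
        n->id = i; n->next = head; head = n;
    }
    drain();
    return 0;
}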
3370 | |||
3371 | /* ha->hardware_lock supposed to be held on entry */ | ||
3372 | static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, | ||
3373 | struct imm_ntfy_from_isp *iocb) | ||
3374 | { | ||
3375 | struct qla_tgt_srr_imm *imm; | ||
3376 | struct qla_hw_data *ha = vha->hw; | ||
3377 | struct qla_tgt *tgt = ha->tgt.qla_tgt; | ||
3378 | struct qla_tgt_srr_ctio *sctio; | ||
3379 | |||
3380 | tgt->imm_srr_id++; | ||
3381 | |||
3382 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n", | ||
3383 | vha->vp_idx); | ||
3384 | |||
3385 | imm = kzalloc(sizeof(*imm), GFP_ATOMIC); | ||
3386 | if (imm != NULL) { | ||
3387 | memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy)); | ||
3388 | |||
3389 | /* IRQ is already OFF */ | ||
3390 | spin_lock(&tgt->srr_lock); | ||
3391 | imm->srr_id = tgt->imm_srr_id; | ||
3392 | list_add_tail(&imm->srr_list_entry, | ||
3393 | &tgt->srr_imm_list); | ||
3394 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e, | ||
3395 | "IMM NTFY SRR %p added (id %d, ui %x)\n", | ||
3396 | imm, imm->srr_id, iocb->u.isp24.srr_ui); | ||
3397 | if (tgt->imm_srr_id == tgt->ctio_srr_id) { | ||
3398 | int found = 0; | ||
3399 | list_for_each_entry(sctio, &tgt->srr_ctio_list, | ||
3400 | srr_list_entry) { | ||
3401 | if (sctio->srr_id == imm->srr_id) { | ||
3402 | found = 1; | ||
3403 | break; | ||
3404 | } | ||
3405 | } | ||
3406 | if (found) { | ||
3407 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s", | ||
3408 | "Scheduling srr work\n"); | ||
3409 | schedule_work(&tgt->srr_work); | ||
3410 | } else { | ||
3411 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030, | ||
3412 | "qla_target(%d): imm_srr_id " | ||
3413 | "== ctio_srr_id (%d), but there is no " | ||
3414 | "corresponding SRR CTIO, deleting IMM " | ||
3415 | "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id, | ||
3416 | imm); | ||
3417 | list_del(&imm->srr_list_entry); | ||
3418 | |||
3419 | kfree(imm); | ||
3420 | |||
3421 | spin_unlock(&tgt->srr_lock); | ||
3422 | goto out_reject; | ||
3423 | } | ||
3424 | } | ||
3425 | spin_unlock(&tgt->srr_lock); | ||
3426 | } else { | ||
3427 | struct qla_tgt_srr_ctio *ts; | ||
3428 | |||
3429 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069, | ||
3430 | "qla_target(%d): Unable to allocate SRR IMM " | ||
3431 | "entry, SRR request will be rejected\n", vha->vp_idx); | ||
3432 | |||
3433 | /* IRQ is already OFF */ | ||
3434 | spin_lock(&tgt->srr_lock); | ||
3435 | list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list, | ||
3436 | srr_list_entry) { | ||
3437 | if (sctio->srr_id == tgt->imm_srr_id) { | ||
3438 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031, | ||
3439 | "CTIO SRR %p deleted (id %d)\n", | ||
3440 | sctio, sctio->srr_id); | ||
3441 | list_del(&sctio->srr_list_entry); | ||
3442 | qlt_send_term_exchange(vha, sctio->cmd, | ||
3443 | &sctio->cmd->atio, 1); | ||
3444 | kfree(sctio); | ||
3445 | } | ||
3446 | } | ||
3447 | spin_unlock(&tgt->srr_lock); | ||
3448 | goto out_reject; | ||
3449 | } | ||
3450 | |||
3451 | return; | ||
3452 | |||
3453 | out_reject: | ||
3454 | qlt_send_notify_ack(vha, iocb, 0, 0, 0, | ||
3455 | NOTIFY_ACK_SRR_FLAGS_REJECT, | ||
3456 | NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, | ||
3457 | NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); | ||
3458 | } | ||
3459 | |||
3460 | /* | ||
3461 | * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire. | ||
3462 | */ | ||
3463 | static void qlt_handle_imm_notify(struct scsi_qla_host *vha, | ||
3464 | struct imm_ntfy_from_isp *iocb) | ||
3465 | { | ||
3466 | struct qla_hw_data *ha = vha->hw; | ||
3467 | uint32_t add_flags = 0; | ||
3468 | int send_notify_ack = 1; | ||
3469 | uint16_t status; | ||
3470 | |||
3471 | status = le16_to_cpu(iocb->u.isp2x.status); | ||
3472 | switch (status) { | ||
3473 | case IMM_NTFY_LIP_RESET: | ||
3474 | { | ||
3475 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032, | ||
3476 | "qla_target(%d): LIP reset (loop %#x), subcode %x\n", | ||
3477 | vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle), | ||
3478 | iocb->u.isp24.status_subcode); | ||
3479 | |||
3480 | if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) | ||
3481 | send_notify_ack = 0; | ||
3482 | break; | ||
3483 | } | ||
3484 | |||
3485 | case IMM_NTFY_LIP_LINK_REINIT: | ||
3486 | { | ||
3487 | struct qla_tgt *tgt = ha->tgt.qla_tgt; | ||
3488 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, | ||
3489 | "qla_target(%d): LINK REINIT (loop %#x, " | ||
3490 | "subcode %x)\n", vha->vp_idx, | ||
3491 | le16_to_cpu(iocb->u.isp24.nport_handle), | ||
3492 | iocb->u.isp24.status_subcode); | ||
3493 | if (tgt->link_reinit_iocb_pending) { | ||
3494 | qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, | ||
3495 | 0, 0, 0, 0, 0, 0); | ||
3496 | } | ||
3497 | memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb)); | ||
3498 | tgt->link_reinit_iocb_pending = 1; | ||
3499 | /* | ||
3500 | * QLogic requires waiting after LINK REINIT for possible | ||
3501 | * PDISC or ADISC ELS commands | ||
3502 | */ | ||
3503 | send_notify_ack = 0; | ||
3504 | break; | ||
3505 | } | ||
3506 | |||
3507 | case IMM_NTFY_PORT_LOGOUT: | ||
3508 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034, | ||
3509 | "qla_target(%d): Port logout (loop " | ||
3510 | "%#x, subcode %x)\n", vha->vp_idx, | ||
3511 | le16_to_cpu(iocb->u.isp24.nport_handle), | ||
3512 | iocb->u.isp24.status_subcode); | ||
3513 | |||
3514 | if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0) | ||
3515 | send_notify_ack = 0; | ||
3516 | /* The sessions will be cleared in the callback, if needed */ | ||
3517 | break; | ||
3518 | |||
3519 | case IMM_NTFY_GLBL_TPRLO: | ||
3520 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035, | ||
3521 | "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status); | ||
3522 | if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) | ||
3523 | send_notify_ack = 0; | ||
3524 | /* The sessions will be cleared in the callback, if needed */ | ||
3525 | break; | ||
3526 | |||
3527 | case IMM_NTFY_PORT_CONFIG: | ||
3528 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036, | ||
3529 | "qla_target(%d): Port config changed (%x)\n", vha->vp_idx, | ||
3530 | status); | ||
3531 | if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) | ||
3532 | send_notify_ack = 0; | ||
3533 | /* The sessions will be cleared in the callback, if needed */ | ||
3534 | break; | ||
3535 | |||
3536 | case IMM_NTFY_GLBL_LOGO: | ||
3537 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a, | ||
3538 | "qla_target(%d): Link failure detected\n", | ||
3539 | vha->vp_idx); | ||
3540 | /* I_T nexus loss */ | ||
3541 | if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) | ||
3542 | send_notify_ack = 0; | ||
3543 | break; | ||
3544 | |||
3545 | case IMM_NTFY_IOCB_OVERFLOW: | ||
3546 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b, | ||
3547 | "qla_target(%d): Cannot provide requested " | ||
3548 | "capability (IOCB overflowed the immediate notify " | ||
3549 | "resource count)\n", vha->vp_idx); | ||
3550 | break; | ||
3551 | |||
3552 | case IMM_NTFY_ABORT_TASK: | ||
3553 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037, | ||
3554 | "qla_target(%d): Abort Task (S %08x I %#x -> " | ||
3555 | "L %#x)\n", vha->vp_idx, | ||
3556 | le16_to_cpu(iocb->u.isp2x.seq_id), | ||
3557 | GET_TARGET_ID(ha, (struct atio_from_isp *)iocb), | ||
3558 | le16_to_cpu(iocb->u.isp2x.lun)); | ||
3559 | if (qlt_abort_task(vha, iocb) == 0) | ||
3560 | send_notify_ack = 0; | ||
3561 | break; | ||
3562 | |||
3563 | case IMM_NTFY_RESOURCE: | ||
3564 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c, | ||
3565 | "qla_target(%d): Out of resources, host %ld\n", | ||
3566 | vha->vp_idx, vha->host_no); | ||
3567 | break; | ||
3568 | |||
3569 | case IMM_NTFY_MSG_RX: | ||
3570 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038, | ||
3571 | "qla_target(%d): Immediate notify task %x\n", | ||
3572 | vha->vp_idx, iocb->u.isp2x.task_flags); | ||
3573 | if (qlt_handle_task_mgmt(vha, iocb) == 0) | ||
3574 | send_notify_ack = 0; | ||
3575 | break; | ||
3576 | |||
3577 | case IMM_NTFY_ELS: | ||
3578 | if (qlt_24xx_handle_els(vha, iocb) == 0) | ||
3579 | send_notify_ack = 0; | ||
3580 | break; | ||
3581 | |||
3582 | case IMM_NTFY_SRR: | ||
3583 | qlt_prepare_srr_imm(vha, iocb); | ||
3584 | send_notify_ack = 0; | ||
3585 | break; | ||
3586 | |||
3587 | default: | ||
3588 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d, | ||
3589 | "qla_target(%d): Received unknown immediate " | ||
3590 | "notify status %x\n", vha->vp_idx, status); | ||
3591 | break; | ||
3592 | } | ||
3593 | |||
3594 | if (send_notify_ack) | ||
3595 | qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0); | ||
3596 | } | ||
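qlt_handle_imm_notify() acknowledges by default: send_notify_ack starts at 1 and is cleared only when a case handler reports that it (or a deferred path such as the SRR work item) owns the acknowledgement. A small sketch of that default-ack pattern with invented notify codes:

#include <stdio.h>

enum ntfy { NTFY_ELS, NTFY_SRR, NTFY_RESOURCE };

static int handle_els(void) { return 0; }   /* 0: handler will ack on its own */

/* Acknowledge by default, unless a handler signals that the ack is sent
 * (or deferred) elsewhere.  Names and codes here are illustrative. */
static void handle_notify(enum ntfy code)
{
    int send_ack = 1;

    switch (code) {
    case NTFY_ELS:
        if (handle_els() == 0)
            send_ack = 0;
        break;
    case NTFY_SRR:
        send_ack = 0;          /* the SRR work item owns the ack/reject */
        break;
    default:
        break;                 /* informational: just ack */
    }

    if (send_ack)
        printf("sending NOTIFY ACK for code %d\n", code);
}

int main(void)
{
    handle_notify(NTFY_RESOURCE);
    handle_notify(NTFY_SRR);
    return 0;
}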
3597 | |||
3598 | /* | ||
3599 | * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire. | ||
3600 | * This function sends busy to ISP 2xxx or 24xx. | ||
3601 | */ | ||
3602 | static void qlt_send_busy(struct scsi_qla_host *vha, | ||
3603 | struct atio_from_isp *atio, uint16_t status) | ||
3604 | { | ||
3605 | struct ctio7_to_24xx *ctio24; | ||
3606 | struct qla_hw_data *ha = vha->hw; | ||
3607 | request_t *pkt; | ||
3608 | struct qla_tgt_sess *sess = NULL; | ||
3609 | |||
3610 | sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, | ||
3611 | atio->u.isp24.fcp_hdr.s_id); | ||
3612 | if (!sess) { | ||
3613 | qlt_send_term_exchange(vha, NULL, atio, 1); | ||
3614 | return; | ||
3615 | } | ||
3616 | /* Sending marker isn't necessary, since we called from ISR */ | ||
3617 | |||
3618 | pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); | ||
3619 | if (!pkt) { | ||
3620 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e, | ||
3621 | "qla_target(%d): %s failed: unable to allocate " | ||
3622 | "request packet", vha->vp_idx, __func__); | ||
3623 | return; | ||
3624 | } | ||
3625 | |||
3626 | pkt->entry_count = 1; | ||
3627 | pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; | ||
3628 | |||
3629 | ctio24 = (struct ctio7_to_24xx *)pkt; | ||
3630 | ctio24->entry_type = CTIO_TYPE7; | ||
3631 | ctio24->nport_handle = sess->loop_id; | ||
3632 | ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); | ||
3633 | ctio24->vp_index = vha->vp_idx; | ||
3634 | ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; | ||
3635 | ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; | ||
3636 | ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; | ||
3637 | ctio24->exchange_addr = atio->u.isp24.exchange_addr; | ||
3638 | ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | | ||
3639 | __constant_cpu_to_le16( | ||
3640 | CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS | | ||
3641 | CTIO7_FLAGS_DONT_RET_CTIO); | ||
3642 | /* | ||
3643 | * CTIO from fw w/o se_cmd doesn't provide enough info to retry it, | ||
3644 | * if explicit confirmation is used. | ||
3645 | */ | ||
3646 | ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); | ||
3647 | ctio24->u.status1.scsi_status = cpu_to_le16(status); | ||
3648 | ctio24->u.status1.residual = get_unaligned((uint32_t *) | ||
3649 | &atio->u.isp24.fcp_cmnd.add_cdb[ | ||
3650 | atio->u.isp24.fcp_cmnd.add_cdb_len]); | ||
3651 | if (ctio24->u.status1.residual != 0) | ||
3652 | ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; | ||
3653 | |||
3654 | qla2x00_start_iocbs(vha, vha->req); | ||
3655 | } | ||
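Because a busy response moves no data, the whole FCP_DL the initiator asked for becomes the residual, and the under-run bit must accompany any non-zero residual, which is what the last statements above set on the CTIO. A standalone restatement of that rule (the flag value is illustrative, not the driver's definition):

#include <stdint.h>
#include <stdio.h>

#define SS_RESIDUAL_UNDER 0x0800u   /* illustrative value of the residual-under flag */

/* For a BUSY/QUEUE FULL response no data moves, so the residual is the whole
 * FCP_DL and the under-run flag must accompany a non-zero residual. */
static uint16_t busy_scsi_status(uint16_t sam_status, uint32_t fcp_dl, uint32_t *residual)
{
    *residual = fcp_dl;
    return fcp_dl ? (uint16_t)(sam_status | SS_RESIDUAL_UNDER) : sam_status;
}

int main(void)
{
    uint32_t resid;
    uint16_t st = busy_scsi_status(0x28 /* TASK SET FULL */, 4096, &resid);
    printf("status=%#x residual=%u\n", st, resid);
    return 0;
}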
3656 | |||
3657 | /* ha->hardware_lock supposed to be held on entry */ | ||
3658 | /* called via callback from qla2xxx */ | ||
3659 | static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, | ||
3660 | struct atio_from_isp *atio) | ||
3661 | { | ||
3662 | struct qla_hw_data *ha = vha->hw; | ||
3663 | struct qla_tgt *tgt = ha->tgt.qla_tgt; | ||
3664 | int rc; | ||
3665 | |||
3666 | if (unlikely(tgt == NULL)) { | ||
3667 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039, | ||
3668 | "ATIO pkt, but no tgt (ha %p)", ha); | ||
3669 | return; | ||
3670 | } | ||
3671 | ql_dbg(ql_dbg_tgt, vha, 0xe02c, | ||
3672 | "qla_target(%d): ATIO pkt %p: type %02x count %02x", | ||
3673 | vha->vp_idx, atio, atio->u.raw.entry_type, | ||
3674 | atio->u.raw.entry_count); | ||
3675 | /* | ||
3676 | * In tgt_stop mode we should also allow all requests to pass. | ||
3677 | * Otherwise, some commands can get stuck. | ||
3678 | */ | ||
3679 | |||
3680 | tgt->irq_cmd_count++; | ||
3681 | |||
3682 | switch (atio->u.raw.entry_type) { | ||
3683 | case ATIO_TYPE7: | ||
3684 | ql_dbg(ql_dbg_tgt, vha, 0xe02d, | ||
3685 | "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, " | ||
3686 | "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n", | ||
3687 | vha->vp_idx, atio->u.isp24.fcp_cmnd.lun, | ||
3688 | atio->u.isp24.fcp_cmnd.rddata, | ||
3689 | atio->u.isp24.fcp_cmnd.wrdata, | ||
3690 | atio->u.isp24.fcp_cmnd.add_cdb_len, | ||
3691 | be32_to_cpu(get_unaligned((uint32_t *) | ||
3692 | &atio->u.isp24.fcp_cmnd.add_cdb[ | ||
3693 | atio->u.isp24.fcp_cmnd.add_cdb_len])), | ||
3694 | atio->u.isp24.fcp_hdr.s_id[0], | ||
3695 | atio->u.isp24.fcp_hdr.s_id[1], | ||
3696 | atio->u.isp24.fcp_hdr.s_id[2]); | ||
3697 | |||
3698 | if (unlikely(atio->u.isp24.exchange_addr == | ||
3699 | ATIO_EXCHANGE_ADDRESS_UNKNOWN)) { | ||
3700 | ql_dbg(ql_dbg_tgt, vha, 0xe058, | ||
3701 | "qla_target(%d): ATIO_TYPE7 " | ||
3702 | "received with UNKNOWN exchange address, " | ||
3703 | "sending QUEUE_FULL\n", vha->vp_idx); | ||
3704 | qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL); | ||
3705 | break; | ||
3706 | } | ||
3707 | if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) | ||
3708 | rc = qlt_handle_cmd_for_atio(vha, atio); | ||
3709 | else | ||
3710 | rc = qlt_handle_task_mgmt(vha, atio); | ||
3711 | if (unlikely(rc != 0)) { | ||
3712 | if (rc == -ESRCH) { | ||
3713 | #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ | ||
3714 | qlt_send_busy(vha, atio, SAM_STAT_BUSY); | ||
3715 | #else | ||
3716 | qlt_send_term_exchange(vha, NULL, atio, 1); | ||
3717 | #endif | ||
3718 | } else { | ||
3719 | if (tgt->tgt_stop) { | ||
3720 | ql_dbg(ql_dbg_tgt, vha, 0xe059, | ||
3721 | "qla_target: Unable to send " | ||
3722 | "command to target for req, " | ||
3723 | "ignoring.\n"); | ||
3724 | } else { | ||
3725 | ql_dbg(ql_dbg_tgt, vha, 0xe05a, | ||
3726 | "qla_target(%d): Unable to send " | ||
3727 | "command to target, sending BUSY " | ||
3728 | "status.\n", vha->vp_idx); | ||
3729 | qlt_send_busy(vha, atio, SAM_STAT_BUSY); | ||
3730 | } | ||
3731 | } | ||
3732 | } | ||
3733 | break; | ||
3734 | |||
3735 | case IMMED_NOTIFY_TYPE: | ||
3736 | { | ||
3737 | if (unlikely(atio->u.isp2x.entry_status != 0)) { | ||
3738 | ql_dbg(ql_dbg_tgt, vha, 0xe05b, | ||
3739 | "qla_target(%d): Received ATIO packet %x " | ||
3740 | "with error status %x\n", vha->vp_idx, | ||
3741 | atio->u.raw.entry_type, | ||
3742 | atio->u.isp2x.entry_status); | ||
3743 | break; | ||
3744 | } | ||
3745 | ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); | ||
3746 | qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); | ||
3747 | break; | ||
3748 | } | ||
3749 | |||
3750 | default: | ||
3751 | ql_dbg(ql_dbg_tgt, vha, 0xe05c, | ||
3752 | "qla_target(%d): Received unknown ATIO atio " | ||
3753 | "type %x\n", vha->vp_idx, atio->u.raw.entry_type); | ||
3754 | break; | ||
3755 | } | ||
3756 | |||
3757 | tgt->irq_cmd_count--; | ||
3758 | } | ||
3759 | |||
3760 | /* ha->hardware_lock supposed to be held on entry */ | ||
3761 | /* called via callback from qla2xxx */ | ||
3762 | static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) | ||
3763 | { | ||
3764 | struct qla_hw_data *ha = vha->hw; | ||
3765 | struct qla_tgt *tgt = ha->tgt.qla_tgt; | ||
3766 | |||
3767 | if (unlikely(tgt == NULL)) { | ||
3768 | ql_dbg(ql_dbg_tgt, vha, 0xe05d, | ||
3769 | "qla_target(%d): Response pkt %x received, but no " | ||
3770 | "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha); | ||
3771 | return; | ||
3772 | } | ||
3773 | |||
3774 | ql_dbg(ql_dbg_tgt, vha, 0xe02f, | ||
3775 | "qla_target(%d): response pkt %p: T %02x C %02x S %02x " | ||
3776 | "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type, | ||
3777 | pkt->entry_count, pkt->entry_status, pkt->handle); | ||
3778 | |||
3779 | /* | ||
3780 | * In tgt_stop mode we should also allow all requests to pass. | ||
3781 | * Otherwise, some commands can get stuck. | ||
3782 | */ | ||
3783 | |||
3784 | tgt->irq_cmd_count++; | ||
3785 | |||
3786 | switch (pkt->entry_type) { | ||
3787 | case CTIO_TYPE7: | ||
3788 | { | ||
3789 | struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; | ||
3790 | ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n", | ||
3791 | vha->vp_idx); | ||
3792 | qlt_do_ctio_completion(vha, entry->handle, | ||
3793 | le16_to_cpu(entry->status)|(pkt->entry_status << 16), | ||
3794 | entry); | ||
3795 | break; | ||
3796 | } | ||
3797 | |||
3798 | case ACCEPT_TGT_IO_TYPE: | ||
3799 | { | ||
3800 | struct atio_from_isp *atio = (struct atio_from_isp *)pkt; | ||
3801 | int rc; | ||
3802 | ql_dbg(ql_dbg_tgt, vha, 0xe031, | ||
3803 | "ACCEPT_TGT_IO instance %d status %04x " | ||
3804 | "lun %04x read/write %d data_length %04x " | ||
3805 | "target_id %02x rx_id %04x\n ", vha->vp_idx, | ||
3806 | le16_to_cpu(atio->u.isp2x.status), | ||
3807 | le16_to_cpu(atio->u.isp2x.lun), | ||
3808 | atio->u.isp2x.execution_codes, | ||
3809 | le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha, | ||
3810 | atio), atio->u.isp2x.rx_id); | ||
3811 | if (atio->u.isp2x.status != | ||
3812 | __constant_cpu_to_le16(ATIO_CDB_VALID)) { | ||
3813 | ql_dbg(ql_dbg_tgt, vha, 0xe05e, | ||
3814 | "qla_target(%d): ATIO with error " | ||
3815 | "status %x received\n", vha->vp_idx, | ||
3816 | le16_to_cpu(atio->u.isp2x.status)); | ||
3817 | break; | ||
3818 | } | ||
3819 | ql_dbg(ql_dbg_tgt, vha, 0xe032, | ||
3820 | "FCP CDB: 0x%02x, sizeof(cdb): %lu", | ||
3821 | atio->u.isp2x.cdb[0], (unsigned long | ||
3822 | int)sizeof(atio->u.isp2x.cdb)); | ||
3823 | |||
3824 | rc = qlt_handle_cmd_for_atio(vha, atio); | ||
3825 | if (unlikely(rc != 0)) { | ||
3826 | if (rc == -ESRCH) { | ||
3827 | #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ | ||
3828 | qlt_send_busy(vha, atio, 0); | ||
3829 | #else | ||
3830 | qlt_send_term_exchange(vha, NULL, atio, 1); | ||
3831 | #endif | ||
3832 | } else { | ||
3833 | if (tgt->tgt_stop) { | ||
3834 | ql_dbg(ql_dbg_tgt, vha, 0xe05f, | ||
3835 | "qla_target: Unable to send " | ||
3836 | "command to target, sending TERM " | ||
3837 | "EXCHANGE for rsp\n"); | ||
3838 | qlt_send_term_exchange(vha, NULL, | ||
3839 | atio, 1); | ||
3840 | } else { | ||
3841 | ql_dbg(ql_dbg_tgt, vha, 0xe060, | ||
3842 | "qla_target(%d): Unable to send " | ||
3843 | "command to target, sending BUSY " | ||
3844 | "status\n", vha->vp_idx); | ||
3845 | qlt_send_busy(vha, atio, 0); | ||
3846 | } | ||
3847 | } | ||
3848 | } | ||
3849 | } | ||
3850 | break; | ||
3851 | |||
3852 | case CONTINUE_TGT_IO_TYPE: | ||
3853 | { | ||
3854 | struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; | ||
3855 | ql_dbg(ql_dbg_tgt, vha, 0xe033, | ||
3856 | "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx); | ||
3857 | qlt_do_ctio_completion(vha, entry->handle, | ||
3858 | le16_to_cpu(entry->status)|(pkt->entry_status << 16), | ||
3859 | entry); | ||
3860 | break; | ||
3861 | } | ||
3862 | |||
3863 | case CTIO_A64_TYPE: | ||
3864 | { | ||
3865 | struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; | ||
3866 | ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n", | ||
3867 | vha->vp_idx); | ||
3868 | qlt_do_ctio_completion(vha, entry->handle, | ||
3869 | le16_to_cpu(entry->status)|(pkt->entry_status << 16), | ||
3870 | entry); | ||
3871 | break; | ||
3872 | } | ||
3873 | |||
3874 | case IMMED_NOTIFY_TYPE: | ||
3875 | ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n"); | ||
3876 | qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); | ||
3877 | break; | ||
3878 | |||
3879 | case NOTIFY_ACK_TYPE: | ||
3880 | if (tgt->notify_ack_expected > 0) { | ||
3881 | struct nack_to_isp *entry = (struct nack_to_isp *)pkt; | ||
3882 | ql_dbg(ql_dbg_tgt, vha, 0xe036, | ||
3883 | "NOTIFY_ACK seq %08x status %x\n", | ||
3884 | le16_to_cpu(entry->u.isp2x.seq_id), | ||
3885 | le16_to_cpu(entry->u.isp2x.status)); | ||
3886 | tgt->notify_ack_expected--; | ||
3887 | if (entry->u.isp2x.status != | ||
3888 | __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) { | ||
3889 | ql_dbg(ql_dbg_tgt, vha, 0xe061, | ||
3890 | "qla_target(%d): NOTIFY_ACK " | ||
3891 | "failed %x\n", vha->vp_idx, | ||
3892 | le16_to_cpu(entry->u.isp2x.status)); | ||
3893 | } | ||
3894 | } else { | ||
3895 | ql_dbg(ql_dbg_tgt, vha, 0xe062, | ||
3896 | "qla_target(%d): Unexpected NOTIFY_ACK received\n", | ||
3897 | vha->vp_idx); | ||
3898 | } | ||
3899 | break; | ||
3900 | |||
3901 | case ABTS_RECV_24XX: | ||
3902 | ql_dbg(ql_dbg_tgt, vha, 0xe037, | ||
3903 | "ABTS_RECV_24XX: instance %d\n", vha->vp_idx); | ||
3904 | qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt); | ||
3905 | break; | ||
3906 | |||
3907 | case ABTS_RESP_24XX: | ||
3908 | if (tgt->abts_resp_expected > 0) { | ||
3909 | struct abts_resp_from_24xx_fw *entry = | ||
3910 | (struct abts_resp_from_24xx_fw *)pkt; | ||
3911 | ql_dbg(ql_dbg_tgt, vha, 0xe038, | ||
3912 | "ABTS_RESP_24XX: compl_status %x\n", | ||
3913 | entry->compl_status); | ||
3914 | tgt->abts_resp_expected--; | ||
3915 | if (le16_to_cpu(entry->compl_status) != | ||
3916 | ABTS_RESP_COMPL_SUCCESS) { | ||
3917 | if ((entry->error_subcode1 == 0x1E) && | ||
3918 | (entry->error_subcode2 == 0)) { | ||
3919 | /* | ||
3920 | * We've got a race here: aborted | ||
3921 | * exchange not terminated, i.e. | ||
3923 | * response for the aborted command was | ||
3924 | * sent between the time the abort request | ||
3925 | * was received and the time it was processed. | ||
3925 | * Unfortunately, the firmware has a | ||
3926 | * silly requirement that all aborted | ||
3927 | * exchanges must be explicitly | ||
3928 | * terminated, otherwise it refuses to | ||
3929 | * send responses for the abort | ||
3930 | * requests. So, we have to | ||
3931 | * (re)terminate the exchange and retry | ||
3932 | * the abort response. | ||
3933 | */ | ||
3934 | qlt_24xx_retry_term_exchange(vha, | ||
3935 | entry); | ||
3936 | } else | ||
3937 | ql_dbg(ql_dbg_tgt, vha, 0xe063, | ||
3938 | "qla_target(%d): ABTS_RESP_24XX " | ||
3939 | "failed %x (subcode %x:%x)", | ||
3940 | vha->vp_idx, entry->compl_status, | ||
3941 | entry->error_subcode1, | ||
3942 | entry->error_subcode2); | ||
3943 | } | ||
3944 | } else { | ||
3945 | ql_dbg(ql_dbg_tgt, vha, 0xe064, | ||
3946 | "qla_target(%d): Unexpected ABTS_RESP_24XX " | ||
3947 | "received\n", vha->vp_idx); | ||
3948 | } | ||
3949 | break; | ||
3950 | |||
3951 | default: | ||
3952 | ql_dbg(ql_dbg_tgt, vha, 0xe065, | ||
3953 | "qla_target(%d): Received unknown response pkt " | ||
3954 | "type %x\n", vha->vp_idx, pkt->entry_type); | ||
3955 | break; | ||
3956 | } | ||
3957 | |||
3958 | tgt->irq_cmd_count--; | ||
3959 | } | ||
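The ABTS response handling above encodes one retry rule: a failed completion whose subcodes read 0x1E/0 means the aborted exchange was never terminated (it raced with a normal response), so the exchange must be terminated again and the ABTS response retried; any other failure is only logged. A tiny sketch of that decision:

#include <stdio.h>

enum abts_next { ABTS_DONE, ABTS_RETERMINATE_AND_RETRY, ABTS_LOG_FAILURE };

/* Mirrors the completion handling above: subcodes 0x1E/0 mean "exchange not
 * terminated", so terminate it again before retrying the ABTS response. */
static enum abts_next abts_resp_next(int compl_ok, unsigned sub1, unsigned sub2)
{
    if (compl_ok)
        return ABTS_DONE;
    if (sub1 == 0x1E && sub2 == 0)
        return ABTS_RETERMINATE_AND_RETRY;
    return ABTS_LOG_FAILURE;
}

int main(void)
{
    printf("%d\n", abts_resp_next(0, 0x1E, 0) == ABTS_RETERMINATE_AND_RETRY); /* 1 */
    printf("%d\n", abts_resp_next(0, 0x05, 0) == ABTS_LOG_FAILURE);           /* 1 */
    return 0;
}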
3960 | |||
3961 | /* | ||
3962 | * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire. | ||
3963 | */ | ||
3964 | void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, | ||
3965 | uint16_t *mailbox) | ||
3966 | { | ||
3967 | struct qla_hw_data *ha = vha->hw; | ||
3968 | struct qla_tgt *tgt = ha->tgt.qla_tgt; | ||
3969 | int login_code; | ||
3970 | |||
3971 | ql_dbg(ql_dbg_tgt, vha, 0xe039, | ||
3972 | "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n", | ||
3973 | vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done, | ||
3974 | ha->operating_mode, ha->current_topology); | ||
3975 | |||
3976 | if (!ha->tgt.tgt_ops) | ||
3977 | return; | ||
3978 | |||
3979 | if (unlikely(tgt == NULL)) { | ||
3980 | ql_dbg(ql_dbg_tgt, vha, 0xe03a, | ||
3981 | "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha); | ||
3982 | return; | ||
3983 | } | ||
3984 | |||
3985 | if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) && | ||
3986 | IS_QLA2100(ha)) | ||
3987 | return; | ||
3988 | /* | ||
3989 | * In tgt_stop mode we should also allow all requests to pass. | ||
3990 | * Otherwise, some commands can get stuck. | ||
3991 | */ | ||
3992 | |||
3993 | tgt->irq_cmd_count++; | ||
3994 | |||
3995 | switch (code) { | ||
3996 | case MBA_RESET: /* Reset */ | ||
3997 | case MBA_SYSTEM_ERR: /* System Error */ | ||
3998 | case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ | ||
3999 | case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ | ||
4000 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a, | ||
4001 | "qla_target(%d): System error async event %#x " | ||
4002 | "occurred", vha->vp_idx, code); | ||
4003 | break; | ||
4004 | case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */ | ||
4005 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | ||
4006 | break; | ||
4007 | |||
4008 | case MBA_LOOP_UP: | ||
4009 | { | ||
4010 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b, | ||
4011 | "qla_target(%d): Async LOOP_UP occurred " | ||
4012 | "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, | ||
4013 | le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), | ||
4014 | le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); | ||
4015 | if (tgt->link_reinit_iocb_pending) { | ||
4016 | qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb, | ||
4017 | 0, 0, 0, 0, 0, 0); | ||
4018 | tgt->link_reinit_iocb_pending = 0; | ||
4019 | } | ||
4020 | break; | ||
4021 | } | ||
4022 | |||
4023 | case MBA_LIP_OCCURRED: | ||
4024 | case MBA_LOOP_DOWN: | ||
4025 | case MBA_LIP_RESET: | ||
4026 | case MBA_RSCN_UPDATE: | ||
4027 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c, | ||
4028 | "qla_target(%d): Async event %#x occurred " | ||
4029 | "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code, | ||
4030 | le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), | ||
4031 | le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); | ||
4032 | break; | ||
4033 | |||
4034 | case MBA_PORT_UPDATE: | ||
4035 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, | ||
4036 | "qla_target(%d): Port update async event %#x " | ||
4037 | "occurred: updating the ports database (m[0]=%x, m[1]=%x, " | ||
4038 | "m[2]=%x, m[3]=%x)", vha->vp_idx, code, | ||
4039 | le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), | ||
4040 | le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); | ||
4041 | |||
4042 | login_code = le16_to_cpu(mailbox[2]); | ||
4043 | if (login_code == 0x4) | ||
4044 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, | ||
4045 | "Async MB 2: Got PLOGI Complete\n"); | ||
4046 | else if (login_code == 0x7) | ||
4047 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, | ||
4048 | "Async MB 2: Port Logged Out\n"); | ||
4049 | break; | ||
4050 | |||
4051 | default: | ||
4052 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040, | ||
4053 | "qla_target(%d): Async event %#x occurred: " | ||
4054 | "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, | ||
4055 | code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), | ||
4056 | le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); | ||
4057 | break; | ||
4058 | } | ||
4059 | |||
4060 | tgt->irq_cmd_count--; | ||
4061 | } | ||
4062 | |||
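Both IRQ-path entry points above bracket their dispatch with tgt->irq_cmd_count++/-- under hardware_lock, so the stop path can wait for the counter to drain before tearing the target down. A minimal standalone sketch of that drain pattern, with illustrative names only (the driver sleeps and retries rather than busy-spinning):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int irq_cmd_count;                /* in-flight IRQ-path dispatches */

static void irq_path_dispatch(void)
{
        atomic_fetch_add(&irq_cmd_count, 1);    /* entering the handler */
        /* ... process the response packet or async event ... */
        atomic_fetch_sub(&irq_cmd_count, 1);    /* leaving the handler */
}

static void stop_path_wait(void)
{
        /* Teardown proceeds only once no handler is inside the window. */
        while (atomic_load(&irq_cmd_count) != 0)
                ;                               /* placeholder for a sleep/retry loop */
}

int main(void)
{
        irq_path_dispatch();
        stop_path_wait();
        puts("drained");
        return 0;
}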
4063 | static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, | ||
4064 | uint16_t loop_id) | ||
4065 | { | ||
4066 | fc_port_t *fcport; | ||
4067 | int rc; | ||
4068 | |||
4069 | fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); | ||
4070 | if (!fcport) { | ||
4071 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, | ||
4072 | "qla_target(%d): Allocation of tmp FC port failed", | ||
4073 | vha->vp_idx); | ||
4074 | return NULL; | ||
4075 | } | ||
4076 | |||
4077 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id); | ||
4078 | |||
4079 | fcport->loop_id = loop_id; | ||
4080 | |||
4081 | rc = qla2x00_get_port_database(vha, fcport, 0); | ||
4082 | if (rc != QLA_SUCCESS) { | ||
4083 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, | ||
4084 | "qla_target(%d): Failed to retrieve fcport " | ||
4085 | "information -- get_port_database() returned %x " | ||
4086 | "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); | ||
4087 | kfree(fcport); | ||
4088 | return NULL; | ||
4089 | } | ||
4090 | |||
4091 | return fcport; | ||
4092 | } | ||
4093 | |||
4094 | /* Must be called under tgt_mutex */ | ||
4095 | static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, | ||
4096 | uint8_t *s_id) | ||
4097 | { | ||
4098 | struct qla_hw_data *ha = vha->hw; | ||
4099 | struct qla_tgt_sess *sess = NULL; | ||
4100 | fc_port_t *fcport = NULL; | ||
4101 | int rc, global_resets; | ||
4102 | uint16_t loop_id = 0; | ||
4103 | |||
4104 | retry: | ||
4105 | global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count); | ||
4106 | |||
4107 | rc = qla24xx_get_loop_id(vha, s_id, &loop_id); | ||
4108 | if (rc != 0) { | ||
4109 | if ((s_id[0] == 0xFF) && | ||
4110 | (s_id[1] == 0xFC)) { | ||
4111 | /* | ||
4112 | * This is the Domain Controller, so it should be | ||
4113 | * OK to drop SCSI commands from it. | ||
4114 | */ | ||
4115 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, | ||
4116 | "Unable to find initiator with S_ID %x:%x:%x", | ||
4117 | s_id[0], s_id[1], s_id[2]); | ||
4118 | } else | ||
4119 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071, | ||
4120 | "qla_target(%d): Unable to find " | ||
4121 | "initiator with S_ID %x:%x:%x", | ||
4122 | vha->vp_idx, s_id[0], s_id[1], | ||
4123 | s_id[2]); | ||
4124 | return NULL; | ||
4125 | } | ||
4126 | |||
4127 | fcport = qlt_get_port_database(vha, loop_id); | ||
4128 | if (!fcport) | ||
4129 | return NULL; | ||
4130 | |||
4131 | if (global_resets != | ||
4132 | atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) { | ||
4133 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, | ||
4134 | "qla_target(%d): global reset during session discovery " | ||
4135 | "(counter was %d, new %d), retrying", vha->vp_idx, | ||
4136 | global_resets, | ||
4137 | atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)); | ||
4138 | goto retry; | ||
4139 | } | ||
4140 | |||
4141 | sess = qlt_create_sess(vha, fcport, true); | ||
4142 | |||
4143 | kfree(fcport); | ||
4144 | return sess; | ||
4145 | } | ||
4146 | |||
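The retry label above implements a small optimistic-concurrency loop: snapshot tgt_global_resets_count, perform the blocking port-database lookup with the hardware lock dropped, and redo the whole lookup if the counter moved in the meantime. The same pattern reduced to a standalone sketch (all names below are illustrative, not driver code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int global_resets;        /* bumped whenever the target is reset */

static int slow_lookup(int key)         /* stands in for the port-database query */
{
        return key * 2;                 /* pretend result */
}

static int lookup_with_retry(int key)
{
        int snapshot, result;

        do {
                snapshot = atomic_load(&global_resets);  /* remember the generation */
                result = slow_lookup(key);               /* may block; resets can land here */
        } while (snapshot != atomic_load(&global_resets)); /* stale? redo the lookup */

        return result;
}

int main(void)
{
        printf("%d\n", lookup_with_retry(21));           /* prints 42 */
        return 0;
}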
4147 | static void qlt_abort_work(struct qla_tgt *tgt, | ||
4148 | struct qla_tgt_sess_work_param *prm) | ||
4149 | { | ||
4150 | struct scsi_qla_host *vha = tgt->vha; | ||
4151 | struct qla_hw_data *ha = vha->hw; | ||
4152 | struct qla_tgt_sess *sess = NULL; | ||
4153 | unsigned long flags; | ||
4154 | uint32_t be_s_id; | ||
4155 | uint8_t s_id[3]; | ||
4156 | int rc; | ||
4157 | |||
4158 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
4159 | |||
4160 | if (tgt->tgt_stop) | ||
4161 | goto out_term; | ||
4162 | |||
4163 | s_id[0] = prm->abts.fcp_hdr_le.s_id[2]; | ||
4164 | s_id[1] = prm->abts.fcp_hdr_le.s_id[1]; | ||
4165 | s_id[2] = prm->abts.fcp_hdr_le.s_id[0]; | ||
4166 | |||
4167 | sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, | ||
4168 | (unsigned char *)&be_s_id); | ||
4169 | if (!sess) { | ||
4170 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
4171 | |||
4172 | mutex_lock(&ha->tgt.tgt_mutex); | ||
4173 | sess = qlt_make_local_sess(vha, s_id); | ||
4174 | /* sess has got an extra creation ref */ | ||
4175 | mutex_unlock(&ha->tgt.tgt_mutex); | ||
4176 | |||
4177 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
4178 | if (!sess) | ||
4179 | goto out_term; | ||
4180 | } else { | ||
4181 | kref_get(&sess->se_sess->sess_kref); | ||
4182 | } | ||
4183 | |||
4184 | if (tgt->tgt_stop) | ||
4185 | goto out_term; | ||
4186 | |||
4187 | rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); | ||
4188 | if (rc != 0) | ||
4189 | goto out_term; | ||
4190 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
4191 | |||
4192 | ha->tgt.tgt_ops->put_sess(sess); | ||
4193 | return; | ||
4194 | |||
4195 | out_term: | ||
4196 | qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); | ||
4197 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
4198 | if (sess) | ||
4199 | ha->tgt.tgt_ops->put_sess(sess); | ||
4200 | } | ||
4201 | |||
4202 | static void qlt_tmr_work(struct qla_tgt *tgt, | ||
4203 | struct qla_tgt_sess_work_param *prm) | ||
4204 | { | ||
4205 | struct atio_from_isp *a = &prm->tm_iocb2; | ||
4206 | struct scsi_qla_host *vha = tgt->vha; | ||
4207 | struct qla_hw_data *ha = vha->hw; | ||
4208 | struct qla_tgt_sess *sess = NULL; | ||
4209 | unsigned long flags; | ||
4210 | uint8_t *s_id = NULL; /* to hide compiler warnings */ | ||
4211 | int rc; | ||
4212 | uint32_t lun, unpacked_lun; | ||
4213 | int lun_size, fn; | ||
4214 | void *iocb; | ||
4215 | |||
4216 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
4217 | |||
4218 | if (tgt->tgt_stop) | ||
4219 | goto out_term; | ||
4220 | |||
4221 | s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id; | ||
4222 | sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); | ||
4223 | if (!sess) { | ||
4224 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
4225 | |||
4226 | mutex_lock(&ha->tgt.tgt_mutex); | ||
4227 | sess = qlt_make_local_sess(vha, s_id); | ||
4228 | /* sess has got an extra creation ref */ | ||
4229 | mutex_unlock(&ha->tgt.tgt_mutex); | ||
4230 | |||
4231 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
4232 | if (!sess) | ||
4233 | goto out_term; | ||
4234 | } else { | ||
4235 | kref_get(&sess->se_sess->sess_kref); | ||
4236 | } | ||
4237 | |||
4238 | iocb = a; | ||
4239 | lun = a->u.isp24.fcp_cmnd.lun; | ||
4240 | lun_size = sizeof(lun); | ||
4241 | fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; | ||
4242 | unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); | ||
4243 | |||
4244 | rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); | ||
4245 | if (rc != 0) | ||
4246 | goto out_term; | ||
4247 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
4248 | |||
4249 | ha->tgt.tgt_ops->put_sess(sess); | ||
4250 | return; | ||
4251 | |||
4252 | out_term: | ||
4253 | qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1); | ||
4254 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
4255 | if (sess) | ||
4256 | ha->tgt.tgt_ops->put_sess(sess); | ||
4257 | } | ||
4258 | |||
4259 | static void qlt_sess_work_fn(struct work_struct *work) | ||
4260 | { | ||
4261 | struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work); | ||
4262 | struct scsi_qla_host *vha = tgt->vha; | ||
4263 | unsigned long flags; | ||
4264 | |||
4265 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt); | ||
4266 | |||
4267 | spin_lock_irqsave(&tgt->sess_work_lock, flags); | ||
4268 | while (!list_empty(&tgt->sess_works_list)) { | ||
4269 | struct qla_tgt_sess_work_param *prm = list_entry( | ||
4270 | tgt->sess_works_list.next, typeof(*prm), | ||
4271 | sess_works_list_entry); | ||
4272 | |||
4273 | /* | ||
4274 | * This work can be scheduled on several CPUs at a time, so we | ||
4275 | * must delete the entry to eliminate double processing | ||
4276 | */ | ||
4277 | list_del(&prm->sess_works_list_entry); | ||
4278 | |||
4279 | spin_unlock_irqrestore(&tgt->sess_work_lock, flags); | ||
4280 | |||
4281 | switch (prm->type) { | ||
4282 | case QLA_TGT_SESS_WORK_ABORT: | ||
4283 | qlt_abort_work(tgt, prm); | ||
4284 | break; | ||
4285 | case QLA_TGT_SESS_WORK_TM: | ||
4286 | qlt_tmr_work(tgt, prm); | ||
4287 | break; | ||
4288 | default: | ||
4289 | BUG_ON(1); | ||
4290 | break; | ||
4291 | } | ||
4292 | |||
4293 | spin_lock_irqsave(&tgt->sess_work_lock, flags); | ||
4294 | |||
4295 | kfree(prm); | ||
4296 | } | ||
4297 | spin_unlock_irqrestore(&tgt->sess_work_lock, flags); | ||
4298 | } | ||
4299 | |||
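For context, the list drained by qlt_sess_work_fn() is filled from the IRQ path: a work parameter is allocated, the triggering IOCB is copied into it, the entry is added under sess_work_lock, and tgt->sess_work is scheduled. The sketch below is an illustrative reconstruction of that producer side, not a copy of the driver's own helper (example_sched_sess_work is a made-up name; the field names follow the structures used above):

/* Hedged sketch of the producer side of tgt->sess_works_list. */
static int example_sched_sess_work(struct qla_tgt *tgt, int type,
        const void *iocb, size_t size)
{
        struct qla_tgt_sess_work_param *prm;
        unsigned long flags;

        prm = kzalloc(sizeof(*prm), GFP_ATOMIC);   /* IRQ context: no sleeping */
        if (!prm)
                return -ENOMEM;

        prm->type = type;                          /* QLA_TGT_SESS_WORK_ABORT or _TM */
        memcpy(&prm->abts, iocb, size);            /* member chosen per work type */

        spin_lock_irqsave(&tgt->sess_work_lock, flags);
        list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
        spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

        schedule_work(&tgt->sess_work);            /* runs qlt_sess_work_fn() */
        return 0;
}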
4300 | /* Must be called under tgt_host_action_mutex */ | ||
4301 | int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) | ||
4302 | { | ||
4303 | struct qla_tgt *tgt; | ||
4304 | |||
4305 | if (!QLA_TGT_MODE_ENABLED()) | ||
4306 | return 0; | ||
4307 | |||
4308 | ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, | ||
4309 | "Registering target for host %ld(%p)", base_vha->host_no, ha); | ||
4310 | |||
4311 | BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL)); | ||
4312 | |||
4313 | tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL); | ||
4314 | if (!tgt) { | ||
4315 | ql_dbg(ql_dbg_tgt, base_vha, 0xe066, | ||
4316 | "Unable to allocate struct qla_tgt\n"); | ||
4317 | return -ENOMEM; | ||
4318 | } | ||
4319 | |||
4320 | if (!(base_vha->host->hostt->supported_mode & MODE_TARGET)) | ||
4321 | base_vha->host->hostt->supported_mode |= MODE_TARGET; | ||
4322 | |||
4323 | tgt->ha = ha; | ||
4324 | tgt->vha = base_vha; | ||
4325 | init_waitqueue_head(&tgt->waitQ); | ||
4326 | INIT_LIST_HEAD(&tgt->sess_list); | ||
4327 | INIT_LIST_HEAD(&tgt->del_sess_list); | ||
4328 | INIT_DELAYED_WORK(&tgt->sess_del_work, | ||
4329 | (void (*)(struct work_struct *))qlt_del_sess_work_fn); | ||
4330 | spin_lock_init(&tgt->sess_work_lock); | ||
4331 | INIT_WORK(&tgt->sess_work, qlt_sess_work_fn); | ||
4332 | INIT_LIST_HEAD(&tgt->sess_works_list); | ||
4333 | spin_lock_init(&tgt->srr_lock); | ||
4334 | INIT_LIST_HEAD(&tgt->srr_ctio_list); | ||
4335 | INIT_LIST_HEAD(&tgt->srr_imm_list); | ||
4336 | INIT_WORK(&tgt->srr_work, qlt_handle_srr_work); | ||
4337 | atomic_set(&tgt->tgt_global_resets_count, 0); | ||
4338 | |||
4339 | ha->tgt.qla_tgt = tgt; | ||
4340 | |||
4341 | ql_dbg(ql_dbg_tgt, base_vha, 0xe067, | ||
4342 | "qla_target(%d): using 64 Bit PCI addressing", | ||
4343 | base_vha->vp_idx); | ||
4344 | tgt->tgt_enable_64bit_addr = 1; | ||
4345 | /* 3 is reserved */ | ||
4346 | tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3); | ||
4347 | tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX; | ||
4348 | tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX; | ||
4349 | |||
4350 | mutex_lock(&qla_tgt_mutex); | ||
4351 | list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); | ||
4352 | mutex_unlock(&qla_tgt_mutex); | ||
4353 | |||
4354 | return 0; | ||
4355 | } | ||
4356 | |||
4357 | /* Must be called under tgt_host_action_mutex */ | ||
4358 | int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) | ||
4359 | { | ||
4360 | if (!ha->tgt.qla_tgt) | ||
4361 | return 0; | ||
4362 | |||
4363 | mutex_lock(&qla_tgt_mutex); | ||
4364 | list_del(&ha->tgt.qla_tgt->tgt_list_entry); | ||
4365 | mutex_unlock(&qla_tgt_mutex); | ||
4366 | |||
4367 | ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)", | ||
4368 | vha->host_no, ha); | ||
4369 | qlt_release(ha->tgt.qla_tgt); | ||
4370 | |||
4371 | return 0; | ||
4372 | } | ||
4373 | |||
4374 | static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, | ||
4375 | unsigned char *b) | ||
4376 | { | ||
4377 | int i; | ||
4378 | |||
4379 | pr_debug("qla2xxx HW vha->node_name: "); | ||
4380 | for (i = 0; i < WWN_SIZE; i++) | ||
4381 | pr_debug("%02x ", vha->node_name[i]); | ||
4382 | pr_debug("\n"); | ||
4383 | pr_debug("qla2xxx HW vha->port_name: "); | ||
4384 | for (i = 0; i < WWN_SIZE; i++) | ||
4385 | pr_debug("%02x ", vha->port_name[i]); | ||
4386 | pr_debug("\n"); | ||
4387 | |||
4388 | pr_debug("qla2xxx passed configfs WWPN: "); | ||
4389 | put_unaligned_be64(wwpn, b); | ||
4390 | for (i = 0; i < WWN_SIZE; i++) | ||
4391 | pr_debug("%02x ", b[i]); | ||
4392 | pr_debug("\n"); | ||
4393 | } | ||
4394 | |||
4395 | /** | ||
4396 | * qlt_lport_register - register lport with external module | ||
4397 | * | ||
4398 | * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops | ||
4399 | * @wwpn: Passed FC target WWPN | ||
4400 | * @callback: lport initialization callback for tcm_qla2xxx code | ||
4401 | * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data | ||
4402 | */ | ||
4403 | int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, | ||
4404 | int (*callback)(struct scsi_qla_host *), void *target_lport_ptr) | ||
4405 | { | ||
4406 | struct qla_tgt *tgt; | ||
4407 | struct scsi_qla_host *vha; | ||
4408 | struct qla_hw_data *ha; | ||
4409 | struct Scsi_Host *host; | ||
4410 | unsigned long flags; | ||
4411 | int rc; | ||
4412 | u8 b[WWN_SIZE]; | ||
4413 | |||
4414 | mutex_lock(&qla_tgt_mutex); | ||
4415 | list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) { | ||
4416 | vha = tgt->vha; | ||
4417 | ha = vha->hw; | ||
4418 | |||
4419 | host = vha->host; | ||
4420 | if (!host) | ||
4421 | continue; | ||
4422 | |||
4423 | if (ha->tgt.tgt_ops != NULL) | ||
4424 | continue; | ||
4425 | |||
4426 | if (!(host->hostt->supported_mode & MODE_TARGET)) | ||
4427 | continue; | ||
4428 | |||
4429 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
4430 | if (host->active_mode & MODE_TARGET) { | ||
4431 | pr_debug("MODE_TARGET already active on qla2xxx(%d)\n", | ||
4432 | host->host_no); | ||
4433 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
4434 | continue; | ||
4435 | } | ||
4436 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
4437 | |||
4438 | if (!scsi_host_get(host)) { | ||
4439 | ql_dbg(ql_dbg_tgt, vha, 0xe068, | ||
4440 | "Unable to scsi_host_get() for" | ||
4441 | " qla2xxx scsi_host\n"); | ||
4442 | continue; | ||
4443 | } | ||
4444 | qlt_lport_dump(vha, wwpn, b); | ||
4445 | |||
4446 | if (memcmp(vha->port_name, b, WWN_SIZE)) { | ||
4447 | scsi_host_put(host); | ||
4448 | continue; | ||
4449 | } | ||
4450 | /* | ||
4451 | * Set up the passed parameters ahead of invoking the callback | ||
4452 | */ | ||
4453 | ha->tgt.tgt_ops = qla_tgt_ops; | ||
4454 | ha->tgt.target_lport_ptr = target_lport_ptr; | ||
4455 | rc = (*callback)(vha); | ||
4456 | if (rc != 0) { | ||
4457 | ha->tgt.tgt_ops = NULL; | ||
4458 | ha->tgt.target_lport_ptr = NULL; | ||
4459 | } | ||
4460 | mutex_unlock(&qla_tgt_mutex); | ||
4461 | return rc; | ||
4462 | } | ||
4463 | mutex_unlock(&qla_tgt_mutex); | ||
4464 | |||
4465 | return -ENODEV; | ||
4466 | } | ||
4467 | EXPORT_SYMBOL(qlt_lport_register); | ||
4468 | |||
4469 | /** | ||
4470 | * qlt_lport_deregister - Deregister lport | ||
4471 | * | ||
4472 | * @vha: Registered scsi_qla_host pointer | ||
4473 | */ | ||
4474 | void qlt_lport_deregister(struct scsi_qla_host *vha) | ||
4475 | { | ||
4476 | struct qla_hw_data *ha = vha->hw; | ||
4477 | struct Scsi_Host *sh = vha->host; | ||
4478 | /* | ||
4479 | * Clear the target_lport_ptr and tgt_ops pointers in qla_hw_data | ||
4480 | */ | ||
4481 | ha->tgt.target_lport_ptr = NULL; | ||
4482 | ha->tgt.tgt_ops = NULL; | ||
4483 | /* | ||
4484 | * Release the Scsi_Host reference for the underlying qla2xxx host | ||
4485 | */ | ||
4486 | scsi_host_put(sh); | ||
4487 | } | ||
4488 | EXPORT_SYMBOL(qlt_lport_deregister); | ||
4489 | |||
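Together, the two exports above define the lifecycle a fabric module follows: hand qlt_lport_register() a qla_tgt_func_tmpl plus the desired WWPN so its callback runs against the matching scsi_qla_host, then call qlt_lport_deregister() on teardown to drop the template and the Scsi_Host reference. A hedged sketch of such a caller (tcm_qla2xxx is the in-tree user; my_tgt_ops, my_lport and the helper names below are hypothetical):

static struct qla_tgt_func_tmpl my_tgt_ops = {
        /* .handle_cmd, .handle_data, .free_cmd, ... filled in by the module */
};

static int my_lport_callback(struct scsi_qla_host *vha)
{
        /* Stash vha in the module's lport and finish lport setup here. */
        return 0;
}

static int my_lport_attach(u64 wwpn, void *my_lport)
{
        /* 0 on success, the callback's error, or -ENODEV if no WWPN matched. */
        return qlt_lport_register(&my_tgt_ops, wwpn,
                                  my_lport_callback, my_lport);
}

static void my_lport_detach(struct scsi_qla_host *vha)
{
        qlt_lport_deregister(vha);      /* clears tgt_ops and puts the Scsi_Host */
}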
4490 | /* Must be called under HW lock */ | ||
4491 | void qlt_set_mode(struct scsi_qla_host *vha) | ||
4492 | { | ||
4493 | struct qla_hw_data *ha = vha->hw; | ||
4494 | |||
4495 | switch (ql2x_ini_mode) { | ||
4496 | case QLA2XXX_INI_MODE_DISABLED: | ||
4497 | case QLA2XXX_INI_MODE_EXCLUSIVE: | ||
4498 | vha->host->active_mode = MODE_TARGET; | ||
4499 | break; | ||
4500 | case QLA2XXX_INI_MODE_ENABLED: | ||
4501 | vha->host->active_mode |= MODE_TARGET; | ||
4502 | break; | ||
4503 | default: | ||
4504 | break; | ||
4505 | } | ||
4506 | |||
4507 | if (ha->tgt.ini_mode_force_reverse) | ||
4508 | qla_reverse_ini_mode(vha); | ||
4509 | } | ||
4510 | |||
4511 | /* Must be called under HW lock */ | ||
4512 | void qlt_clear_mode(struct scsi_qla_host *vha) | ||
4513 | { | ||
4514 | struct qla_hw_data *ha = vha->hw; | ||
4515 | |||
4516 | switch (ql2x_ini_mode) { | ||
4517 | case QLA2XXX_INI_MODE_DISABLED: | ||
4518 | vha->host->active_mode = MODE_UNKNOWN; | ||
4519 | break; | ||
4520 | case QLA2XXX_INI_MODE_EXCLUSIVE: | ||
4521 | vha->host->active_mode = MODE_INITIATOR; | ||
4522 | break; | ||
4523 | case QLA2XXX_INI_MODE_ENABLED: | ||
4524 | vha->host->active_mode &= ~MODE_TARGET; | ||
4525 | break; | ||
4526 | default: | ||
4527 | break; | ||
4528 | } | ||
4529 | |||
4530 | if (ha->tgt.ini_mode_force_reverse) | ||
4531 | qla_reverse_ini_mode(vha); | ||
4532 | } | ||
4533 | |||
4534 | /* | ||
4535 | * qla_tgt_enable_vha - NO LOCK HELD | ||
4536 | * | ||
4537 | * host_reset, bring up w/ Target Mode Enabled | ||
4538 | */ | ||
4539 | void | ||
4540 | qlt_enable_vha(struct scsi_qla_host *vha) | ||
4541 | { | ||
4542 | struct qla_hw_data *ha = vha->hw; | ||
4543 | struct qla_tgt *tgt = ha->tgt.qla_tgt; | ||
4544 | unsigned long flags; | ||
4545 | |||
4546 | if (!tgt) { | ||
4547 | ql_dbg(ql_dbg_tgt, vha, 0xe069, | ||
4548 | "Unable to locate qla_tgt pointer from" | ||
4549 | " struct qla_hw_data\n"); | ||
4550 | dump_stack(); | ||
4551 | return; | ||
4552 | } | ||
4553 | |||
4554 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
4555 | tgt->tgt_stopped = 0; | ||
4556 | qlt_set_mode(vha); | ||
4557 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
4558 | |||
4559 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | ||
4560 | qla2xxx_wake_dpc(vha); | ||
4561 | qla2x00_wait_for_hba_online(vha); | ||
4562 | } | ||
4563 | EXPORT_SYMBOL(qlt_enable_vha); | ||
4564 | |||
4565 | /* | ||
4566 | * qla_tgt_disable_vha - NO LOCK HELD | ||
4567 | * | ||
4568 | * Disable Target Mode and reset the adapter | ||
4569 | */ | ||
4570 | void | ||
4571 | qlt_disable_vha(struct scsi_qla_host *vha) | ||
4572 | { | ||
4573 | struct qla_hw_data *ha = vha->hw; | ||
4574 | struct qla_tgt *tgt = ha->tgt.qla_tgt; | ||
4575 | unsigned long flags; | ||
4576 | |||
4577 | if (!tgt) { | ||
4578 | ql_dbg(ql_dbg_tgt, vha, 0xe06a, | ||
4579 | "Unable to locate qla_tgt pointer from" | ||
4580 | " struct qla_hw_data\n"); | ||
4581 | dump_stack(); | ||
4582 | return; | ||
4583 | } | ||
4584 | |||
4585 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
4586 | qlt_clear_mode(vha); | ||
4587 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
4588 | |||
4589 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | ||
4590 | qla2xxx_wake_dpc(vha); | ||
4591 | qla2x00_wait_for_hba_online(vha); | ||
4592 | } | ||
4593 | |||
4594 | /* | ||
4595 | * Called from qla_init.c:qla24xx_vport_create() context to set up | ||
4596 | * the target mode specific struct scsi_qla_host and struct qla_hw_data | ||
4597 | * members. | ||
4598 | */ | ||
4599 | void | ||
4600 | qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) | ||
4601 | { | ||
4602 | if (!qla_tgt_mode_enabled(vha)) | ||
4603 | return; | ||
4604 | |||
4605 | mutex_init(&ha->tgt.tgt_mutex); | ||
4606 | mutex_init(&ha->tgt.tgt_host_action_mutex); | ||
4607 | |||
4608 | qlt_clear_mode(vha); | ||
4609 | |||
4610 | /* | ||
4611 | * NOTE: Currently the value is kept the same for <24xx and | ||
4612 | * >=24xx ISPs. If it is necessary to change it, | ||
4613 | * a check should be added for the specific ISPs, | ||
4614 | * assigning the value appropriately. | ||
4615 | */ | ||
4616 | ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; | ||
4617 | } | ||
4618 | |||
4619 | void | ||
4620 | qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req) | ||
4621 | { | ||
4622 | /* | ||
4623 | * FC-4 Feature bit 0 indicates target functionality to the name server. | ||
4624 | */ | ||
4625 | if (qla_tgt_mode_enabled(vha)) { | ||
4626 | if (qla_ini_mode_enabled(vha)) | ||
4627 | ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1; | ||
4628 | else | ||
4629 | ct_req->req.rff_id.fc4_feature = BIT_0; | ||
4630 | } else if (qla_ini_mode_enabled(vha)) { | ||
4631 | ct_req->req.rff_id.fc4_feature = BIT_1; | ||
4632 | } | ||
4633 | } | ||
4634 | |||
4635 | /* | ||
4636 | * qlt_init_atio_q_entries() - Initializes ATIO queue entries. | ||
4637 | * @vha: HA context | ||
4638 | * | ||
4639 | * The beginning of the ATIO ring has the initialization control block | ||
4640 | * already built by the NVRAM config routine. | ||
4641 | * | ||
4642 | * Stamps each entry with the ATIO_PROCESSED signature. | ||
4643 | */ | ||
4644 | void | ||
4645 | qlt_init_atio_q_entries(struct scsi_qla_host *vha) | ||
4646 | { | ||
4647 | struct qla_hw_data *ha = vha->hw; | ||
4648 | uint16_t cnt; | ||
4649 | struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring; | ||
4650 | |||
4651 | if (!qla_tgt_mode_enabled(vha)) | ||
4652 | return; | ||
4653 | |||
4654 | for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { | ||
4655 | pkt->u.raw.signature = ATIO_PROCESSED; | ||
4656 | pkt++; | ||
4657 | } | ||
4658 | |||
4659 | } | ||
4660 | |||
4661 | /* | ||
4662 | * qlt_24xx_process_atio_queue() - Process ATIO queue entries. | ||
4663 | * @vha: SCSI driver HA context | ||
4664 | */ | ||
4665 | void | ||
4666 | qlt_24xx_process_atio_queue(struct scsi_qla_host *vha) | ||
4667 | { | ||
4668 | struct qla_hw_data *ha = vha->hw; | ||
4669 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | ||
4670 | struct atio_from_isp *pkt; | ||
4671 | int cnt, i; | ||
4672 | |||
4673 | if (!vha->flags.online) | ||
4674 | return; | ||
4675 | |||
4676 | while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) { | ||
4677 | pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; | ||
4678 | cnt = pkt->u.raw.entry_count; | ||
4679 | |||
4680 | qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt); | ||
4681 | |||
4682 | for (i = 0; i < cnt; i++) { | ||
4683 | ha->tgt.atio_ring_index++; | ||
4684 | if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) { | ||
4685 | ha->tgt.atio_ring_index = 0; | ||
4686 | ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; | ||
4687 | } else | ||
4688 | ha->tgt.atio_ring_ptr++; | ||
4689 | |||
4690 | pkt->u.raw.signature = ATIO_PROCESSED; | ||
4691 | pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; | ||
4692 | } | ||
4693 | wmb(); | ||
4694 | } | ||
4695 | |||
4696 | /* Adjust ring index */ | ||
4697 | WRT_REG_DWORD(®->atio_q_out, ha->tgt.atio_ring_index); | ||
4698 | } | ||
4699 | |||
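The inner loop above is a plain ring-buffer consumer: step the index, wrap to zero when it reaches atio_q_length, and stamp each consumed entry with ATIO_PROCESSED so the outer while() stops at the first unconsumed slot. The advance-and-wrap step, reduced to a standalone sketch:

#include <stdio.h>

#define RING_LEN 8u                     /* stands in for ha->tgt.atio_q_length */

/* Advance a ring index by one entry, wrapping at the end of the ring. */
static unsigned int ring_advance(unsigned int idx)
{
        return (idx + 1 == RING_LEN) ? 0 : idx + 1;
}

int main(void)
{
        unsigned int idx = RING_LEN - 2;

        for (int i = 0; i < 4; i++) {
                printf("%u ", idx);     /* prints "6 7 0 1" */
                idx = ring_advance(idx);
        }
        printf("\n");
        return 0;
}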
4700 | void | ||
4701 | qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg) | ||
4702 | { | ||
4703 | struct qla_hw_data *ha = vha->hw; | ||
4704 | |||
4705 | /* FIXME: atio_q in/out for ha->mqenable=1..? */ | ||
4706 | if (ha->mqenable) { | ||
4707 | #if 0 | ||
4708 | WRT_REG_DWORD(®->isp25mq.atio_q_in, 0); | ||
4709 | WRT_REG_DWORD(®->isp25mq.atio_q_out, 0); | ||
4710 | RD_REG_DWORD(®->isp25mq.atio_q_out); | ||
4711 | #endif | ||
4712 | } else { | ||
4713 | /* Set up ATIO registers for target mode */ | ||
4714 | WRT_REG_DWORD(®->isp24.atio_q_in, 0); | ||
4715 | WRT_REG_DWORD(®->isp24.atio_q_out, 0); | ||
4716 | RD_REG_DWORD(®->isp24.atio_q_out); | ||
4717 | } | ||
4718 | } | ||
4719 | |||
4720 | void | ||
4721 | qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) | ||
4722 | { | ||
4723 | struct qla_hw_data *ha = vha->hw; | ||
4724 | |||
4725 | if (qla_tgt_mode_enabled(vha)) { | ||
4726 | if (!ha->tgt.saved_set) { | ||
4727 | /* We save only once */ | ||
4728 | ha->tgt.saved_exchange_count = nv->exchange_count; | ||
4729 | ha->tgt.saved_firmware_options_1 = | ||
4730 | nv->firmware_options_1; | ||
4731 | ha->tgt.saved_firmware_options_2 = | ||
4732 | nv->firmware_options_2; | ||
4733 | ha->tgt.saved_firmware_options_3 = | ||
4734 | nv->firmware_options_3; | ||
4735 | ha->tgt.saved_set = 1; | ||
4736 | } | ||
4737 | |||
4738 | nv->exchange_count = __constant_cpu_to_le16(0xFFFF); | ||
4739 | |||
4740 | /* Enable target mode */ | ||
4741 | nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4); | ||
4742 | |||
4743 | /* Disable ini mode, if requested */ | ||
4744 | if (!qla_ini_mode_enabled(vha)) | ||
4745 | nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5); | ||
4746 | |||
4747 | /* Disable Full Login after LIP */ | ||
4748 | nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); | ||
4749 | /* Enable initial LIP */ | ||
4750 | nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9); | ||
4751 | /* Enable FC tape support */ | ||
4752 | nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); | ||
4753 | /* Disable Full Login after LIP */ | ||
4754 | nv->host_p &= __constant_cpu_to_le32(~BIT_10); | ||
4755 | /* Enable target PRLI control */ | ||
4756 | nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14); | ||
4757 | } else { | ||
4758 | if (ha->tgt.saved_set) { | ||
4759 | nv->exchange_count = ha->tgt.saved_exchange_count; | ||
4760 | nv->firmware_options_1 = | ||
4761 | ha->tgt.saved_firmware_options_1; | ||
4762 | nv->firmware_options_2 = | ||
4763 | ha->tgt.saved_firmware_options_2; | ||
4764 | nv->firmware_options_3 = | ||
4765 | ha->tgt.saved_firmware_options_3; | ||
4766 | } | ||
4767 | return; | ||
4768 | } | ||
4769 | |||
4770 | /* out-of-order frames reassembly */ | ||
4771 | nv->firmware_options_3 |= BIT_6|BIT_9; | ||
4772 | |||
4773 | if (ha->tgt.enable_class_2) { | ||
4774 | if (vha->flags.init_done) | ||
4775 | fc_host_supported_classes(vha->host) = | ||
4776 | FC_COS_CLASS2 | FC_COS_CLASS3; | ||
4777 | |||
4778 | nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8); | ||
4779 | } else { | ||
4780 | if (vha->flags.init_done) | ||
4781 | fc_host_supported_classes(vha->host) = FC_COS_CLASS3; | ||
4782 | |||
4783 | nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8); | ||
4784 | } | ||
4785 | } | ||
4786 | |||
4787 | void | ||
4788 | qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha, | ||
4789 | struct init_cb_24xx *icb) | ||
4790 | { | ||
4791 | struct qla_hw_data *ha = vha->hw; | ||
4792 | |||
4793 | if (ha->tgt.node_name_set) { | ||
4794 | memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); | ||
4795 | icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14); | ||
4796 | } | ||
4797 | } | ||
4798 | |||
4799 | int | ||
4800 | qlt_24xx_process_response_error(struct scsi_qla_host *vha, | ||
4801 | struct sts_entry_24xx *pkt) | ||
4802 | { | ||
4803 | switch (pkt->entry_type) { | ||
4804 | case ABTS_RECV_24XX: | ||
4805 | case ABTS_RESP_24XX: | ||
4806 | case CTIO_TYPE7: | ||
4807 | case NOTIFY_ACK_TYPE: | ||
4808 | return 1; | ||
4809 | default: | ||
4810 | return 0; | ||
4811 | } | ||
4812 | } | ||
4813 | |||
4814 | void | ||
4815 | qlt_modify_vp_config(struct scsi_qla_host *vha, | ||
4816 | struct vp_config_entry_24xx *vpmod) | ||
4817 | { | ||
4818 | if (qla_tgt_mode_enabled(vha)) | ||
4819 | vpmod->options_idx1 &= ~BIT_5; | ||
4820 | /* Disable ini mode, if requested */ | ||
4821 | if (!qla_ini_mode_enabled(vha)) | ||
4822 | vpmod->options_idx1 &= ~BIT_4; | ||
4823 | } | ||
4824 | |||
4825 | void | ||
4826 | qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) | ||
4827 | { | ||
4828 | if (!QLA_TGT_MODE_ENABLED()) | ||
4829 | return; | ||
4830 | |||
4831 | mutex_init(&ha->tgt.tgt_mutex); | ||
4832 | mutex_init(&ha->tgt.tgt_host_action_mutex); | ||
4833 | qlt_clear_mode(base_vha); | ||
4834 | } | ||
4835 | |||
4836 | int | ||
4837 | qlt_mem_alloc(struct qla_hw_data *ha) | ||
4838 | { | ||
4839 | if (!QLA_TGT_MODE_ENABLED()) | ||
4840 | return 0; | ||
4841 | |||
4842 | ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) * | ||
4843 | MAX_MULTI_ID_FABRIC, GFP_KERNEL); | ||
4844 | if (!ha->tgt.tgt_vp_map) | ||
4845 | return -ENOMEM; | ||
4846 | |||
4847 | ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev, | ||
4848 | (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), | ||
4849 | &ha->tgt.atio_dma, GFP_KERNEL); | ||
4850 | if (!ha->tgt.atio_ring) { | ||
4851 | kfree(ha->tgt.tgt_vp_map); | ||
4852 | return -ENOMEM; | ||
4853 | } | ||
4854 | return 0; | ||
4855 | } | ||
4856 | |||
4857 | void | ||
4858 | qlt_mem_free(struct qla_hw_data *ha) | ||
4859 | { | ||
4860 | if (!QLA_TGT_MODE_ENABLED()) | ||
4861 | return; | ||
4862 | |||
4863 | if (ha->tgt.atio_ring) { | ||
4864 | dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * | ||
4865 | sizeof(struct atio_from_isp), ha->tgt.atio_ring, | ||
4866 | ha->tgt.atio_dma); | ||
4867 | } | ||
4868 | kfree(ha->tgt.tgt_vp_map); | ||
4869 | } | ||
4870 | |||
4871 | /* vport_slock to be held by the caller */ | ||
4872 | void | ||
4873 | qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) | ||
4874 | { | ||
4875 | if (!QLA_TGT_MODE_ENABLED()) | ||
4876 | return; | ||
4877 | |||
4878 | switch (cmd) { | ||
4879 | case SET_VP_IDX: | ||
4880 | vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; | ||
4881 | break; | ||
4882 | case SET_AL_PA: | ||
4883 | vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx; | ||
4884 | break; | ||
4885 | case RESET_VP_IDX: | ||
4886 | vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; | ||
4887 | break; | ||
4888 | case RESET_AL_PA: | ||
4889 | vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0; | ||
4890 | break; | ||
4891 | } | ||
4892 | } | ||
4893 | |||
4894 | static int __init qlt_parse_ini_mode(void) | ||
4895 | { | ||
4896 | if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) | ||
4897 | ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; | ||
4898 | else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0) | ||
4899 | ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED; | ||
4900 | else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0) | ||
4901 | ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED; | ||
4902 | else | ||
4903 | return false; | ||
4904 | |||
4905 | return true; | ||
4906 | } | ||
4907 | |||
4908 | int __init qlt_init(void) | ||
4909 | { | ||
4910 | int ret; | ||
4911 | |||
4912 | if (!qlt_parse_ini_mode()) { | ||
4913 | ql_log(ql_log_fatal, NULL, 0xe06b, | ||
4914 | "qlt_parse_ini_mode() failed\n"); | ||
4915 | return -EINVAL; | ||
4916 | } | ||
4917 | |||
4918 | if (!QLA_TGT_MODE_ENABLED()) | ||
4919 | return 0; | ||
4920 | |||
4921 | qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep", | ||
4922 | sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0, | ||
4923 | NULL); | ||
4924 | if (!qla_tgt_cmd_cachep) { | ||
4925 | ql_log(ql_log_fatal, NULL, 0xe06c, | ||
4926 | "kmem_cache_create for qla_tgt_cmd_cachep failed\n"); | ||
4927 | return -ENOMEM; | ||
4928 | } | ||
4929 | |||
4930 | qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", | ||
4931 | sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct | ||
4932 | qla_tgt_mgmt_cmd), 0, NULL); | ||
4933 | if (!qla_tgt_mgmt_cmd_cachep) { | ||
4934 | ql_log(ql_log_fatal, NULL, 0xe06d, | ||
4935 | "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); | ||
4936 | ret = -ENOMEM; | ||
4937 | goto out; | ||
4938 | } | ||
4939 | |||
4940 | qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, | ||
4941 | mempool_free_slab, qla_tgt_mgmt_cmd_cachep); | ||
4942 | if (!qla_tgt_mgmt_cmd_mempool) { | ||
4943 | ql_log(ql_log_fatal, NULL, 0xe06e, | ||
4944 | "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n"); | ||
4945 | ret = -ENOMEM; | ||
4946 | goto out_mgmt_cmd_cachep; | ||
4947 | } | ||
4948 | |||
4949 | qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0); | ||
4950 | if (!qla_tgt_wq) { | ||
4951 | ql_log(ql_log_fatal, NULL, 0xe06f, | ||
4952 | "alloc_workqueue for qla_tgt_wq failed\n"); | ||
4953 | ret = -ENOMEM; | ||
4954 | goto out_cmd_mempool; | ||
4955 | } | ||
4956 | /* | ||
4957 | * Return 1 to signal that initiator-mode is being disabled | ||
4958 | */ | ||
4959 | return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0; | ||
4960 | |||
4961 | out_cmd_mempool: | ||
4962 | mempool_destroy(qla_tgt_mgmt_cmd_mempool); | ||
4963 | out_mgmt_cmd_cachep: | ||
4964 | kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); | ||
4965 | out: | ||
4966 | kmem_cache_destroy(qla_tgt_cmd_cachep); | ||
4967 | return ret; | ||
4968 | } | ||
4969 | |||
4970 | void qlt_exit(void) | ||
4971 | { | ||
4972 | if (!QLA_TGT_MODE_ENABLED()) | ||
4973 | return; | ||
4974 | |||
4975 | destroy_workqueue(qla_tgt_wq); | ||
4976 | mempool_destroy(qla_tgt_mgmt_cmd_mempool); | ||
4977 | kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); | ||
4978 | kmem_cache_destroy(qla_tgt_cmd_cachep); | ||
4979 | } | ||
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h deleted file mode 100644 index bad749561ec..00000000000 --- a/drivers/scsi/qla2xxx/qla_target.h +++ /dev/null | |||
@@ -1,1004 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net> | ||
3 | * Copyright (C) 2004 - 2005 Leonid Stoljar | ||
4 | * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us> | ||
5 | * Copyright (C) 2007 - 2010 ID7 Ltd. | ||
6 | * | ||
7 | * Forward port and refactoring to modern qla2xxx and target/configfs | ||
8 | * | ||
9 | * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org> | ||
10 | * | ||
11 | * Additional file for the target driver support. | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or | ||
14 | * modify it under the terms of the GNU General Public License | ||
15 | * as published by the Free Software Foundation; either version 2 | ||
16 | * of the License, or (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | */ | ||
23 | /* | ||
24 | * This is the global definitions file, intended to be included from the | ||
25 | * target portion. | ||
26 | */ | ||
27 | |||
28 | #ifndef __QLA_TARGET_H | ||
29 | #define __QLA_TARGET_H | ||
30 | |||
31 | #include "qla_def.h" | ||
32 | |||
33 | /* | ||
34 | * Must be changed on any change in any initiator visible interfaces or | ||
35 | * data in the target add-on | ||
36 | */ | ||
37 | #define QLA2XXX_TARGET_MAGIC 269 | ||
38 | |||
39 | /* | ||
40 | * Must be changed on any change in any target visible interfaces or | ||
41 | * data in the initiator | ||
42 | */ | ||
43 | #define QLA2XXX_INITIATOR_MAGIC 57222 | ||
44 | |||
45 | #define QLA2XXX_INI_MODE_STR_EXCLUSIVE "exclusive" | ||
46 | #define QLA2XXX_INI_MODE_STR_DISABLED "disabled" | ||
47 | #define QLA2XXX_INI_MODE_STR_ENABLED "enabled" | ||
48 | |||
49 | #define QLA2XXX_INI_MODE_EXCLUSIVE 0 | ||
50 | #define QLA2XXX_INI_MODE_DISABLED 1 | ||
51 | #define QLA2XXX_INI_MODE_ENABLED 2 | ||
52 | |||
53 | #define QLA2XXX_COMMAND_COUNT_INIT 250 | ||
54 | #define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250 | ||
55 | |||
56 | /* | ||
57 | * Used to mark which completion handles (for RIO Statuses) are for CTIOs | ||
58 | * vs. regular (non-target) info. This is checked for in | ||
59 | * qla2x00_process_response_queue() to see if a handle coming back in a | ||
60 | * multi-complete should go to the tgt driver or be handled by qla2xxx itself | ||
61 | */ | ||
62 | #define CTIO_COMPLETION_HANDLE_MARK BIT_29 | ||
63 | #if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS) | ||
64 | #error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS" | ||
65 | #endif | ||
66 | #define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK) | ||
67 | |||
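CTIO_COMPLETION_HANDLE_MARK is a tag bit carried inside the completion handle: the target path ORs it in when a CTIO is issued, and the response path uses HANDLE_IS_CTIO_COMP() to route the completion and masks the bit off before using the handle as an index. A standalone sketch of that tag-bit pattern (the bit position and helper names here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define CTIO_MARK (1u << 29)            /* mirrors CTIO_COMPLETION_HANDLE_MARK */

static uint32_t mark_ctio(uint32_t handle)  { return handle | CTIO_MARK; }
static int      is_ctio(uint32_t handle)    { return !!(handle & CTIO_MARK); }
static uint32_t strip_mark(uint32_t handle) { return handle & ~CTIO_MARK; }

int main(void)
{
        uint32_t h = mark_ctio(42);     /* handle issued for a CTIO */

        printf("%d %u\n", is_ctio(h), strip_mark(h));   /* prints "1 42" */
        return 0;
}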
68 | /* Used to mark CTIO as intermediate */ | ||
69 | #define CTIO_INTERMEDIATE_HANDLE_MARK BIT_30 | ||
70 | |||
71 | #ifndef OF_SS_MODE_0 | ||
72 | /* | ||
73 | * ISP target entries - Flags bit definitions. | ||
74 | */ | ||
75 | #define OF_SS_MODE_0 0 | ||
76 | #define OF_SS_MODE_1 1 | ||
77 | #define OF_SS_MODE_2 2 | ||
78 | #define OF_SS_MODE_3 3 | ||
79 | |||
80 | #define OF_EXPL_CONF BIT_5 /* Explicit Confirmation Requested */ | ||
81 | #define OF_DATA_IN BIT_6 /* Data in to initiator */ | ||
82 | /* (data from target to initiator) */ | ||
83 | #define OF_DATA_OUT BIT_7 /* Data out from initiator */ | ||
84 | /* (data from initiator to target) */ | ||
85 | #define OF_NO_DATA (BIT_7 | BIT_6) | ||
86 | #define OF_INC_RC BIT_8 /* Increment command resource count */ | ||
87 | #define OF_FAST_POST BIT_9 /* Enable mailbox fast posting. */ | ||
88 | #define OF_CONF_REQ BIT_13 /* Confirmation Requested */ | ||
89 | #define OF_TERM_EXCH BIT_14 /* Terminate exchange */ | ||
90 | #define OF_SSTS BIT_15 /* Send SCSI status */ | ||
91 | #endif | ||
92 | |||
93 | #ifndef QLA_TGT_DATASEGS_PER_CMD32 | ||
94 | #define QLA_TGT_DATASEGS_PER_CMD32 3 | ||
95 | #define QLA_TGT_DATASEGS_PER_CONT32 7 | ||
96 | #define QLA_TGT_MAX_SG32(ql) \ | ||
97 | (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD32 + \ | ||
98 | QLA_TGT_DATASEGS_PER_CONT32*((ql) - 1)) : 0) | ||
99 | |||
100 | #define QLA_TGT_DATASEGS_PER_CMD64 2 | ||
101 | #define QLA_TGT_DATASEGS_PER_CONT64 5 | ||
102 | #define QLA_TGT_MAX_SG64(ql) \ | ||
103 | (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD64 + \ | ||
104 | QLA_TGT_DATASEGS_PER_CONT64*((ql) - 1)) : 0) | ||
105 | #endif | ||
106 | |||
107 | #ifndef QLA_TGT_DATASEGS_PER_CMD_24XX | ||
108 | #define QLA_TGT_DATASEGS_PER_CMD_24XX 1 | ||
109 | #define QLA_TGT_DATASEGS_PER_CONT_24XX 5 | ||
110 | #define QLA_TGT_MAX_SG_24XX(ql) \ | ||
111 | (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \ | ||
112 | QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0)) | ||
113 | #endif | ||
114 | #endif | ||
115 | |||
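As a worked example of the 24xx macro above: one data segment fits in the command IOCB and five more in each continuation IOCB, so ql usable request-queue entries give 1 + 5*(ql - 1) segments, clamped to 1270. qlt_add_target() passes req->length - 3, so assuming a 2048-entry request queue that is ql = 2045 and 1 + 5*2044 = 10221, which the min() caps at 1270. The same arithmetic as a standalone sketch:

#include <stdio.h>

#define DATASEGS_PER_CMD_24XX  1
#define DATASEGS_PER_CONT_24XX 5
#define MAX_SG_CAP_24XX        1270

/* Same arithmetic as QLA_TGT_MAX_SG_24XX(ql). */
static int max_sg_24xx(int ql)
{
        int sg = (ql > 0) ? DATASEGS_PER_CMD_24XX +
                            DATASEGS_PER_CONT_24XX * (ql - 1) : 0;

        return sg < MAX_SG_CAP_24XX ? sg : MAX_SG_CAP_24XX;
}

int main(void)
{
        printf("%d\n", max_sg_24xx(2048 - 3));   /* prints 1270 */
        return 0;
}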
116 | #define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \ | ||
117 | ? le16_to_cpu((iocb)->u.isp2x.target.extended) \ | ||
118 | : (uint16_t)(iocb)->u.isp2x.target.id.standard) | ||
119 | |||
120 | #ifndef IMMED_NOTIFY_TYPE | ||
121 | #define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */ | ||
122 | /* | ||
123 | * ISP queue - immediate notify entry structure definition. | ||
124 | * This is sent by the ISP to the Target driver. | ||
125 | * This IOCB carries reports of events sent by the | ||
126 | * initiator that need to be handled by the target | ||
127 | * driver immediately. | ||
128 | */ | ||
129 | struct imm_ntfy_from_isp { | ||
130 | uint8_t entry_type; /* Entry type. */ | ||
131 | uint8_t entry_count; /* Entry count. */ | ||
132 | uint8_t sys_define; /* System defined. */ | ||
133 | uint8_t entry_status; /* Entry Status. */ | ||
134 | union { | ||
135 | struct { | ||
136 | uint32_t sys_define_2; /* System defined. */ | ||
137 | target_id_t target; | ||
138 | uint16_t lun; | ||
139 | uint8_t target_id; | ||
140 | uint8_t reserved_1; | ||
141 | uint16_t status_modifier; | ||
142 | uint16_t status; | ||
143 | uint16_t task_flags; | ||
144 | uint16_t seq_id; | ||
145 | uint16_t srr_rx_id; | ||
146 | uint32_t srr_rel_offs; | ||
147 | uint16_t srr_ui; | ||
148 | #define SRR_IU_DATA_IN 0x1 | ||
149 | #define SRR_IU_DATA_OUT 0x5 | ||
150 | #define SRR_IU_STATUS 0x7 | ||
151 | uint16_t srr_ox_id; | ||
152 | uint8_t reserved_2[28]; | ||
153 | } isp2x; | ||
154 | struct { | ||
155 | uint32_t reserved; | ||
156 | uint16_t nport_handle; | ||
157 | uint16_t reserved_2; | ||
158 | uint16_t flags; | ||
159 | #define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1 | ||
160 | #define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0 | ||
161 | uint16_t srr_rx_id; | ||
162 | uint16_t status; | ||
163 | uint8_t status_subcode; | ||
164 | uint8_t reserved_3; | ||
165 | uint32_t exchange_address; | ||
166 | uint32_t srr_rel_offs; | ||
167 | uint16_t srr_ui; | ||
168 | uint16_t srr_ox_id; | ||
169 | uint8_t reserved_4[19]; | ||
170 | uint8_t vp_index; | ||
171 | uint32_t reserved_5; | ||
172 | uint8_t port_id[3]; | ||
173 | uint8_t reserved_6; | ||
174 | } isp24; | ||
175 | } u; | ||
176 | uint16_t reserved_7; | ||
177 | uint16_t ox_id; | ||
178 | } __packed; | ||
179 | #endif | ||
180 | |||
181 | #ifndef NOTIFY_ACK_TYPE | ||
182 | #define NOTIFY_ACK_TYPE 0x0E /* Notify acknowledge entry. */ | ||
183 | /* | ||
184 | * ISP queue - notify acknowledge entry structure definition. | ||
185 | * This is sent to the ISP from the target driver. | ||
186 | */ | ||
187 | struct nack_to_isp { | ||
188 | uint8_t entry_type; /* Entry type. */ | ||
189 | uint8_t entry_count; /* Entry count. */ | ||
190 | uint8_t sys_define; /* System defined. */ | ||
191 | uint8_t entry_status; /* Entry Status. */ | ||
192 | union { | ||
193 | struct { | ||
194 | uint32_t sys_define_2; /* System defined. */ | ||
195 | target_id_t target; | ||
196 | uint8_t target_id; | ||
197 | uint8_t reserved_1; | ||
198 | uint16_t flags; | ||
199 | uint16_t resp_code; | ||
200 | uint16_t status; | ||
201 | uint16_t task_flags; | ||
202 | uint16_t seq_id; | ||
203 | uint16_t srr_rx_id; | ||
204 | uint32_t srr_rel_offs; | ||
205 | uint16_t srr_ui; | ||
206 | uint16_t srr_flags; | ||
207 | uint16_t srr_reject_code; | ||
208 | uint8_t srr_reject_vendor_uniq; | ||
209 | uint8_t srr_reject_code_expl; | ||
210 | uint8_t reserved_2[24]; | ||
211 | } isp2x; | ||
212 | struct { | ||
213 | uint32_t handle; | ||
214 | uint16_t nport_handle; | ||
215 | uint16_t reserved_1; | ||
216 | uint16_t flags; | ||
217 | uint16_t srr_rx_id; | ||
218 | uint16_t status; | ||
219 | uint8_t status_subcode; | ||
220 | uint8_t reserved_3; | ||
221 | uint32_t exchange_address; | ||
222 | uint32_t srr_rel_offs; | ||
223 | uint16_t srr_ui; | ||
224 | uint16_t srr_flags; | ||
225 | uint8_t reserved_4[19]; | ||
226 | uint8_t vp_index; | ||
227 | uint8_t srr_reject_vendor_uniq; | ||
228 | uint8_t srr_reject_code_expl; | ||
229 | uint8_t srr_reject_code; | ||
230 | uint8_t reserved_5[5]; | ||
231 | } isp24; | ||
232 | } u; | ||
233 | uint8_t reserved[2]; | ||
234 | uint16_t ox_id; | ||
235 | } __packed; | ||
236 | #define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0 | ||
237 | #define NOTIFY_ACK_SRR_FLAGS_REJECT 1 | ||
238 | |||
239 | #define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM 0x9 | ||
240 | |||
241 | #define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL 0 | ||
242 | #define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA 0x2a | ||
243 | |||
244 | #define NOTIFY_ACK_SUCCESS 0x01 | ||
245 | #endif | ||
246 | |||
247 | #ifndef ACCEPT_TGT_IO_TYPE | ||
248 | #define ACCEPT_TGT_IO_TYPE 0x16 /* Accept target I/O entry. */ | ||
249 | #endif | ||
250 | |||
251 | #ifndef CONTINUE_TGT_IO_TYPE | ||
252 | #define CONTINUE_TGT_IO_TYPE 0x17 | ||
253 | /* | ||
254 | * ISP queue - Continue Target I/O (CTIO) entry for status mode 0 structure. | ||
255 | * This structure is sent to the ISP 2xxx from target driver. | ||
256 | */ | ||
257 | struct ctio_to_2xxx { | ||
258 | uint8_t entry_type; /* Entry type. */ | ||
259 | uint8_t entry_count; /* Entry count. */ | ||
260 | uint8_t sys_define; /* System defined. */ | ||
261 | uint8_t entry_status; /* Entry Status. */ | ||
262 | uint32_t handle; /* System defined handle */ | ||
263 | target_id_t target; | ||
264 | uint16_t rx_id; | ||
265 | uint16_t flags; | ||
266 | uint16_t status; | ||
267 | uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ | ||
268 | uint16_t dseg_count; /* Data segment count. */ | ||
269 | uint32_t relative_offset; | ||
270 | uint32_t residual; | ||
271 | uint16_t reserved_1[3]; | ||
272 | uint16_t scsi_status; | ||
273 | uint32_t transfer_length; | ||
274 | uint32_t dseg_0_address; /* Data segment 0 address. */ | ||
275 | uint32_t dseg_0_length; /* Data segment 0 length. */ | ||
276 | uint32_t dseg_1_address; /* Data segment 1 address. */ | ||
277 | uint32_t dseg_1_length; /* Data segment 1 length. */ | ||
278 | uint32_t dseg_2_address; /* Data segment 2 address. */ | ||
279 | uint32_t dseg_2_length; /* Data segment 2 length. */ | ||
280 | } __packed; | ||
281 | #define ATIO_PATH_INVALID 0x07 | ||
282 | #define ATIO_CANT_PROV_CAP 0x16 | ||
283 | #define ATIO_CDB_VALID 0x3D | ||
284 | |||
285 | #define ATIO_EXEC_READ BIT_1 | ||
286 | #define ATIO_EXEC_WRITE BIT_0 | ||
287 | #endif | ||
288 | |||
289 | #ifndef CTIO_A64_TYPE | ||
290 | #define CTIO_A64_TYPE 0x1F | ||
291 | #define CTIO_SUCCESS 0x01 | ||
292 | #define CTIO_ABORTED 0x02 | ||
293 | #define CTIO_INVALID_RX_ID 0x08 | ||
294 | #define CTIO_TIMEOUT 0x0B | ||
295 | #define CTIO_LIP_RESET 0x0E | ||
296 | #define CTIO_TARGET_RESET 0x17 | ||
297 | #define CTIO_PORT_UNAVAILABLE 0x28 | ||
298 | #define CTIO_PORT_LOGGED_OUT 0x29 | ||
299 | #define CTIO_PORT_CONF_CHANGED 0x2A | ||
300 | #define CTIO_SRR_RECEIVED 0x45 | ||
301 | #endif | ||
302 | |||
303 | #ifndef CTIO_RET_TYPE | ||
304 | #define CTIO_RET_TYPE 0x17 /* CTIO return entry */ | ||
305 | #define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */ | ||
306 | |||
307 | struct fcp_hdr { | ||
308 | uint8_t r_ctl; | ||
309 | uint8_t d_id[3]; | ||
310 | uint8_t cs_ctl; | ||
311 | uint8_t s_id[3]; | ||
312 | uint8_t type; | ||
313 | uint8_t f_ctl[3]; | ||
314 | uint8_t seq_id; | ||
315 | uint8_t df_ctl; | ||
316 | uint16_t seq_cnt; | ||
317 | uint16_t ox_id; | ||
318 | uint16_t rx_id; | ||
319 | uint32_t parameter; | ||
320 | } __packed; | ||
321 | |||
322 | struct fcp_hdr_le { | ||
323 | uint8_t d_id[3]; | ||
324 | uint8_t r_ctl; | ||
325 | uint8_t s_id[3]; | ||
326 | uint8_t cs_ctl; | ||
327 | uint8_t f_ctl[3]; | ||
328 | uint8_t type; | ||
329 | uint16_t seq_cnt; | ||
330 | uint8_t df_ctl; | ||
331 | uint8_t seq_id; | ||
332 | uint16_t rx_id; | ||
333 | uint16_t ox_id; | ||
334 | uint32_t parameter; | ||
335 | } __packed; | ||
336 | |||
337 | #define F_CTL_EXCH_CONTEXT_RESP BIT_23 | ||
338 | #define F_CTL_SEQ_CONTEXT_RESIP BIT_22 | ||
339 | #define F_CTL_LAST_SEQ BIT_20 | ||
340 | #define F_CTL_END_SEQ BIT_19 | ||
341 | #define F_CTL_SEQ_INITIATIVE BIT_16 | ||
342 | |||
343 | #define R_CTL_BASIC_LINK_SERV 0x80 | ||
344 | #define R_CTL_B_ACC 0x4 | ||
345 | #define R_CTL_B_RJT 0x5 | ||
346 | |||
347 | struct atio7_fcp_cmnd { | ||
348 | uint64_t lun; | ||
349 | uint8_t cmnd_ref; | ||
350 | uint8_t task_attr:3; | ||
351 | uint8_t reserved:5; | ||
352 | uint8_t task_mgmt_flags; | ||
353 | #define FCP_CMND_TASK_MGMT_CLEAR_ACA 6 | ||
354 | #define FCP_CMND_TASK_MGMT_TARGET_RESET 5 | ||
355 | #define FCP_CMND_TASK_MGMT_LU_RESET 4 | ||
356 | #define FCP_CMND_TASK_MGMT_CLEAR_TASK_SET 2 | ||
357 | #define FCP_CMND_TASK_MGMT_ABORT_TASK_SET 1 | ||
358 | uint8_t wrdata:1; | ||
359 | uint8_t rddata:1; | ||
360 | uint8_t add_cdb_len:6; | ||
361 | uint8_t cdb[16]; | ||
362 | /* | ||
363 | * add_cdb is optional and can be absent from struct atio7_fcp_cmnd. Size 4 | ||
364 | * only to make sizeof(struct atio7_fcp_cmnd) be as expected by | ||
365 | * BUILD_BUG_ON in qlt_init(). | ||
366 | */ | ||
367 | uint8_t add_cdb[4]; | ||
368 | /* uint32_t data_length; */ | ||
369 | } __packed; | ||
370 | |||
371 | /* | ||
372 | * ISP queue - Accept Target I/O (ATIO) type entry IOCB structure. | ||
373 | * This is sent from the ISP to the target driver. | ||
374 | */ | ||
375 | struct atio_from_isp { | ||
376 | union { | ||
377 | struct { | ||
378 | uint16_t entry_hdr; | ||
379 | uint8_t sys_define; /* System defined. */ | ||
380 | uint8_t entry_status; /* Entry Status. */ | ||
381 | uint32_t sys_define_2; /* System defined. */ | ||
382 | target_id_t target; | ||
383 | uint16_t rx_id; | ||
384 | uint16_t flags; | ||
385 | uint16_t status; | ||
386 | uint8_t command_ref; | ||
387 | uint8_t task_codes; | ||
388 | uint8_t task_flags; | ||
389 | uint8_t execution_codes; | ||
390 | uint8_t cdb[MAX_CMDSZ]; | ||
391 | uint32_t data_length; | ||
392 | uint16_t lun; | ||
393 | uint8_t initiator_port_name[WWN_SIZE]; /* on qla23xx */ | ||
394 | uint16_t reserved_32[6]; | ||
395 | uint16_t ox_id; | ||
396 | } isp2x; | ||
397 | struct { | ||
398 | uint16_t entry_hdr; | ||
399 | uint8_t fcp_cmnd_len_low; | ||
400 | uint8_t fcp_cmnd_len_high:4; | ||
401 | uint8_t attr:4; | ||
402 | uint32_t exchange_addr; | ||
403 | #define ATIO_EXCHANGE_ADDRESS_UNKNOWN 0xFFFFFFFF | ||
404 | struct fcp_hdr fcp_hdr; | ||
405 | struct atio7_fcp_cmnd fcp_cmnd; | ||
406 | } isp24; | ||
407 | struct { | ||
408 | uint8_t entry_type; /* Entry type. */ | ||
409 | uint8_t entry_count; /* Entry count. */ | ||
410 | uint8_t data[58]; | ||
411 | uint32_t signature; | ||
412 | #define ATIO_PROCESSED 0xDEADDEAD /* Signature */ | ||
413 | } raw; | ||
414 | } u; | ||
415 | } __packed; | ||
416 | |||
417 | #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ | ||
418 | |||
419 | /* | ||
420 | * ISP queue - Continue Target I/O (CTIO) type 7 entry (for 24xx) structure. | ||
421 | * This structure is sent to the ISP 24xx from the target driver. | ||
422 | */ | ||
423 | |||
424 | struct ctio7_to_24xx { | ||
425 | uint8_t entry_type; /* Entry type. */ | ||
426 | uint8_t entry_count; /* Entry count. */ | ||
427 | uint8_t sys_define; /* System defined. */ | ||
428 | uint8_t entry_status; /* Entry Status. */ | ||
429 | uint32_t handle; /* System defined handle */ | ||
430 | uint16_t nport_handle; | ||
431 | #define CTIO7_NHANDLE_UNRECOGNIZED 0xFFFF | ||
432 | uint16_t timeout; | ||
433 | uint16_t dseg_count; /* Data segment count. */ | ||
434 | uint8_t vp_index; | ||
435 | uint8_t add_flags; | ||
436 | uint8_t initiator_id[3]; | ||
437 | uint8_t reserved; | ||
438 | uint32_t exchange_addr; | ||
439 | union { | ||
440 | struct { | ||
441 | uint16_t reserved1; | ||
442 | uint16_t flags; | ||
443 | uint32_t residual; | ||
444 | uint16_t ox_id; | ||
445 | uint16_t scsi_status; | ||
446 | uint32_t relative_offset; | ||
447 | uint32_t reserved2; | ||
448 | uint32_t transfer_length; | ||
449 | uint32_t reserved3; | ||
450 | /* Data segment 0 address. */ | ||
451 | uint32_t dseg_0_address[2]; | ||
452 | /* Data segment 0 length. */ | ||
453 | uint32_t dseg_0_length; | ||
454 | } status0; | ||
455 | struct { | ||
456 | uint16_t sense_length; | ||
457 | uint16_t flags; | ||
458 | uint32_t residual; | ||
459 | uint16_t ox_id; | ||
460 | uint16_t scsi_status; | ||
461 | uint16_t response_len; | ||
462 | uint16_t reserved; | ||
463 | uint8_t sense_data[24]; | ||
464 | } status1; | ||
465 | } u; | ||
466 | } __packed; | ||
467 | |||
468 | /* | ||
469 | * ISP queue - CTIO type 7 from ISP 24xx to target driver | ||
470 | * returned entry structure. | ||
471 | */ | ||
472 | struct ctio7_from_24xx { | ||
473 | uint8_t entry_type; /* Entry type. */ | ||
474 | uint8_t entry_count; /* Entry count. */ | ||
475 | uint8_t sys_define; /* System defined. */ | ||
476 | uint8_t entry_status; /* Entry Status. */ | ||
477 | uint32_t handle; /* System defined handle */ | ||
478 | uint16_t status; | ||
479 | uint16_t timeout; | ||
480 | uint16_t dseg_count; /* Data segment count. */ | ||
481 | uint8_t vp_index; | ||
482 | uint8_t reserved1[5]; | ||
483 | uint32_t exchange_address; | ||
484 | uint16_t reserved2; | ||
485 | uint16_t flags; | ||
486 | uint32_t residual; | ||
487 | uint16_t ox_id; | ||
488 | uint16_t reserved3; | ||
489 | uint32_t relative_offset; | ||
490 | uint8_t reserved4[24]; | ||
491 | } __packed; | ||
492 | |||
493 | /* CTIO7 flags values */ | ||
494 | #define CTIO7_FLAGS_SEND_STATUS BIT_15 | ||
495 | #define CTIO7_FLAGS_TERMINATE BIT_14 | ||
496 | #define CTIO7_FLAGS_CONFORM_REQ BIT_13 | ||
497 | #define CTIO7_FLAGS_DONT_RET_CTIO BIT_8 | ||
498 | #define CTIO7_FLAGS_STATUS_MODE_0 0 | ||
499 | #define CTIO7_FLAGS_STATUS_MODE_1 BIT_6 | ||
500 | #define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5 | ||
501 | #define CTIO7_FLAGS_CONFIRM_SATISF BIT_4 | ||
502 | #define CTIO7_FLAGS_DSD_PTR BIT_2 | ||
503 | #define CTIO7_FLAGS_DATA_IN BIT_1 | ||
504 | #define CTIO7_FLAGS_DATA_OUT BIT_0 | ||
505 | |||
506 | #define ELS_PLOGI 0x3 | ||
507 | #define ELS_FLOGI 0x4 | ||
508 | #define ELS_LOGO 0x5 | ||
509 | #define ELS_PRLI 0x20 | ||
510 | #define ELS_PRLO 0x21 | ||
511 | #define ELS_TPRLO 0x24 | ||
512 | #define ELS_PDISC 0x50 | ||
513 | #define ELS_ADISC 0x52 | ||
514 | |||
515 | /* | ||
516 | * ISP queue - ABTS received/response entries structure definition for 24xx. | ||
517 | */ | ||
518 | #define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */ | ||
519 | #define ABTS_RESP_24XX 0x55 /* ABTS response (for 24xx) */ | ||
520 | |||
521 | /* | ||
522 | * ISP queue - ABTS received IOCB entry structure definition for 24xx. | ||
523 | * The ABTS BLS received from the wire is sent to the | ||
524 | * target driver by the ISP 24xx. | ||
525 | * The IOCB is placed on the response queue. | ||
526 | */ | ||
527 | struct abts_recv_from_24xx { | ||
528 | uint8_t entry_type; /* Entry type. */ | ||
529 | uint8_t entry_count; /* Entry count. */ | ||
530 | uint8_t sys_define; /* System defined. */ | ||
531 | uint8_t entry_status; /* Entry Status. */ | ||
532 | uint8_t reserved_1[6]; | ||
533 | uint16_t nport_handle; | ||
534 | uint8_t reserved_2[2]; | ||
535 | uint8_t vp_index; | ||
536 | uint8_t reserved_3:4; | ||
537 | uint8_t sof_type:4; | ||
538 | uint32_t exchange_address; | ||
539 | struct fcp_hdr_le fcp_hdr_le; | ||
540 | uint8_t reserved_4[16]; | ||
541 | uint32_t exchange_addr_to_abort; | ||
542 | } __packed; | ||
543 | |||
544 | #define ABTS_PARAM_ABORT_SEQ BIT_0 | ||
545 | |||
546 | struct ba_acc_le { | ||
547 | uint16_t reserved; | ||
548 | uint8_t seq_id_last; | ||
549 | uint8_t seq_id_valid; | ||
550 | #define SEQ_ID_VALID 0x80 | ||
551 | #define SEQ_ID_INVALID 0x00 | ||
552 | uint16_t rx_id; | ||
553 | uint16_t ox_id; | ||
554 | uint16_t high_seq_cnt; | ||
555 | uint16_t low_seq_cnt; | ||
556 | } __packed; | ||
557 | |||
558 | struct ba_rjt_le { | ||
559 | uint8_t vendor_uniq; | ||
560 | uint8_t reason_expl; | ||
561 | uint8_t reason_code; | ||
562 | #define BA_RJT_REASON_CODE_INVALID_COMMAND 0x1 | ||
563 | #define BA_RJT_REASON_CODE_UNABLE_TO_PERFORM 0x9 | ||
564 | uint8_t reserved; | ||
565 | } __packed; | ||
566 | |||
567 | /* | ||
568 | * ISP queue - ABTS Response IOCB entry structure definition for 24xx. | ||
569 | * The ABTS response to the ABTS received is sent by the | ||
570 | * target driver to the ISP 24xx. | ||
571 | * The IOCB is placed on the request queue. | ||
572 | */ | ||
573 | struct abts_resp_to_24xx { | ||
574 | uint8_t entry_type; /* Entry type. */ | ||
575 | uint8_t entry_count; /* Entry count. */ | ||
576 | uint8_t sys_define; /* System defined. */ | ||
577 | uint8_t entry_status; /* Entry Status. */ | ||
578 | uint32_t handle; | ||
579 | uint16_t reserved_1; | ||
580 | uint16_t nport_handle; | ||
581 | uint16_t control_flags; | ||
582 | #define ABTS_CONTR_FLG_TERM_EXCHG BIT_0 | ||
583 | uint8_t vp_index; | ||
584 | uint8_t reserved_3:4; | ||
585 | uint8_t sof_type:4; | ||
586 | uint32_t exchange_address; | ||
587 | struct fcp_hdr_le fcp_hdr_le; | ||
588 | union { | ||
589 | struct ba_acc_le ba_acct; | ||
590 | struct ba_rjt_le ba_rjt; | ||
591 | } __packed payload; | ||
592 | uint32_t reserved_4; | ||
593 | uint32_t exchange_addr_to_abort; | ||
594 | } __packed; | ||
595 | |||
596 | /* | ||
597 | * ISP queue - ABTS Response IOCB from ISP24xx Firmware entry structure. | ||
598 | * The ABTS response with completion status to the ABTS response | ||
599 | * (sent by the target driver to the ISP 24xx) is sent by the | ||
600 | * ISP24xx firmware to the target driver. | ||
601 | * The IOCB is placed on the response queue. | ||
602 | */ | ||
603 | struct abts_resp_from_24xx_fw { | ||
604 | uint8_t entry_type; /* Entry type. */ | ||
605 | uint8_t entry_count; /* Entry count. */ | ||
606 | uint8_t sys_define; /* System defined. */ | ||
607 | uint8_t entry_status; /* Entry Status. */ | ||
608 | uint32_t handle; | ||
609 | uint16_t compl_status; | ||
610 | #define ABTS_RESP_COMPL_SUCCESS 0 | ||
611 | #define ABTS_RESP_COMPL_SUBCODE_ERROR 0x31 | ||
612 | uint16_t nport_handle; | ||
613 | uint16_t reserved_1; | ||
614 | uint8_t reserved_2; | ||
615 | uint8_t reserved_3:4; | ||
616 | uint8_t sof_type:4; | ||
617 | uint32_t exchange_address; | ||
618 | struct fcp_hdr_le fcp_hdr_le; | ||
619 | uint8_t reserved_4[8]; | ||
620 | uint32_t error_subcode1; | ||
621 | #define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM 0x1E | ||
622 | uint32_t error_subcode2; | ||
623 | uint32_t exchange_addr_to_abort; | ||
624 | } __packed; | ||
625 | |||
626 | /********************************************************************\ | ||
627 | * Type Definitions used by initiator & target halves | ||
628 | \********************************************************************/ | ||
629 | |||
630 | struct qla_tgt_mgmt_cmd; | ||
631 | struct qla_tgt_sess; | ||
632 | |||
633 | /* | ||
634 | * This structure provides a template of function calls that the | ||
635 | * target driver (from within qla_target.c) can issue to the | ||
636 | * target module (tcm_qla2xxx). | ||
637 | */ | ||
638 | struct qla_tgt_func_tmpl { | ||
639 | |||
640 | int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, | ||
641 | unsigned char *, uint32_t, int, int, int); | ||
642 | void (*handle_data)(struct qla_tgt_cmd *); | ||
643 | int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t, | ||
644 | uint32_t); | ||
645 | void (*free_cmd)(struct qla_tgt_cmd *); | ||
646 | void (*free_mcmd)(struct qla_tgt_mgmt_cmd *); | ||
647 | void (*free_session)(struct qla_tgt_sess *); | ||
648 | |||
649 | int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *, | ||
650 | void *, uint8_t *, uint16_t); | ||
651 | void (*update_sess)(struct qla_tgt_sess *, port_id_t, uint16_t, bool); | ||
652 | struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *, | ||
653 | const uint16_t); | ||
654 | struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *, | ||
655 | const uint8_t *); | ||
656 | void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *); | ||
657 | void (*put_sess)(struct qla_tgt_sess *); | ||
658 | void (*shutdown_sess)(struct qla_tgt_sess *); | ||
659 | }; | ||
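
As a rough illustration (the example_* names below are hypothetical, not part of this patch), a fabric module fills in the callbacks it implements and hands the template to qlt_lport_register(); members it does not set simply stay NULL:

/* Hypothetical sketch only: example_* callbacks are illustrative stand-ins. */
static void example_free_cmd(struct qla_tgt_cmd *cmd)
{
	/* release any module-private state tied to this command */
}

static void example_shutdown_sess(struct qla_tgt_sess *sess)
{
	/* quiesce outstanding I/O on this session before teardown */
}

static struct qla_tgt_func_tmpl example_tgt_template = {
	.free_cmd	= example_free_cmd,
	.shutdown_sess	= example_shutdown_sess,
	/* handle_cmd, handle_data, handle_tmr, ... would be set likewise */
};
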
660 | |||
661 | int qla2x00_wait_for_hba_online(struct scsi_qla_host *); | ||
662 | |||
663 | #include <target/target_core_base.h> | ||
664 | |||
665 | #define QLA_TGT_TIMEOUT 10 /* in seconds */ | ||
666 | |||
667 | #define QLA_TGT_MAX_HW_PENDING_TIME 60 /* in seconds */ | ||
668 | |||
669 | /* Immediate notify status constants */ | ||
670 | #define IMM_NTFY_LIP_RESET 0x000E | ||
671 | #define IMM_NTFY_LIP_LINK_REINIT 0x000F | ||
672 | #define IMM_NTFY_IOCB_OVERFLOW 0x0016 | ||
673 | #define IMM_NTFY_ABORT_TASK 0x0020 | ||
674 | #define IMM_NTFY_PORT_LOGOUT 0x0029 | ||
675 | #define IMM_NTFY_PORT_CONFIG 0x002A | ||
676 | #define IMM_NTFY_GLBL_TPRLO 0x002D | ||
677 | #define IMM_NTFY_GLBL_LOGO 0x002E | ||
678 | #define IMM_NTFY_RESOURCE 0x0034 | ||
679 | #define IMM_NTFY_MSG_RX 0x0036 | ||
680 | #define IMM_NTFY_SRR 0x0045 | ||
681 | #define IMM_NTFY_ELS 0x0046 | ||
682 | |||
683 | /* Immediate notify task flags */ | ||
684 | #define IMM_NTFY_TASK_MGMT_SHIFT 8 | ||
685 | |||
686 | #define QLA_TGT_CLEAR_ACA 0x40 | ||
687 | #define QLA_TGT_TARGET_RESET 0x20 | ||
688 | #define QLA_TGT_LUN_RESET 0x10 | ||
689 | #define QLA_TGT_CLEAR_TS 0x04 | ||
690 | #define QLA_TGT_ABORT_TS 0x02 | ||
691 | #define QLA_TGT_ABORT_ALL_SESS 0xFFFF | ||
692 | #define QLA_TGT_ABORT_ALL 0xFFFE | ||
693 | #define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD | ||
694 | #define QLA_TGT_NEXUS_LOSS 0xFFFC | ||
695 | |||
696 | /* Notify Acknowledge flags */ | ||
697 | #define NOTIFY_ACK_RES_COUNT BIT_8 | ||
698 | #define NOTIFY_ACK_CLEAR_LIP_RESET BIT_5 | ||
699 | #define NOTIFY_ACK_TM_RESP_CODE_VALID BIT_4 | ||
700 | |||
701 | /* Command's states */ | ||
702 | #define QLA_TGT_STATE_NEW 0 /* New command + target processing */ | ||
703 | #define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */ | ||
704 | #define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */ | ||
705 | #define QLA_TGT_STATE_PROCESSED 3 /* target done processing */ | ||
706 | #define QLA_TGT_STATE_ABORTED 4 /* Command aborted */ | ||
707 | |||
708 | /* Special handles */ | ||
709 | #define QLA_TGT_NULL_HANDLE 0 | ||
710 | #define QLA_TGT_SKIP_HANDLE (0xFFFFFFFF & ~CTIO_COMPLETION_HANDLE_MARK) | ||
711 | |||
712 | /* ATIO task_codes field */ | ||
713 | #define ATIO_SIMPLE_QUEUE 0 | ||
714 | #define ATIO_HEAD_OF_QUEUE 1 | ||
715 | #define ATIO_ORDERED_QUEUE 2 | ||
716 | #define ATIO_ACA_QUEUE 4 | ||
717 | #define ATIO_UNTAGGED 5 | ||
718 | |||
719 | /* TM failed response codes, see FCP (9.4.11 FCP_RSP_INFO) */ | ||
720 | #define FC_TM_SUCCESS 0 | ||
721 | #define FC_TM_BAD_FCP_DATA 1 | ||
722 | #define FC_TM_BAD_CMD 2 | ||
723 | #define FC_TM_FCP_DATA_MISMATCH 3 | ||
724 | #define FC_TM_REJECT 4 | ||
725 | #define FC_TM_FAILED 5 | ||
726 | |||
727 | /* | ||
728 | * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was | ||
729 | * terminated, so no more actions is needed and success should be returned | ||
730 | * to target. | ||
731 | */ | ||
732 | #define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717 | ||
733 | |||
734 | #if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G) | ||
735 | #define pci_dma_lo32(a) (a & 0xffffffff) | ||
736 | #define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff) | ||
737 | #else | ||
738 | #define pci_dma_lo32(a) (a & 0xffffffff) | ||
739 | #define pci_dma_hi32(a) 0 | ||
740 | #endif | ||
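
For example, with a bus address of 0x0000001234abcd00, pci_dma_lo32() yields 0x34abcd00 and pci_dma_hi32() yields 0x00000012; on 32-bit builds without 64-bit DMA support the high half is simply 0. A minimal sketch of the usual pattern for programming such an address into an IOCB (the struct and field names below are illustrative, not taken from this patch):

/* Illustrative only: example_dseg stands in for an IOCB data segment. */
struct example_dseg {
	__le32 address_lo;
	__le32 address_hi;
	__le32 length;
};

static void example_fill_dseg(struct example_dseg *d, dma_addr_t addr, u32 len)
{
	d->address_lo = cpu_to_le32(pci_dma_lo32(addr));
	d->address_hi = cpu_to_le32(pci_dma_hi32(addr));
	d->length     = cpu_to_le32(len);
}
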
741 | |||
742 | #define QLA_TGT_SENSE_VALID(sense) ((sense != NULL) && \ | ||
743 | (((const uint8_t *)(sense))[0] & 0x70) == 0x70) | ||
744 | |||
745 | struct qla_port_24xx_data { | ||
746 | uint8_t port_name[WWN_SIZE]; | ||
747 | uint16_t loop_id; | ||
748 | uint16_t reserved; | ||
749 | }; | ||
750 | |||
751 | struct qla_tgt { | ||
752 | struct scsi_qla_host *vha; | ||
753 | struct qla_hw_data *ha; | ||
754 | |||
755 | /* | ||
756 | * To sync between IRQ handlers and qlt_target_release(). Needed, | ||
757 | * because req_pkt() can drop/reacquire HW lock inside. Protected by | ||
758 | * HW lock. | ||
759 | */ | ||
760 | int irq_cmd_count; | ||
761 | |||
762 | int datasegs_per_cmd, datasegs_per_cont, sg_tablesize; | ||
763 | |||
764 | /* Target's flags, serialized by pha->hardware_lock */ | ||
765 | unsigned int tgt_enable_64bit_addr:1; /* 64-bits PCI addr enabled */ | ||
766 | unsigned int link_reinit_iocb_pending:1; | ||
767 | |||
768 | /* | ||
769 | * Protected by tgt_mutex AND hardware_lock for writing and tgt_mutex | ||
770 | * OR hardware_lock for reading. | ||
771 | */ | ||
772 | int tgt_stop; /* the target mode driver is being stopped */ | ||
773 | int tgt_stopped; /* the target mode driver has been stopped */ | ||
774 | |||
775 | /* Count of sessions referring to qla_tgt. Protected by hardware_lock. */ | ||
776 | int sess_count; | ||
777 | |||
778 | /* Protected by hardware_lock. Addition also protected by tgt_mutex. */ | ||
779 | struct list_head sess_list; | ||
780 | |||
781 | /* Protected by hardware_lock */ | ||
782 | struct list_head del_sess_list; | ||
783 | struct delayed_work sess_del_work; | ||
784 | |||
785 | spinlock_t sess_work_lock; | ||
786 | struct list_head sess_works_list; | ||
787 | struct work_struct sess_work; | ||
788 | |||
789 | struct imm_ntfy_from_isp link_reinit_iocb; | ||
790 | wait_queue_head_t waitQ; | ||
791 | int notify_ack_expected; | ||
792 | int abts_resp_expected; | ||
793 | int modify_lun_expected; | ||
794 | |||
795 | int ctio_srr_id; | ||
796 | int imm_srr_id; | ||
797 | spinlock_t srr_lock; | ||
798 | struct list_head srr_ctio_list; | ||
799 | struct list_head srr_imm_list; | ||
800 | struct work_struct srr_work; | ||
801 | |||
802 | atomic_t tgt_global_resets_count; | ||
803 | |||
804 | struct list_head tgt_list_entry; | ||
805 | }; | ||
806 | |||
807 | /* | ||
808 | * Equivalent to IT Nexus (Initiator-Target) | ||
809 | */ | ||
810 | struct qla_tgt_sess { | ||
811 | uint16_t loop_id; | ||
812 | port_id_t s_id; | ||
813 | |||
814 | unsigned int conf_compl_supported:1; | ||
815 | unsigned int deleted:1; | ||
816 | unsigned int local:1; | ||
817 | |||
818 | struct se_session *se_sess; | ||
819 | struct scsi_qla_host *vha; | ||
820 | struct qla_tgt *tgt; | ||
821 | |||
822 | struct list_head sess_list_entry; | ||
823 | unsigned long expires; | ||
824 | struct list_head del_list_entry; | ||
825 | |||
826 | uint8_t port_name[WWN_SIZE]; | ||
827 | struct work_struct free_work; | ||
828 | }; | ||
829 | |||
830 | struct qla_tgt_cmd { | ||
831 | struct qla_tgt_sess *sess; | ||
832 | int state; | ||
833 | struct se_cmd se_cmd; | ||
834 | struct work_struct free_work; | ||
835 | struct work_struct work; | ||
836 | /* Sense buffer that will be mapped into outgoing status */ | ||
837 | unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; | ||
838 | |||
839 | /* to save extra sess dereferences */ | ||
840 | unsigned int conf_compl_supported:1; | ||
841 | unsigned int sg_mapped:1; | ||
842 | unsigned int free_sg:1; | ||
843 | unsigned int aborted:1; /* Needed in case of SRR */ | ||
844 | unsigned int write_data_transferred:1; | ||
845 | |||
846 | struct scatterlist *sg; /* cmd data buffer SG vector */ | ||
847 | int sg_cnt; /* SG segments count */ | ||
848 | int bufflen; /* cmd buffer length */ | ||
849 | int offset; | ||
850 | uint32_t tag; | ||
851 | uint32_t unpacked_lun; | ||
852 | enum dma_data_direction dma_data_direction; | ||
853 | |||
854 | uint16_t loop_id; /* to save extra sess dereferences */ | ||
855 | struct qla_tgt *tgt; /* to save extra sess dereferences */ | ||
856 | struct scsi_qla_host *vha; | ||
857 | struct list_head cmd_list; | ||
858 | |||
859 | struct atio_from_isp atio; | ||
860 | }; | ||
861 | |||
862 | struct qla_tgt_sess_work_param { | ||
863 | struct list_head sess_works_list_entry; | ||
864 | |||
865 | #define QLA_TGT_SESS_WORK_ABORT 1 | ||
866 | #define QLA_TGT_SESS_WORK_TM 2 | ||
867 | int type; | ||
868 | |||
869 | union { | ||
870 | struct abts_recv_from_24xx abts; | ||
871 | struct imm_ntfy_from_isp tm_iocb; | ||
872 | struct atio_from_isp tm_iocb2; | ||
873 | }; | ||
874 | }; | ||
875 | |||
876 | struct qla_tgt_mgmt_cmd { | ||
877 | uint8_t tmr_func; | ||
878 | uint8_t fc_tm_rsp; | ||
879 | struct qla_tgt_sess *sess; | ||
880 | struct se_cmd se_cmd; | ||
881 | struct work_struct free_work; | ||
882 | unsigned int flags; | ||
883 | #define QLA24XX_MGMT_SEND_NACK 1 | ||
884 | union { | ||
885 | struct atio_from_isp atio; | ||
886 | struct imm_ntfy_from_isp imm_ntfy; | ||
887 | struct abts_recv_from_24xx abts; | ||
888 | } __packed orig_iocb; | ||
889 | }; | ||
890 | |||
891 | struct qla_tgt_prm { | ||
892 | struct qla_tgt_cmd *cmd; | ||
893 | struct qla_tgt *tgt; | ||
894 | void *pkt; | ||
895 | struct scatterlist *sg; /* cmd data buffer SG vector */ | ||
896 | int seg_cnt; | ||
897 | int req_cnt; | ||
898 | uint16_t rq_result; | ||
899 | uint16_t scsi_status; | ||
900 | unsigned char *sense_buffer; | ||
901 | int sense_buffer_len; | ||
902 | int residual; | ||
903 | int add_status_pkt; | ||
904 | }; | ||
905 | |||
906 | struct qla_tgt_srr_imm { | ||
907 | struct list_head srr_list_entry; | ||
908 | int srr_id; | ||
909 | struct imm_ntfy_from_isp imm_ntfy; | ||
910 | }; | ||
911 | |||
912 | struct qla_tgt_srr_ctio { | ||
913 | struct list_head srr_list_entry; | ||
914 | int srr_id; | ||
915 | struct qla_tgt_cmd *cmd; | ||
916 | }; | ||
917 | |||
918 | #define QLA_TGT_XMIT_DATA 1 | ||
919 | #define QLA_TGT_XMIT_STATUS 2 | ||
920 | #define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) | ||
921 | |||
922 | |||
923 | extern struct qla_tgt_data qla_target; | ||
924 | /* | ||
925 | * Internal function prototypes | ||
926 | */ | ||
927 | void qlt_disable_vha(struct scsi_qla_host *); | ||
928 | |||
929 | /* | ||
930 | * Function prototypes for qla_target.c logic used by qla2xxx LLD code. | ||
931 | */ | ||
932 | extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *); | ||
933 | extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *); | ||
934 | extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64, | ||
935 | int (*callback)(struct scsi_qla_host *), void *); | ||
936 | extern void qlt_lport_deregister(struct scsi_qla_host *); | ||
937 | extern void qlt_unreg_sess(struct qla_tgt_sess *); | ||
938 | extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); | ||
939 | extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *); | ||
940 | extern void qlt_set_mode(struct scsi_qla_host *ha); | ||
941 | extern void qlt_clear_mode(struct scsi_qla_host *ha); | ||
942 | extern int __init qlt_init(void); | ||
943 | extern void qlt_exit(void); | ||
944 | extern void qlt_update_vp_map(struct scsi_qla_host *, int); | ||
945 | |||
946 | /* | ||
947 | * This macro is used during early initializations when host->active_mode | ||
948 | * is not set. Right now, the ha value is ignored. | ||
949 | */ | ||
950 | #define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED) | ||
951 | |||
952 | static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha) | ||
953 | { | ||
954 | return ha->host->active_mode & MODE_TARGET; | ||
955 | } | ||
956 | |||
957 | static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha) | ||
958 | { | ||
959 | return ha->host->active_mode & MODE_INITIATOR; | ||
960 | } | ||
961 | |||
962 | static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha) | ||
963 | { | ||
964 | if (ha->host->active_mode & MODE_INITIATOR) | ||
965 | ha->host->active_mode &= ~MODE_INITIATOR; | ||
966 | else | ||
967 | ha->host->active_mode |= MODE_INITIATOR; | ||
968 | } | ||
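
A small, purely illustrative sketch of how LLD code might consult these helpers when deciding whether incoming traffic belongs to the target-mode path (the function below is hypothetical, not part of this patch):

/* Hypothetical example: route work only when MODE_TARGET is active. */
static bool example_route_to_target(struct scsi_qla_host *vha)
{
	if (!qla_tgt_mode_enabled(vha))
		return false;		/* MODE_TARGET not set on this host */
	/* Note: initiator mode may be enabled at the same time. */
	return true;
}
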
969 | |||
970 | /* | ||
971 | * Exported symbols from qla_target.c LLD logic used by qla2xxx code. | ||
972 | */ | ||
973 | extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *, | ||
974 | struct atio_from_isp *); | ||
975 | extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); | ||
976 | extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); | ||
977 | extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); | ||
978 | extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); | ||
979 | extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); | ||
980 | extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); | ||
981 | extern void qlt_ctio_completion(struct scsi_qla_host *, uint32_t); | ||
982 | extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *); | ||
983 | extern void qlt_enable_vha(struct scsi_qla_host *); | ||
984 | extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *); | ||
985 | extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *); | ||
986 | extern void qlt_init_atio_q_entries(struct scsi_qla_host *); | ||
987 | extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *); | ||
988 | extern void qlt_24xx_config_rings(struct scsi_qla_host *, | ||
989 | device_reg_t __iomem *); | ||
990 | extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *, | ||
991 | struct nvram_24xx *); | ||
992 | extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *, | ||
993 | struct init_cb_24xx *); | ||
994 | extern int qlt_24xx_process_response_error(struct scsi_qla_host *, | ||
995 | struct sts_entry_24xx *); | ||
996 | extern void qlt_modify_vp_config(struct scsi_qla_host *, | ||
997 | struct vp_config_entry_24xx *); | ||
998 | extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *); | ||
999 | extern int qlt_mem_alloc(struct qla_hw_data *); | ||
1000 | extern void qlt_mem_free(struct qla_hw_data *); | ||
1001 | extern void qlt_stop_phase1(struct qla_tgt *); | ||
1002 | extern void qlt_stop_phase2(struct qla_tgt *); | ||
1003 | |||
1004 | #endif /* __QLA_TARGET_H */ | ||
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 49697ca41e7..13b6357c1fa 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -1,15 +1,15 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2012 QLogic Corporation | 3 | * Copyright (c) 2003-2011 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.04.00.08-k" | 10 | #define QLA2XXX_VERSION "8.03.07.07-k" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 4 | 13 | #define QLA_DRIVER_MINOR_VER 3 |
14 | #define QLA_DRIVER_PATCH_VER 0 | 14 | #define QLA_DRIVER_PATCH_VER 7 |
15 | #define QLA_DRIVER_BETA_VER 0 | 15 | #define QLA_DRIVER_BETA_VER 3 |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c deleted file mode 100644 index d182c96e17e..00000000000 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ /dev/null | |||
@@ -1,1964 +0,0 @@ | |||
1 | /******************************************************************************* | ||
2 | * This file contains tcm implementation using v4 configfs fabric infrastructure | ||
3 | * for QLogic target mode HBAs | ||
4 | * | ||
5 | * (c) Copyright 2010-2011 RisingTide Systems LLC. | ||
6 | * | ||
7 | * Licensed to the Linux Foundation under the General Public License (GPL) | ||
8 | * version 2. | ||
9 | * | ||
10 | * Author: Nicholas A. Bellinger <nab@risingtidesystems.com> | ||
11 | * | ||
12 | * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from | ||
13 | * the TCM_FC / Open-FCoE.org fabric module. | ||
14 | * | ||
15 | * Copyright (c) 2010 Cisco Systems, Inc | ||
16 | * | ||
17 | * This program is free software; you can redistribute it and/or modify | ||
18 | * it under the terms of the GNU General Public License as published by | ||
19 | * the Free Software Foundation; either version 2 of the License, or | ||
20 | * (at your option) any later version. | ||
21 | * | ||
22 | * This program is distributed in the hope that it will be useful, | ||
23 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
24 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
25 | * GNU General Public License for more details. | ||
26 | ****************************************************************************/ | ||
27 | |||
28 | |||
29 | #include <linux/module.h> | ||
30 | #include <linux/moduleparam.h> | ||
31 | #include <generated/utsrelease.h> | ||
32 | #include <linux/utsname.h> | ||
33 | #include <linux/init.h> | ||
34 | #include <linux/list.h> | ||
35 | #include <linux/slab.h> | ||
36 | #include <linux/kthread.h> | ||
37 | #include <linux/types.h> | ||
38 | #include <linux/string.h> | ||
39 | #include <linux/configfs.h> | ||
40 | #include <linux/ctype.h> | ||
41 | #include <asm/unaligned.h> | ||
42 | #include <scsi/scsi.h> | ||
43 | #include <scsi/scsi_host.h> | ||
44 | #include <scsi/scsi_device.h> | ||
45 | #include <scsi/scsi_cmnd.h> | ||
46 | #include <target/target_core_base.h> | ||
47 | #include <target/target_core_fabric.h> | ||
48 | #include <target/target_core_fabric_configfs.h> | ||
49 | #include <target/target_core_configfs.h> | ||
50 | #include <target/configfs_macros.h> | ||
51 | |||
52 | #include "qla_def.h" | ||
53 | #include "qla_target.h" | ||
54 | #include "tcm_qla2xxx.h" | ||
55 | |||
56 | struct workqueue_struct *tcm_qla2xxx_free_wq; | ||
57 | struct workqueue_struct *tcm_qla2xxx_cmd_wq; | ||
58 | |||
59 | static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg) | ||
60 | { | ||
61 | return 1; | ||
62 | } | ||
63 | |||
64 | static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg) | ||
65 | { | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * Parse WWN. | ||
71 | * If strict, we require lower-case hex and colon separators to be sure | ||
72 | * the name is the same as what would be generated by ft_format_wwn() | ||
73 | * so the name and wwn are mapped one-to-one. | ||
74 | */ | ||
75 | static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict) | ||
76 | { | ||
77 | const char *cp; | ||
78 | char c; | ||
79 | u32 nibble; | ||
80 | u32 byte = 0; | ||
81 | u32 pos = 0; | ||
82 | u32 err; | ||
83 | |||
84 | *wwn = 0; | ||
85 | for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) { | ||
86 | c = *cp; | ||
87 | if (c == '\n' && cp[1] == '\0') | ||
88 | continue; | ||
89 | if (strict && pos++ == 2 && byte++ < 7) { | ||
90 | pos = 0; | ||
91 | if (c == ':') | ||
92 | continue; | ||
93 | err = 1; | ||
94 | goto fail; | ||
95 | } | ||
96 | if (c == '\0') { | ||
97 | err = 2; | ||
98 | if (strict && byte != 8) | ||
99 | goto fail; | ||
100 | return cp - name; | ||
101 | } | ||
102 | err = 3; | ||
103 | if (isdigit(c)) | ||
104 | nibble = c - '0'; | ||
105 | else if (isxdigit(c) && (islower(c) || !strict)) | ||
106 | nibble = tolower(c) - 'a' + 10; | ||
107 | else | ||
108 | goto fail; | ||
109 | *wwn = (*wwn << 4) | nibble; | ||
110 | } | ||
111 | err = 4; | ||
112 | fail: | ||
113 | pr_debug("err %u len %zu pos %u byte %u\n", | ||
114 | err, cp - name, pos, byte); | ||
115 | return -1; | ||
116 | } | ||
117 | |||
118 | static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn) | ||
119 | { | ||
120 | u8 b[8]; | ||
121 | |||
122 | put_unaligned_be64(wwn, b); | ||
123 | return snprintf(buf, len, | ||
124 | "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", | ||
125 | b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]); | ||
126 | } | ||
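
In strict mode the parser only accepts the exact lower-case, colon-separated form produced by tcm_qla2xxx_format_wwn(), so the two functions round-trip. A short illustrative example (the WWN value is an arbitrary sample):

/* Hypothetical round trip; the WWN value is an arbitrary sample. */
static void example_wwn_round_trip(void)
{
	char name[TCM_QLA2XXX_NAMELEN];
	u64 wwn;

	tcm_qla2xxx_format_wwn(name, sizeof(name), 0x500601601234abcdULL);
	/* name now holds "50:06:01:60:12:34:ab:cd" */

	if (tcm_qla2xxx_parse_wwn(name, &wwn, 1) < 0)
		pr_err("example: WWN parse failed\n");
	/* otherwise wwn == 0x500601601234abcd */
}
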
127 | |||
128 | static char *tcm_qla2xxx_get_fabric_name(void) | ||
129 | { | ||
130 | return "qla2xxx"; | ||
131 | } | ||
132 | |||
133 | /* | ||
134 | * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn | ||
135 | */ | ||
136 | static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm) | ||
137 | { | ||
138 | unsigned int i, j; | ||
139 | u8 wwn[8]; | ||
140 | |||
141 | memset(wwn, 0, sizeof(wwn)); | ||
142 | |||
143 | /* Validate and store the new name */ | ||
144 | for (i = 0, j = 0; i < 16; i++) { | ||
145 | int value; | ||
146 | |||
147 | value = hex_to_bin(*ns++); | ||
148 | if (value >= 0) | ||
149 | j = (j << 4) | value; | ||
150 | else | ||
151 | return -EINVAL; | ||
152 | |||
153 | if (i % 2) { | ||
154 | wwn[i/2] = j & 0xff; | ||
155 | j = 0; | ||
156 | } | ||
157 | } | ||
158 | |||
159 | *nm = wwn_to_u64(wwn); | ||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | /* | ||
164 | * This parsing logic follows drivers/scsi/scsi_transport_fc.c: | ||
165 | * store_fc_host_vport_create() | ||
166 | */ | ||
167 | static int tcm_qla2xxx_npiv_parse_wwn( | ||
168 | const char *name, | ||
169 | size_t count, | ||
170 | u64 *wwpn, | ||
171 | u64 *wwnn) | ||
172 | { | ||
173 | unsigned int cnt = count; | ||
174 | int rc; | ||
175 | |||
176 | *wwpn = 0; | ||
177 | *wwnn = 0; | ||
178 | |||
179 | /* count may include an LF at the end of the string */ | ||
180 | if (name[cnt-1] == '\n') | ||
181 | cnt--; | ||
182 | |||
183 | /* validate we have enough characters for WWPN */ | ||
184 | if ((cnt != (16+1+16)) || (name[16] != ':')) | ||
185 | return -EINVAL; | ||
186 | |||
187 | rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn); | ||
188 | if (rc != 0) | ||
189 | return rc; | ||
190 | |||
191 | rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn); | ||
192 | if (rc != 0) | ||
193 | return rc; | ||
194 | |||
195 | return 0; | ||
196 | } | ||
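
The expected input is <16 hex digits>:<16 hex digits> (WWPN first, then WWNN), with an optional trailing newline. An illustrative call, using arbitrary sample values:

/* Hypothetical usage; the WWPN:WWNN string is an arbitrary sample. */
static void example_npiv_parse(void)
{
	const char *arg = "500601601234abcd:500601601234abce";
	u64 wwpn, wwnn;

	if (tcm_qla2xxx_npiv_parse_wwn(arg, strlen(arg), &wwpn, &wwnn) == 0)
		pr_debug("example: wwpn 0x%016llx wwnn 0x%016llx\n",
			 (unsigned long long)wwpn,
			 (unsigned long long)wwnn);
}
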
197 | |||
198 | static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len, | ||
199 | u64 wwpn, u64 wwnn) | ||
200 | { | ||
201 | u8 b[8], b2[8]; | ||
202 | |||
203 | put_unaligned_be64(wwpn, b); | ||
204 | put_unaligned_be64(wwnn, b2); | ||
205 | return snprintf(buf, len, | ||
206 | "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x," | ||
207 | "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", | ||
208 | b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7], | ||
209 | b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]); | ||
210 | } | ||
211 | |||
212 | static char *tcm_qla2xxx_npiv_get_fabric_name(void) | ||
213 | { | ||
214 | return "qla2xxx_npiv"; | ||
215 | } | ||
216 | |||
217 | static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg) | ||
218 | { | ||
219 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
220 | struct tcm_qla2xxx_tpg, se_tpg); | ||
221 | struct tcm_qla2xxx_lport *lport = tpg->lport; | ||
222 | u8 proto_id; | ||
223 | |||
224 | switch (lport->lport_proto_id) { | ||
225 | case SCSI_PROTOCOL_FCP: | ||
226 | default: | ||
227 | proto_id = fc_get_fabric_proto_ident(se_tpg); | ||
228 | break; | ||
229 | } | ||
230 | |||
231 | return proto_id; | ||
232 | } | ||
233 | |||
234 | static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg) | ||
235 | { | ||
236 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
237 | struct tcm_qla2xxx_tpg, se_tpg); | ||
238 | struct tcm_qla2xxx_lport *lport = tpg->lport; | ||
239 | |||
240 | return lport->lport_naa_name; | ||
241 | } | ||
242 | |||
243 | static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg) | ||
244 | { | ||
245 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
246 | struct tcm_qla2xxx_tpg, se_tpg); | ||
247 | struct tcm_qla2xxx_lport *lport = tpg->lport; | ||
248 | |||
249 | return &lport->lport_npiv_name[0]; | ||
250 | } | ||
251 | |||
252 | static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg) | ||
253 | { | ||
254 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
255 | struct tcm_qla2xxx_tpg, se_tpg); | ||
256 | return tpg->lport_tpgt; | ||
257 | } | ||
258 | |||
259 | static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg) | ||
260 | { | ||
261 | return 1; | ||
262 | } | ||
263 | |||
264 | static u32 tcm_qla2xxx_get_pr_transport_id( | ||
265 | struct se_portal_group *se_tpg, | ||
266 | struct se_node_acl *se_nacl, | ||
267 | struct t10_pr_registration *pr_reg, | ||
268 | int *format_code, | ||
269 | unsigned char *buf) | ||
270 | { | ||
271 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
272 | struct tcm_qla2xxx_tpg, se_tpg); | ||
273 | struct tcm_qla2xxx_lport *lport = tpg->lport; | ||
274 | int ret = 0; | ||
275 | |||
276 | switch (lport->lport_proto_id) { | ||
277 | case SCSI_PROTOCOL_FCP: | ||
278 | default: | ||
279 | ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
280 | format_code, buf); | ||
281 | break; | ||
282 | } | ||
283 | |||
284 | return ret; | ||
285 | } | ||
286 | |||
287 | static u32 tcm_qla2xxx_get_pr_transport_id_len( | ||
288 | struct se_portal_group *se_tpg, | ||
289 | struct se_node_acl *se_nacl, | ||
290 | struct t10_pr_registration *pr_reg, | ||
291 | int *format_code) | ||
292 | { | ||
293 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
294 | struct tcm_qla2xxx_tpg, se_tpg); | ||
295 | struct tcm_qla2xxx_lport *lport = tpg->lport; | ||
296 | int ret = 0; | ||
297 | |||
298 | switch (lport->lport_proto_id) { | ||
299 | case SCSI_PROTOCOL_FCP: | ||
300 | default: | ||
301 | ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
302 | format_code); | ||
303 | break; | ||
304 | } | ||
305 | |||
306 | return ret; | ||
307 | } | ||
308 | |||
309 | static char *tcm_qla2xxx_parse_pr_out_transport_id( | ||
310 | struct se_portal_group *se_tpg, | ||
311 | const char *buf, | ||
312 | u32 *out_tid_len, | ||
313 | char **port_nexus_ptr) | ||
314 | { | ||
315 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
316 | struct tcm_qla2xxx_tpg, se_tpg); | ||
317 | struct tcm_qla2xxx_lport *lport = tpg->lport; | ||
318 | char *tid = NULL; | ||
319 | |||
320 | switch (lport->lport_proto_id) { | ||
321 | case SCSI_PROTOCOL_FCP: | ||
322 | default: | ||
323 | tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
324 | port_nexus_ptr); | ||
325 | break; | ||
326 | } | ||
327 | |||
328 | return tid; | ||
329 | } | ||
330 | |||
331 | static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg) | ||
332 | { | ||
333 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
334 | struct tcm_qla2xxx_tpg, se_tpg); | ||
335 | |||
336 | return QLA_TPG_ATTRIB(tpg)->generate_node_acls; | ||
337 | } | ||
338 | |||
339 | static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg) | ||
340 | { | ||
341 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
342 | struct tcm_qla2xxx_tpg, se_tpg); | ||
343 | |||
344 | return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls; | ||
345 | } | ||
346 | |||
347 | static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg) | ||
348 | { | ||
349 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
350 | struct tcm_qla2xxx_tpg, se_tpg); | ||
351 | |||
352 | return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect; | ||
353 | } | ||
354 | |||
355 | static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg) | ||
356 | { | ||
357 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
358 | struct tcm_qla2xxx_tpg, se_tpg); | ||
359 | |||
360 | return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect; | ||
361 | } | ||
362 | |||
363 | static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl( | ||
364 | struct se_portal_group *se_tpg) | ||
365 | { | ||
366 | struct tcm_qla2xxx_nacl *nacl; | ||
367 | |||
368 | nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL); | ||
369 | if (!nacl) { | ||
370 | pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n"); | ||
371 | return NULL; | ||
372 | } | ||
373 | |||
374 | return &nacl->se_node_acl; | ||
375 | } | ||
376 | |||
377 | static void tcm_qla2xxx_release_fabric_acl( | ||
378 | struct se_portal_group *se_tpg, | ||
379 | struct se_node_acl *se_nacl) | ||
380 | { | ||
381 | struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, | ||
382 | struct tcm_qla2xxx_nacl, se_node_acl); | ||
383 | kfree(nacl); | ||
384 | } | ||
385 | |||
386 | static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg) | ||
387 | { | ||
388 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
389 | struct tcm_qla2xxx_tpg, se_tpg); | ||
390 | |||
391 | return tpg->lport_tpgt; | ||
392 | } | ||
393 | |||
394 | static void tcm_qla2xxx_complete_mcmd(struct work_struct *work) | ||
395 | { | ||
396 | struct qla_tgt_mgmt_cmd *mcmd = container_of(work, | ||
397 | struct qla_tgt_mgmt_cmd, free_work); | ||
398 | |||
399 | transport_generic_free_cmd(&mcmd->se_cmd, 0); | ||
400 | } | ||
401 | |||
402 | /* | ||
403 | * Called from qla_target_template->free_mcmd(), and will call | ||
404 | * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops | ||
405 | * release callback. qla_hw_data->hardware_lock is expected to be held | ||
406 | */ | ||
407 | static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) | ||
408 | { | ||
409 | INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd); | ||
410 | queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work); | ||
411 | } | ||
412 | |||
413 | static void tcm_qla2xxx_complete_free(struct work_struct *work) | ||
414 | { | ||
415 | struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); | ||
416 | |||
417 | transport_generic_free_cmd(&cmd->se_cmd, 0); | ||
418 | } | ||
419 | |||
420 | /* | ||
421 | * Called from qla_target_template->free_cmd(), and will call | ||
422 | * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops | ||
423 | * release callback. qla_hw_data->hardware_lock is expected to be held | ||
424 | */ | ||
425 | static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) | ||
426 | { | ||
427 | INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); | ||
428 | queue_work(tcm_qla2xxx_free_wq, &cmd->work); | ||
429 | } | ||
430 | |||
431 | /* | ||
432 | * Called from struct target_core_fabric_ops->check_stop_free() context | ||
433 | */ | ||
434 | static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd) | ||
435 | { | ||
436 | return target_put_sess_cmd(se_cmd->se_sess, se_cmd); | ||
437 | } | ||
438 | |||
439 | /* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying | ||
440 | * fabric descriptor for the @se_cmd command to release | ||
441 | */ | ||
442 | static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd) | ||
443 | { | ||
444 | struct qla_tgt_cmd *cmd; | ||
445 | |||
446 | if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) { | ||
447 | struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd, | ||
448 | struct qla_tgt_mgmt_cmd, se_cmd); | ||
449 | qlt_free_mcmd(mcmd); | ||
450 | return; | ||
451 | } | ||
452 | |||
453 | cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); | ||
454 | qlt_free_cmd(cmd); | ||
455 | } | ||
456 | |||
457 | static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess) | ||
458 | { | ||
459 | struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; | ||
460 | struct scsi_qla_host *vha; | ||
461 | unsigned long flags; | ||
462 | |||
463 | BUG_ON(!sess); | ||
464 | vha = sess->vha; | ||
465 | |||
466 | spin_lock_irqsave(&vha->hw->hardware_lock, flags); | ||
467 | target_sess_cmd_list_set_waiting(se_sess); | ||
468 | spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); | ||
469 | |||
470 | return 1; | ||
471 | } | ||
472 | |||
473 | static void tcm_qla2xxx_close_session(struct se_session *se_sess) | ||
474 | { | ||
475 | struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; | ||
476 | struct scsi_qla_host *vha; | ||
477 | unsigned long flags; | ||
478 | |||
479 | BUG_ON(!sess); | ||
480 | vha = sess->vha; | ||
481 | |||
482 | spin_lock_irqsave(&vha->hw->hardware_lock, flags); | ||
483 | qlt_unreg_sess(sess); | ||
484 | spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); | ||
485 | } | ||
486 | |||
487 | static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess) | ||
488 | { | ||
489 | return 0; | ||
490 | } | ||
491 | |||
492 | /* | ||
493 | * The LIO target core uses DMA_TO_DEVICE to mean that data is going | ||
494 | * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean | ||
495 | * that data is coming from the target (eg handling a READ). However, | ||
496 | * this is just the opposite of what we have to tell the DMA mapping | ||
497 | * layer -- eg when handling a READ, the HBA will have to DMA the data | ||
498 | * out of memory so it can send it to the initiator, which means we | ||
499 | * need to use DMA_TO_DEVICE when we map the data. | ||
500 | */ | ||
501 | static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd) | ||
502 | { | ||
503 | if (se_cmd->se_cmd_flags & SCF_BIDI) | ||
504 | return DMA_BIDIRECTIONAL; | ||
505 | |||
506 | switch (se_cmd->data_direction) { | ||
507 | case DMA_TO_DEVICE: | ||
508 | return DMA_FROM_DEVICE; | ||
509 | case DMA_FROM_DEVICE: | ||
510 | return DMA_TO_DEVICE; | ||
511 | case DMA_NONE: | ||
512 | default: | ||
513 | return DMA_NONE; | ||
514 | } | ||
515 | } | ||
516 | |||
517 | static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) | ||
518 | { | ||
519 | struct qla_tgt_cmd *cmd = container_of(se_cmd, | ||
520 | struct qla_tgt_cmd, se_cmd); | ||
521 | |||
522 | cmd->bufflen = se_cmd->data_length; | ||
523 | cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd); | ||
524 | |||
525 | cmd->sg_cnt = se_cmd->t_data_nents; | ||
526 | cmd->sg = se_cmd->t_data_sg; | ||
527 | |||
528 | /* | ||
529 | * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup | ||
530 | * the SGL mappings into PCIe memory for incoming FCP WRITE data. | ||
531 | */ | ||
532 | return qlt_rdy_to_xfer(cmd); | ||
533 | } | ||
534 | |||
535 | static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd) | ||
536 | { | ||
537 | unsigned long flags; | ||
538 | /* | ||
539 | * Check for WRITE_PENDING status to determine if we need to wait for | ||
540 | * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data(). | ||
541 | */ | ||
542 | spin_lock_irqsave(&se_cmd->t_state_lock, flags); | ||
543 | if (se_cmd->t_state == TRANSPORT_WRITE_PENDING || | ||
544 | se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { | ||
545 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); | ||
546 | wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, | ||
547 | 3000); | ||
548 | return 0; | ||
549 | } | ||
550 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); | ||
551 | |||
552 | return 0; | ||
553 | } | ||
554 | |||
555 | static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl) | ||
556 | { | ||
557 | return; | ||
558 | } | ||
559 | |||
560 | static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd) | ||
561 | { | ||
562 | struct qla_tgt_cmd *cmd = container_of(se_cmd, | ||
563 | struct qla_tgt_cmd, se_cmd); | ||
564 | |||
565 | return cmd->tag; | ||
566 | } | ||
567 | |||
568 | static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd) | ||
569 | { | ||
570 | return 0; | ||
571 | } | ||
572 | |||
573 | /* | ||
574 | * Called from process context in qla_target.c:qlt_do_work() code | ||
575 | */ | ||
576 | static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, | ||
577 | unsigned char *cdb, uint32_t data_length, int fcp_task_attr, | ||
578 | int data_dir, int bidi) | ||
579 | { | ||
580 | struct se_cmd *se_cmd = &cmd->se_cmd; | ||
581 | struct se_session *se_sess; | ||
582 | struct qla_tgt_sess *sess; | ||
583 | int flags = TARGET_SCF_ACK_KREF; | ||
584 | |||
585 | if (bidi) | ||
586 | flags |= TARGET_SCF_BIDI_OP; | ||
587 | |||
588 | sess = cmd->sess; | ||
589 | if (!sess) { | ||
590 | pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n"); | ||
591 | return -EINVAL; | ||
592 | } | ||
593 | |||
594 | se_sess = sess->se_sess; | ||
595 | if (!se_sess) { | ||
596 | pr_err("Unable to locate active struct se_session\n"); | ||
597 | return -EINVAL; | ||
598 | } | ||
599 | |||
600 | return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0], | ||
601 | cmd->unpacked_lun, data_length, fcp_task_attr, | ||
602 | data_dir, flags); | ||
603 | } | ||
604 | |||
605 | static void tcm_qla2xxx_handle_data_work(struct work_struct *work) | ||
606 | { | ||
607 | struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); | ||
608 | |||
609 | /* | ||
610 | * Ensure that the complete FCP WRITE payload has been received. | ||
611 | * Otherwise return an exception via CHECK_CONDITION status. | ||
612 | */ | ||
613 | if (!cmd->write_data_transferred) { | ||
614 | /* | ||
615 | * Check if se_cmd has already been aborted via LUN_RESET, and | ||
616 | * waiting upon completion in tcm_qla2xxx_write_pending_status() | ||
617 | */ | ||
618 | if (cmd->se_cmd.transport_state & CMD_T_ABORTED) { | ||
619 | complete(&cmd->se_cmd.t_transport_stop_comp); | ||
620 | return; | ||
621 | } | ||
622 | |||
623 | transport_generic_request_failure(&cmd->se_cmd, | ||
624 | TCM_CHECK_CONDITION_ABORT_CMD); | ||
625 | return; | ||
626 | } | ||
627 | |||
628 | return target_execute_cmd(&cmd->se_cmd); | ||
629 | } | ||
630 | |||
631 | /* | ||
632 | * Called from qla_target.c:qlt_do_ctio_completion() | ||
633 | */ | ||
634 | static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) | ||
635 | { | ||
636 | INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work); | ||
637 | queue_work(tcm_qla2xxx_free_wq, &cmd->work); | ||
638 | } | ||
639 | |||
640 | /* | ||
641 | * Called from qla_target.c:qlt_issue_task_mgmt() | ||
642 | */ | ||
643 | static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun, | ||
644 | uint8_t tmr_func, uint32_t tag) | ||
645 | { | ||
646 | struct qla_tgt_sess *sess = mcmd->sess; | ||
647 | struct se_cmd *se_cmd = &mcmd->se_cmd; | ||
648 | |||
649 | return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd, | ||
650 | tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF); | ||
651 | } | ||
652 | |||
653 | static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) | ||
654 | { | ||
655 | struct qla_tgt_cmd *cmd = container_of(se_cmd, | ||
656 | struct qla_tgt_cmd, se_cmd); | ||
657 | |||
658 | cmd->bufflen = se_cmd->data_length; | ||
659 | cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd); | ||
660 | cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); | ||
661 | |||
662 | cmd->sg_cnt = se_cmd->t_data_nents; | ||
663 | cmd->sg = se_cmd->t_data_sg; | ||
664 | cmd->offset = 0; | ||
665 | |||
666 | /* | ||
667 | * Now queue completed DATA_IN to the qla2xxx LLD and response ring | ||
668 | */ | ||
669 | return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS, | ||
670 | se_cmd->scsi_status); | ||
671 | } | ||
672 | |||
673 | static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) | ||
674 | { | ||
675 | struct qla_tgt_cmd *cmd = container_of(se_cmd, | ||
676 | struct qla_tgt_cmd, se_cmd); | ||
677 | int xmit_type = QLA_TGT_XMIT_STATUS; | ||
678 | |||
679 | cmd->bufflen = se_cmd->data_length; | ||
680 | cmd->sg = NULL; | ||
681 | cmd->sg_cnt = 0; | ||
682 | cmd->offset = 0; | ||
683 | cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd); | ||
684 | cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); | ||
685 | |||
686 | if (se_cmd->data_direction == DMA_FROM_DEVICE) { | ||
687 | /* | ||
688 | * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen | ||
689 | * for qla_tgt_xmit_response LLD code | ||
690 | */ | ||
691 | se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; | ||
692 | se_cmd->residual_count = se_cmd->data_length; | ||
693 | |||
694 | cmd->bufflen = 0; | ||
695 | } | ||
696 | /* | ||
697 | * Now queue status response to qla2xxx LLD code and response ring | ||
698 | */ | ||
699 | return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); | ||
700 | } | ||
701 | |||
702 | static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) | ||
703 | { | ||
704 | struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; | ||
705 | struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd, | ||
706 | struct qla_tgt_mgmt_cmd, se_cmd); | ||
707 | |||
708 | pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n", | ||
709 | mcmd, se_tmr->function, se_tmr->response); | ||
710 | /* | ||
711 | * Do translation between TCM TM response codes and | ||
712 | * QLA2xxx FC TM response codes. | ||
713 | */ | ||
714 | switch (se_tmr->response) { | ||
715 | case TMR_FUNCTION_COMPLETE: | ||
716 | mcmd->fc_tm_rsp = FC_TM_SUCCESS; | ||
717 | break; | ||
718 | case TMR_TASK_DOES_NOT_EXIST: | ||
719 | mcmd->fc_tm_rsp = FC_TM_BAD_CMD; | ||
720 | break; | ||
721 | case TMR_FUNCTION_REJECTED: | ||
722 | mcmd->fc_tm_rsp = FC_TM_REJECT; | ||
723 | break; | ||
724 | case TMR_LUN_DOES_NOT_EXIST: | ||
725 | default: | ||
726 | mcmd->fc_tm_rsp = FC_TM_FAILED; | ||
727 | break; | ||
728 | } | ||
729 | /* | ||
730 | * Queue the TM response to QLA2xxx LLD to build a | ||
731 | * CTIO response packet. | ||
732 | */ | ||
733 | qlt_xmit_tm_rsp(mcmd); | ||
734 | |||
735 | return 0; | ||
736 | } | ||
737 | |||
738 | /* Local pointer to allocated TCM configfs fabric module */ | ||
739 | struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; | ||
740 | struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; | ||
741 | |||
742 | static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, | ||
743 | struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *); | ||
744 | /* | ||
745 | * Expected to be called with struct qla_hw_data->hardware_lock held | ||
746 | */ | ||
747 | static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) | ||
748 | { | ||
749 | struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; | ||
750 | struct se_portal_group *se_tpg = se_nacl->se_tpg; | ||
751 | struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; | ||
752 | struct tcm_qla2xxx_lport *lport = container_of(se_wwn, | ||
753 | struct tcm_qla2xxx_lport, lport_wwn); | ||
754 | struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, | ||
755 | struct tcm_qla2xxx_nacl, se_node_acl); | ||
756 | void *node; | ||
757 | |||
758 | pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id); | ||
759 | |||
760 | node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id); | ||
761 | WARN_ON(node && (node != se_nacl)); | ||
762 | |||
763 | pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", | ||
764 | se_nacl, nacl->nport_wwnn, nacl->nport_id); | ||
765 | /* | ||
766 | * Now clear the se_nacl and session pointers from our HW lport lookup | ||
767 | * table mapping for this initiator's fabric S_ID and LOOP_ID entries. | ||
768 | * | ||
769 | * This is done ahead of callbacks into tcm_qla2xxx_free_session() -> | ||
770 | * target_wait_for_sess_cmds() before the session waits for outstanding | ||
771 | * I/O to complete, to avoid a race between session shutdown execution | ||
772 | * and incoming ATIOs or TMRs picking up a stale se_node_acl reference. | ||
773 | */ | ||
774 | tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess); | ||
775 | } | ||
776 | |||
777 | static void tcm_qla2xxx_release_session(struct kref *kref) | ||
778 | { | ||
779 | struct se_session *se_sess = container_of(kref, | ||
780 | struct se_session, sess_kref); | ||
781 | |||
782 | qlt_unreg_sess(se_sess->fabric_sess_ptr); | ||
783 | } | ||
784 | |||
785 | static void tcm_qla2xxx_put_session(struct se_session *se_sess) | ||
786 | { | ||
787 | struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; | ||
788 | struct qla_hw_data *ha = sess->vha->hw; | ||
789 | unsigned long flags; | ||
790 | |||
791 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
792 | kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session); | ||
793 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
794 | } | ||
795 | |||
796 | static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) | ||
797 | { | ||
798 | tcm_qla2xxx_put_session(sess->se_sess); | ||
799 | } | ||
800 | |||
801 | static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) | ||
802 | { | ||
803 | tcm_qla2xxx_shutdown_session(sess->se_sess); | ||
804 | } | ||
805 | |||
806 | static struct se_node_acl *tcm_qla2xxx_make_nodeacl( | ||
807 | struct se_portal_group *se_tpg, | ||
808 | struct config_group *group, | ||
809 | const char *name) | ||
810 | { | ||
811 | struct se_node_acl *se_nacl, *se_nacl_new; | ||
812 | struct tcm_qla2xxx_nacl *nacl; | ||
813 | u64 wwnn; | ||
814 | u32 qla2xxx_nexus_depth; | ||
815 | |||
816 | if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0) | ||
817 | return ERR_PTR(-EINVAL); | ||
818 | |||
819 | se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg); | ||
820 | if (!se_nacl_new) | ||
821 | return ERR_PTR(-ENOMEM); | ||
822 | /* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */ | ||
823 | qla2xxx_nexus_depth = 1; | ||
824 | |||
825 | /* | ||
826 | * se_nacl_new may be released by core_tpg_add_initiator_node_acl() | ||
827 | * when converting a NodeACL from demo mode -> explicit | ||
828 | */ | ||
829 | se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, | ||
830 | name, qla2xxx_nexus_depth); | ||
831 | if (IS_ERR(se_nacl)) { | ||
832 | tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new); | ||
833 | return se_nacl; | ||
834 | } | ||
835 | /* | ||
836 | * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN | ||
837 | */ | ||
838 | nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); | ||
839 | nacl->nport_wwnn = wwnn; | ||
840 | tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn); | ||
841 | |||
842 | return se_nacl; | ||
843 | } | ||
844 | |||
845 | static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl) | ||
846 | { | ||
847 | struct se_portal_group *se_tpg = se_acl->se_tpg; | ||
848 | struct tcm_qla2xxx_nacl *nacl = container_of(se_acl, | ||
849 | struct tcm_qla2xxx_nacl, se_node_acl); | ||
850 | |||
851 | core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1); | ||
852 | kfree(nacl); | ||
853 | } | ||
854 | |||
855 | /* Start items for tcm_qla2xxx_tpg_attrib_cit */ | ||
856 | |||
857 | #define DEF_QLA_TPG_ATTRIB(name) \ | ||
858 | \ | ||
859 | static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \ | ||
860 | struct se_portal_group *se_tpg, \ | ||
861 | char *page) \ | ||
862 | { \ | ||
863 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ | ||
864 | struct tcm_qla2xxx_tpg, se_tpg); \ | ||
865 | \ | ||
866 | return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \ | ||
867 | } \ | ||
868 | \ | ||
869 | static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \ | ||
870 | struct se_portal_group *se_tpg, \ | ||
871 | const char *page, \ | ||
872 | size_t count) \ | ||
873 | { \ | ||
874 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ | ||
875 | struct tcm_qla2xxx_tpg, se_tpg); \ | ||
876 | unsigned long val; \ | ||
877 | int ret; \ | ||
878 | \ | ||
879 | ret = kstrtoul(page, 0, &val); \ | ||
880 | if (ret < 0) { \ | ||
881 | pr_err("kstrtoul() failed with" \ | ||
882 | " ret: %d\n", ret); \ | ||
883 | return -EINVAL; \ | ||
884 | } \ | ||
885 | ret = tcm_qla2xxx_set_attrib_##name(tpg, val); \ | ||
886 | \ | ||
887 | return (!ret) ? count : -EINVAL; \ | ||
888 | } | ||
889 | |||
890 | #define DEF_QLA_TPG_ATTR_BOOL(_name) \ | ||
891 | \ | ||
892 | static int tcm_qla2xxx_set_attrib_##_name( \ | ||
893 | struct tcm_qla2xxx_tpg *tpg, \ | ||
894 | unsigned long val) \ | ||
895 | { \ | ||
896 | struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib; \ | ||
897 | \ | ||
898 | if ((val != 0) && (val != 1)) { \ | ||
899 | pr_err("Illegal boolean value %lu\n", val); \ | ||
900 | return -EINVAL; \ | ||
901 | } \ | ||
902 | \ | ||
903 | a->_name = val; \ | ||
904 | return 0; \ | ||
905 | } | ||
906 | |||
907 | #define QLA_TPG_ATTR(_name, _mode) \ | ||
908 | TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode); | ||
909 | |||
910 | /* | ||
911 | * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls | ||
912 | */ | ||
913 | DEF_QLA_TPG_ATTR_BOOL(generate_node_acls); | ||
914 | DEF_QLA_TPG_ATTRIB(generate_node_acls); | ||
915 | QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR); | ||
916 | |||
917 | /* | ||
918 | * Define tcm_qla2xxx_tpg_attrib_s_cache_dynamic_acls | ||
919 | */ | ||
920 | DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls); | ||
921 | DEF_QLA_TPG_ATTRIB(cache_dynamic_acls); | ||
922 | QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR); | ||
923 | |||
924 | /* | ||
925 | * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect | ||
926 | */ | ||
927 | DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect); | ||
928 | DEF_QLA_TPG_ATTRIB(demo_mode_write_protect); | ||
929 | QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR); | ||
930 | |||
931 | /* | ||
932 | * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect | ||
933 | */ | ||
934 | DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect); | ||
935 | DEF_QLA_TPG_ATTRIB(prod_mode_write_protect); | ||
936 | QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR); | ||
937 | |||
938 | static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = { | ||
939 | &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr, | ||
940 | &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr, | ||
941 | &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr, | ||
942 | &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr, | ||
943 | NULL, | ||
944 | }; | ||
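
For each attribute, the three macros above expand to a boolean setter, a configfs show/store pair, and the attribute definition itself. Roughly (a simplified sketch, not a literal preprocessor expansion), the setter generated for generate_node_acls behaves like:

/* Simplified sketch of the generated setter; the real one comes from
 * DEF_QLA_TPG_ATTR_BOOL(generate_node_acls) above. */
static int example_set_generate_node_acls(struct tcm_qla2xxx_tpg *tpg,
					  unsigned long val)
{
	if (val != 0 && val != 1)
		return -EINVAL;	/* only boolean values are accepted */
	tpg->tpg_attrib.generate_node_acls = val;
	return 0;
}

The store handler parses the user string with kstrtoul() and then calls the setter, so writing anything other than 0 or 1 fails with -EINVAL; the attributes typically surface under the TPG's attrib/ group in configfs.
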
945 | |||
946 | /* End items for tcm_qla2xxx_tpg_attrib_cit */ | ||
947 | |||
948 | static ssize_t tcm_qla2xxx_tpg_show_enable( | ||
949 | struct se_portal_group *se_tpg, | ||
950 | char *page) | ||
951 | { | ||
952 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
953 | struct tcm_qla2xxx_tpg, se_tpg); | ||
954 | |||
955 | return snprintf(page, PAGE_SIZE, "%d\n", | ||
956 | atomic_read(&tpg->lport_tpg_enabled)); | ||
957 | } | ||
958 | |||
959 | static ssize_t tcm_qla2xxx_tpg_store_enable( | ||
960 | struct se_portal_group *se_tpg, | ||
961 | const char *page, | ||
962 | size_t count) | ||
963 | { | ||
964 | struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; | ||
965 | struct tcm_qla2xxx_lport *lport = container_of(se_wwn, | ||
966 | struct tcm_qla2xxx_lport, lport_wwn); | ||
967 | struct scsi_qla_host *vha = lport->qla_vha; | ||
968 | struct qla_hw_data *ha = vha->hw; | ||
969 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
970 | struct tcm_qla2xxx_tpg, se_tpg); | ||
971 | unsigned long op; | ||
972 | int rc; | ||
973 | |||
974 | rc = kstrtoul(page, 0, &op); | ||
975 | if (rc < 0) { | ||
976 | pr_err("kstrtoul() returned %d\n", rc); | ||
977 | return -EINVAL; | ||
978 | } | ||
979 | if ((op != 1) && (op != 0)) { | ||
980 | pr_err("Illegal value for tpg_enable: %lu\n", op); | ||
981 | return -EINVAL; | ||
982 | } | ||
983 | |||
984 | if (op) { | ||
985 | atomic_set(&tpg->lport_tpg_enabled, 1); | ||
986 | qlt_enable_vha(vha); | ||
987 | } else { | ||
988 | if (!ha->tgt.qla_tgt) { | ||
989 | pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n"); | ||
990 | return -ENODEV; | ||
991 | } | ||
992 | atomic_set(&tpg->lport_tpg_enabled, 0); | ||
993 | qlt_stop_phase1(ha->tgt.qla_tgt); | ||
994 | } | ||
995 | |||
996 | return count; | ||
997 | } | ||
998 | |||
999 | TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR); | ||
1000 | |||
1001 | static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = { | ||
1002 | &tcm_qla2xxx_tpg_enable.attr, | ||
1003 | NULL, | ||
1004 | }; | ||
1005 | |||
1006 | static struct se_portal_group *tcm_qla2xxx_make_tpg( | ||
1007 | struct se_wwn *wwn, | ||
1008 | struct config_group *group, | ||
1009 | const char *name) | ||
1010 | { | ||
1011 | struct tcm_qla2xxx_lport *lport = container_of(wwn, | ||
1012 | struct tcm_qla2xxx_lport, lport_wwn); | ||
1013 | struct tcm_qla2xxx_tpg *tpg; | ||
1014 | unsigned long tpgt; | ||
1015 | int ret; | ||
1016 | |||
1017 | if (strstr(name, "tpgt_") != name) | ||
1018 | return ERR_PTR(-EINVAL); | ||
1019 | if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX) | ||
1020 | return ERR_PTR(-EINVAL); | ||
1021 | |||
1022 | if (!lport->qla_npiv_vp && (tpgt != 1)) { | ||
1023 | pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n"); | ||
1024 | return ERR_PTR(-ENOSYS); | ||
1025 | } | ||
1026 | |||
1027 | tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL); | ||
1028 | if (!tpg) { | ||
1029 | pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n"); | ||
1030 | return ERR_PTR(-ENOMEM); | ||
1031 | } | ||
1032 | tpg->lport = lport; | ||
1033 | tpg->lport_tpgt = tpgt; | ||
1034 | /* | ||
1035 | * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic | ||
1036 | * NodeACLs | ||
1037 | */ | ||
1038 | QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1; | ||
1039 | QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1; | ||
1040 | QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1; | ||
1041 | |||
1042 | ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn, | ||
1043 | &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); | ||
1044 | if (ret < 0) { | ||
1045 | kfree(tpg); | ||
1046 | return NULL; | ||
1047 | } | ||
1048 | /* | ||
1049 | * Setup local TPG=1 pointer for non NPIV mode. | ||
1050 | */ | ||
1051 | if (lport->qla_npiv_vp == NULL) | ||
1052 | lport->tpg_1 = tpg; | ||
1053 | |||
1054 | return &tpg->se_tpg; | ||
1055 | } | ||
1056 | |||
1057 | static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg) | ||
1058 | { | ||
1059 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
1060 | struct tcm_qla2xxx_tpg, se_tpg); | ||
1061 | struct tcm_qla2xxx_lport *lport = tpg->lport; | ||
1062 | struct scsi_qla_host *vha = lport->qla_vha; | ||
1063 | struct qla_hw_data *ha = vha->hw; | ||
1064 | /* | ||
1065 | * Call into qla_target.c LLD logic to shut down the active | ||
1066 | * FC Nexuses and disable target mode operation for this qla_hw_data | ||
1067 | */ | ||
1068 | if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop) | ||
1069 | qlt_stop_phase1(ha->tgt.qla_tgt); | ||
1070 | |||
1071 | core_tpg_deregister(se_tpg); | ||
1072 | /* | ||
1073 | * Clear local TPG=1 pointer for non NPIV mode. | ||
1074 | */ | ||
1075 | if (lport->qla_npiv_vp == NULL) | ||
1076 | lport->tpg_1 = NULL; | ||
1077 | |||
1078 | kfree(tpg); | ||
1079 | } | ||
1080 | |||
1081 | static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg( | ||
1082 | struct se_wwn *wwn, | ||
1083 | struct config_group *group, | ||
1084 | const char *name) | ||
1085 | { | ||
1086 | struct tcm_qla2xxx_lport *lport = container_of(wwn, | ||
1087 | struct tcm_qla2xxx_lport, lport_wwn); | ||
1088 | struct tcm_qla2xxx_tpg *tpg; | ||
1089 | unsigned long tpgt; | ||
1090 | int ret; | ||
1091 | |||
1092 | if (strstr(name, "tpgt_") != name) | ||
1093 | return ERR_PTR(-EINVAL); | ||
1094 | if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX) | ||
1095 | return ERR_PTR(-EINVAL); | ||
1096 | |||
1097 | tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL); | ||
1098 | if (!tpg) { | ||
1099 | pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n"); | ||
1100 | return ERR_PTR(-ENOMEM); | ||
1101 | } | ||
1102 | tpg->lport = lport; | ||
1103 | tpg->lport_tpgt = tpgt; | ||
1104 | |||
1105 | ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn, | ||
1106 | &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); | ||
1107 | if (ret < 0) { | ||
1108 | kfree(tpg); | ||
1109 | return NULL; | ||
1110 | } | ||
1111 | return &tpg->se_tpg; | ||
1112 | } | ||
1113 | |||
1114 | /* | ||
1115 | * Expected to be called with struct qla_hw_data->hardware_lock held | ||
1116 | */ | ||
1117 | static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id( | ||
1118 | scsi_qla_host_t *vha, | ||
1119 | const uint8_t *s_id) | ||
1120 | { | ||
1121 | struct qla_hw_data *ha = vha->hw; | ||
1122 | struct tcm_qla2xxx_lport *lport; | ||
1123 | struct se_node_acl *se_nacl; | ||
1124 | struct tcm_qla2xxx_nacl *nacl; | ||
1125 | u32 key; | ||
1126 | |||
1127 | lport = ha->tgt.target_lport_ptr; | ||
1128 | if (!lport) { | ||
1129 | pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); | ||
1130 | dump_stack(); | ||
1131 | return NULL; | ||
1132 | } | ||
1133 | |||
1134 | key = (((unsigned long)s_id[0] << 16) | | ||
1135 | ((unsigned long)s_id[1] << 8) | | ||
1136 | (unsigned long)s_id[2]); | ||
1137 | pr_debug("find_sess_by_s_id: 0x%06x\n", key); | ||
1138 | |||
1139 | se_nacl = btree_lookup32(&lport->lport_fcport_map, key); | ||
1140 | if (!se_nacl) { | ||
1141 | pr_debug("Unable to locate s_id: 0x%06x\n", key); | ||
1142 | return NULL; | ||
1143 | } | ||
1144 | pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n", | ||
1145 | se_nacl, se_nacl->initiatorname); | ||
1146 | |||
1147 | nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); | ||
1148 | if (!nacl->qla_tgt_sess) { | ||
1149 | pr_err("Unable to locate struct qla_tgt_sess\n"); | ||
1150 | return NULL; | ||
1151 | } | ||
1152 | |||
1153 | return nacl->qla_tgt_sess; | ||
1154 | } | ||
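The 0x%06x key logged above is just the 3-byte FC source ID packed into the low 24 bits of a u32, which is what the fcport btree is keyed on. A minimal userspace sketch of that packing; the helper name and sample S_ID are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Pack domain:area:al_pa into a 24-bit lookup key, high byte first. */
static uint32_t s_id_to_key(const uint8_t *s_id)
{
        return ((uint32_t)s_id[0] << 16) |
               ((uint32_t)s_id[1] << 8) |
                (uint32_t)s_id[2];
}

int main(void)
{
        const uint8_t s_id[3] = { 0x01, 0x02, 0x03 };   /* domain, area, al_pa */

        printf("key: 0x%06x\n", (unsigned int)s_id_to_key(s_id));   /* key: 0x010203 */
        return 0;
}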
1155 | |||
1156 | /* | ||
1157 | * Expected to be called with struct qla_hw_data->hardware_lock held | ||
1158 | */ | ||
1159 | static void tcm_qla2xxx_set_sess_by_s_id( | ||
1160 | struct tcm_qla2xxx_lport *lport, | ||
1161 | struct se_node_acl *new_se_nacl, | ||
1162 | struct tcm_qla2xxx_nacl *nacl, | ||
1163 | struct se_session *se_sess, | ||
1164 | struct qla_tgt_sess *qla_tgt_sess, | ||
1165 | uint8_t *s_id) | ||
1166 | { | ||
1167 | u32 key; | ||
1168 | void *slot; | ||
1169 | int rc; | ||
1170 | |||
1171 | key = (((unsigned long)s_id[0] << 16) | | ||
1172 | ((unsigned long)s_id[1] << 8) | | ||
1173 | (unsigned long)s_id[2]); | ||
1174 | pr_debug("set_sess_by_s_id: %06x\n", key); | ||
1175 | |||
1176 | slot = btree_lookup32(&lport->lport_fcport_map, key); | ||
1177 | if (!slot) { | ||
1178 | if (new_se_nacl) { | ||
1179 | pr_debug("Setting up new fc_port entry to new_se_nacl\n"); | ||
1180 | nacl->nport_id = key; | ||
1181 | rc = btree_insert32(&lport->lport_fcport_map, key, | ||
1182 | new_se_nacl, GFP_ATOMIC); | ||
1183 | if (rc) | ||
1184 | printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n", | ||
1185 | (int)key); | ||
1186 | } else { | ||
1187 | pr_debug("Wiping nonexistent fc_port entry\n"); | ||
1188 | } | ||
1189 | |||
1190 | qla_tgt_sess->se_sess = se_sess; | ||
1191 | nacl->qla_tgt_sess = qla_tgt_sess; | ||
1192 | return; | ||
1193 | } | ||
1194 | |||
1195 | if (nacl->qla_tgt_sess) { | ||
1196 | if (new_se_nacl == NULL) { | ||
1197 | pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n"); | ||
1198 | btree_remove32(&lport->lport_fcport_map, key); | ||
1199 | nacl->qla_tgt_sess = NULL; | ||
1200 | return; | ||
1201 | } | ||
1202 | pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n"); | ||
1203 | btree_update32(&lport->lport_fcport_map, key, new_se_nacl); | ||
1204 | qla_tgt_sess->se_sess = se_sess; | ||
1205 | nacl->qla_tgt_sess = qla_tgt_sess; | ||
1206 | return; | ||
1207 | } | ||
1208 | |||
1209 | if (new_se_nacl == NULL) { | ||
1210 | pr_debug("Clearing existing fc_port entry\n"); | ||
1211 | btree_remove32(&lport->lport_fcport_map, key); | ||
1212 | return; | ||
1213 | } | ||
1214 | |||
1215 | pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n"); | ||
1216 | btree_update32(&lport->lport_fcport_map, key, new_se_nacl); | ||
1217 | qla_tgt_sess->se_sess = se_sess; | ||
1218 | nacl->qla_tgt_sess = qla_tgt_sess; | ||
1219 | |||
1220 | pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n", | ||
1221 | nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); | ||
1222 | } | ||
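The map being edited above is the kernel's 32-bit btree flavour from <linux/btree.h>; the same init/insert/lookup/update/remove/destroy calls appear throughout this file. A hedged, kernel-style sketch of that lifecycle (not standalone code; the function name, key and payload are illustrative):

#include <linux/btree.h>
#include <linux/gfp.h>

static int fcport_map_demo(void)
{
        struct btree_head32 map;
        static int demo;
        void *payload = &demo;          /* any non-NULL pointer value */
        int rc;

        rc = btree_init32(&map);        /* allocate the root node */
        if (rc)
                return rc;

        rc = btree_insert32(&map, 0x010203, payload, GFP_KERNEL);
        if (!rc && btree_lookup32(&map, 0x010203) == payload)
                btree_remove32(&map, 0x010203);

        btree_destroy32(&map);
        return rc;
}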
1223 | |||
1224 | /* | ||
1225 | * Expected to be called with struct qla_hw_data->hardware_lock held | ||
1226 | */ | ||
1227 | static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id( | ||
1228 | scsi_qla_host_t *vha, | ||
1229 | const uint16_t loop_id) | ||
1230 | { | ||
1231 | struct qla_hw_data *ha = vha->hw; | ||
1232 | struct tcm_qla2xxx_lport *lport; | ||
1233 | struct se_node_acl *se_nacl; | ||
1234 | struct tcm_qla2xxx_nacl *nacl; | ||
1235 | struct tcm_qla2xxx_fc_loopid *fc_loopid; | ||
1236 | |||
1237 | lport = ha->tgt.target_lport_ptr; | ||
1238 | if (!lport) { | ||
1239 | pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); | ||
1240 | dump_stack(); | ||
1241 | return NULL; | ||
1242 | } | ||
1243 | |||
1244 | pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id); | ||
1245 | |||
1246 | fc_loopid = lport->lport_loopid_map + loop_id; | ||
1247 | se_nacl = fc_loopid->se_nacl; | ||
1248 | if (!se_nacl) { | ||
1249 | pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n", | ||
1250 | loop_id); | ||
1251 | return NULL; | ||
1252 | } | ||
1253 | |||
1254 | nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); | ||
1255 | |||
1256 | if (!nacl->qla_tgt_sess) { | ||
1257 | pr_err("Unable to locate struct qla_tgt_sess\n"); | ||
1258 | return NULL; | ||
1259 | } | ||
1260 | |||
1261 | return nacl->qla_tgt_sess; | ||
1262 | } | ||
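In contrast to the S_ID path, the loop ID lookup above is a plain array access: lport_loopid_map holds one slot per possible 16-bit loop ID, so no tree walk is needed. A small userspace sketch of the same idea, with invented names:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct loopid_slot { void *se_nacl; };  /* stand-in for tcm_qla2xxx_fc_loopid */

int main(void)
{
        struct loopid_slot *map = calloc(65536, sizeof(*map));
        uint16_t loop_id = 0x0042;
        int session;                    /* placeholder payload */

        if (!map)
                return 1;
        map[loop_id].se_nacl = &session;        /* "register" a session */
        printf("slot %#06x is %s\n", (unsigned int)loop_id,
               map[loop_id].se_nacl ? "in use" : "free");
        free(map);
        return 0;
}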
1263 | |||
1264 | /* | ||
1265 | * Expected to be called with struct qla_hw_data->hardware_lock held | ||
1266 | */ | ||
1267 | static void tcm_qla2xxx_set_sess_by_loop_id( | ||
1268 | struct tcm_qla2xxx_lport *lport, | ||
1269 | struct se_node_acl *new_se_nacl, | ||
1270 | struct tcm_qla2xxx_nacl *nacl, | ||
1271 | struct se_session *se_sess, | ||
1272 | struct qla_tgt_sess *qla_tgt_sess, | ||
1273 | uint16_t loop_id) | ||
1274 | { | ||
1275 | struct se_node_acl *saved_nacl; | ||
1276 | struct tcm_qla2xxx_fc_loopid *fc_loopid; | ||
1277 | |||
1278 | pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id); | ||
1279 | |||
1280 | fc_loopid = &((struct tcm_qla2xxx_fc_loopid *) | ||
1281 | lport->lport_loopid_map)[loop_id]; | ||
1282 | |||
1283 | saved_nacl = fc_loopid->se_nacl; | ||
1284 | if (!saved_nacl) { | ||
1285 | pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n"); | ||
1286 | fc_loopid->se_nacl = new_se_nacl; | ||
1287 | if (qla_tgt_sess->se_sess != se_sess) | ||
1288 | qla_tgt_sess->se_sess = se_sess; | ||
1289 | if (nacl->qla_tgt_sess != qla_tgt_sess) | ||
1290 | nacl->qla_tgt_sess = qla_tgt_sess; | ||
1291 | return; | ||
1292 | } | ||
1293 | |||
1294 | if (nacl->qla_tgt_sess) { | ||
1295 | if (new_se_nacl == NULL) { | ||
1296 | pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n"); | ||
1297 | fc_loopid->se_nacl = NULL; | ||
1298 | nacl->qla_tgt_sess = NULL; | ||
1299 | return; | ||
1300 | } | ||
1301 | |||
1302 | pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n"); | ||
1303 | fc_loopid->se_nacl = new_se_nacl; | ||
1304 | if (qla_tgt_sess->se_sess != se_sess) | ||
1305 | qla_tgt_sess->se_sess = se_sess; | ||
1306 | if (nacl->qla_tgt_sess != qla_tgt_sess) | ||
1307 | nacl->qla_tgt_sess = qla_tgt_sess; | ||
1308 | return; | ||
1309 | } | ||
1310 | |||
1311 | if (new_se_nacl == NULL) { | ||
1312 | pr_debug("Clearing fc_loopid->se_nacl\n"); | ||
1313 | fc_loopid->se_nacl = NULL; | ||
1314 | return; | ||
1315 | } | ||
1316 | |||
1317 | pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n"); | ||
1318 | fc_loopid->se_nacl = new_se_nacl; | ||
1319 | if (qla_tgt_sess->se_sess != se_sess) | ||
1320 | qla_tgt_sess->se_sess = se_sess; | ||
1321 | if (nacl->qla_tgt_sess != qla_tgt_sess) | ||
1322 | nacl->qla_tgt_sess = qla_tgt_sess; | ||
1323 | |||
1324 | pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n", | ||
1325 | nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); | ||
1326 | } | ||
1327 | |||
1328 | /* | ||
1329 | * Should always be called with qla_hw_data->hardware_lock held. | ||
1330 | */ | ||
1331 | static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, | ||
1332 | struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess) | ||
1333 | { | ||
1334 | struct se_session *se_sess = sess->se_sess; | ||
1335 | unsigned char be_sid[3]; | ||
1336 | |||
1337 | be_sid[0] = sess->s_id.b.domain; | ||
1338 | be_sid[1] = sess->s_id.b.area; | ||
1339 | be_sid[2] = sess->s_id.b.al_pa; | ||
1340 | |||
1341 | tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, | ||
1342 | sess, be_sid); | ||
1343 | tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess, | ||
1344 | sess, sess->loop_id); | ||
1345 | } | ||
1346 | |||
1347 | static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) | ||
1348 | { | ||
1349 | struct qla_tgt *tgt = sess->tgt; | ||
1350 | struct qla_hw_data *ha = tgt->ha; | ||
1351 | struct se_session *se_sess; | ||
1352 | struct se_node_acl *se_nacl; | ||
1353 | struct tcm_qla2xxx_lport *lport; | ||
1354 | struct tcm_qla2xxx_nacl *nacl; | ||
1355 | |||
1356 | BUG_ON(in_interrupt()); | ||
1357 | |||
1358 | se_sess = sess->se_sess; | ||
1359 | if (!se_sess) { | ||
1360 | pr_err("struct qla_tgt_sess->se_sess is NULL\n"); | ||
1361 | dump_stack(); | ||
1362 | return; | ||
1363 | } | ||
1364 | se_nacl = se_sess->se_node_acl; | ||
1365 | nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); | ||
1366 | |||
1367 | lport = ha->tgt.target_lport_ptr; | ||
1368 | if (!lport) { | ||
1369 | pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); | ||
1370 | dump_stack(); | ||
1371 | return; | ||
1372 | } | ||
1373 | target_wait_for_sess_cmds(se_sess, 0); | ||
1374 | |||
1375 | transport_deregister_session_configfs(sess->se_sess); | ||
1376 | transport_deregister_session(sess->se_sess); | ||
1377 | } | ||
1378 | |||
1379 | /* | ||
1380 | * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl() | ||
1381 | * to locate struct se_node_acl | ||
1382 | */ | ||
1383 | static int tcm_qla2xxx_check_initiator_node_acl( | ||
1384 | scsi_qla_host_t *vha, | ||
1385 | unsigned char *fc_wwpn, | ||
1386 | void *qla_tgt_sess, | ||
1387 | uint8_t *s_id, | ||
1388 | uint16_t loop_id) | ||
1389 | { | ||
1390 | struct qla_hw_data *ha = vha->hw; | ||
1391 | struct tcm_qla2xxx_lport *lport; | ||
1392 | struct tcm_qla2xxx_tpg *tpg; | ||
1393 | struct tcm_qla2xxx_nacl *nacl; | ||
1394 | struct se_portal_group *se_tpg; | ||
1395 | struct se_node_acl *se_nacl; | ||
1396 | struct se_session *se_sess; | ||
1397 | struct qla_tgt_sess *sess = qla_tgt_sess; | ||
1398 | unsigned char port_name[36]; | ||
1399 | unsigned long flags; | ||
1400 | |||
1401 | lport = ha->tgt.target_lport_ptr; | ||
1402 | if (!lport) { | ||
1403 | pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); | ||
1404 | dump_stack(); | ||
1405 | return -EINVAL; | ||
1406 | } | ||
1407 | /* | ||
1408 | * Locate the TPG=1 reference. | ||
1409 | */ | ||
1410 | tpg = lport->tpg_1; | ||
1411 | if (!tpg) { | ||
1412 | pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n"); | ||
1413 | return -EINVAL; | ||
1414 | } | ||
1415 | se_tpg = &tpg->se_tpg; | ||
1416 | |||
1417 | se_sess = transport_init_session(); | ||
1418 | if (IS_ERR(se_sess)) { | ||
1419 | pr_err("Unable to initialize struct se_session\n"); | ||
1420 | return PTR_ERR(se_sess); | ||
1421 | } | ||
1422 | /* | ||
1423 | * Format the FCP Initiator port_name into colon separated values to | ||
1424 | * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs. | ||
1425 | */ | ||
1426 | memset(&port_name, 0, 36); | ||
1427 | snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", | ||
1428 | fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4], | ||
1429 | fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]); | ||
1430 | /* | ||
1431 | * Locate our struct se_node_acl either from an explicit NodeACL created | ||
1432 | * via ConfigFS, or via running in TPG demo mode. | ||
1433 | */ | ||
1434 | se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg, | ||
1435 | port_name); | ||
1436 | if (!se_sess->se_node_acl) { | ||
1437 | transport_free_session(se_sess); | ||
1438 | return -EINVAL; | ||
1439 | } | ||
1440 | se_nacl = se_sess->se_node_acl; | ||
1441 | nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); | ||
1442 | /* | ||
1443 | * And now setup the new se_nacl and session pointers into our HW lport | ||
1444 | * mappings for fabric S_ID and LOOP_ID. | ||
1445 | */ | ||
1446 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
1447 | tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess, | ||
1448 | qla_tgt_sess, s_id); | ||
1449 | tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess, | ||
1450 | qla_tgt_sess, loop_id); | ||
1451 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
1452 | /* | ||
1453 | * Finally register the new FC Nexus with TCM | ||
1454 | */ | ||
1455 | __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); | ||
1456 | |||
1457 | return 0; | ||
1458 | } | ||
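The colon-separated port_name built above is plain snprintf() formatting of the 8-byte binary WWPN. A minimal userspace sketch; the helper name, sample WWPN and the 24-byte buffer (16 hex digits + 7 colons + NUL) are assumptions of this example, while the driver itself uses a 36-byte scratch buffer:

#include <stdint.h>
#include <stdio.h>

static void format_wwpn(char *buf, size_t len, const uint8_t wwpn[8])
{
        snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
                 wwpn[0], wwpn[1], wwpn[2], wwpn[3],
                 wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
}

int main(void)
{
        const uint8_t wwpn[8] = { 0x21, 0x00, 0x00, 0x24, 0xff, 0x31, 0x4c, 0x48 };
        char name[24];

        format_wwpn(name, sizeof(name), wwpn);
        printf("%s\n", name);   /* 21:00:00:24:ff:31:4c:48 */
        return 0;
}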
1459 | |||
1460 | static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, | ||
1461 | uint16_t loop_id, bool conf_compl_supported) | ||
1462 | { | ||
1463 | struct qla_tgt *tgt = sess->tgt; | ||
1464 | struct qla_hw_data *ha = tgt->ha; | ||
1465 | struct tcm_qla2xxx_lport *lport = ha->tgt.target_lport_ptr; | ||
1466 | struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; | ||
1467 | struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, | ||
1468 | struct tcm_qla2xxx_nacl, se_node_acl); | ||
1469 | u32 key; | ||
1470 | |||
1471 | |||
1472 | if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24) | ||
1473 | pr_info("Updating session %p from port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n", | ||
1474 | sess, | ||
1475 | sess->port_name[0], sess->port_name[1], | ||
1476 | sess->port_name[2], sess->port_name[3], | ||
1477 | sess->port_name[4], sess->port_name[5], | ||
1478 | sess->port_name[6], sess->port_name[7], | ||
1479 | sess->loop_id, loop_id, | ||
1480 | sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, | ||
1481 | s_id.b.domain, s_id.b.area, s_id.b.al_pa); | ||
1482 | |||
1483 | if (sess->loop_id != loop_id) { | ||
1484 | /* | ||
1485 | * Because we can shuffle loop IDs around and we | ||
1486 | * update different sessions non-atomically, we might | ||
1487 | * have overwritten this session's old loop ID | ||
1488 | * already, and we might end up overwriting some other | ||
1489 | * session that will be updated later. So we have to | ||
1490 | * be extra careful and we can't warn about those things... | ||
1491 | */ | ||
1492 | if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl) | ||
1493 | lport->lport_loopid_map[sess->loop_id].se_nacl = NULL; | ||
1494 | |||
1495 | lport->lport_loopid_map[loop_id].se_nacl = se_nacl; | ||
1496 | |||
1497 | sess->loop_id = loop_id; | ||
1498 | } | ||
1499 | |||
1500 | if (sess->s_id.b24 != s_id.b24) { | ||
1501 | key = (((u32) sess->s_id.b.domain << 16) | | ||
1502 | ((u32) sess->s_id.b.area << 8) | | ||
1503 | ((u32) sess->s_id.b.al_pa)); | ||
1504 | |||
1505 | if (btree_lookup32(&lport->lport_fcport_map, key)) | ||
1506 | WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl, | ||
1507 | "Found wrong se_nacl when updating s_id %x:%x:%x\n", | ||
1508 | sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa); | ||
1509 | else | ||
1510 | WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n", | ||
1511 | sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa); | ||
1512 | |||
1513 | key = (((u32) s_id.b.domain << 16) | | ||
1514 | ((u32) s_id.b.area << 8) | | ||
1515 | ((u32) s_id.b.al_pa)); | ||
1516 | |||
1517 | if (btree_lookup32(&lport->lport_fcport_map, key)) { | ||
1518 | WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n", | ||
1519 | s_id.b.domain, s_id.b.area, s_id.b.al_pa); | ||
1520 | btree_update32(&lport->lport_fcport_map, key, se_nacl); | ||
1521 | } else { | ||
1522 | btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC); | ||
1523 | } | ||
1524 | |||
1525 | sess->s_id = s_id; | ||
1526 | nacl->nport_id = key; | ||
1527 | } | ||
1528 | |||
1529 | sess->conf_compl_supported = conf_compl_supported; | ||
1530 | } | ||
1531 | |||
1532 | /* | ||
1533 | * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path. | ||
1534 | */ | ||
1535 | static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { | ||
1536 | .handle_cmd = tcm_qla2xxx_handle_cmd, | ||
1537 | .handle_data = tcm_qla2xxx_handle_data, | ||
1538 | .handle_tmr = tcm_qla2xxx_handle_tmr, | ||
1539 | .free_cmd = tcm_qla2xxx_free_cmd, | ||
1540 | .free_mcmd = tcm_qla2xxx_free_mcmd, | ||
1541 | .free_session = tcm_qla2xxx_free_session, | ||
1542 | .update_sess = tcm_qla2xxx_update_sess, | ||
1543 | .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl, | ||
1544 | .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id, | ||
1545 | .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id, | ||
1546 | .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, | ||
1547 | .put_sess = tcm_qla2xxx_put_sess, | ||
1548 | .shutdown_sess = tcm_qla2xxx_shutdown_sess, | ||
1549 | }; | ||
1550 | |||
1551 | static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport) | ||
1552 | { | ||
1553 | int rc; | ||
1554 | |||
1555 | rc = btree_init32(&lport->lport_fcport_map); | ||
1556 | if (rc) { | ||
1557 | pr_err("Unable to initialize lport->lport_fcport_map btree\n"); | ||
1558 | return rc; | ||
1559 | } | ||
1560 | |||
1561 | lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) * | ||
1562 | 65536); | ||
1563 | if (!lport->lport_loopid_map) { | ||
1564 | pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n", | ||
1565 | sizeof(struct tcm_qla2xxx_fc_loopid) * 65536); | ||
1566 | btree_destroy32(&lport->lport_fcport_map); | ||
1567 | return -ENOMEM; | ||
1568 | } | ||
1569 | memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid) | ||
1570 | * 65536); | ||
1571 | pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n", | ||
1572 | sizeof(struct tcm_qla2xxx_fc_loopid) * 65536); | ||
1573 | return 0; | ||
1574 | } | ||
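A side note rather than the driver's code: the vmalloc() plus memset() pair above could be collapsed into a single zero-initializing vzalloc() from <linux/vmalloc.h>. A sketch against the surrounding function, not standalone code:

        /* Equivalent zero-initialized allocation of the 65536-entry loop ID map. */
        lport->lport_loopid_map = vzalloc(sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
        if (!lport->lport_loopid_map) {
                btree_destroy32(&lport->lport_fcport_map);
                return -ENOMEM;
        }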
1575 | |||
1576 | static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha) | ||
1577 | { | ||
1578 | struct qla_hw_data *ha = vha->hw; | ||
1579 | struct tcm_qla2xxx_lport *lport; | ||
1580 | /* | ||
1581 | * Setup local pointer to vha, NPIV VP pointer (if present) and | ||
1582 | * vha->tcm_lport pointer | ||
1583 | */ | ||
1584 | lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr; | ||
1585 | lport->qla_vha = vha; | ||
1586 | |||
1587 | return 0; | ||
1588 | } | ||
1589 | |||
1590 | static struct se_wwn *tcm_qla2xxx_make_lport( | ||
1591 | struct target_fabric_configfs *tf, | ||
1592 | struct config_group *group, | ||
1593 | const char *name) | ||
1594 | { | ||
1595 | struct tcm_qla2xxx_lport *lport; | ||
1596 | u64 wwpn; | ||
1597 | int ret = -ENODEV; | ||
1598 | |||
1599 | if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0) | ||
1600 | return ERR_PTR(-EINVAL); | ||
1601 | |||
1602 | lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); | ||
1603 | if (!lport) { | ||
1604 | pr_err("Unable to allocate struct tcm_qla2xxx_lport\n"); | ||
1605 | return ERR_PTR(-ENOMEM); | ||
1606 | } | ||
1607 | lport->lport_wwpn = wwpn; | ||
1608 | tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN, | ||
1609 | wwpn); | ||
1610 | sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn); | ||
1611 | |||
1612 | ret = tcm_qla2xxx_init_lport(lport); | ||
1613 | if (ret != 0) | ||
1614 | goto out; | ||
1615 | |||
1616 | ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn, | ||
1617 | tcm_qla2xxx_lport_register_cb, lport); | ||
1618 | if (ret != 0) | ||
1619 | goto out_lport; | ||
1620 | |||
1621 | return &lport->lport_wwn; | ||
1622 | out_lport: | ||
1623 | vfree(lport->lport_loopid_map); | ||
1624 | btree_destroy32(&lport->lport_fcport_map); | ||
1625 | out: | ||
1626 | kfree(lport); | ||
1627 | return ERR_PTR(ret); | ||
1628 | } | ||
1629 | |||
1630 | static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn) | ||
1631 | { | ||
1632 | struct tcm_qla2xxx_lport *lport = container_of(wwn, | ||
1633 | struct tcm_qla2xxx_lport, lport_wwn); | ||
1634 | struct scsi_qla_host *vha = lport->qla_vha; | ||
1635 | struct qla_hw_data *ha = vha->hw; | ||
1636 | struct se_node_acl *node; | ||
1637 | u32 key = 0; | ||
1638 | |||
1639 | /* | ||
1640 | * Call into qla_target.c LLD logic to complete the | ||
1641 | * shutdown of struct qla_tgt after the call to | ||
1642 | * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above. | ||
1643 | */ | ||
1644 | if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped) | ||
1645 | qlt_stop_phase2(ha->tgt.qla_tgt); | ||
1646 | |||
1647 | qlt_lport_deregister(vha); | ||
1648 | |||
1649 | vfree(lport->lport_loopid_map); | ||
1650 | btree_for_each_safe32(&lport->lport_fcport_map, key, node) | ||
1651 | btree_remove32(&lport->lport_fcport_map, key); | ||
1652 | btree_destroy32(&lport->lport_fcport_map); | ||
1653 | kfree(lport); | ||
1654 | } | ||
1655 | |||
1656 | static struct se_wwn *tcm_qla2xxx_npiv_make_lport( | ||
1657 | struct target_fabric_configfs *tf, | ||
1658 | struct config_group *group, | ||
1659 | const char *name) | ||
1660 | { | ||
1661 | struct tcm_qla2xxx_lport *lport; | ||
1662 | u64 npiv_wwpn, npiv_wwnn; | ||
1663 | int ret; | ||
1664 | |||
1665 | if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1, | ||
1666 | &npiv_wwpn, &npiv_wwnn) < 0) | ||
1667 | return ERR_PTR(-EINVAL); | ||
1668 | |||
1669 | lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); | ||
1670 | if (!lport) { | ||
1671 | pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n"); | ||
1672 | return ERR_PTR(-ENOMEM); | ||
1673 | } | ||
1674 | lport->lport_npiv_wwpn = npiv_wwpn; | ||
1675 | lport->lport_npiv_wwnn = npiv_wwnn; | ||
1676 | tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0], | ||
1677 | TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn); | ||
1678 | sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn); | ||
1679 | |||
1680 | /* FIXME: tcm_qla2xxx_npiv_make_lport */ | ||
1681 | ret = -ENOSYS; | ||
1682 | if (ret != 0) | ||
1683 | goto out; | ||
1684 | |||
1685 | return &lport->lport_wwn; | ||
1686 | out: | ||
1687 | kfree(lport); | ||
1688 | return ERR_PTR(ret); | ||
1689 | } | ||
1690 | |||
1691 | static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn) | ||
1692 | { | ||
1693 | struct tcm_qla2xxx_lport *lport = container_of(wwn, | ||
1694 | struct tcm_qla2xxx_lport, lport_wwn); | ||
1695 | struct scsi_qla_host *vha = lport->qla_vha; | ||
1696 | struct Scsi_Host *sh = vha->host; | ||
1697 | /* | ||
1698 | * Notify libfc that we want to release the lport->npiv_vport | ||
1699 | */ | ||
1700 | fc_vport_terminate(lport->npiv_vport); | ||
1701 | |||
1702 | scsi_host_put(sh); | ||
1703 | kfree(lport); | ||
1704 | } | ||
1705 | |||
1706 | |||
1707 | static ssize_t tcm_qla2xxx_wwn_show_attr_version( | ||
1708 | struct target_fabric_configfs *tf, | ||
1709 | char *page) | ||
1710 | { | ||
1711 | return sprintf(page, | ||
1712 | "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on " | ||
1713 | UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, | ||
1714 | utsname()->machine); | ||
1715 | } | ||
1716 | |||
1717 | TF_WWN_ATTR_RO(tcm_qla2xxx, version); | ||
1718 | |||
1719 | static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = { | ||
1720 | &tcm_qla2xxx_wwn_version.attr, | ||
1721 | NULL, | ||
1722 | }; | ||
1723 | |||
1724 | static struct target_core_fabric_ops tcm_qla2xxx_ops = { | ||
1725 | .get_fabric_name = tcm_qla2xxx_get_fabric_name, | ||
1726 | .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, | ||
1727 | .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, | ||
1728 | .tpg_get_tag = tcm_qla2xxx_get_tag, | ||
1729 | .tpg_get_default_depth = tcm_qla2xxx_get_default_depth, | ||
1730 | .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id, | ||
1731 | .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len, | ||
1732 | .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id, | ||
1733 | .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, | ||
1734 | .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, | ||
1735 | .tpg_check_demo_mode_write_protect = | ||
1736 | tcm_qla2xxx_check_demo_write_protect, | ||
1737 | .tpg_check_prod_mode_write_protect = | ||
1738 | tcm_qla2xxx_check_prod_write_protect, | ||
1739 | .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true, | ||
1740 | .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, | ||
1741 | .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, | ||
1742 | .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, | ||
1743 | .check_stop_free = tcm_qla2xxx_check_stop_free, | ||
1744 | .release_cmd = tcm_qla2xxx_release_cmd, | ||
1745 | .put_session = tcm_qla2xxx_put_session, | ||
1746 | .shutdown_session = tcm_qla2xxx_shutdown_session, | ||
1747 | .close_session = tcm_qla2xxx_close_session, | ||
1748 | .sess_get_index = tcm_qla2xxx_sess_get_index, | ||
1749 | .sess_get_initiator_sid = NULL, | ||
1750 | .write_pending = tcm_qla2xxx_write_pending, | ||
1751 | .write_pending_status = tcm_qla2xxx_write_pending_status, | ||
1752 | .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs, | ||
1753 | .get_task_tag = tcm_qla2xxx_get_task_tag, | ||
1754 | .get_cmd_state = tcm_qla2xxx_get_cmd_state, | ||
1755 | .queue_data_in = tcm_qla2xxx_queue_data_in, | ||
1756 | .queue_status = tcm_qla2xxx_queue_status, | ||
1757 | .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, | ||
1758 | /* | ||
1759 | * Setup function pointers for generic logic in | ||
1760 | * target_core_fabric_configfs.c | ||
1761 | */ | ||
1762 | .fabric_make_wwn = tcm_qla2xxx_make_lport, | ||
1763 | .fabric_drop_wwn = tcm_qla2xxx_drop_lport, | ||
1764 | .fabric_make_tpg = tcm_qla2xxx_make_tpg, | ||
1765 | .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, | ||
1766 | .fabric_post_link = NULL, | ||
1767 | .fabric_pre_unlink = NULL, | ||
1768 | .fabric_make_np = NULL, | ||
1769 | .fabric_drop_np = NULL, | ||
1770 | .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl, | ||
1771 | .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl, | ||
1772 | }; | ||
1773 | |||
1774 | static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { | ||
1775 | .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name, | ||
1776 | .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, | ||
1777 | .tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn, | ||
1778 | .tpg_get_tag = tcm_qla2xxx_get_tag, | ||
1779 | .tpg_get_default_depth = tcm_qla2xxx_get_default_depth, | ||
1780 | .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id, | ||
1781 | .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len, | ||
1782 | .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id, | ||
1783 | .tpg_check_demo_mode = tcm_qla2xxx_check_false, | ||
1784 | .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true, | ||
1785 | .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true, | ||
1786 | .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false, | ||
1787 | .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true, | ||
1788 | .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, | ||
1789 | .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, | ||
1790 | .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, | ||
1791 | .release_cmd = tcm_qla2xxx_release_cmd, | ||
1792 | .put_session = tcm_qla2xxx_put_session, | ||
1793 | .shutdown_session = tcm_qla2xxx_shutdown_session, | ||
1794 | .close_session = tcm_qla2xxx_close_session, | ||
1795 | .sess_get_index = tcm_qla2xxx_sess_get_index, | ||
1796 | .sess_get_initiator_sid = NULL, | ||
1797 | .write_pending = tcm_qla2xxx_write_pending, | ||
1798 | .write_pending_status = tcm_qla2xxx_write_pending_status, | ||
1799 | .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs, | ||
1800 | .get_task_tag = tcm_qla2xxx_get_task_tag, | ||
1801 | .get_cmd_state = tcm_qla2xxx_get_cmd_state, | ||
1802 | .queue_data_in = tcm_qla2xxx_queue_data_in, | ||
1803 | .queue_status = tcm_qla2xxx_queue_status, | ||
1804 | .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, | ||
1805 | /* | ||
1806 | * Setup function pointers for generic logic in | ||
1807 | * target_core_fabric_configfs.c | ||
1808 | */ | ||
1809 | .fabric_make_wwn = tcm_qla2xxx_npiv_make_lport, | ||
1810 | .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport, | ||
1811 | .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg, | ||
1812 | .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, | ||
1813 | .fabric_post_link = NULL, | ||
1814 | .fabric_pre_unlink = NULL, | ||
1815 | .fabric_make_np = NULL, | ||
1816 | .fabric_drop_np = NULL, | ||
1817 | .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl, | ||
1818 | .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl, | ||
1819 | }; | ||
1820 | |||
1821 | static int tcm_qla2xxx_register_configfs(void) | ||
1822 | { | ||
1823 | struct target_fabric_configfs *fabric, *npiv_fabric; | ||
1824 | int ret; | ||
1825 | |||
1826 | pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on " | ||
1827 | UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, | ||
1828 | utsname()->machine); | ||
1829 | /* | ||
1830 | * Register the top level struct config_item_type with TCM core | ||
1831 | */ | ||
1832 | fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx"); | ||
1833 | if (IS_ERR(fabric)) { | ||
1834 | pr_err("target_fabric_configfs_init() failed\n"); | ||
1835 | return PTR_ERR(fabric); | ||
1836 | } | ||
1837 | /* | ||
1838 | * Setup fabric->tf_ops from our local tcm_qla2xxx_ops | ||
1839 | */ | ||
1840 | fabric->tf_ops = tcm_qla2xxx_ops; | ||
1841 | /* | ||
1842 | * Setup default attribute lists for various fabric->tf_cit_tmpl | ||
1843 | */ | ||
1844 | TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; | ||
1845 | TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs; | ||
1846 | TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = | ||
1847 | tcm_qla2xxx_tpg_attrib_attrs; | ||
1848 | TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; | ||
1849 | TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; | ||
1850 | TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; | ||
1851 | TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; | ||
1852 | TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; | ||
1853 | TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; | ||
1854 | /* | ||
1855 | * Register the fabric for use within TCM | ||
1856 | */ | ||
1857 | ret = target_fabric_configfs_register(fabric); | ||
1858 | if (ret < 0) { | ||
1859 | pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n"); | ||
1860 | return ret; | ||
1861 | } | ||
1862 | /* | ||
1863 | * Setup our local pointer to *fabric | ||
1864 | */ | ||
1865 | tcm_qla2xxx_fabric_configfs = fabric; | ||
1866 | pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n"); | ||
1867 | |||
1868 | /* | ||
1869 | * Register the top level struct config_item_type for NPIV with TCM core | ||
1870 | */ | ||
1871 | npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv"); | ||
1872 | if (IS_ERR(npiv_fabric)) { | ||
1873 | pr_err("target_fabric_configfs_init() failed\n"); | ||
1874 | ret = PTR_ERR(npiv_fabric); | ||
1875 | goto out_fabric; | ||
1876 | } | ||
1877 | /* | ||
1878 | * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops | ||
1879 | */ | ||
1880 | npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops; | ||
1881 | /* | ||
1882 | * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl | ||
1883 | */ | ||
1884 | TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; | ||
1885 | TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL; | ||
1886 | TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; | ||
1887 | TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL; | ||
1888 | TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; | ||
1889 | TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; | ||
1890 | TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; | ||
1891 | TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; | ||
1892 | TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; | ||
1893 | /* | ||
1894 | * Register the npiv_fabric for use within TCM | ||
1895 | */ | ||
1896 | ret = target_fabric_configfs_register(npiv_fabric); | ||
1897 | if (ret < 0) { | ||
1898 | pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n"); | ||
1899 | goto out_fabric; | ||
1900 | } | ||
1901 | /* | ||
1902 | * Setup our local pointer to *npiv_fabric | ||
1903 | */ | ||
1904 | tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric; | ||
1905 | pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n"); | ||
1906 | |||
1907 | tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free", | ||
1908 | WQ_MEM_RECLAIM, 0); | ||
1909 | if (!tcm_qla2xxx_free_wq) { | ||
1910 | ret = -ENOMEM; | ||
1911 | goto out_fabric_npiv; | ||
1912 | } | ||
1913 | |||
1914 | tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0); | ||
1915 | if (!tcm_qla2xxx_cmd_wq) { | ||
1916 | ret = -ENOMEM; | ||
1917 | goto out_free_wq; | ||
1918 | } | ||
1919 | |||
1920 | return 0; | ||
1921 | |||
1922 | out_free_wq: | ||
1923 | destroy_workqueue(tcm_qla2xxx_free_wq); | ||
1924 | out_fabric_npiv: | ||
1925 | target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs); | ||
1926 | out_fabric: | ||
1927 | target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs); | ||
1928 | return ret; | ||
1929 | } | ||
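The error handling above follows the usual kernel goto-unwind idiom: each failure jumps to a label that tears down only what was already registered, in reverse order (free_wq before npiv_fabric before fabric). A generic userspace sketch of the same pattern; all names are invented for illustration:

#include <errno.h>
#include <stdlib.h>

struct ctx { void *a; void *b; void *c; };

/* Acquire three resources; on failure, release the earlier ones in
 * reverse order and report the error to the caller. */
static int ctx_setup(struct ctx *ctx)
{
        ctx->a = malloc(64);
        if (!ctx->a)
                return -ENOMEM;
        ctx->b = malloc(64);
        if (!ctx->b)
                goto out_free_a;
        ctx->c = malloc(64);
        if (!ctx->c)
                goto out_free_b;
        return 0;

out_free_b:
        free(ctx->b);
out_free_a:
        free(ctx->a);
        return -ENOMEM;
}

static void ctx_teardown(struct ctx *ctx)
{
        free(ctx->c);
        free(ctx->b);
        free(ctx->a);
}

int main(void)
{
        struct ctx ctx;

        if (ctx_setup(&ctx) == 0)
                ctx_teardown(&ctx);
        return 0;
}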
1930 | |||
1931 | static void tcm_qla2xxx_deregister_configfs(void) | ||
1932 | { | ||
1933 | destroy_workqueue(tcm_qla2xxx_cmd_wq); | ||
1934 | destroy_workqueue(tcm_qla2xxx_free_wq); | ||
1935 | |||
1936 | target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs); | ||
1937 | tcm_qla2xxx_fabric_configfs = NULL; | ||
1938 | pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n"); | ||
1939 | |||
1940 | target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs); | ||
1941 | tcm_qla2xxx_npiv_fabric_configfs = NULL; | ||
1942 | pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n"); | ||
1943 | } | ||
1944 | |||
1945 | static int __init tcm_qla2xxx_init(void) | ||
1946 | { | ||
1947 | int ret; | ||
1948 | |||
1949 | ret = tcm_qla2xxx_register_configfs(); | ||
1950 | if (ret < 0) | ||
1951 | return ret; | ||
1952 | |||
1953 | return 0; | ||
1954 | } | ||
1955 | |||
1956 | static void __exit tcm_qla2xxx_exit(void) | ||
1957 | { | ||
1958 | tcm_qla2xxx_deregister_configfs(); | ||
1959 | } | ||
1960 | |||
1961 | MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver"); | ||
1962 | MODULE_LICENSE("GPL"); | ||
1963 | module_init(tcm_qla2xxx_init); | ||
1964 | module_exit(tcm_qla2xxx_exit); | ||
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h deleted file mode 100644 index 9ba075fe978..00000000000 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ /dev/null | |||
@@ -1,84 +0,0 @@ | |||
1 | #include <target/target_core_base.h> | ||
2 | #include <linux/btree.h> | ||
3 | |||
4 | #define TCM_QLA2XXX_VERSION "v0.1" | ||
5 | /* length of ASCII WWPNs including pad */ | ||
6 | #define TCM_QLA2XXX_NAMELEN 32 | ||
7 | /* length of ASCII NPIV 'WWPN+WWNN' including pad */ | ||
8 | #define TCM_QLA2XXX_NPIV_NAMELEN 66 | ||
9 | |||
10 | #include "qla_target.h" | ||
11 | |||
12 | struct tcm_qla2xxx_nacl { | ||
13 | /* From libfc struct fc_rport->port_id */ | ||
14 | u32 nport_id; | ||
15 | /* Binary World Wide unique Node Name for remote FC Initiator Nport */ | ||
16 | u64 nport_wwnn; | ||
17 | /* ASCII formatted WWPN for FC Initiator Nport */ | ||
18 | char nport_name[TCM_QLA2XXX_NAMELEN]; | ||
19 | /* Pointer to qla_tgt_sess */ | ||
20 | struct qla_tgt_sess *qla_tgt_sess; | ||
21 | /* Pointer to TCM FC nexus */ | ||
22 | struct se_session *nport_nexus; | ||
23 | /* Returned by tcm_qla2xxx_make_nodeacl() */ | ||
24 | struct se_node_acl se_node_acl; | ||
25 | }; | ||
26 | |||
27 | struct tcm_qla2xxx_tpg_attrib { | ||
28 | int generate_node_acls; | ||
29 | int cache_dynamic_acls; | ||
30 | int demo_mode_write_protect; | ||
31 | int prod_mode_write_protect; | ||
32 | }; | ||
33 | |||
34 | struct tcm_qla2xxx_tpg { | ||
35 | /* FC lport target portal group tag for TCM */ | ||
36 | u16 lport_tpgt; | ||
37 | /* Atomic bit to determine TPG active status */ | ||
38 | atomic_t lport_tpg_enabled; | ||
39 | /* Pointer back to tcm_qla2xxx_lport */ | ||
40 | struct tcm_qla2xxx_lport *lport; | ||
41 | /* Used by tcm_qla2xxx_tpg_attrib_cit */ | ||
42 | struct tcm_qla2xxx_tpg_attrib tpg_attrib; | ||
43 | /* Returned by tcm_qla2xxx_make_tpg() */ | ||
44 | struct se_portal_group se_tpg; | ||
45 | }; | ||
46 | |||
47 | #define QLA_TPG_ATTRIB(tpg) (&(tpg)->tpg_attrib) | ||
48 | |||
49 | struct tcm_qla2xxx_fc_loopid { | ||
50 | struct se_node_acl *se_nacl; | ||
51 | }; | ||
52 | |||
53 | struct tcm_qla2xxx_lport { | ||
54 | /* SCSI protocol the lport is providing */ | ||
55 | u8 lport_proto_id; | ||
56 | /* Binary World Wide unique Port Name for FC Target Lport */ | ||
57 | u64 lport_wwpn; | ||
58 | /* Binary World Wide unique Port Name for FC NPIV Target Lport */ | ||
59 | u64 lport_npiv_wwpn; | ||
60 | /* Binary World Wide unique Node Name for FC NPIV Target Lport */ | ||
61 | u64 lport_npiv_wwnn; | ||
62 | /* ASCII formatted WWPN for FC Target Lport */ | ||
63 | char lport_name[TCM_QLA2XXX_NAMELEN]; | ||
64 | /* ASCII formatted naa WWPN for VPD page 83 etc */ | ||
65 | char lport_naa_name[TCM_QLA2XXX_NAMELEN]; | ||
66 | /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */ | ||
67 | char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN]; | ||
68 | /* map for fc_port pointers in 24-bit FC Port ID space */ | ||
69 | struct btree_head32 lport_fcport_map; | ||
70 | /* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */ | ||
71 | struct tcm_qla2xxx_fc_loopid *lport_loopid_map; | ||
72 | /* Pointer to struct scsi_qla_host from qla2xxx LLD */ | ||
73 | struct scsi_qla_host *qla_vha; | ||
74 | /* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */ | ||
75 | struct scsi_qla_host *qla_npiv_vp; | ||
76 | /* Pointer to struct qla_tgt pointer */ | ||
77 | struct qla_tgt lport_qla_tgt; | ||
78 | /* Pointer to struct fc_vport for NPIV vport from libfc */ | ||
79 | struct fc_vport *npiv_vport; | ||
80 | /* Pointer to TPG=1 for non NPIV mode */ | ||
81 | struct tcm_qla2xxx_tpg *tpg_1; | ||
82 | /* Returned by tcm_qla2xxx_make_lport() */ | ||
83 | struct se_wwn lport_wwn; | ||
84 | }; | ||
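Every structure above embeds its generic TCM counterpart (se_node_acl, se_portal_group, se_wwn) by value, and the .c file recovers the private wrapper with container_of() on the embedded member. A minimal userspace sketch of that pattern with stand-in types; the real macro lives in <linux/kernel.h>:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct se_node_acl_demo { int id; };            /* stand-in for se_node_acl */

struct nacl_demo {                              /* stand-in for tcm_qla2xxx_nacl */
        unsigned int nport_id;
        struct se_node_acl_demo se_node_acl;
};

int main(void)
{
        struct nacl_demo nacl = { .nport_id = 0x010203 };
        struct se_node_acl_demo *generic = &nacl.se_node_acl;
        struct nacl_demo *back = container_of(generic, struct nacl_demo, se_node_acl);

        printf("nport_id: 0x%06x\n", back->nport_id);   /* 0x010203 */
        return 0;
}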