author     Anirban Chakraborty <anirban.chakraborty@qlogic.com>	2008-11-06 13:40:51 -0500
committer  James Bottomley <James.Bottomley@HansenPartnership.com>	2008-12-29 12:24:16 -0500
commit     e315cd28b9ef0d7b71e462ac16e18dbaa2f5adfe (patch)
tree       1e20bdd40b56b36f211bde8fff0c63792b088a0a /drivers/scsi/qla2xxx
parent     7b867cf76fbcc8d77867cbec6f509f71dce8a98f (diff)
[SCSI] qla2xxx: Code changes for qla data structure refactoring
The following changes have been made:

1. Outstanding commands are based on a request queue; scsi_qla_host does not maintain them anymore.
2. start_scsi is accessed via the isp_ops struct instead of by direct invocation.
3. Interrupt registrations are done using the response queue instead of the device id.

Signed-off-by: Anirban Chakraborty <anirban.chakraborty@qlogic.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
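In effect, the refactoring splits what used to be a single scsi_qla_host_t into a per-(virtual-)host structure (vha) and shared hardware state reached through vha->hw, with the rings and the outstanding-command array moved into dedicated queue structures. The sketch below is a simplified illustration of that layering, condensed from the fields this diff touches; it is not the full qla_def.h definition, and member order and types are illustrative only.

    /* Simplified sketch, condensed from the fields used in this diff. */
    struct req_que {
            request_t  *ring;        /* was ha->request_ring */
            dma_addr_t  dma;         /* was ha->request_dma */
            uint16_t    length;      /* was ha->request_q_length */
            request_t  *ring_ptr;
            uint16_t    ring_index;
            uint16_t    cnt;
            /* Outstanding commands now belong to the request queue. */
            srb_t      *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
            uint32_t    current_outstanding_cmd;
    };

    struct rsp_que {
            response_t *ring;        /* was ha->response_ring */
            dma_addr_t  dma;         /* was ha->response_dma */
            uint16_t    length;      /* was ha->response_q_length */
            response_t *ring_ptr;
            uint16_t    ring_index;
    };

    struct qla_hw_data {
            struct req_que *req;
            struct rsp_que *rsp;
            struct isp_operations *isp_ops;   /* chip ops, including start_scsi */
            /* ... adapter-wide state: iobase, pdev, nvram, firmware info ... */
    };

    typedef struct scsi_qla_host {
            struct qla_hw_data *hw;           /* shared hardware state */
            unsigned long dpc_flags;
            uint8_t node_name[WWN_SIZE];
            uint8_t port_name[WWN_SIZE];
            /* ... per-host state: loop state, flags, host_no ... */
    } scsi_qla_host_t;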
Diffstat (limited to 'drivers/scsi/qla2xxx')
-rw-r--r--   drivers/scsi/qla2xxx/qla_init.c    | 1113
-rw-r--r--   drivers/scsi/qla2xxx/qla_inline.h  |   27
-rw-r--r--   drivers/scsi/qla2xxx/qla_iocb.c    |  258
-rw-r--r--   drivers/scsi/qla2xxx/qla_isr.c     |  633
-rw-r--r--   drivers/scsi/qla2xxx/qla_os.c      | 1265
-rw-r--r--   drivers/scsi/qla2xxx/qla_version.h |    4
6 files changed, 1749 insertions(+), 1551 deletions(-)
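Throughout the diff below, the conversion follows one mechanical pattern: entry points now take the host pointer as vha, derive the shared hardware data with ha = vha->hw, and reach the rings through ha->req and ha->rsp, while chip-specific operations are dispatched through ha->isp_ops with vha as the argument. A minimal illustration of that pattern follows; qla2xxx_example_op() is a hypothetical function used only to show the convention, not part of the driver.

    /* Hypothetical example of the calling convention used after this patch. */
    static int qla2xxx_example_op(scsi_qla_host_t *vha)
    {
            struct qla_hw_data *ha = vha->hw;   /* shared adapter state */
            struct req_que *req = ha->req;      /* request queue (was ha->request_*) */

            /* Per-host state stays on vha ... */
            if (!vha->flags.online)
                    return QLA_FUNCTION_FAILED;

            /* ... ring geometry now comes from the request queue ... */
            ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;

            /* ... and chip-specific work goes through isp_ops. */
            return ha->isp_ops->pci_config(vha);
    }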
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 4218f20f5ed5..7bee87f90f6d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -35,7 +35,7 @@ static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
35 35
36static int qla2x00_restart_isp(scsi_qla_host_t *); 36static int qla2x00_restart_isp(scsi_qla_host_t *);
37 37
38static int qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev); 38static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
39 39
40static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); 40static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
41static int qla84xx_init_chip(scsi_qla_host_t *); 41static int qla84xx_init_chip(scsi_qla_host_t *);
@@ -55,77 +55,77 @@ static int qla84xx_init_chip(scsi_qla_host_t *);
55* 0 = success 55* 0 = success
56*/ 56*/
57int 57int
58qla2x00_initialize_adapter(scsi_qla_host_t *ha) 58qla2x00_initialize_adapter(scsi_qla_host_t *vha)
59{ 59{
60 int rval; 60 int rval;
61 61 struct qla_hw_data *ha = vha->hw;
62 /* Clear adapter flags. */ 62 /* Clear adapter flags. */
63 ha->flags.online = 0; 63 vha->flags.online = 0;
64 ha->flags.reset_active = 0; 64 vha->flags.reset_active = 0;
65 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 65 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
66 atomic_set(&ha->loop_state, LOOP_DOWN); 66 atomic_set(&vha->loop_state, LOOP_DOWN);
67 ha->device_flags = DFLG_NO_CABLE; 67 vha->device_flags = DFLG_NO_CABLE;
68 ha->dpc_flags = 0; 68 vha->dpc_flags = 0;
69 ha->flags.management_server_logged_in = 0; 69 vha->flags.management_server_logged_in = 0;
70 ha->marker_needed = 0; 70 vha->marker_needed = 0;
71 ha->mbx_flags = 0; 71 ha->mbx_flags = 0;
72 ha->isp_abort_cnt = 0; 72 ha->isp_abort_cnt = 0;
73 ha->beacon_blink_led = 0; 73 ha->beacon_blink_led = 0;
74 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 74 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
75 75
76 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); 76 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
77 rval = ha->isp_ops->pci_config(ha); 77 rval = ha->isp_ops->pci_config(vha);
78 if (rval) { 78 if (rval) {
79 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n", 79 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
80 ha->host_no)); 80 vha->host_no));
81 return (rval); 81 return (rval);
82 } 82 }
83 83
84 ha->isp_ops->reset_chip(ha); 84 ha->isp_ops->reset_chip(vha);
85 85
86 rval = qla2xxx_get_flash_info(ha); 86 rval = qla2xxx_get_flash_info(vha);
87 if (rval) { 87 if (rval) {
88 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n", 88 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
89 ha->host_no)); 89 vha->host_no));
90 return (rval); 90 return (rval);
91 } 91 }
92 92
93 ha->isp_ops->get_flash_version(ha, ha->request_ring); 93 ha->isp_ops->get_flash_version(vha, ha->req->ring);
94 94
95 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); 95 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
96 96
97 ha->isp_ops->nvram_config(ha); 97 ha->isp_ops->nvram_config(vha);
98 98
99 if (ha->flags.disable_serdes) { 99 if (ha->flags.disable_serdes) {
100 /* Mask HBA via NVRAM settings? */ 100 /* Mask HBA via NVRAM settings? */
101 qla_printk(KERN_INFO, ha, "Masking HBA WWPN " 101 qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
102 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n", 102 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
103 ha->port_name[0], ha->port_name[1], 103 vha->port_name[0], vha->port_name[1],
104 ha->port_name[2], ha->port_name[3], 104 vha->port_name[2], vha->port_name[3],
105 ha->port_name[4], ha->port_name[5], 105 vha->port_name[4], vha->port_name[5],
106 ha->port_name[6], ha->port_name[7]); 106 vha->port_name[6], vha->port_name[7]);
107 return QLA_FUNCTION_FAILED; 107 return QLA_FUNCTION_FAILED;
108 } 108 }
109 109
110 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n"); 110 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
111 111
112 if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) { 112 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
113 rval = ha->isp_ops->chip_diag(ha); 113 rval = ha->isp_ops->chip_diag(vha);
114 if (rval) 114 if (rval)
115 return (rval); 115 return (rval);
116 rval = qla2x00_setup_chip(ha); 116 rval = qla2x00_setup_chip(vha);
117 if (rval) 117 if (rval)
118 return (rval); 118 return (rval);
119 } 119 }
120 if (IS_QLA84XX(ha)) { 120 if (IS_QLA84XX(ha)) {
121 ha->cs84xx = qla84xx_get_chip(ha); 121 ha->cs84xx = qla84xx_get_chip(vha);
122 if (!ha->cs84xx) { 122 if (!ha->cs84xx) {
123 qla_printk(KERN_ERR, ha, 123 qla_printk(KERN_ERR, ha,
124 "Unable to configure ISP84XX.\n"); 124 "Unable to configure ISP84XX.\n");
125 return QLA_FUNCTION_FAILED; 125 return QLA_FUNCTION_FAILED;
126 } 126 }
127 } 127 }
128 rval = qla2x00_init_rings(ha); 128 rval = qla2x00_init_rings(vha);
129 129
130 return (rval); 130 return (rval);
131} 131}
@@ -137,10 +137,11 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
137 * Returns 0 on success. 137 * Returns 0 on success.
138 */ 138 */
139int 139int
140qla2100_pci_config(scsi_qla_host_t *ha) 140qla2100_pci_config(scsi_qla_host_t *vha)
141{ 141{
142 uint16_t w; 142 uint16_t w;
143 unsigned long flags; 143 unsigned long flags;
144 struct qla_hw_data *ha = vha->hw;
144 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 145 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
145 146
146 pci_set_master(ha->pdev); 147 pci_set_master(ha->pdev);
@@ -167,11 +168,12 @@ qla2100_pci_config(scsi_qla_host_t *ha)
167 * Returns 0 on success. 168 * Returns 0 on success.
168 */ 169 */
169int 170int
170qla2300_pci_config(scsi_qla_host_t *ha) 171qla2300_pci_config(scsi_qla_host_t *vha)
171{ 172{
172 uint16_t w; 173 uint16_t w;
173 unsigned long flags = 0; 174 unsigned long flags = 0;
174 uint32_t cnt; 175 uint32_t cnt;
176 struct qla_hw_data *ha = vha->hw;
175 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 177 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
176 178
177 pci_set_master(ha->pdev); 179 pci_set_master(ha->pdev);
@@ -248,10 +250,11 @@ qla2300_pci_config(scsi_qla_host_t *ha)
248 * Returns 0 on success. 250 * Returns 0 on success.
249 */ 251 */
250int 252int
251qla24xx_pci_config(scsi_qla_host_t *ha) 253qla24xx_pci_config(scsi_qla_host_t *vha)
252{ 254{
253 uint16_t w; 255 uint16_t w;
254 unsigned long flags = 0; 256 unsigned long flags = 0;
257 struct qla_hw_data *ha = vha->hw;
255 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 258 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
256 259
257 pci_set_master(ha->pdev); 260 pci_set_master(ha->pdev);
@@ -291,9 +294,10 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
291 * Returns 0 on success. 294 * Returns 0 on success.
292 */ 295 */
293int 296int
294qla25xx_pci_config(scsi_qla_host_t *ha) 297qla25xx_pci_config(scsi_qla_host_t *vha)
295{ 298{
296 uint16_t w; 299 uint16_t w;
300 struct qla_hw_data *ha = vha->hw;
297 301
298 pci_set_master(ha->pdev); 302 pci_set_master(ha->pdev);
299 pci_try_set_mwi(ha->pdev); 303 pci_try_set_mwi(ha->pdev);
@@ -321,32 +325,33 @@ qla25xx_pci_config(scsi_qla_host_t *ha)
321 * Returns 0 on success. 325 * Returns 0 on success.
322 */ 326 */
323static int 327static int
324qla2x00_isp_firmware(scsi_qla_host_t *ha) 328qla2x00_isp_firmware(scsi_qla_host_t *vha)
325{ 329{
326 int rval; 330 int rval;
327 uint16_t loop_id, topo, sw_cap; 331 uint16_t loop_id, topo, sw_cap;
328 uint8_t domain, area, al_pa; 332 uint8_t domain, area, al_pa;
333 struct qla_hw_data *ha = vha->hw;
329 334
330 /* Assume loading risc code */ 335 /* Assume loading risc code */
331 rval = QLA_FUNCTION_FAILED; 336 rval = QLA_FUNCTION_FAILED;
332 337
333 if (ha->flags.disable_risc_code_load) { 338 if (ha->flags.disable_risc_code_load) {
334 DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n", 339 DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
335 ha->host_no)); 340 vha->host_no));
336 qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n"); 341 qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
337 342
338 /* Verify checksum of loaded RISC code. */ 343 /* Verify checksum of loaded RISC code. */
339 rval = qla2x00_verify_checksum(ha, ha->fw_srisc_address); 344 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
340 if (rval == QLA_SUCCESS) { 345 if (rval == QLA_SUCCESS) {
341 /* And, verify we are not in ROM code. */ 346 /* And, verify we are not in ROM code. */
342 rval = qla2x00_get_adapter_id(ha, &loop_id, &al_pa, 347 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
343 &area, &domain, &topo, &sw_cap); 348 &area, &domain, &topo, &sw_cap);
344 } 349 }
345 } 350 }
346 351
347 if (rval) { 352 if (rval) {
348 DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n", 353 DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
349 ha->host_no)); 354 vha->host_no));
350 } 355 }
351 356
352 return (rval); 357 return (rval);
@@ -359,9 +364,10 @@ qla2x00_isp_firmware(scsi_qla_host_t *ha)
359 * Returns 0 on success. 364 * Returns 0 on success.
360 */ 365 */
361void 366void
362qla2x00_reset_chip(scsi_qla_host_t *ha) 367qla2x00_reset_chip(scsi_qla_host_t *vha)
363{ 368{
364 unsigned long flags = 0; 369 unsigned long flags = 0;
370 struct qla_hw_data *ha = vha->hw;
365 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 371 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
366 uint32_t cnt; 372 uint32_t cnt;
367 uint16_t cmd; 373 uint16_t cmd;
@@ -499,10 +505,11 @@ qla2x00_reset_chip(scsi_qla_host_t *ha)
499 * Returns 0 on success. 505 * Returns 0 on success.
500 */ 506 */
501static inline void 507static inline void
502qla24xx_reset_risc(scsi_qla_host_t *ha) 508qla24xx_reset_risc(scsi_qla_host_t *vha)
503{ 509{
504 int hw_evt = 0; 510 int hw_evt = 0;
505 unsigned long flags = 0; 511 unsigned long flags = 0;
512 struct qla_hw_data *ha = vha->hw;
506 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 513 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
507 uint32_t cnt, d2; 514 uint32_t cnt, d2;
508 uint16_t wd; 515 uint16_t wd;
@@ -541,7 +548,7 @@ qla24xx_reset_risc(scsi_qla_host_t *ha)
541 barrier(); 548 barrier();
542 } 549 }
543 if (cnt == 0 || hw_evt) 550 if (cnt == 0 || hw_evt)
544 qla2xxx_hw_event_log(ha, HW_EVENT_RESET_ERR, 551 qla2xxx_hw_event_log(vha, HW_EVENT_RESET_ERR,
545 RD_REG_WORD(&reg->mailbox1), RD_REG_WORD(&reg->mailbox2), 552 RD_REG_WORD(&reg->mailbox1), RD_REG_WORD(&reg->mailbox2),
546 RD_REG_WORD(&reg->mailbox3)); 553 RD_REG_WORD(&reg->mailbox3));
547 554
@@ -571,12 +578,13 @@ qla24xx_reset_risc(scsi_qla_host_t *ha)
571 * Returns 0 on success. 578 * Returns 0 on success.
572 */ 579 */
573void 580void
574qla24xx_reset_chip(scsi_qla_host_t *ha) 581qla24xx_reset_chip(scsi_qla_host_t *vha)
575{ 582{
583 struct qla_hw_data *ha = vha->hw;
576 ha->isp_ops->disable_intrs(ha); 584 ha->isp_ops->disable_intrs(ha);
577 585
578 /* Perform RISC reset. */ 586 /* Perform RISC reset. */
579 qla24xx_reset_risc(ha); 587 qla24xx_reset_risc(vha);
580} 588}
581 589
582/** 590/**
@@ -586,9 +594,10 @@ qla24xx_reset_chip(scsi_qla_host_t *ha)
586 * Returns 0 on success. 594 * Returns 0 on success.
587 */ 595 */
588int 596int
589qla2x00_chip_diag(scsi_qla_host_t *ha) 597qla2x00_chip_diag(scsi_qla_host_t *vha)
590{ 598{
591 int rval; 599 int rval;
600 struct qla_hw_data *ha = vha->hw;
592 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 601 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
593 unsigned long flags = 0; 602 unsigned long flags = 0;
594 uint16_t data; 603 uint16_t data;
@@ -599,7 +608,7 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
599 rval = QLA_FUNCTION_FAILED; 608 rval = QLA_FUNCTION_FAILED;
600 609
601 DEBUG3(printk("scsi(%ld): Testing device at %lx.\n", 610 DEBUG3(printk("scsi(%ld): Testing device at %lx.\n",
602 ha->host_no, (u_long)&reg->flash_address)); 611 vha->host_no, (u_long)&reg->flash_address));
603 612
604 spin_lock_irqsave(&ha->hardware_lock, flags); 613 spin_lock_irqsave(&ha->hardware_lock, flags);
605 614
@@ -662,17 +671,17 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
662 ha->product_id[3] = mb[4]; 671 ha->product_id[3] = mb[4];
663 672
664 /* Adjust fw RISC transfer size */ 673 /* Adjust fw RISC transfer size */
665 if (ha->request_q_length > 1024) 674 if (ha->req->length > 1024)
666 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; 675 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
667 else 676 else
668 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 677 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
669 ha->request_q_length; 678 ha->req->length;
670 679
671 if (IS_QLA2200(ha) && 680 if (IS_QLA2200(ha) &&
672 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { 681 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
673 /* Limit firmware transfer size with a 2200A */ 682 /* Limit firmware transfer size with a 2200A */
674 DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n", 683 DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n",
675 ha->host_no)); 684 vha->host_no));
676 685
677 ha->device_type |= DT_ISP2200A; 686 ha->device_type |= DT_ISP2200A;
678 ha->fw_transfer_size = 128; 687 ha->fw_transfer_size = 128;
@@ -681,11 +690,11 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
681 /* Wrap Incoming Mailboxes Test. */ 690 /* Wrap Incoming Mailboxes Test. */
682 spin_unlock_irqrestore(&ha->hardware_lock, flags); 691 spin_unlock_irqrestore(&ha->hardware_lock, flags);
683 692
684 DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", ha->host_no)); 693 DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));
685 rval = qla2x00_mbx_reg_test(ha); 694 rval = qla2x00_mbx_reg_test(vha);
686 if (rval) { 695 if (rval) {
687 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 696 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
688 ha->host_no)); 697 vha->host_no));
689 qla_printk(KERN_WARNING, ha, 698 qla_printk(KERN_WARNING, ha,
690 "Failed mailbox send register test\n"); 699 "Failed mailbox send register test\n");
691 } 700 }
@@ -698,7 +707,7 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
698chip_diag_failed: 707chip_diag_failed:
699 if (rval) 708 if (rval)
700 DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED " 709 DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED "
701 "****\n", ha->host_no)); 710 "****\n", vha->host_no));
702 711
703 spin_unlock_irqrestore(&ha->hardware_lock, flags); 712 spin_unlock_irqrestore(&ha->hardware_lock, flags);
704 713
@@ -712,19 +721,20 @@ chip_diag_failed:
712 * Returns 0 on success. 721 * Returns 0 on success.
713 */ 722 */
714int 723int
715qla24xx_chip_diag(scsi_qla_host_t *ha) 724qla24xx_chip_diag(scsi_qla_host_t *vha)
716{ 725{
717 int rval; 726 int rval;
727 struct qla_hw_data *ha = vha->hw;
718 728
719 /* Perform RISC reset. */ 729 /* Perform RISC reset. */
720 qla24xx_reset_risc(ha); 730 qla24xx_reset_risc(vha);
721 731
722 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * ha->request_q_length; 732 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * ha->req->length;
723 733
724 rval = qla2x00_mbx_reg_test(ha); 734 rval = qla2x00_mbx_reg_test(vha);
725 if (rval) { 735 if (rval) {
726 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 736 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
727 ha->host_no)); 737 vha->host_no));
728 qla_printk(KERN_WARNING, ha, 738 qla_printk(KERN_WARNING, ha,
729 "Failed mailbox send register test\n"); 739 "Failed mailbox send register test\n");
730 } else { 740 } else {
@@ -736,13 +746,14 @@ qla24xx_chip_diag(scsi_qla_host_t *ha)
736} 746}
737 747
738void 748void
739qla2x00_alloc_fw_dump(scsi_qla_host_t *ha) 749qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
740{ 750{
741 int rval; 751 int rval;
742 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size, 752 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
743 eft_size, fce_size; 753 eft_size, fce_size;
744 dma_addr_t tc_dma; 754 dma_addr_t tc_dma;
745 void *tc; 755 void *tc;
756 struct qla_hw_data *ha = vha->hw;
746 757
747 if (ha->fw_dump) { 758 if (ha->fw_dump) {
748 qla_printk(KERN_WARNING, ha, 759 qla_printk(KERN_WARNING, ha,
@@ -778,7 +789,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
778 } 789 }
779 790
780 memset(tc, 0, FCE_SIZE); 791 memset(tc, 0, FCE_SIZE);
781 rval = qla2x00_enable_fce_trace(ha, tc_dma, FCE_NUM_BUFFERS, 792 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
782 ha->fce_mb, &ha->fce_bufs); 793 ha->fce_mb, &ha->fce_bufs);
783 if (rval) { 794 if (rval) {
784 qla_printk(KERN_WARNING, ha, "Unable to initialize " 795 qla_printk(KERN_WARNING, ha, "Unable to initialize "
@@ -807,7 +818,7 @@ try_eft:
807 } 818 }
808 819
809 memset(tc, 0, EFT_SIZE); 820 memset(tc, 0, EFT_SIZE);
810 rval = qla2x00_enable_eft_trace(ha, tc_dma, EFT_NUM_BUFFERS); 821 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
811 if (rval) { 822 if (rval) {
812 qla_printk(KERN_WARNING, ha, "Unable to initialize " 823 qla_printk(KERN_WARNING, ha, "Unable to initialize "
813 "EFT (%d).\n", rval); 824 "EFT (%d).\n", rval);
@@ -824,8 +835,8 @@ try_eft:
824 ha->eft = tc; 835 ha->eft = tc;
825 } 836 }
826cont_alloc: 837cont_alloc:
827 req_q_size = ha->request_q_length * sizeof(request_t); 838 req_q_size = ha->req->length * sizeof(request_t);
828 rsp_q_size = ha->response_q_length * sizeof(response_t); 839 rsp_q_size = ha->rsp->length * sizeof(response_t);
829 840
830 dump_size = offsetof(struct qla2xxx_fw_dump, isp); 841 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
831 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + 842 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
@@ -875,27 +886,29 @@ cont_alloc:
875 * Returns 0 on success. 886 * Returns 0 on success.
876 */ 887 */
877static void 888static void
878qla2x00_resize_request_q(scsi_qla_host_t *ha) 889qla2x00_resize_request_q(scsi_qla_host_t *vha)
879{ 890{
880 int rval; 891 int rval;
881 uint16_t fw_iocb_cnt = 0; 892 uint16_t fw_iocb_cnt = 0;
882 uint16_t request_q_length = REQUEST_ENTRY_CNT_2XXX_EXT_MEM; 893 uint16_t request_q_length = REQUEST_ENTRY_CNT_2XXX_EXT_MEM;
883 dma_addr_t request_dma; 894 dma_addr_t request_dma;
884 request_t *request_ring; 895 request_t *request_ring;
896 struct qla_hw_data *ha = vha->hw;
897 struct req_que *req = ha->req;
885 898
886 /* Valid only on recent ISPs. */ 899 /* Valid only on recent ISPs. */
887 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 900 if (IS_QLA2100(ha) || IS_QLA2200(ha))
888 return; 901 return;
889 902
890 /* Retrieve IOCB counts available to the firmware. */ 903 /* Retrieve IOCB counts available to the firmware. */
891 rval = qla2x00_get_resource_cnts(ha, NULL, NULL, NULL, &fw_iocb_cnt, 904 rval = qla2x00_get_resource_cnts(vha, NULL, NULL, NULL, &fw_iocb_cnt,
892 &ha->max_npiv_vports); 905 &ha->max_npiv_vports);
893 if (rval) 906 if (rval)
894 return; 907 return;
895 /* No point in continuing if current settings are sufficient. */ 908 /* No point in continuing if current settings are sufficient. */
896 if (fw_iocb_cnt < 1024) 909 if (fw_iocb_cnt < 1024)
897 return; 910 return;
898 if (ha->request_q_length >= request_q_length) 911 if (req->length >= request_q_length)
899 return; 912 return;
900 913
901 /* Attempt to claim larger area for request queue. */ 914 /* Attempt to claim larger area for request queue. */
@@ -909,17 +922,17 @@ qla2x00_resize_request_q(scsi_qla_host_t *ha)
909 qla_printk(KERN_INFO, ha, "Extended memory detected (%d KB)...\n", 922 qla_printk(KERN_INFO, ha, "Extended memory detected (%d KB)...\n",
910 (ha->fw_memory_size + 1) / 1024); 923 (ha->fw_memory_size + 1) / 1024);
911 qla_printk(KERN_INFO, ha, "Resizing request queue depth " 924 qla_printk(KERN_INFO, ha, "Resizing request queue depth "
912 "(%d -> %d)...\n", ha->request_q_length, request_q_length); 925 "(%d -> %d)...\n", req->length, request_q_length);
913 926
914 /* Clear old allocations. */ 927 /* Clear old allocations. */
915 dma_free_coherent(&ha->pdev->dev, 928 dma_free_coherent(&ha->pdev->dev,
916 (ha->request_q_length + 1) * sizeof(request_t), ha->request_ring, 929 (req->length + 1) * sizeof(request_t), req->ring,
917 ha->request_dma); 930 req->dma);
918 931
919 /* Begin using larger queue. */ 932 /* Begin using larger queue. */
920 ha->request_q_length = request_q_length; 933 req->length = request_q_length;
921 ha->request_ring = request_ring; 934 req->ring = request_ring;
922 ha->request_dma = request_dma; 935 req->dma = request_dma;
923} 936}
924 937
925/** 938/**
@@ -929,10 +942,11 @@ qla2x00_resize_request_q(scsi_qla_host_t *ha)
929 * Returns 0 on success. 942 * Returns 0 on success.
930 */ 943 */
931static int 944static int
932qla2x00_setup_chip(scsi_qla_host_t *ha) 945qla2x00_setup_chip(scsi_qla_host_t *vha)
933{ 946{
934 int rval; 947 int rval;
935 uint32_t srisc_address = 0; 948 uint32_t srisc_address = 0;
949 struct qla_hw_data *ha = vha->hw;
936 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 950 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
937 unsigned long flags; 951 unsigned long flags;
938 952
@@ -945,28 +959,27 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
945 } 959 }
946 960
947 /* Load firmware sequences */ 961 /* Load firmware sequences */
948 rval = ha->isp_ops->load_risc(ha, &srisc_address); 962 rval = ha->isp_ops->load_risc(vha, &srisc_address);
949 if (rval == QLA_SUCCESS) { 963 if (rval == QLA_SUCCESS) {
950 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC " 964 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
951 "code.\n", ha->host_no)); 965 "code.\n", vha->host_no));
952 966
953 rval = qla2x00_verify_checksum(ha, srisc_address); 967 rval = qla2x00_verify_checksum(vha, srisc_address);
954 if (rval == QLA_SUCCESS) { 968 if (rval == QLA_SUCCESS) {
955 /* Start firmware execution. */ 969 /* Start firmware execution. */
956 DEBUG(printk("scsi(%ld): Checksum OK, start " 970 DEBUG(printk("scsi(%ld): Checksum OK, start "
957 "firmware.\n", ha->host_no)); 971 "firmware.\n", vha->host_no));
958 972
959 rval = qla2x00_execute_fw(ha, srisc_address); 973 rval = qla2x00_execute_fw(vha, srisc_address);
960 /* Retrieve firmware information. */ 974 /* Retrieve firmware information. */
961 if (rval == QLA_SUCCESS && ha->fw_major_version == 0) { 975 if (rval == QLA_SUCCESS && ha->fw_major_version == 0) {
962 qla2x00_get_fw_version(ha, 976 qla2x00_get_fw_version(vha,
963 &ha->fw_major_version, 977 &ha->fw_major_version,
964 &ha->fw_minor_version, 978 &ha->fw_minor_version,
965 &ha->fw_subminor_version, 979 &ha->fw_subminor_version,
966 &ha->fw_attributes, &ha->fw_memory_size); 980 &ha->fw_attributes, &ha->fw_memory_size);
967 ha->flags.npiv_supported = 0; 981 ha->flags.npiv_supported = 0;
968 if ((IS_QLA24XX(ha) || IS_QLA25XX(ha) || 982 if (IS_QLA2XXX_MIDTYPE(ha) &&
969 IS_QLA84XX(ha)) &&
970 (ha->fw_attributes & BIT_2)) { 983 (ha->fw_attributes & BIT_2)) {
971 ha->flags.npiv_supported = 1; 984 ha->flags.npiv_supported = 1;
972 if ((!ha->max_npiv_vports) || 985 if ((!ha->max_npiv_vports) ||
@@ -975,15 +988,15 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
975 ha->max_npiv_vports = 988 ha->max_npiv_vports =
976 MIN_MULTI_ID_FABRIC - 1; 989 MIN_MULTI_ID_FABRIC - 1;
977 } 990 }
978 qla2x00_resize_request_q(ha); 991 qla2x00_resize_request_q(vha);
979 992
980 if (ql2xallocfwdump) 993 if (ql2xallocfwdump)
981 qla2x00_alloc_fw_dump(ha); 994 qla2x00_alloc_fw_dump(vha);
982 } 995 }
983 } else { 996 } else {
984 DEBUG2(printk(KERN_INFO 997 DEBUG2(printk(KERN_INFO
985 "scsi(%ld): ISP Firmware failed checksum.\n", 998 "scsi(%ld): ISP Firmware failed checksum.\n",
986 ha->host_no)); 999 vha->host_no));
987 } 1000 }
988 } 1001 }
989 1002
@@ -1002,7 +1015,7 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
1002 1015
1003 if (rval) { 1016 if (rval) {
1004 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", 1017 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
1005 ha->host_no)); 1018 vha->host_no));
1006 } 1019 }
1007 1020
1008 return (rval); 1021 return (rval);
@@ -1018,13 +1031,14 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
1018 * Returns 0 on success. 1031 * Returns 0 on success.
1019 */ 1032 */
1020static void 1033static void
1021qla2x00_init_response_q_entries(scsi_qla_host_t *ha) 1034qla2x00_init_response_q_entries(scsi_qla_host_t *vha)
1022{ 1035{
1023 uint16_t cnt; 1036 uint16_t cnt;
1024 response_t *pkt; 1037 response_t *pkt;
1038 struct rsp_que *rsp = vha->hw->rsp;
1025 1039
1026 pkt = ha->response_ring_ptr; 1040 pkt = rsp->ring_ptr;
1027 for (cnt = 0; cnt < ha->response_q_length; cnt++) { 1041 for (cnt = 0; cnt < rsp->length; cnt++) {
1028 pkt->signature = RESPONSE_PROCESSED; 1042 pkt->signature = RESPONSE_PROCESSED;
1029 pkt++; 1043 pkt++;
1030 } 1044 }
@@ -1038,19 +1052,20 @@ qla2x00_init_response_q_entries(scsi_qla_host_t *ha)
1038 * Returns 0 on success. 1052 * Returns 0 on success.
1039 */ 1053 */
1040void 1054void
1041qla2x00_update_fw_options(scsi_qla_host_t *ha) 1055qla2x00_update_fw_options(scsi_qla_host_t *vha)
1042{ 1056{
1043 uint16_t swing, emphasis, tx_sens, rx_sens; 1057 uint16_t swing, emphasis, tx_sens, rx_sens;
1058 struct qla_hw_data *ha = vha->hw;
1044 1059
1045 memset(ha->fw_options, 0, sizeof(ha->fw_options)); 1060 memset(ha->fw_options, 0, sizeof(ha->fw_options));
1046 qla2x00_get_fw_options(ha, ha->fw_options); 1061 qla2x00_get_fw_options(vha, ha->fw_options);
1047 1062
1048 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 1063 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1049 return; 1064 return;
1050 1065
1051 /* Serial Link options. */ 1066 /* Serial Link options. */
1052 DEBUG3(printk("scsi(%ld): Serial link options:\n", 1067 DEBUG3(printk("scsi(%ld): Serial link options:\n",
1053 ha->host_no)); 1068 vha->host_no));
1054 DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options, 1069 DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options,
1055 sizeof(ha->fw_seriallink_options))); 1070 sizeof(ha->fw_seriallink_options)));
1056 1071
@@ -1108,19 +1123,20 @@ qla2x00_update_fw_options(scsi_qla_host_t *ha)
1108 ha->fw_options[2] |= BIT_13; 1123 ha->fw_options[2] |= BIT_13;
1109 1124
1110 /* Update firmware options. */ 1125 /* Update firmware options. */
1111 qla2x00_set_fw_options(ha, ha->fw_options); 1126 qla2x00_set_fw_options(vha, ha->fw_options);
1112} 1127}
1113 1128
1114void 1129void
1115qla24xx_update_fw_options(scsi_qla_host_t *ha) 1130qla24xx_update_fw_options(scsi_qla_host_t *vha)
1116{ 1131{
1117 int rval; 1132 int rval;
1133 struct qla_hw_data *ha = vha->hw;
1118 1134
1119 /* Update Serial Link options. */ 1135 /* Update Serial Link options. */
1120 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) 1136 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
1121 return; 1137 return;
1122 1138
1123 rval = qla2x00_set_serdes_params(ha, 1139 rval = qla2x00_set_serdes_params(vha,
1124 le16_to_cpu(ha->fw_seriallink_options24[1]), 1140 le16_to_cpu(ha->fw_seriallink_options24[1]),
1125 le16_to_cpu(ha->fw_seriallink_options24[2]), 1141 le16_to_cpu(ha->fw_seriallink_options24[2]),
1126 le16_to_cpu(ha->fw_seriallink_options24[3])); 1142 le16_to_cpu(ha->fw_seriallink_options24[3]));
@@ -1131,19 +1147,22 @@ qla24xx_update_fw_options(scsi_qla_host_t *ha)
1131} 1147}
1132 1148
1133void 1149void
1134qla2x00_config_rings(struct scsi_qla_host *ha) 1150qla2x00_config_rings(struct scsi_qla_host *vha)
1135{ 1151{
1152 struct qla_hw_data *ha = vha->hw;
1136 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1153 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1154 struct req_que *req = ha->req;
1155 struct rsp_que *rsp = ha->rsp;
1137 1156
1138 /* Setup ring parameters in initialization control block. */ 1157 /* Setup ring parameters in initialization control block. */
1139 ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0); 1158 ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
1140 ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0); 1159 ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
1141 ha->init_cb->request_q_length = cpu_to_le16(ha->request_q_length); 1160 ha->init_cb->request_q_length = cpu_to_le16(req->length);
1142 ha->init_cb->response_q_length = cpu_to_le16(ha->response_q_length); 1161 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
1143 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(ha->request_dma)); 1162 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1144 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(ha->request_dma)); 1163 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1145 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(ha->response_dma)); 1164 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1146 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(ha->response_dma)); 1165 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1147 1166
1148 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0); 1167 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
1149 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0); 1168 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
@@ -1153,21 +1172,24 @@ qla2x00_config_rings(struct scsi_qla_host *ha)
1153} 1172}
1154 1173
1155void 1174void
1156qla24xx_config_rings(struct scsi_qla_host *ha) 1175qla24xx_config_rings(struct scsi_qla_host *vha)
1157{ 1176{
1177 struct qla_hw_data *ha = vha->hw;
1158 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1178 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1159 struct init_cb_24xx *icb; 1179 struct init_cb_24xx *icb;
1180 struct req_que *req = ha->req;
1181 struct rsp_que *rsp = ha->rsp;
1160 1182
1161 /* Setup ring parameters in initialization control block. */ 1183 /* Setup ring parameters in initialization control block. */
1162 icb = (struct init_cb_24xx *)ha->init_cb; 1184 icb = (struct init_cb_24xx *)ha->init_cb;
1163 icb->request_q_outpointer = __constant_cpu_to_le16(0); 1185 icb->request_q_outpointer = __constant_cpu_to_le16(0);
1164 icb->response_q_inpointer = __constant_cpu_to_le16(0); 1186 icb->response_q_inpointer = __constant_cpu_to_le16(0);
1165 icb->request_q_length = cpu_to_le16(ha->request_q_length); 1187 icb->request_q_length = cpu_to_le16(req->length);
1166 icb->response_q_length = cpu_to_le16(ha->response_q_length); 1188 icb->response_q_length = cpu_to_le16(rsp->length);
1167 icb->request_q_address[0] = cpu_to_le32(LSD(ha->request_dma)); 1189 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1168 icb->request_q_address[1] = cpu_to_le32(MSD(ha->request_dma)); 1190 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1169 icb->response_q_address[0] = cpu_to_le32(LSD(ha->response_dma)); 1191 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1170 icb->response_q_address[1] = cpu_to_le32(MSD(ha->response_dma)); 1192 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1171 1193
1172 WRT_REG_DWORD(&reg->req_q_in, 0); 1194 WRT_REG_DWORD(&reg->req_q_in, 0);
1173 WRT_REG_DWORD(&reg->req_q_out, 0); 1195 WRT_REG_DWORD(&reg->req_q_out, 0);
@@ -1186,11 +1208,14 @@ qla24xx_config_rings(struct scsi_qla_host *ha)
1186 * Returns 0 on success. 1208 * Returns 0 on success.
1187 */ 1209 */
1188static int 1210static int
1189qla2x00_init_rings(scsi_qla_host_t *ha) 1211qla2x00_init_rings(scsi_qla_host_t *vha)
1190{ 1212{
1191 int rval; 1213 int rval;
1192 unsigned long flags = 0; 1214 unsigned long flags = 0;
1193 int cnt; 1215 int cnt;
1216 struct qla_hw_data *ha = vha->hw;
1217 struct req_que *req = ha->req;
1218 struct rsp_que *rsp = ha->rsp;
1194 struct mid_init_cb_24xx *mid_init_cb = 1219 struct mid_init_cb_24xx *mid_init_cb =
1195 (struct mid_init_cb_24xx *) ha->init_cb; 1220 (struct mid_init_cb_24xx *) ha->init_cb;
1196 1221
@@ -1198,45 +1223,45 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
1198 1223
1199 /* Clear outstanding commands array. */ 1224 /* Clear outstanding commands array. */
1200 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) 1225 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
1201 ha->outstanding_cmds[cnt] = NULL; 1226 req->outstanding_cmds[cnt] = NULL;
1202 1227
1203 ha->current_outstanding_cmd = 0; 1228 req->current_outstanding_cmd = 0;
1204 1229
1205 /* Clear RSCN queue. */ 1230 /* Clear RSCN queue. */
1206 ha->rscn_in_ptr = 0; 1231 vha->rscn_in_ptr = 0;
1207 ha->rscn_out_ptr = 0; 1232 vha->rscn_out_ptr = 0;
1208 1233
1209 /* Initialize firmware. */ 1234 /* Initialize firmware. */
1210 ha->request_ring_ptr = ha->request_ring; 1235 req->ring_ptr = req->ring;
1211 ha->req_ring_index = 0; 1236 req->ring_index = 0;
1212 ha->req_q_cnt = ha->request_q_length; 1237 req->cnt = req->length;
1213 ha->response_ring_ptr = ha->response_ring; 1238 rsp->ring_ptr = rsp->ring;
1214 ha->rsp_ring_index = 0; 1239 rsp->ring_index = 0;
1215 1240
1216 /* Initialize response queue entries */ 1241 /* Initialize response queue entries */
1217 qla2x00_init_response_q_entries(ha); 1242 qla2x00_init_response_q_entries(vha);
1218 1243
1219 ha->isp_ops->config_rings(ha); 1244 ha->isp_ops->config_rings(vha);
1220 1245
1221 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1246 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1222 1247
1223 /* Update any ISP specific firmware options before initialization. */ 1248 /* Update any ISP specific firmware options before initialization. */
1224 ha->isp_ops->update_fw_options(ha); 1249 ha->isp_ops->update_fw_options(vha);
1225 1250
1226 DEBUG(printk("scsi(%ld): Issue init firmware.\n", ha->host_no)); 1251 DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no));
1227 1252
1228 if (ha->flags.npiv_supported) 1253 if (ha->flags.npiv_supported)
1229 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); 1254 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
1230 1255
1231 mid_init_cb->options = __constant_cpu_to_le16(BIT_1); 1256 mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
1232 1257
1233 rval = qla2x00_init_firmware(ha, ha->init_cb_size); 1258 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
1234 if (rval) { 1259 if (rval) {
1235 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n", 1260 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
1236 ha->host_no)); 1261 vha->host_no));
1237 } else { 1262 } else {
1238 DEBUG3(printk("scsi(%ld): Init firmware -- success.\n", 1263 DEBUG3(printk("scsi(%ld): Init firmware -- success.\n",
1239 ha->host_no)); 1264 vha->host_no));
1240 } 1265 }
1241 1266
1242 return (rval); 1267 return (rval);
@@ -1249,13 +1274,14 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
1249 * Returns 0 on success. 1274 * Returns 0 on success.
1250 */ 1275 */
1251static int 1276static int
1252qla2x00_fw_ready(scsi_qla_host_t *ha) 1277qla2x00_fw_ready(scsi_qla_host_t *vha)
1253{ 1278{
1254 int rval; 1279 int rval;
1255 unsigned long wtime, mtime, cs84xx_time; 1280 unsigned long wtime, mtime, cs84xx_time;
1256 uint16_t min_wait; /* Minimum wait time if loop is down */ 1281 uint16_t min_wait; /* Minimum wait time if loop is down */
1257 uint16_t wait_time; /* Wait time if loop is coming ready */ 1282 uint16_t wait_time; /* Wait time if loop is coming ready */
1258 uint16_t state[3]; 1283 uint16_t state[3];
1284 struct qla_hw_data *ha = vha->hw;
1259 1285
1260 rval = QLA_SUCCESS; 1286 rval = QLA_SUCCESS;
1261 1287
@@ -1277,29 +1303,29 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1277 wtime = jiffies + (wait_time * HZ); 1303 wtime = jiffies + (wait_time * HZ);
1278 1304
1279 /* Wait for ISP to finish LIP */ 1305 /* Wait for ISP to finish LIP */
1280 if (!ha->flags.init_done) 1306 if (!vha->flags.init_done)
1281 qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n"); 1307 qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n");
1282 1308
1283 DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n", 1309 DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
1284 ha->host_no)); 1310 vha->host_no));
1285 1311
1286 do { 1312 do {
1287 rval = qla2x00_get_firmware_state(ha, state); 1313 rval = qla2x00_get_firmware_state(vha, state);
1288 if (rval == QLA_SUCCESS) { 1314 if (rval == QLA_SUCCESS) {
1289 if (state[0] < FSTATE_LOSS_OF_SYNC) { 1315 if (state[0] < FSTATE_LOSS_OF_SYNC) {
1290 ha->device_flags &= ~DFLG_NO_CABLE; 1316 vha->device_flags &= ~DFLG_NO_CABLE;
1291 } 1317 }
1292 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { 1318 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
1293 DEBUG16(printk("scsi(%ld): fw_state=%x " 1319 DEBUG16(printk("scsi(%ld): fw_state=%x "
1294 "84xx=%x.\n", ha->host_no, state[0], 1320 "84xx=%x.\n", vha->host_no, state[0],
1295 state[2])); 1321 state[2]));
1296 if ((state[2] & FSTATE_LOGGED_IN) && 1322 if ((state[2] & FSTATE_LOGGED_IN) &&
1297 (state[2] & FSTATE_WAITING_FOR_VERIFY)) { 1323 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
1298 DEBUG16(printk("scsi(%ld): Sending " 1324 DEBUG16(printk("scsi(%ld): Sending "
1299 "verify iocb.\n", ha->host_no)); 1325 "verify iocb.\n", vha->host_no));
1300 1326
1301 cs84xx_time = jiffies; 1327 cs84xx_time = jiffies;
1302 rval = qla84xx_init_chip(ha); 1328 rval = qla84xx_init_chip(vha);
1303 if (rval != QLA_SUCCESS) 1329 if (rval != QLA_SUCCESS)
1304 break; 1330 break;
1305 1331
@@ -1309,13 +1335,13 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1309 mtime += cs84xx_time; 1335 mtime += cs84xx_time;
1310 DEBUG16(printk("scsi(%ld): Increasing " 1336 DEBUG16(printk("scsi(%ld): Increasing "
1311 "wait time by %ld. New time %ld\n", 1337 "wait time by %ld. New time %ld\n",
1312 ha->host_no, cs84xx_time, wtime)); 1338 vha->host_no, cs84xx_time, wtime));
1313 } 1339 }
1314 } else if (state[0] == FSTATE_READY) { 1340 } else if (state[0] == FSTATE_READY) {
1315 DEBUG(printk("scsi(%ld): F/W Ready - OK \n", 1341 DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
1316 ha->host_no)); 1342 vha->host_no));
1317 1343
1318 qla2x00_get_retry_cnt(ha, &ha->retry_count, 1344 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1319 &ha->login_timeout, &ha->r_a_tov); 1345 &ha->login_timeout, &ha->r_a_tov);
1320 1346
1321 rval = QLA_SUCCESS; 1347 rval = QLA_SUCCESS;
@@ -1324,7 +1350,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1324 1350
1325 rval = QLA_FUNCTION_FAILED; 1351 rval = QLA_FUNCTION_FAILED;
1326 1352
1327 if (atomic_read(&ha->loop_down_timer) && 1353 if (atomic_read(&vha->loop_down_timer) &&
1328 state[0] != FSTATE_READY) { 1354 state[0] != FSTATE_READY) {
1329 /* Loop down. Timeout on min_wait for states 1355 /* Loop down. Timeout on min_wait for states
1330 * other than Wait for Login. 1356 * other than Wait for Login.
@@ -1333,7 +1359,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1333 qla_printk(KERN_INFO, ha, 1359 qla_printk(KERN_INFO, ha,
1334 "Cable is unplugged...\n"); 1360 "Cable is unplugged...\n");
1335 1361
1336 ha->device_flags |= DFLG_NO_CABLE; 1362 vha->device_flags |= DFLG_NO_CABLE;
1337 break; 1363 break;
1338 } 1364 }
1339 } 1365 }
@@ -1350,15 +1376,15 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1350 msleep(500); 1376 msleep(500);
1351 1377
1352 DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1378 DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
1353 ha->host_no, state[0], jiffies)); 1379 vha->host_no, state[0], jiffies));
1354 } while (1); 1380 } while (1);
1355 1381
1356 DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1382 DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
1357 ha->host_no, state[0], jiffies)); 1383 vha->host_no, state[0], jiffies));
1358 1384
1359 if (rval) { 1385 if (rval) {
1360 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", 1386 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
1361 ha->host_no)); 1387 vha->host_no));
1362 } 1388 }
1363 1389
1364 return (rval); 1390 return (rval);
@@ -1378,7 +1404,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1378* Kernel context. 1404* Kernel context.
1379*/ 1405*/
1380static int 1406static int
1381qla2x00_configure_hba(scsi_qla_host_t *ha) 1407qla2x00_configure_hba(scsi_qla_host_t *vha)
1382{ 1408{
1383 int rval; 1409 int rval;
1384 uint16_t loop_id; 1410 uint16_t loop_id;
@@ -1388,19 +1414,20 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1388 uint8_t area; 1414 uint8_t area;
1389 uint8_t domain; 1415 uint8_t domain;
1390 char connect_type[22]; 1416 char connect_type[22];
1417 struct qla_hw_data *ha = vha->hw;
1391 1418
1392 /* Get host addresses. */ 1419 /* Get host addresses. */
1393 rval = qla2x00_get_adapter_id(ha, 1420 rval = qla2x00_get_adapter_id(vha,
1394 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap); 1421 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
1395 if (rval != QLA_SUCCESS) { 1422 if (rval != QLA_SUCCESS) {
1396 if (LOOP_TRANSITION(ha) || atomic_read(&ha->loop_down_timer) || 1423 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
1397 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { 1424 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
1398 DEBUG2(printk("%s(%ld) Loop is in a transition state\n", 1425 DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
1399 __func__, ha->host_no)); 1426 __func__, vha->host_no));
1400 } else { 1427 } else {
1401 qla_printk(KERN_WARNING, ha, 1428 qla_printk(KERN_WARNING, ha,
1402 "ERROR -- Unable to get host loop ID.\n"); 1429 "ERROR -- Unable to get host loop ID.\n");
1403 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1430 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1404 } 1431 }
1405 return (rval); 1432 return (rval);
1406 } 1433 }
@@ -1411,7 +1438,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1411 return (QLA_FUNCTION_FAILED); 1438 return (QLA_FUNCTION_FAILED);
1412 } 1439 }
1413 1440
1414 ha->loop_id = loop_id; 1441 vha->loop_id = loop_id;
1415 1442
1416 /* initialize */ 1443 /* initialize */
1417 ha->min_external_loopid = SNS_FIRST_LOOP_ID; 1444 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
@@ -1421,14 +1448,14 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1421 switch (topo) { 1448 switch (topo) {
1422 case 0: 1449 case 0:
1423 DEBUG3(printk("scsi(%ld): HBA in NL topology.\n", 1450 DEBUG3(printk("scsi(%ld): HBA in NL topology.\n",
1424 ha->host_no)); 1451 vha->host_no));
1425 ha->current_topology = ISP_CFG_NL; 1452 ha->current_topology = ISP_CFG_NL;
1426 strcpy(connect_type, "(Loop)"); 1453 strcpy(connect_type, "(Loop)");
1427 break; 1454 break;
1428 1455
1429 case 1: 1456 case 1:
1430 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n", 1457 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
1431 ha->host_no)); 1458 vha->host_no));
1432 ha->switch_cap = sw_cap; 1459 ha->switch_cap = sw_cap;
1433 ha->current_topology = ISP_CFG_FL; 1460 ha->current_topology = ISP_CFG_FL;
1434 strcpy(connect_type, "(FL_Port)"); 1461 strcpy(connect_type, "(FL_Port)");
@@ -1436,7 +1463,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1436 1463
1437 case 2: 1464 case 2:
1438 DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n", 1465 DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n",
1439 ha->host_no)); 1466 vha->host_no));
1440 ha->operating_mode = P2P; 1467 ha->operating_mode = P2P;
1441 ha->current_topology = ISP_CFG_N; 1468 ha->current_topology = ISP_CFG_N;
1442 strcpy(connect_type, "(N_Port-to-N_Port)"); 1469 strcpy(connect_type, "(N_Port-to-N_Port)");
@@ -1444,7 +1471,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1444 1471
1445 case 3: 1472 case 3:
1446 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n", 1473 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
1447 ha->host_no)); 1474 vha->host_no));
1448 ha->switch_cap = sw_cap; 1475 ha->switch_cap = sw_cap;
1449 ha->operating_mode = P2P; 1476 ha->operating_mode = P2P;
1450 ha->current_topology = ISP_CFG_F; 1477 ha->current_topology = ISP_CFG_F;
@@ -1454,7 +1481,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1454 default: 1481 default:
1455 DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. " 1482 DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. "
1456 "Using NL.\n", 1483 "Using NL.\n",
1457 ha->host_no, topo)); 1484 vha->host_no, topo));
1458 ha->current_topology = ISP_CFG_NL; 1485 ha->current_topology = ISP_CFG_NL;
1459 strcpy(connect_type, "(Loop)"); 1486 strcpy(connect_type, "(Loop)");
1460 break; 1487 break;
@@ -1462,29 +1489,31 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1462 1489
1463 /* Save Host port and loop ID. */ 1490 /* Save Host port and loop ID. */
1464 /* byte order - Big Endian */ 1491 /* byte order - Big Endian */
1465 ha->d_id.b.domain = domain; 1492 vha->d_id.b.domain = domain;
1466 ha->d_id.b.area = area; 1493 vha->d_id.b.area = area;
1467 ha->d_id.b.al_pa = al_pa; 1494 vha->d_id.b.al_pa = al_pa;
1468 1495
1469 if (!ha->flags.init_done) 1496 if (!vha->flags.init_done)
1470 qla_printk(KERN_INFO, ha, 1497 qla_printk(KERN_INFO, ha,
1471 "Topology - %s, Host Loop address 0x%x\n", 1498 "Topology - %s, Host Loop address 0x%x\n",
1472 connect_type, ha->loop_id); 1499 connect_type, vha->loop_id);
1473 1500
1474 if (rval) { 1501 if (rval) {
1475 DEBUG2_3(printk("scsi(%ld): FAILED.\n", ha->host_no)); 1502 DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no));
1476 } else { 1503 } else {
1477 DEBUG3(printk("scsi(%ld): exiting normally.\n", ha->host_no)); 1504 DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no));
1478 } 1505 }
1479 1506
1480 return(rval); 1507 return(rval);
1481} 1508}
1482 1509
1483static inline void 1510static inline void
1484qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *def) 1511qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
1512 char *def)
1485{ 1513{
1486 char *st, *en; 1514 char *st, *en;
1487 uint16_t index; 1515 uint16_t index;
1516 struct qla_hw_data *ha = vha->hw;
1488 1517
1489 if (memcmp(model, BINZERO, len) != 0) { 1518 if (memcmp(model, BINZERO, len) != 0) {
1490 strncpy(ha->model_number, model, len); 1519 strncpy(ha->model_number, model, len);
@@ -1516,16 +1545,17 @@ qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *de
1516 } 1545 }
1517 } 1546 }
1518 if (IS_FWI2_CAPABLE(ha)) 1547 if (IS_FWI2_CAPABLE(ha))
1519 qla2xxx_get_vpd_field(ha, "\x82", ha->model_desc, 1548 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
1520 sizeof(ha->model_desc)); 1549 sizeof(ha->model_desc));
1521} 1550}
1522 1551
1523/* On sparc systems, obtain port and node WWN from firmware 1552/* On sparc systems, obtain port and node WWN from firmware
1524 * properties. 1553 * properties.
1525 */ 1554 */
1526static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, nvram_t *nv) 1555static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
1527{ 1556{
1528#ifdef CONFIG_SPARC 1557#ifdef CONFIG_SPARC
1558 struct qla_hw_data *ha = vha->hw;
1529 struct pci_dev *pdev = ha->pdev; 1559 struct pci_dev *pdev = ha->pdev;
1530 struct device_node *dp = pci_device_to_OF_node(pdev); 1560 struct device_node *dp = pci_device_to_OF_node(pdev);
1531 const u8 *val; 1561 const u8 *val;
@@ -1555,12 +1585,13 @@ static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, nvram_t *nv)
1555* 0 = success. 1585* 0 = success.
1556*/ 1586*/
1557int 1587int
1558qla2x00_nvram_config(scsi_qla_host_t *ha) 1588qla2x00_nvram_config(scsi_qla_host_t *vha)
1559{ 1589{
1560 int rval; 1590 int rval;
1561 uint8_t chksum = 0; 1591 uint8_t chksum = 0;
1562 uint16_t cnt; 1592 uint16_t cnt;
1563 uint8_t *dptr1, *dptr2; 1593 uint8_t *dptr1, *dptr2;
1594 struct qla_hw_data *ha = vha->hw;
1564 init_cb_t *icb = ha->init_cb; 1595 init_cb_t *icb = ha->init_cb;
1565 nvram_t *nv = ha->nvram; 1596 nvram_t *nv = ha->nvram;
1566 uint8_t *ptr = ha->nvram; 1597 uint8_t *ptr = ha->nvram;
@@ -1576,11 +1607,11 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1576 ha->nvram_base = 0x80; 1607 ha->nvram_base = 0x80;
1577 1608
1578 /* Get NVRAM data and calculate checksum. */ 1609 /* Get NVRAM data and calculate checksum. */
1579 ha->isp_ops->read_nvram(ha, ptr, ha->nvram_base, ha->nvram_size); 1610 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
1580 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) 1611 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
1581 chksum += *ptr++; 1612 chksum += *ptr++;
1582 1613
1583 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); 1614 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
1584 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 1615 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
1585 1616
1586 /* Bad NVRAM data, set defaults parameters. */ 1617 /* Bad NVRAM data, set defaults parameters. */
@@ -1594,7 +1625,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1594 "invalid -- WWPN) defaults.\n"); 1625 "invalid -- WWPN) defaults.\n");
1595 1626
1596 if (chksum) 1627 if (chksum)
1597 qla2xxx_hw_event_log(ha, HW_EVENT_NVRAM_CHKSUM_ERR, 0, 1628 qla2xxx_hw_event_log(vha, HW_EVENT_NVRAM_CHKSUM_ERR, 0,
1598 MSW(chksum), LSW(chksum)); 1629 MSW(chksum), LSW(chksum));
1599 1630
1600 /* 1631 /*
@@ -1631,7 +1662,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1631 nv->port_name[3] = 224; 1662 nv->port_name[3] = 224;
1632 nv->port_name[4] = 139; 1663 nv->port_name[4] = 139;
1633 1664
1634 qla2xxx_nvram_wwn_from_ofw(ha, nv); 1665 qla2xxx_nvram_wwn_from_ofw(vha, nv);
1635 1666
1636 nv->login_timeout = 4; 1667 nv->login_timeout = 4;
1637 1668
@@ -1684,7 +1715,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1684 strcpy(ha->model_number, "QLA2300"); 1715 strcpy(ha->model_number, "QLA2300");
1685 } 1716 }
1686 } else { 1717 } else {
1687 qla2x00_set_model_info(ha, nv->model_number, 1718 qla2x00_set_model_info(vha, nv->model_number,
1688 sizeof(nv->model_number), "QLA23xx"); 1719 sizeof(nv->model_number), "QLA23xx");
1689 } 1720 }
1690 } else if (IS_QLA2200(ha)) { 1721 } else if (IS_QLA2200(ha)) {
@@ -1760,8 +1791,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1760 ha->serial0 = icb->port_name[5]; 1791 ha->serial0 = icb->port_name[5];
1761 ha->serial1 = icb->port_name[6]; 1792 ha->serial1 = icb->port_name[6];
1762 ha->serial2 = icb->port_name[7]; 1793 ha->serial2 = icb->port_name[7];
1763 ha->node_name = icb->node_name; 1794 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
1764 ha->port_name = icb->port_name; 1795 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
1765 1796
1766 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 1797 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
1767 1798
@@ -1829,10 +1860,10 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1829 icb->response_accumulation_timer = 3; 1860 icb->response_accumulation_timer = 3;
1830 icb->interrupt_delay_timer = 5; 1861 icb->interrupt_delay_timer = 5;
1831 1862
1832 ha->flags.process_response_queue = 1; 1863 vha->flags.process_response_queue = 1;
1833 } else { 1864 } else {
1834 /* Enable ZIO. */ 1865 /* Enable ZIO. */
1835 if (!ha->flags.init_done) { 1866 if (!vha->flags.init_done) {
1836 ha->zio_mode = icb->add_firmware_options[0] & 1867 ha->zio_mode = icb->add_firmware_options[0] &
1837 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 1868 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1838 ha->zio_timer = icb->interrupt_delay_timer ? 1869 ha->zio_timer = icb->interrupt_delay_timer ?
@@ -1840,12 +1871,12 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1840 } 1871 }
1841 icb->add_firmware_options[0] &= 1872 icb->add_firmware_options[0] &=
1842 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); 1873 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
1843 ha->flags.process_response_queue = 0; 1874 vha->flags.process_response_queue = 0;
1844 if (ha->zio_mode != QLA_ZIO_DISABLED) { 1875 if (ha->zio_mode != QLA_ZIO_DISABLED) {
1845 ha->zio_mode = QLA_ZIO_MODE_6; 1876 ha->zio_mode = QLA_ZIO_MODE_6;
1846 1877
1847 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer " 1878 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer "
1848 "delay (%d us).\n", ha->host_no, ha->zio_mode, 1879 "delay (%d us).\n", vha->host_no, ha->zio_mode,
1849 ha->zio_timer * 100)); 1880 ha->zio_timer * 100));
1850 qla_printk(KERN_INFO, ha, 1881 qla_printk(KERN_INFO, ha,
1851 "ZIO mode %d enabled; timer delay (%d us).\n", 1882 "ZIO mode %d enabled; timer delay (%d us).\n",
@@ -1853,13 +1884,13 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1853 1884
1854 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode; 1885 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
1855 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer; 1886 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
1856 ha->flags.process_response_queue = 1; 1887 vha->flags.process_response_queue = 1;
1857 } 1888 }
1858 } 1889 }
1859 1890
1860 if (rval) { 1891 if (rval) {
1861 DEBUG2_3(printk(KERN_WARNING 1892 DEBUG2_3(printk(KERN_WARNING
1862 "scsi(%ld): NVRAM configuration failed!\n", ha->host_no)); 1893 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
1863 } 1894 }
1864 return (rval); 1895 return (rval);
1865} 1896}
@@ -1870,10 +1901,10 @@ qla2x00_rport_del(void *data)
1870 fc_port_t *fcport = data; 1901 fc_port_t *fcport = data;
1871 struct fc_rport *rport; 1902 struct fc_rport *rport;
1872 1903
1873 spin_lock_irq(fcport->ha->host->host_lock); 1904 spin_lock_irq(fcport->vha->host->host_lock);
1874 rport = fcport->drport; 1905 rport = fcport->drport;
1875 fcport->drport = NULL; 1906 fcport->drport = NULL;
1876 spin_unlock_irq(fcport->ha->host->host_lock); 1907 spin_unlock_irq(fcport->vha->host->host_lock);
1877 if (rport) 1908 if (rport)
1878 fc_remote_port_delete(rport); 1909 fc_remote_port_delete(rport);
1879} 1910}
@@ -1886,7 +1917,7 @@ qla2x00_rport_del(void *data)
1886 * Returns a pointer to the allocated fcport, or NULL, if none available. 1917 * Returns a pointer to the allocated fcport, or NULL, if none available.
1887 */ 1918 */
1888static fc_port_t * 1919static fc_port_t *
1889qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags) 1920qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
1890{ 1921{
1891 fc_port_t *fcport; 1922 fc_port_t *fcport;
1892 1923
@@ -1895,8 +1926,8 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
1895 return NULL; 1926 return NULL;
1896 1927
1897 /* Setup fcport template structure. */ 1928 /* Setup fcport template structure. */
1898 fcport->ha = ha; 1929 fcport->vha = vha;
1899 fcport->vp_idx = ha->vp_idx; 1930 fcport->vp_idx = vha->vp_idx;
1900 fcport->port_type = FCT_UNKNOWN; 1931 fcport->port_type = FCT_UNKNOWN;
1901 fcport->loop_id = FC_NO_LOOP_ID; 1932 fcport->loop_id = FC_NO_LOOP_ID;
1902 atomic_set(&fcport->state, FCS_UNCONFIGURED); 1933 atomic_set(&fcport->state, FCS_UNCONFIGURED);
@@ -1919,100 +1950,98 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
1919 * 2 = database was full and device was not configured. 1950 * 2 = database was full and device was not configured.
1920 */ 1951 */
1921static int 1952static int
1922qla2x00_configure_loop(scsi_qla_host_t *ha) 1953qla2x00_configure_loop(scsi_qla_host_t *vha)
1923{ 1954{
1924 int rval; 1955 int rval;
1925 unsigned long flags, save_flags; 1956 unsigned long flags, save_flags;
1926 1957 struct qla_hw_data *ha = vha->hw;
1927 rval = QLA_SUCCESS; 1958 rval = QLA_SUCCESS;
1928 1959
1929 /* Get Initiator ID */ 1960 /* Get Initiator ID */
1930 if (test_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags)) { 1961 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
1931 rval = qla2x00_configure_hba(ha); 1962 rval = qla2x00_configure_hba(vha);
1932 if (rval != QLA_SUCCESS) { 1963 if (rval != QLA_SUCCESS) {
1933 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n", 1964 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n",
1934 ha->host_no)); 1965 vha->host_no));
1935 return (rval); 1966 return (rval);
1936 } 1967 }
1937 } 1968 }
1938 1969
1939 save_flags = flags = ha->dpc_flags; 1970 save_flags = flags = vha->dpc_flags;
1940 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n", 1971 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n",
1941 ha->host_no, flags)); 1972 vha->host_no, flags));
1942 1973
1943 /* 1974 /*
1944 * If we have both an RSCN and PORT UPDATE pending then handle them 1975 * If we have both an RSCN and PORT UPDATE pending then handle them
1945 * both at the same time. 1976 * both at the same time.
1946 */ 1977 */
1947 clear_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 1978 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1948 clear_bit(RSCN_UPDATE, &ha->dpc_flags); 1979 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
1949 1980
1950 /* Determine what we need to do */ 1981 /* Determine what we need to do */
1951 if (ha->current_topology == ISP_CFG_FL && 1982 if (ha->current_topology == ISP_CFG_FL &&
1952 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 1983 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
1953 1984
1954 ha->flags.rscn_queue_overflow = 1; 1985 vha->flags.rscn_queue_overflow = 1;
1955 set_bit(RSCN_UPDATE, &flags); 1986 set_bit(RSCN_UPDATE, &flags);
1956 1987
1957 } else if (ha->current_topology == ISP_CFG_F && 1988 } else if (ha->current_topology == ISP_CFG_F &&
1958 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 1989 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
1959 1990
1960 ha->flags.rscn_queue_overflow = 1; 1991 vha->flags.rscn_queue_overflow = 1;
1961 set_bit(RSCN_UPDATE, &flags); 1992 set_bit(RSCN_UPDATE, &flags);
1962 clear_bit(LOCAL_LOOP_UPDATE, &flags); 1993 clear_bit(LOCAL_LOOP_UPDATE, &flags);
1963 1994
1964 } else if (ha->current_topology == ISP_CFG_N) { 1995 } else if (ha->current_topology == ISP_CFG_N) {
1965 clear_bit(RSCN_UPDATE, &flags); 1996 clear_bit(RSCN_UPDATE, &flags);
1966 1997
1967 } else if (!ha->flags.online || 1998 } else if (!vha->flags.online ||
1968 (test_bit(ABORT_ISP_ACTIVE, &flags))) { 1999 (test_bit(ABORT_ISP_ACTIVE, &flags))) {
1969 2000
1970 ha->flags.rscn_queue_overflow = 1; 2001 vha->flags.rscn_queue_overflow = 1;
1971 set_bit(RSCN_UPDATE, &flags); 2002 set_bit(RSCN_UPDATE, &flags);
1972 set_bit(LOCAL_LOOP_UPDATE, &flags); 2003 set_bit(LOCAL_LOOP_UPDATE, &flags);
1973 } 2004 }
1974 2005
1975 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) { 2006 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
1976 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2007 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
1977 rval = QLA_FUNCTION_FAILED; 2008 rval = QLA_FUNCTION_FAILED;
1978 } else { 2009 else
1979 rval = qla2x00_configure_local_loop(ha); 2010 rval = qla2x00_configure_local_loop(vha);
1980 }
1981 } 2011 }
1982 2012
1983 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) { 2013 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
1984 if (LOOP_TRANSITION(ha)) { 2014 if (LOOP_TRANSITION(vha))
1985 rval = QLA_FUNCTION_FAILED; 2015 rval = QLA_FUNCTION_FAILED;
1986 } else { 2016 else
1987 rval = qla2x00_configure_fabric(ha); 2017 rval = qla2x00_configure_fabric(vha);
1988 }
1989 } 2018 }
1990 2019
1991 if (rval == QLA_SUCCESS) { 2020 if (rval == QLA_SUCCESS) {
1992 if (atomic_read(&ha->loop_down_timer) || 2021 if (atomic_read(&vha->loop_down_timer) ||
1993 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2022 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1994 rval = QLA_FUNCTION_FAILED; 2023 rval = QLA_FUNCTION_FAILED;
1995 } else { 2024 } else {
1996 atomic_set(&ha->loop_state, LOOP_READY); 2025 atomic_set(&vha->loop_state, LOOP_READY);
1997 2026
1998 DEBUG(printk("scsi(%ld): LOOP READY\n", ha->host_no)); 2027 DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no));
1999 } 2028 }
2000 } 2029 }
2001 2030
2002 if (rval) { 2031 if (rval) {
2003 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n", 2032 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n",
2004 __func__, ha->host_no)); 2033 __func__, vha->host_no));
2005 } else { 2034 } else {
2006 DEBUG3(printk("%s: exiting normally\n", __func__)); 2035 DEBUG3(printk("%s: exiting normally\n", __func__));
2007 } 2036 }
2008 2037
2009 /* Restore state if a resync event occurred during processing */ 2038 /* Restore state if a resync event occurred during processing */
2010 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2039 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2011 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2040 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2012 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 2041 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2013 if (test_bit(RSCN_UPDATE, &save_flags)) { 2042 if (test_bit(RSCN_UPDATE, &save_flags)) {
2014 ha->flags.rscn_queue_overflow = 1; 2043 set_bit(RSCN_UPDATE, &vha->dpc_flags);
2015 set_bit(RSCN_UPDATE, &ha->dpc_flags); 2044 vha->flags.rscn_queue_overflow = 1;
2016 } 2045 }
2017 } 2046 }
2018 2047
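[Aside, not part of the patch] The hunk above shows the conversion pattern repeated through the rest of this file: per-port state (flags, dpc_flags, loop timers) stays on the scsi_qla_host_t passed in as vha, while shared adapter state is reached through vha->hw, and isp_ops callbacks now take the vha. A minimal sketch of that shape with a hypothetical function name, assuming the driver's headers:

/* Hypothetical example, not part of the patch. */
static int qla2xxx_example_check_link(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;	/* shared, per-function hardware data */

	if (!vha->flags.online)			/* per-port flag lives on the vha */
		return QLA_FUNCTION_FAILED;

	if (atomic_read(&vha->loop_down_timer))	/* per-port loop state */
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

	return ha->isp_ops->chip_diag(vha);	/* isp_ops callbacks take the vha */
}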
@@ -2032,7 +2061,7 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
2032 * 0 = success. 2061 * 0 = success.
2033 */ 2062 */
2034static int 2063static int
2035qla2x00_configure_local_loop(scsi_qla_host_t *ha) 2064qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2036{ 2065{
2037 int rval, rval2; 2066 int rval, rval2;
2038 int found_devs; 2067 int found_devs;
@@ -2044,18 +2073,18 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2044 char *id_iter; 2073 char *id_iter;
2045 uint16_t loop_id; 2074 uint16_t loop_id;
2046 uint8_t domain, area, al_pa; 2075 uint8_t domain, area, al_pa;
2047 scsi_qla_host_t *pha = to_qla_parent(ha); 2076 struct qla_hw_data *ha = vha->hw;
2048 2077
2049 found_devs = 0; 2078 found_devs = 0;
2050 new_fcport = NULL; 2079 new_fcport = NULL;
2051 entries = MAX_FIBRE_DEVICES; 2080 entries = MAX_FIBRE_DEVICES;
2052 2081
2053 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", ha->host_no)); 2082 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no));
2054 DEBUG3(qla2x00_get_fcal_position_map(ha, NULL)); 2083 DEBUG3(qla2x00_get_fcal_position_map(vha, NULL));
2055 2084
2056 /* Get list of logged in devices. */ 2085 /* Get list of logged in devices. */
2057 memset(ha->gid_list, 0, GID_LIST_SIZE); 2086 memset(ha->gid_list, 0, GID_LIST_SIZE);
2058 rval = qla2x00_get_id_list(ha, ha->gid_list, ha->gid_list_dma, 2087 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
2059 &entries); 2088 &entries);
2060 if (rval != QLA_SUCCESS) 2089 if (rval != QLA_SUCCESS)
2061 goto cleanup_allocation; 2090 goto cleanup_allocation;
@@ -2066,7 +2095,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2066 entries * sizeof(struct gid_list_info))); 2095 entries * sizeof(struct gid_list_info)));
2067 2096
2068 /* Allocate temporary fcport for any new fcports discovered. */ 2097 /* Allocate temporary fcport for any new fcports discovered. */
2069 new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL); 2098 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2070 if (new_fcport == NULL) { 2099 if (new_fcport == NULL) {
2071 rval = QLA_MEMORY_ALLOC_FAILED; 2100 rval = QLA_MEMORY_ALLOC_FAILED;
2072 goto cleanup_allocation; 2101 goto cleanup_allocation;
@@ -2076,17 +2105,14 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2076 /* 2105 /*
2077 * Mark local devices that were present with FCF_DEVICE_LOST for now. 2106 * Mark local devices that were present with FCF_DEVICE_LOST for now.
2078 */ 2107 */
2079 list_for_each_entry(fcport, &pha->fcports, list) { 2108 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2080 if (fcport->vp_idx != ha->vp_idx)
2081 continue;
2082
2083 if (atomic_read(&fcport->state) == FCS_ONLINE && 2109 if (atomic_read(&fcport->state) == FCS_ONLINE &&
2084 fcport->port_type != FCT_BROADCAST && 2110 fcport->port_type != FCT_BROADCAST &&
2085 (fcport->flags & FCF_FABRIC_DEVICE) == 0) { 2111 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2086 2112
2087 DEBUG(printk("scsi(%ld): Marking port lost, " 2113 DEBUG(printk("scsi(%ld): Marking port lost, "
2088 "loop_id=0x%04x\n", 2114 "loop_id=0x%04x\n",
2089 ha->host_no, fcport->loop_id)); 2115 vha->host_no, fcport->loop_id));
2090 2116
2091 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2117 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2092 fcport->flags &= ~FCF_FARP_DONE; 2118 fcport->flags &= ~FCF_FARP_DONE;
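[Aside, not part of the patch] The loop above also shows the list change that recurs through this file: fcports now live on a per-port vha->vp_fcports list, so the old "skip entries whose vp_idx differs" filter against a parent host's list disappears. A short, hypothetical sketch of the new iteration style (assumes the driver's structures and the kernel list helpers):

/* Hypothetical helper, not part of the patch. */
static int example_count_online_fcports(scsi_qla_host_t *vha)
{
	fc_port_t *fcport;
	int online = 0;

	list_for_each_entry(fcport, &vha->vp_fcports, list)
		if (atomic_read(&fcport->state) == FCS_ONLINE)
			online++;

	return online;
}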
@@ -2113,7 +2139,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2113 2139
2114 /* Bypass if not same domain and area of adapter. */ 2140 /* Bypass if not same domain and area of adapter. */
2115 if (area && domain && 2141 if (area && domain &&
2116 (area != ha->d_id.b.area || domain != ha->d_id.b.domain)) 2142 (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
2117 continue; 2143 continue;
2118 2144
2119 /* Bypass invalid local loop ID. */ 2145 /* Bypass invalid local loop ID. */
@@ -2125,26 +2151,23 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2125 new_fcport->d_id.b.area = area; 2151 new_fcport->d_id.b.area = area;
2126 new_fcport->d_id.b.al_pa = al_pa; 2152 new_fcport->d_id.b.al_pa = al_pa;
2127 new_fcport->loop_id = loop_id; 2153 new_fcport->loop_id = loop_id;
2128 new_fcport->vp_idx = ha->vp_idx; 2154 new_fcport->vp_idx = vha->vp_idx;
2129 rval2 = qla2x00_get_port_database(ha, new_fcport, 0); 2155 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2130 if (rval2 != QLA_SUCCESS) { 2156 if (rval2 != QLA_SUCCESS) {
2131 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport " 2157 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
2132 "information -- get_port_database=%x, " 2158 "information -- get_port_database=%x, "
2133 "loop_id=0x%04x\n", 2159 "loop_id=0x%04x\n",
2134 ha->host_no, rval2, new_fcport->loop_id)); 2160 vha->host_no, rval2, new_fcport->loop_id));
2135 DEBUG2(printk("scsi(%ld): Scheduling resync...\n", 2161 DEBUG2(printk("scsi(%ld): Scheduling resync...\n",
2136 ha->host_no)); 2162 vha->host_no));
2137 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 2163 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2138 continue; 2164 continue;
2139 } 2165 }
2140 2166
2141 /* Check for matching device in port list. */ 2167 /* Check for matching device in port list. */
2142 found = 0; 2168 found = 0;
2143 fcport = NULL; 2169 fcport = NULL;
2144 list_for_each_entry(fcport, &pha->fcports, list) { 2170 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2145 if (fcport->vp_idx != ha->vp_idx)
2146 continue;
2147
2148 if (memcmp(new_fcport->port_name, fcport->port_name, 2171 if (memcmp(new_fcport->port_name, fcport->port_name,
2149 WWN_SIZE)) 2172 WWN_SIZE))
2150 continue; 2173 continue;
@@ -2164,17 +2187,15 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2164 if (!found) { 2187 if (!found) {
2165 /* New device, add to fcports list. */ 2188 /* New device, add to fcports list. */
2166 new_fcport->flags &= ~FCF_PERSISTENT_BOUND; 2189 new_fcport->flags &= ~FCF_PERSISTENT_BOUND;
2167 if (ha->parent) { 2190 if (vha->vp_idx) {
2168 new_fcport->ha = ha; 2191 new_fcport->vha = vha;
2169 new_fcport->vp_idx = ha->vp_idx; 2192 new_fcport->vp_idx = vha->vp_idx;
2170 list_add_tail(&new_fcport->vp_fcport,
2171 &ha->vp_fcports);
2172 } 2193 }
2173 list_add_tail(&new_fcport->list, &pha->fcports); 2194 list_add_tail(&new_fcport->list, &vha->vp_fcports);
2174 2195
2175 /* Allocate a new replacement fcport. */ 2196 /* Allocate a new replacement fcport. */
2176 fcport = new_fcport; 2197 fcport = new_fcport;
2177 new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL); 2198 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2178 if (new_fcport == NULL) { 2199 if (new_fcport == NULL) {
2179 rval = QLA_MEMORY_ALLOC_FAILED; 2200 rval = QLA_MEMORY_ALLOC_FAILED;
2180 goto cleanup_allocation; 2201 goto cleanup_allocation;
@@ -2185,7 +2206,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2185 /* Base iIDMA settings on HBA port speed. */ 2206 /* Base iIDMA settings on HBA port speed. */
2186 fcport->fp_speed = ha->link_data_rate; 2207 fcport->fp_speed = ha->link_data_rate;
2187 2208
2188 qla2x00_update_fcport(ha, fcport); 2209 qla2x00_update_fcport(vha, fcport);
2189 2210
2190 found_devs++; 2211 found_devs++;
2191 } 2212 }
@@ -2195,24 +2216,25 @@ cleanup_allocation:
2195 2216
2196 if (rval != QLA_SUCCESS) { 2217 if (rval != QLA_SUCCESS) {
2197 DEBUG2(printk("scsi(%ld): Configure local loop error exit: " 2218 DEBUG2(printk("scsi(%ld): Configure local loop error exit: "
2198 "rval=%x\n", ha->host_no, rval)); 2219 "rval=%x\n", vha->host_no, rval));
2199 } 2220 }
2200 2221
2201 if (found_devs) { 2222 if (found_devs) {
2202 ha->device_flags |= DFLG_LOCAL_DEVICES; 2223 vha->device_flags |= DFLG_LOCAL_DEVICES;
2203 ha->device_flags &= ~DFLG_RETRY_LOCAL_DEVICES; 2224 vha->device_flags &= ~DFLG_RETRY_LOCAL_DEVICES;
2204 } 2225 }
2205 2226
2206 return (rval); 2227 return (rval);
2207} 2228}
2208 2229
2209static void 2230static void
2210qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport) 2231qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2211{ 2232{
2212#define LS_UNKNOWN 2 2233#define LS_UNKNOWN 2
2213 static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; 2234 static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
2214 int rval; 2235 int rval;
2215 uint16_t mb[6]; 2236 uint16_t mb[6];
2237 struct qla_hw_data *ha = vha->hw;
2216 2238
2217 if (!IS_IIDMA_CAPABLE(ha)) 2239 if (!IS_IIDMA_CAPABLE(ha))
2218 return; 2240 return;
@@ -2221,12 +2243,12 @@ qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2221 fcport->fp_speed > ha->link_data_rate) 2243 fcport->fp_speed > ha->link_data_rate)
2222 return; 2244 return;
2223 2245
2224 rval = qla2x00_set_idma_speed(ha, fcport->loop_id, fcport->fp_speed, 2246 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
2225 mb); 2247 mb);
2226 if (rval != QLA_SUCCESS) { 2248 if (rval != QLA_SUCCESS) {
2227 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA " 2249 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA "
2228 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n", 2250 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
2229 ha->host_no, fcport->port_name[0], fcport->port_name[1], 2251 vha->host_no, fcport->port_name[0], fcport->port_name[1],
2230 fcport->port_name[2], fcport->port_name[3], 2252 fcport->port_name[2], fcport->port_name[3],
2231 fcport->port_name[4], fcport->port_name[5], 2253 fcport->port_name[4], fcport->port_name[5],
2232 fcport->port_name[6], fcport->port_name[7], rval, 2254 fcport->port_name[6], fcport->port_name[7], rval,
@@ -2244,10 +2266,11 @@ qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2244} 2266}
2245 2267
2246static void 2268static void
2247qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport) 2269qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2248{ 2270{
2249 struct fc_rport_identifiers rport_ids; 2271 struct fc_rport_identifiers rport_ids;
2250 struct fc_rport *rport; 2272 struct fc_rport *rport;
2273 struct qla_hw_data *ha = vha->hw;
2251 2274
2252 if (fcport->drport) 2275 if (fcport->drport)
2253 qla2x00_rport_del(fcport); 2276 qla2x00_rport_del(fcport);
@@ -2257,15 +2280,15 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
2257 rport_ids.port_id = fcport->d_id.b.domain << 16 | 2280 rport_ids.port_id = fcport->d_id.b.domain << 16 |
2258 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; 2281 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2259 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2282 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2260 fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids); 2283 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
2261 if (!rport) { 2284 if (!rport) {
2262 qla_printk(KERN_WARNING, ha, 2285 qla_printk(KERN_WARNING, ha,
2263 "Unable to allocate fc remote port!\n"); 2286 "Unable to allocate fc remote port!\n");
2264 return; 2287 return;
2265 } 2288 }
2266 spin_lock_irq(fcport->ha->host->host_lock); 2289 spin_lock_irq(fcport->vha->host->host_lock);
2267 *((fc_port_t **)rport->dd_data) = fcport; 2290 *((fc_port_t **)rport->dd_data) = fcport;
2268 spin_unlock_irq(fcport->ha->host->host_lock); 2291 spin_unlock_irq(fcport->vha->host->host_lock);
2269 2292
2270 rport->supported_classes = fcport->supported_classes; 2293 rport->supported_classes = fcport->supported_classes;
2271 2294
@@ -2293,23 +2316,23 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
2293 * Kernel context. 2316 * Kernel context.
2294 */ 2317 */
2295void 2318void
2296qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport) 2319qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2297{ 2320{
2298 scsi_qla_host_t *pha = to_qla_parent(ha); 2321 struct qla_hw_data *ha = vha->hw;
2299 2322
2300 fcport->ha = ha; 2323 fcport->vha = vha;
2301 fcport->login_retry = 0; 2324 fcport->login_retry = 0;
2302 fcport->port_login_retry_count = pha->port_down_retry_count * 2325 fcport->port_login_retry_count = ha->port_down_retry_count *
2303 PORT_RETRY_TIME; 2326 PORT_RETRY_TIME;
2304 atomic_set(&fcport->port_down_timer, pha->port_down_retry_count * 2327 atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
2305 PORT_RETRY_TIME); 2328 PORT_RETRY_TIME);
2306 fcport->flags &= ~FCF_LOGIN_NEEDED; 2329 fcport->flags &= ~FCF_LOGIN_NEEDED;
2307 2330
2308 qla2x00_iidma_fcport(ha, fcport); 2331 qla2x00_iidma_fcport(vha, fcport);
2309 2332
2310 atomic_set(&fcport->state, FCS_ONLINE); 2333 atomic_set(&fcport->state, FCS_ONLINE);
2311 2334
2312 qla2x00_reg_remote_port(ha, fcport); 2335 qla2x00_reg_remote_port(vha, fcport);
2313} 2336}
2314 2337
2315/* 2338/*
@@ -2324,7 +2347,7 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2324 * BIT_0 = error 2347 * BIT_0 = error
2325 */ 2348 */
2326static int 2349static int
2327qla2x00_configure_fabric(scsi_qla_host_t *ha) 2350qla2x00_configure_fabric(scsi_qla_host_t *vha)
2328{ 2351{
2329 int rval, rval2; 2352 int rval, rval2;
2330 fc_port_t *fcport, *fcptemp; 2353 fc_port_t *fcport, *fcptemp;
@@ -2332,25 +2355,26 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2332 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2355 uint16_t mb[MAILBOX_REGISTER_COUNT];
2333 uint16_t loop_id; 2356 uint16_t loop_id;
2334 LIST_HEAD(new_fcports); 2357 LIST_HEAD(new_fcports);
2335 scsi_qla_host_t *pha = to_qla_parent(ha); 2358 struct qla_hw_data *ha = vha->hw;
2359 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2336 2360
2337 /* If FL port exists, then SNS is present */ 2361 /* If FL port exists, then SNS is present */
2338 if (IS_FWI2_CAPABLE(ha)) 2362 if (IS_FWI2_CAPABLE(ha))
2339 loop_id = NPH_F_PORT; 2363 loop_id = NPH_F_PORT;
2340 else 2364 else
2341 loop_id = SNS_FL_PORT; 2365 loop_id = SNS_FL_PORT;
2342 rval = qla2x00_get_port_name(ha, loop_id, ha->fabric_node_name, 1); 2366 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
2343 if (rval != QLA_SUCCESS) { 2367 if (rval != QLA_SUCCESS) {
2344 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL " 2368 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL "
2345 "Port\n", ha->host_no)); 2369 "Port\n", vha->host_no));
2346 2370
2347 ha->device_flags &= ~SWITCH_FOUND; 2371 vha->device_flags &= ~SWITCH_FOUND;
2348 return (QLA_SUCCESS); 2372 return (QLA_SUCCESS);
2349 } 2373 }
2350 ha->device_flags |= SWITCH_FOUND; 2374 vha->device_flags |= SWITCH_FOUND;
2351 2375
2352 /* Mark devices that need re-synchronization. */ 2376 /* Mark devices that need re-synchronization. */
2353 rval2 = qla2x00_device_resync(ha); 2377 rval2 = qla2x00_device_resync(vha);
2354 if (rval2 == QLA_RSCNS_HANDLED) { 2378 if (rval2 == QLA_RSCNS_HANDLED) {
2355 /* No point doing the scan, just continue. */ 2379 /* No point doing the scan, just continue. */
2356 return (QLA_SUCCESS); 2380 return (QLA_SUCCESS);
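[Aside, not part of the patch] The fabric code above introduces another recurring idiom: when a port needs an adapter-wide resource (here, the loop-ID space shared with every vport), the base (physical) port is recovered from the PCI device's driver data. A hedged sketch of that lookup, assuming the layouts shown in this patch:

/* Hypothetical helper, not part of the patch. */
static struct scsi_qla_host *example_base_port(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	return pci_get_drvdata(ha->pdev);	/* the base (physical) vha */
}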
@@ -2358,15 +2382,15 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2358 do { 2382 do {
2359 /* FDMI support. */ 2383 /* FDMI support. */
2360 if (ql2xfdmienable && 2384 if (ql2xfdmienable &&
2361 test_and_clear_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags)) 2385 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
2362 qla2x00_fdmi_register(ha); 2386 qla2x00_fdmi_register(vha);
2363 2387
2364 /* Ensure we are logged into the SNS. */ 2388 /* Ensure we are logged into the SNS. */
2365 if (IS_FWI2_CAPABLE(ha)) 2389 if (IS_FWI2_CAPABLE(ha))
2366 loop_id = NPH_SNS; 2390 loop_id = NPH_SNS;
2367 else 2391 else
2368 loop_id = SIMPLE_NAME_SERVER; 2392 loop_id = SIMPLE_NAME_SERVER;
2369 ha->isp_ops->fabric_login(ha, loop_id, 0xff, 0xff, 2393 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
2370 0xfc, mb, BIT_1 | BIT_0); 2394 0xfc, mb, BIT_1 | BIT_0);
2371 if (mb[0] != MBS_COMMAND_COMPLETE) { 2395 if (mb[0] != MBS_COMMAND_COMPLETE) {
2372 DEBUG2(qla_printk(KERN_INFO, ha, 2396 DEBUG2(qla_printk(KERN_INFO, ha,
@@ -2376,29 +2400,29 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2376 return (QLA_SUCCESS); 2400 return (QLA_SUCCESS);
2377 } 2401 }
2378 2402
2379 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags)) { 2403 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
2380 if (qla2x00_rft_id(ha)) { 2404 if (qla2x00_rft_id(vha)) {
2381 /* EMPTY */ 2405 /* EMPTY */
2382 DEBUG2(printk("scsi(%ld): Register FC-4 " 2406 DEBUG2(printk("scsi(%ld): Register FC-4 "
2383 "TYPE failed.\n", ha->host_no)); 2407 "TYPE failed.\n", vha->host_no));
2384 } 2408 }
2385 if (qla2x00_rff_id(ha)) { 2409 if (qla2x00_rff_id(vha)) {
2386 /* EMPTY */ 2410 /* EMPTY */
2387 DEBUG2(printk("scsi(%ld): Register FC-4 " 2411 DEBUG2(printk("scsi(%ld): Register FC-4 "
2388 "Features failed.\n", ha->host_no)); 2412 "Features failed.\n", vha->host_no));
2389 } 2413 }
2390 if (qla2x00_rnn_id(ha)) { 2414 if (qla2x00_rnn_id(vha)) {
2391 /* EMPTY */ 2415 /* EMPTY */
2392 DEBUG2(printk("scsi(%ld): Register Node Name " 2416 DEBUG2(printk("scsi(%ld): Register Node Name "
2393 "failed.\n", ha->host_no)); 2417 "failed.\n", vha->host_no));
2394 } else if (qla2x00_rsnn_nn(ha)) { 2418 } else if (qla2x00_rsnn_nn(vha)) {
2395 /* EMPTY */ 2419 /* EMPTY */
2396 DEBUG2(printk("scsi(%ld): Register Symbolic " 2420 DEBUG2(printk("scsi(%ld): Register Symbolic "
2397 "Node Name failed.\n", ha->host_no)); 2421 "Node Name failed.\n", vha->host_no));
2398 } 2422 }
2399 } 2423 }
2400 2424
2401 rval = qla2x00_find_all_fabric_devs(ha, &new_fcports); 2425 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
2402 if (rval != QLA_SUCCESS) 2426 if (rval != QLA_SUCCESS)
2403 break; 2427 break;
2404 2428
@@ -2406,24 +2430,21 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2406 * Logout all previous fabric devices marked lost, except 2430 * Logout all previous fabric devices marked lost, except
2407 * tape devices. 2431 * tape devices.
2408 */ 2432 */
2409 list_for_each_entry(fcport, &pha->fcports, list) { 2433 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2410 if (fcport->vp_idx !=ha->vp_idx) 2434 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2411 continue;
2412
2413 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
2414 break; 2435 break;
2415 2436
2416 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) 2437 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
2417 continue; 2438 continue;
2418 2439
2419 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { 2440 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
2420 qla2x00_mark_device_lost(ha, fcport, 2441 qla2x00_mark_device_lost(vha, fcport,
2421 ql2xplogiabsentdevice, 0); 2442 ql2xplogiabsentdevice, 0);
2422 if (fcport->loop_id != FC_NO_LOOP_ID && 2443 if (fcport->loop_id != FC_NO_LOOP_ID &&
2423 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 2444 (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
2424 fcport->port_type != FCT_INITIATOR && 2445 fcport->port_type != FCT_INITIATOR &&
2425 fcport->port_type != FCT_BROADCAST) { 2446 fcport->port_type != FCT_BROADCAST) {
2426 ha->isp_ops->fabric_logout(ha, 2447 ha->isp_ops->fabric_logout(vha,
2427 fcport->loop_id, 2448 fcport->loop_id,
2428 fcport->d_id.b.domain, 2449 fcport->d_id.b.domain,
2429 fcport->d_id.b.area, 2450 fcport->d_id.b.area,
@@ -2434,18 +2455,15 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2434 } 2455 }
2435 2456
2436 /* Starting free loop ID. */ 2457 /* Starting free loop ID. */
2437 next_loopid = pha->min_external_loopid; 2458 next_loopid = ha->min_external_loopid;
2438 2459
2439 /* 2460 /*
2440 * Scan through our port list and login entries that need to be 2461 * Scan through our port list and login entries that need to be
2441 * logged in. 2462 * logged in.
2442 */ 2463 */
2443 list_for_each_entry(fcport, &pha->fcports, list) { 2464 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2444 if (fcport->vp_idx != ha->vp_idx) 2465 if (atomic_read(&vha->loop_down_timer) ||
2445 continue; 2466 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2446
2447 if (atomic_read(&ha->loop_down_timer) ||
2448 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
2449 break; 2467 break;
2450 2468
2451 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || 2469 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
@@ -2455,14 +2473,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2455 if (fcport->loop_id == FC_NO_LOOP_ID) { 2473 if (fcport->loop_id == FC_NO_LOOP_ID) {
2456 fcport->loop_id = next_loopid; 2474 fcport->loop_id = next_loopid;
2457 rval = qla2x00_find_new_loop_id( 2475 rval = qla2x00_find_new_loop_id(
2458 to_qla_parent(ha), fcport); 2476 base_vha, fcport);
2459 if (rval != QLA_SUCCESS) { 2477 if (rval != QLA_SUCCESS) {
2460 /* Ran out of IDs to use */ 2478 /* Ran out of IDs to use */
2461 break; 2479 break;
2462 } 2480 }
2463 } 2481 }
2464 /* Login and update database */ 2482 /* Login and update database */
2465 qla2x00_fabric_dev_login(ha, fcport, &next_loopid); 2483 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
2466 } 2484 }
2467 2485
2468 /* Exit if out of loop IDs. */ 2486 /* Exit if out of loop IDs. */
@@ -2474,31 +2492,26 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2474 * Login and add the new devices to our port list. 2492 * Login and add the new devices to our port list.
2475 */ 2493 */
2476 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { 2494 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
2477 if (atomic_read(&ha->loop_down_timer) || 2495 if (atomic_read(&vha->loop_down_timer) ||
2478 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) 2496 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2479 break; 2497 break;
2480 2498
2481 /* Find a new loop ID to use. */ 2499 /* Find a new loop ID to use. */
2482 fcport->loop_id = next_loopid; 2500 fcport->loop_id = next_loopid;
2483 rval = qla2x00_find_new_loop_id(to_qla_parent(ha), 2501 rval = qla2x00_find_new_loop_id(base_vha, fcport);
2484 fcport);
2485 if (rval != QLA_SUCCESS) { 2502 if (rval != QLA_SUCCESS) {
2486 /* Ran out of IDs to use */ 2503 /* Ran out of IDs to use */
2487 break; 2504 break;
2488 } 2505 }
2489 2506
2490 /* Login and update database */ 2507 /* Login and update database */
2491 qla2x00_fabric_dev_login(ha, fcport, &next_loopid); 2508 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
2492 2509
2493 if (ha->parent) { 2510 if (vha->vp_idx) {
2494 fcport->ha = ha; 2511 fcport->vha = vha;
2495 fcport->vp_idx = ha->vp_idx; 2512 fcport->vp_idx = vha->vp_idx;
2496 list_add_tail(&fcport->vp_fcport, 2513 }
2497 &ha->vp_fcports); 2514 list_move_tail(&fcport->list, &vha->vp_fcports);
2498 list_move_tail(&fcport->list,
2499 &ha->parent->fcports);
2500 } else
2501 list_move_tail(&fcport->list, &ha->fcports);
2502 } 2515 }
2503 } while (0); 2516 } while (0);
2504 2517
@@ -2510,7 +2523,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2510 2523
2511 if (rval) { 2524 if (rval) {
2512 DEBUG2(printk("scsi(%ld): Configure fabric error exit: " 2525 DEBUG2(printk("scsi(%ld): Configure fabric error exit: "
2513 "rval=%d\n", ha->host_no, rval)); 2526 "rval=%d\n", vha->host_no, rval));
2514 } 2527 }
2515 2528
2516 return (rval); 2529 return (rval);
@@ -2531,7 +2544,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2531 * Kernel context. 2544 * Kernel context.
2532 */ 2545 */
2533static int 2546static int
2534qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports) 2547qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
2548 struct list_head *new_fcports)
2535{ 2549{
2536 int rval; 2550 int rval;
2537 uint16_t loop_id; 2551 uint16_t loop_id;
@@ -2542,11 +2556,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2542 int swl_idx; 2556 int swl_idx;
2543 int first_dev, last_dev; 2557 int first_dev, last_dev;
2544 port_id_t wrap, nxt_d_id; 2558 port_id_t wrap, nxt_d_id;
2545 int vp_index; 2559 struct qla_hw_data *ha = vha->hw;
2546 int empty_vp_index; 2560 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
2547 int found_vp;
2548 scsi_qla_host_t *vha;
2549 scsi_qla_host_t *pha = to_qla_parent(ha);
2550 2561
2551 rval = QLA_SUCCESS; 2562 rval = QLA_SUCCESS;
2552 2563
@@ -2555,43 +2566,42 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2555 if (!swl) { 2566 if (!swl) {
2556 /*EMPTY*/ 2567 /*EMPTY*/
2557 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback " 2568 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
2558 "on GA_NXT\n", ha->host_no)); 2569 "on GA_NXT\n", vha->host_no));
2559 } else { 2570 } else {
2560 if (qla2x00_gid_pt(ha, swl) != QLA_SUCCESS) { 2571 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
2561 kfree(swl); 2572 kfree(swl);
2562 swl = NULL; 2573 swl = NULL;
2563 } else if (qla2x00_gpn_id(ha, swl) != QLA_SUCCESS) { 2574 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
2564 kfree(swl); 2575 kfree(swl);
2565 swl = NULL; 2576 swl = NULL;
2566 } else if (qla2x00_gnn_id(ha, swl) != QLA_SUCCESS) { 2577 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
2567 kfree(swl); 2578 kfree(swl);
2568 swl = NULL; 2579 swl = NULL;
2569 } else if (ql2xiidmaenable && 2580 } else if (ql2xiidmaenable &&
2570 qla2x00_gfpn_id(ha, swl) == QLA_SUCCESS) { 2581 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
2571 qla2x00_gpsc(ha, swl); 2582 qla2x00_gpsc(vha, swl);
2572 } 2583 }
2573 } 2584 }
2574 swl_idx = 0; 2585 swl_idx = 0;
2575 2586
2576 /* Allocate temporary fcport for any new fcports discovered. */ 2587 /* Allocate temporary fcport for any new fcports discovered. */
2577 new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL); 2588 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2578 if (new_fcport == NULL) { 2589 if (new_fcport == NULL) {
2579 kfree(swl); 2590 kfree(swl);
2580 return (QLA_MEMORY_ALLOC_FAILED); 2591 return (QLA_MEMORY_ALLOC_FAILED);
2581 } 2592 }
2582 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 2593 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
2583 new_fcport->vp_idx = ha->vp_idx;
2584 /* Set start port ID scan at adapter ID. */ 2594 /* Set start port ID scan at adapter ID. */
2585 first_dev = 1; 2595 first_dev = 1;
2586 last_dev = 0; 2596 last_dev = 0;
2587 2597
2588 /* Starting free loop ID. */ 2598 /* Starting free loop ID. */
2589 loop_id = pha->min_external_loopid; 2599 loop_id = ha->min_external_loopid;
2590 for (; loop_id <= ha->last_loop_id; loop_id++) { 2600 for (; loop_id <= ha->max_loop_id; loop_id++) {
2591 if (qla2x00_is_reserved_id(ha, loop_id)) 2601 if (qla2x00_is_reserved_id(vha, loop_id))
2592 continue; 2602 continue;
2593 2603
2594 if (atomic_read(&ha->loop_down_timer) || LOOP_TRANSITION(ha)) 2604 if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha))
2595 break; 2605 break;
2596 2606
2597 if (swl != NULL) { 2607 if (swl != NULL) {
@@ -2614,7 +2624,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2614 } 2624 }
2615 } else { 2625 } else {
2616 /* Send GA_NXT to the switch */ 2626 /* Send GA_NXT to the switch */
2617 rval = qla2x00_ga_nxt(ha, new_fcport); 2627 rval = qla2x00_ga_nxt(vha, new_fcport);
2618 if (rval != QLA_SUCCESS) { 2628 if (rval != QLA_SUCCESS) {
2619 qla_printk(KERN_WARNING, ha, 2629 qla_printk(KERN_WARNING, ha,
2620 "SNS scan failed -- assuming zero-entry " 2630 "SNS scan failed -- assuming zero-entry "
@@ -2635,44 +2645,31 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2635 first_dev = 0; 2645 first_dev = 0;
2636 } else if (new_fcport->d_id.b24 == wrap.b24) { 2646 } else if (new_fcport->d_id.b24 == wrap.b24) {
2637 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n", 2647 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n",
2638 ha->host_no, new_fcport->d_id.b.domain, 2648 vha->host_no, new_fcport->d_id.b.domain,
2639 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa)); 2649 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa));
2640 break; 2650 break;
2641 } 2651 }
2642 2652
2643 /* Bypass if same physical adapter. */ 2653 /* Bypass if same physical adapter. */
2644 if (new_fcport->d_id.b24 == pha->d_id.b24) 2654 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
2645 continue; 2655 continue;
2646 2656
2647 /* Bypass virtual ports of the same host. */ 2657 /* Bypass virtual ports of the same host. */
2648 if (pha->num_vhosts) { 2658 found = 0;
2649 for_each_mapped_vp_idx(pha, vp_index) { 2659 if (ha->num_vhosts) {
2650 empty_vp_index = 1; 2660 list_for_each_entry(vp, &ha->vp_list, list) {
2651 found_vp = 0; 2661 if (new_fcport->d_id.b24 == vp->d_id.b24) {
2652 list_for_each_entry(vha, &pha->vp_list, 2662 found = 1;
2653 vp_list) {
2654 if (vp_index == vha->vp_idx) {
2655 empty_vp_index = 0;
2656 found_vp = 1;
2657 break;
2658 }
2659 }
2660
2661 if (empty_vp_index)
2662 continue;
2663
2664 if (found_vp &&
2665 new_fcport->d_id.b24 == vha->d_id.b24)
2666 break; 2663 break;
2664 }
2667 } 2665 }
2668 2666 if (found)
2669 if (vp_index <= pha->max_npiv_vports)
2670 continue; 2667 continue;
2671 } 2668 }
2672 2669
2673 /* Bypass if same domain and area of adapter. */ 2670 /* Bypass if same domain and area of adapter. */
2674 if (((new_fcport->d_id.b24 & 0xffff00) == 2671 if (((new_fcport->d_id.b24 & 0xffff00) ==
2675 (ha->d_id.b24 & 0xffff00)) && ha->current_topology == 2672 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
2676 ISP_CFG_FL) 2673 ISP_CFG_FL)
2677 continue; 2674 continue;
2678 2675
@@ -2682,9 +2679,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2682 2679
2683 /* Locate matching device in database. */ 2680 /* Locate matching device in database. */
2684 found = 0; 2681 found = 0;
2685 list_for_each_entry(fcport, &pha->fcports, list) { 2682 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2686 if (new_fcport->vp_idx != fcport->vp_idx)
2687 continue;
2688 if (memcmp(new_fcport->port_name, fcport->port_name, 2683 if (memcmp(new_fcport->port_name, fcport->port_name,
2689 WWN_SIZE)) 2684 WWN_SIZE))
2690 continue; 2685 continue;
@@ -2728,7 +2723,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2728 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 2723 (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
2729 fcport->port_type != FCT_INITIATOR && 2724 fcport->port_type != FCT_INITIATOR &&
2730 fcport->port_type != FCT_BROADCAST) { 2725 fcport->port_type != FCT_BROADCAST) {
2731 ha->isp_ops->fabric_logout(ha, fcport->loop_id, 2726 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
2732 fcport->d_id.b.domain, fcport->d_id.b.area, 2727 fcport->d_id.b.domain, fcport->d_id.b.area,
2733 fcport->d_id.b.al_pa); 2728 fcport->d_id.b.al_pa);
2734 fcport->loop_id = FC_NO_LOOP_ID; 2729 fcport->loop_id = FC_NO_LOOP_ID;
@@ -2739,27 +2734,25 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2739 2734
2740 if (found) 2735 if (found)
2741 continue; 2736 continue;
2742
2743 /* If device was not in our fcports list, then add it. */ 2737 /* If device was not in our fcports list, then add it. */
2744 list_add_tail(&new_fcport->list, new_fcports); 2738 list_add_tail(&new_fcport->list, new_fcports);
2745 2739
2746 /* Allocate a new replacement fcport. */ 2740 /* Allocate a new replacement fcport. */
2747 nxt_d_id.b24 = new_fcport->d_id.b24; 2741 nxt_d_id.b24 = new_fcport->d_id.b24;
2748 new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL); 2742 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2749 if (new_fcport == NULL) { 2743 if (new_fcport == NULL) {
2750 kfree(swl); 2744 kfree(swl);
2751 return (QLA_MEMORY_ALLOC_FAILED); 2745 return (QLA_MEMORY_ALLOC_FAILED);
2752 } 2746 }
2753 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 2747 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
2754 new_fcport->d_id.b24 = nxt_d_id.b24; 2748 new_fcport->d_id.b24 = nxt_d_id.b24;
2755 new_fcport->vp_idx = ha->vp_idx;
2756 } 2749 }
2757 2750
2758 kfree(swl); 2751 kfree(swl);
2759 kfree(new_fcport); 2752 kfree(new_fcport);
2760 2753
2761 if (!list_empty(new_fcports)) 2754 if (!list_empty(new_fcports))
2762 ha->device_flags |= DFLG_FABRIC_DEVICES; 2755 vha->device_flags |= DFLG_FABRIC_DEVICES;
2763 2756
2764 return (rval); 2757 return (rval);
2765} 2758}
@@ -2779,13 +2772,14 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2779 * Kernel context. 2772 * Kernel context.
2780 */ 2773 */
2781static int 2774static int
2782qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev) 2775qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
2783{ 2776{
2784 int rval; 2777 int rval;
2785 int found; 2778 int found;
2786 fc_port_t *fcport; 2779 fc_port_t *fcport;
2787 uint16_t first_loop_id; 2780 uint16_t first_loop_id;
2788 scsi_qla_host_t *pha = to_qla_parent(ha); 2781 struct qla_hw_data *ha = vha->hw;
2782 struct scsi_qla_host *vp;
2789 2783
2790 rval = QLA_SUCCESS; 2784 rval = QLA_SUCCESS;
2791 2785
@@ -2794,17 +2788,15 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
2794 2788
2795 for (;;) { 2789 for (;;) {
2796 /* Skip loop ID if already used by adapter. */ 2790 /* Skip loop ID if already used by adapter. */
2797 if (dev->loop_id == ha->loop_id) { 2791 if (dev->loop_id == vha->loop_id)
2798 dev->loop_id++; 2792 dev->loop_id++;
2799 }
2800 2793
2801 /* Skip reserved loop IDs. */ 2794 /* Skip reserved loop IDs. */
2802 while (qla2x00_is_reserved_id(ha, dev->loop_id)) { 2795 while (qla2x00_is_reserved_id(vha, dev->loop_id))
2803 dev->loop_id++; 2796 dev->loop_id++;
2804 }
2805 2797
2806 /* Reset loop ID if passed the end. */ 2798 /* Reset loop ID if passed the end. */
2807 if (dev->loop_id > ha->last_loop_id) { 2799 if (dev->loop_id > ha->max_loop_id) {
2808 /* first loop ID. */ 2800 /* first loop ID. */
2809 dev->loop_id = ha->min_external_loopid; 2801 dev->loop_id = ha->min_external_loopid;
2810 } 2802 }
@@ -2812,12 +2804,17 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
2812 /* Check for loop ID being already in use. */ 2804 /* Check for loop ID being already in use. */
2813 found = 0; 2805 found = 0;
2814 fcport = NULL; 2806 fcport = NULL;
2815 list_for_each_entry(fcport, &pha->fcports, list) { 2807 list_for_each_entry(vp, &ha->vp_list, list) {
2816 if (fcport->loop_id == dev->loop_id && fcport != dev) { 2808 list_for_each_entry(fcport, &vp->vp_fcports, list) {
2817 /* ID possibly in use */ 2809 if (fcport->loop_id == dev->loop_id &&
2818 found++; 2810 fcport != dev) {
2819 break; 2811 /* ID possibly in use */
2812 found++;
2813 break;
2814 }
2820 } 2815 }
2816 if (found)
2817 break;
2821 } 2818 }
2822 2819
2823 /* If not in use then it is free to use. */ 2820 /* If not in use then it is free to use. */
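[Aside, not part of the patch] Because fcports are now split across per-vha lists, the loop-ID collision check above has to walk every vport hanging off the shared qla_hw_data rather than a single flat list. A compact, hypothetical restatement of that search (assumes the driver's headers):

/* Hypothetical helper, not part of the patch. */
static int example_loop_id_in_use(struct qla_hw_data *ha, uint16_t loop_id,
    fc_port_t *dev)
{
	struct scsi_qla_host *vp;
	fc_port_t *fcport;

	list_for_each_entry(vp, &ha->vp_list, list)
		list_for_each_entry(fcport, &vp->vp_fcports, list)
			if (fcport->loop_id == loop_id && fcport != dev)
				return 1;	/* ID possibly in use */
	return 0;
}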
@@ -2850,7 +2847,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
2850 * Kernel context. 2847 * Kernel context.
2851 */ 2848 */
2852static int 2849static int
2853qla2x00_device_resync(scsi_qla_host_t *ha) 2850qla2x00_device_resync(scsi_qla_host_t *vha)
2854{ 2851{
2855 int rval; 2852 int rval;
2856 uint32_t mask; 2853 uint32_t mask;
@@ -2859,14 +2856,13 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2859 uint8_t rscn_out_iter; 2856 uint8_t rscn_out_iter;
2860 uint8_t format; 2857 uint8_t format;
2861 port_id_t d_id; 2858 port_id_t d_id;
2862 scsi_qla_host_t *pha = to_qla_parent(ha);
2863 2859
2864 rval = QLA_RSCNS_HANDLED; 2860 rval = QLA_RSCNS_HANDLED;
2865 2861
2866 while (ha->rscn_out_ptr != ha->rscn_in_ptr || 2862 while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
2867 ha->flags.rscn_queue_overflow) { 2863 vha->flags.rscn_queue_overflow) {
2868 2864
2869 rscn_entry = ha->rscn_queue[ha->rscn_out_ptr]; 2865 rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
2870 format = MSB(MSW(rscn_entry)); 2866 format = MSB(MSW(rscn_entry));
2871 d_id.b.domain = LSB(MSW(rscn_entry)); 2867 d_id.b.domain = LSB(MSW(rscn_entry));
2872 d_id.b.area = MSB(LSW(rscn_entry)); 2868 d_id.b.area = MSB(LSW(rscn_entry));
@@ -2874,37 +2870,37 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2874 2870
2875 DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = " 2871 DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = "
2876 "[%02x/%02x%02x%02x].\n", 2872 "[%02x/%02x%02x%02x].\n",
2877 ha->host_no, ha->rscn_out_ptr, format, d_id.b.domain, 2873 vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain,
2878 d_id.b.area, d_id.b.al_pa)); 2874 d_id.b.area, d_id.b.al_pa));
2879 2875
2880 ha->rscn_out_ptr++; 2876 vha->rscn_out_ptr++;
2881 if (ha->rscn_out_ptr == MAX_RSCN_COUNT) 2877 if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
2882 ha->rscn_out_ptr = 0; 2878 vha->rscn_out_ptr = 0;
2883 2879
2884 /* Skip duplicate entries. */ 2880 /* Skip duplicate entries. */
2885 for (rscn_out_iter = ha->rscn_out_ptr; 2881 for (rscn_out_iter = vha->rscn_out_ptr;
2886 !ha->flags.rscn_queue_overflow && 2882 !vha->flags.rscn_queue_overflow &&
2887 rscn_out_iter != ha->rscn_in_ptr; 2883 rscn_out_iter != vha->rscn_in_ptr;
2888 rscn_out_iter = (rscn_out_iter == 2884 rscn_out_iter = (rscn_out_iter ==
2889 (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) { 2885 (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {
2890 2886
2891 if (rscn_entry != ha->rscn_queue[rscn_out_iter]) 2887 if (rscn_entry != vha->rscn_queue[rscn_out_iter])
2892 break; 2888 break;
2893 2889
2894 DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue " 2890 DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue "
2895 "entry found at [%d].\n", ha->host_no, 2891 "entry found at [%d].\n", vha->host_no,
2896 rscn_out_iter)); 2892 rscn_out_iter));
2897 2893
2898 ha->rscn_out_ptr = rscn_out_iter; 2894 vha->rscn_out_ptr = rscn_out_iter;
2899 } 2895 }
2900 2896
2901 /* Queue overflow, set switch default case. */ 2897 /* Queue overflow, set switch default case. */
2902 if (ha->flags.rscn_queue_overflow) { 2898 if (vha->flags.rscn_queue_overflow) {
2903 DEBUG(printk("scsi(%ld): device_resync: rscn " 2899 DEBUG(printk("scsi(%ld): device_resync: rscn "
2904 "overflow.\n", ha->host_no)); 2900 "overflow.\n", vha->host_no));
2905 2901
2906 format = 3; 2902 format = 3;
2907 ha->flags.rscn_queue_overflow = 0; 2903 vha->flags.rscn_queue_overflow = 0;
2908 } 2904 }
2909 2905
2910 switch (format) { 2906 switch (format) {
@@ -2920,16 +2916,13 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2920 default: 2916 default:
2921 mask = 0x0; 2917 mask = 0x0;
2922 d_id.b24 = 0; 2918 d_id.b24 = 0;
2923 ha->rscn_out_ptr = ha->rscn_in_ptr; 2919 vha->rscn_out_ptr = vha->rscn_in_ptr;
2924 break; 2920 break;
2925 } 2921 }
2926 2922
2927 rval = QLA_SUCCESS; 2923 rval = QLA_SUCCESS;
2928 2924
2929 list_for_each_entry(fcport, &pha->fcports, list) { 2925 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2930 if (fcport->vp_idx != ha->vp_idx)
2931 continue;
2932
2933 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || 2926 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
2934 (fcport->d_id.b24 & mask) != d_id.b24 || 2927 (fcport->d_id.b24 & mask) != d_id.b24 ||
2935 fcport->port_type == FCT_BROADCAST) 2928 fcport->port_type == FCT_BROADCAST)
@@ -2938,7 +2931,7 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2938 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2931 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2939 if (format != 3 || 2932 if (format != 3 ||
2940 fcport->port_type != FCT_INITIATOR) { 2933 fcport->port_type != FCT_INITIATOR) {
2941 qla2x00_mark_device_lost(ha, fcport, 2934 qla2x00_mark_device_lost(vha, fcport,
2942 0, 0); 2935 0, 0);
2943 } 2936 }
2944 } 2937 }
@@ -2965,30 +2958,31 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2965 * Kernel context. 2958 * Kernel context.
2966 */ 2959 */
2967static int 2960static int
2968qla2x00_fabric_dev_login(scsi_qla_host_t *ha, fc_port_t *fcport, 2961qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
2969 uint16_t *next_loopid) 2962 uint16_t *next_loopid)
2970{ 2963{
2971 int rval; 2964 int rval;
2972 int retry; 2965 int retry;
2973 uint8_t opts; 2966 uint8_t opts;
2967 struct qla_hw_data *ha = vha->hw;
2974 2968
2975 rval = QLA_SUCCESS; 2969 rval = QLA_SUCCESS;
2976 retry = 0; 2970 retry = 0;
2977 2971
2978 rval = qla2x00_fabric_login(ha, fcport, next_loopid); 2972 rval = qla2x00_fabric_login(vha, fcport, next_loopid);
2979 if (rval == QLA_SUCCESS) { 2973 if (rval == QLA_SUCCESS) {
2980 /* Send an ADISC to tape devices.*/ 2974 /* Send an ADISC to tape devices.*/
2981 opts = 0; 2975 opts = 0;
2982 if (fcport->flags & FCF_TAPE_PRESENT) 2976 if (fcport->flags & FCF_TAPE_PRESENT)
2983 opts |= BIT_1; 2977 opts |= BIT_1;
2984 rval = qla2x00_get_port_database(ha, fcport, opts); 2978 rval = qla2x00_get_port_database(vha, fcport, opts);
2985 if (rval != QLA_SUCCESS) { 2979 if (rval != QLA_SUCCESS) {
2986 ha->isp_ops->fabric_logout(ha, fcport->loop_id, 2980 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
2987 fcport->d_id.b.domain, fcport->d_id.b.area, 2981 fcport->d_id.b.domain, fcport->d_id.b.area,
2988 fcport->d_id.b.al_pa); 2982 fcport->d_id.b.al_pa);
2989 qla2x00_mark_device_lost(ha, fcport, 1, 0); 2983 qla2x00_mark_device_lost(vha, fcport, 1, 0);
2990 } else { 2984 } else {
2991 qla2x00_update_fcport(ha, fcport); 2985 qla2x00_update_fcport(vha, fcport);
2992 } 2986 }
2993 } 2987 }
2994 2988
@@ -3010,13 +3004,14 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3010 * 3 - Fatal error 3004 * 3 - Fatal error
3011 */ 3005 */
3012int 3006int
3013qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport, 3007qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3014 uint16_t *next_loopid) 3008 uint16_t *next_loopid)
3015{ 3009{
3016 int rval; 3010 int rval;
3017 int retry; 3011 int retry;
3018 uint16_t tmp_loopid; 3012 uint16_t tmp_loopid;
3019 uint16_t mb[MAILBOX_REGISTER_COUNT]; 3013 uint16_t mb[MAILBOX_REGISTER_COUNT];
3014 struct qla_hw_data *ha = vha->hw;
3020 3015
3021 retry = 0; 3016 retry = 0;
3022 tmp_loopid = 0; 3017 tmp_loopid = 0;
@@ -3024,11 +3019,11 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3024 for (;;) { 3019 for (;;) {
3025 DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x " 3020 DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x "
3026 "for port %02x%02x%02x.\n", 3021 "for port %02x%02x%02x.\n",
3027 ha->host_no, fcport->loop_id, fcport->d_id.b.domain, 3022 vha->host_no, fcport->loop_id, fcport->d_id.b.domain,
3028 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 3023 fcport->d_id.b.area, fcport->d_id.b.al_pa));
3029 3024
3030 /* Login fcport on switch. */ 3025 /* Login fcport on switch. */
3031 ha->isp_ops->fabric_login(ha, fcport->loop_id, 3026 ha->isp_ops->fabric_login(vha, fcport->loop_id,
3032 fcport->d_id.b.domain, fcport->d_id.b.area, 3027 fcport->d_id.b.domain, fcport->d_id.b.area,
3033 fcport->d_id.b.al_pa, mb, BIT_0); 3028 fcport->d_id.b.al_pa, mb, BIT_0);
3034 if (mb[0] == MBS_PORT_ID_USED) { 3029 if (mb[0] == MBS_PORT_ID_USED) {
@@ -3084,7 +3079,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3084 * Loop ID already used, try next loop ID. 3079 * Loop ID already used, try next loop ID.
3085 */ 3080 */
3086 fcport->loop_id++; 3081 fcport->loop_id++;
3087 rval = qla2x00_find_new_loop_id(ha, fcport); 3082 rval = qla2x00_find_new_loop_id(vha, fcport);
3088 if (rval != QLA_SUCCESS) { 3083 if (rval != QLA_SUCCESS) {
3089 /* Ran out of loop IDs to use */ 3084 /* Ran out of loop IDs to use */
3090 break; 3085 break;
@@ -3096,10 +3091,10 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3096 * dead. 3091 * dead.
3097 */ 3092 */
3098 *next_loopid = fcport->loop_id; 3093 *next_loopid = fcport->loop_id;
3099 ha->isp_ops->fabric_logout(ha, fcport->loop_id, 3094 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3100 fcport->d_id.b.domain, fcport->d_id.b.area, 3095 fcport->d_id.b.domain, fcport->d_id.b.area,
3101 fcport->d_id.b.al_pa); 3096 fcport->d_id.b.al_pa);
3102 qla2x00_mark_device_lost(ha, fcport, 1, 0); 3097 qla2x00_mark_device_lost(vha, fcport, 1, 0);
3103 3098
3104 rval = 1; 3099 rval = 1;
3105 break; 3100 break;
@@ -3109,12 +3104,12 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3109 */ 3104 */
3110 DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x " 3105 DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x "
3111 "loop_id=%x jiffies=%lx.\n", 3106 "loop_id=%x jiffies=%lx.\n",
3112 __func__, ha->host_no, mb[0], 3107 __func__, vha->host_no, mb[0],
3113 fcport->d_id.b.domain, fcport->d_id.b.area, 3108 fcport->d_id.b.domain, fcport->d_id.b.area,
3114 fcport->d_id.b.al_pa, fcport->loop_id, jiffies)); 3109 fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
3115 3110
3116 *next_loopid = fcport->loop_id; 3111 *next_loopid = fcport->loop_id;
3117 ha->isp_ops->fabric_logout(ha, fcport->loop_id, 3112 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3118 fcport->d_id.b.domain, fcport->d_id.b.area, 3113 fcport->d_id.b.domain, fcport->d_id.b.area,
3119 fcport->d_id.b.al_pa); 3114 fcport->d_id.b.al_pa);
3120 fcport->loop_id = FC_NO_LOOP_ID; 3115 fcport->loop_id = FC_NO_LOOP_ID;
@@ -3142,13 +3137,13 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3142 * 3 - Fatal error 3137 * 3 - Fatal error
3143 */ 3138 */
3144int 3139int
3145qla2x00_local_device_login(scsi_qla_host_t *ha, fc_port_t *fcport) 3140qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
3146{ 3141{
3147 int rval; 3142 int rval;
3148 uint16_t mb[MAILBOX_REGISTER_COUNT]; 3143 uint16_t mb[MAILBOX_REGISTER_COUNT];
3149 3144
3150 memset(mb, 0, sizeof(mb)); 3145 memset(mb, 0, sizeof(mb));
3151 rval = qla2x00_login_local_device(ha, fcport, mb, BIT_0); 3146 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
3152 if (rval == QLA_SUCCESS) { 3147 if (rval == QLA_SUCCESS) {
3153 /* Interrogate mailbox registers for any errors */ 3148 /* Interrogate mailbox registers for any errors */
3154 if (mb[0] == MBS_COMMAND_ERROR) 3149 if (mb[0] == MBS_COMMAND_ERROR)
@@ -3172,57 +3167,55 @@ qla2x00_local_device_login(scsi_qla_host_t *ha, fc_port_t *fcport)
3172 * 0 = success 3167 * 0 = success
3173 */ 3168 */
3174int 3169int
3175qla2x00_loop_resync(scsi_qla_host_t *ha) 3170qla2x00_loop_resync(scsi_qla_host_t *vha)
3176{ 3171{
3177 int rval; 3172 int rval;
3178 uint32_t wait_time; 3173 uint32_t wait_time;
3179 3174
3180 rval = QLA_SUCCESS; 3175 rval = QLA_SUCCESS;
3181 3176
3182 atomic_set(&ha->loop_state, LOOP_UPDATE); 3177 atomic_set(&vha->loop_state, LOOP_UPDATE);
3183 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3178 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3184 if (ha->flags.online) { 3179 if (vha->flags.online) {
3185 if (!(rval = qla2x00_fw_ready(ha))) { 3180 if (!(rval = qla2x00_fw_ready(vha))) {
3186 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 3181 /* Wait at most MAX_TARGET RSCNs for a stable link. */
3187 wait_time = 256; 3182 wait_time = 256;
3188 do { 3183 do {
3189 atomic_set(&ha->loop_state, LOOP_UPDATE); 3184 atomic_set(&vha->loop_state, LOOP_UPDATE);
3190 3185
3191 /* Issue a marker after FW becomes ready. */ 3186 /* Issue a marker after FW becomes ready. */
3192 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL); 3187 qla2x00_marker(vha, 0, 0, MK_SYNC_ALL);
3193 ha->marker_needed = 0; 3188 vha->marker_needed = 0;
3194 3189
3195 /* Remap devices on Loop. */ 3190 /* Remap devices on Loop. */
3196 clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 3191 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3197 3192
3198 qla2x00_configure_loop(ha); 3193 qla2x00_configure_loop(vha);
3199 wait_time--; 3194 wait_time--;
3200 } while (!atomic_read(&ha->loop_down_timer) && 3195 } while (!atomic_read(&vha->loop_down_timer) &&
3201 !(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) && 3196 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3202 wait_time && 3197 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
3203 (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))); 3198 &vha->dpc_flags)));
3204 } 3199 }
3205 } 3200 }
3206 3201
3207 if (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) { 3202 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3208 return (QLA_FUNCTION_FAILED); 3203 return (QLA_FUNCTION_FAILED);
3209 }
3210 3204
3211 if (rval) { 3205 if (rval)
3212 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__)); 3206 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
3213 }
3214 3207
3215 return (rval); 3208 return (rval);
3216} 3209}
3217 3210
3218void 3211void
3219qla2x00_update_fcports(scsi_qla_host_t *ha) 3212qla2x00_update_fcports(scsi_qla_host_t *vha)
3220{ 3213{
3221 fc_port_t *fcport; 3214 fc_port_t *fcport;
3222 3215
3223 /* Go with deferred removal of rport references. */ 3216 /* Go with deferred removal of rport references. */
3224 list_for_each_entry(fcport, &ha->fcports, list) 3217 list_for_each_entry(fcport, &vha->vp_fcports, list)
3225 if (fcport->drport && 3218 if (fcport && fcport->drport &&
3226 atomic_read(&fcport->state) != FCS_UNCONFIGURED) 3219 atomic_read(&fcport->state) != FCS_UNCONFIGURED)
3227 qla2x00_rport_del(fcport); 3220 qla2x00_rport_del(fcport);
3228} 3221}
@@ -3238,63 +3231,64 @@ qla2x00_update_fcports(scsi_qla_host_t *ha)
3238* 0 = success 3231* 0 = success
3239*/ 3232*/
3240int 3233int
3241qla2x00_abort_isp(scsi_qla_host_t *ha) 3234qla2x00_abort_isp(scsi_qla_host_t *vha)
3242{ 3235{
3243 int rval; 3236 int rval;
3244 uint8_t status = 0; 3237 uint8_t status = 0;
3245 scsi_qla_host_t *vha; 3238 struct qla_hw_data *ha = vha->hw;
3239 struct scsi_qla_host *vp;
3246 3240
3247 if (ha->flags.online) { 3241 if (vha->flags.online) {
3248 ha->flags.online = 0; 3242 vha->flags.online = 0;
3249 clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 3243 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3250 ha->qla_stats.total_isp_aborts++; 3244 ha->qla_stats.total_isp_aborts++;
3251 3245
3252 qla_printk(KERN_INFO, ha, 3246 qla_printk(KERN_INFO, ha,
3253 "Performing ISP error recovery - ha= %p.\n", ha); 3247 "Performing ISP error recovery - ha= %p.\n", ha);
3254 ha->isp_ops->reset_chip(ha); 3248 ha->isp_ops->reset_chip(vha);
3255 3249
3256 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 3250 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
3257 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 3251 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3258 atomic_set(&ha->loop_state, LOOP_DOWN); 3252 atomic_set(&vha->loop_state, LOOP_DOWN);
3259 qla2x00_mark_all_devices_lost(ha, 0); 3253 qla2x00_mark_all_devices_lost(vha, 0);
3260 list_for_each_entry(vha, &ha->vp_list, vp_list) 3254 list_for_each_entry(vp, &ha->vp_list, list)
3261 qla2x00_mark_all_devices_lost(vha, 0); 3255 qla2x00_mark_all_devices_lost(vp, 0);
3262 } else { 3256 } else {
3263 if (!atomic_read(&ha->loop_down_timer)) 3257 if (!atomic_read(&vha->loop_down_timer))
3264 atomic_set(&ha->loop_down_timer, 3258 atomic_set(&vha->loop_down_timer,
3265 LOOP_DOWN_TIME); 3259 LOOP_DOWN_TIME);
3266 } 3260 }
3267 3261
3268 /* Requeue all commands in outstanding command list. */ 3262 /* Requeue all commands in outstanding command list. */
3269 qla2x00_abort_all_cmds(ha, DID_RESET << 16); 3263 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
3270 3264
3271 ha->isp_ops->get_flash_version(ha, ha->request_ring); 3265 ha->isp_ops->get_flash_version(vha, ha->req->ring);
3272 3266
3273 ha->isp_ops->nvram_config(ha); 3267 ha->isp_ops->nvram_config(vha);
3274 3268
3275 if (!qla2x00_restart_isp(ha)) { 3269 if (!qla2x00_restart_isp(vha)) {
3276 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 3270 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
3277 3271
3278 if (!atomic_read(&ha->loop_down_timer)) { 3272 if (!atomic_read(&vha->loop_down_timer)) {
3279 /* 3273 /*
3280 * Issue marker command only when we are going 3274 * Issue marker command only when we are going
3281 * to start the I/O . 3275 * to start the I/O .
3282 */ 3276 */
3283 ha->marker_needed = 1; 3277 vha->marker_needed = 1;
3284 } 3278 }
3285 3279
3286 ha->flags.online = 1; 3280 vha->flags.online = 1;
3287 3281
3288 ha->isp_ops->enable_intrs(ha); 3282 ha->isp_ops->enable_intrs(ha);
3289 3283
3290 ha->isp_abort_cnt = 0; 3284 ha->isp_abort_cnt = 0;
3291 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3285 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3292 3286
3293 if (ha->fce) { 3287 if (ha->fce) {
3294 ha->flags.fce_enabled = 1; 3288 ha->flags.fce_enabled = 1;
3295 memset(ha->fce, 0, 3289 memset(ha->fce, 0,
3296 fce_calc_size(ha->fce_bufs)); 3290 fce_calc_size(ha->fce_bufs));
3297 rval = qla2x00_enable_fce_trace(ha, 3291 rval = qla2x00_enable_fce_trace(vha,
3298 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 3292 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
3299 &ha->fce_bufs); 3293 &ha->fce_bufs);
3300 if (rval) { 3294 if (rval) {
@@ -3307,7 +3301,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3307 3301
3308 if (ha->eft) { 3302 if (ha->eft) {
3309 memset(ha->eft, 0, EFT_SIZE); 3303 memset(ha->eft, 0, EFT_SIZE);
3310 rval = qla2x00_enable_eft_trace(ha, 3304 rval = qla2x00_enable_eft_trace(vha,
3311 ha->eft_dma, EFT_NUM_BUFFERS); 3305 ha->eft_dma, EFT_NUM_BUFFERS);
3312 if (rval) { 3306 if (rval) {
3313 qla_printk(KERN_WARNING, ha, 3307 qla_printk(KERN_WARNING, ha,
@@ -3316,8 +3310,8 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3316 } 3310 }
3317 } 3311 }
3318 } else { /* failed the ISP abort */ 3312 } else { /* failed the ISP abort */
3319 ha->flags.online = 1; 3313 vha->flags.online = 1;
3320 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { 3314 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3321 if (ha->isp_abort_cnt == 0) { 3315 if (ha->isp_abort_cnt == 0) {
3322 qla_printk(KERN_WARNING, ha, 3316 qla_printk(KERN_WARNING, ha,
3323 "ISP error recovery failed - " 3317 "ISP error recovery failed - "
@@ -3326,37 +3320,41 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3326 * The next call disables the board 3320 * The next call disables the board
3327 * completely. 3321 * completely.
3328 */ 3322 */
3329 ha->isp_ops->reset_adapter(ha); 3323 ha->isp_ops->reset_adapter(vha);
3330 ha->flags.online = 0; 3324 vha->flags.online = 0;
3331 clear_bit(ISP_ABORT_RETRY, 3325 clear_bit(ISP_ABORT_RETRY,
3332 &ha->dpc_flags); 3326 &vha->dpc_flags);
3333 status = 0; 3327 status = 0;
3334 } else { /* schedule another ISP abort */ 3328 } else { /* schedule another ISP abort */
3335 ha->isp_abort_cnt--; 3329 ha->isp_abort_cnt--;
3336 DEBUG(printk("qla%ld: ISP abort - " 3330 DEBUG(printk("qla%ld: ISP abort - "
3337 "retry remaining %d\n", 3331 "retry remaining %d\n",
3338 ha->host_no, ha->isp_abort_cnt)); 3332 vha->host_no, ha->isp_abort_cnt));
3339 status = 1; 3333 status = 1;
3340 } 3334 }
3341 } else { 3335 } else {
3342 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 3336 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3343 DEBUG(printk("qla2x00(%ld): ISP error recovery " 3337 DEBUG(printk("qla2x00(%ld): ISP error recovery "
3344 "- retrying (%d) more times\n", 3338 "- retrying (%d) more times\n",
3345 ha->host_no, ha->isp_abort_cnt)); 3339 vha->host_no, ha->isp_abort_cnt));
3346 set_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3340 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3347 status = 1; 3341 status = 1;
3348 } 3342 }
3349 } 3343 }
3350 3344
3351 } 3345 }
3352 3346
3353 if (status) { 3347 if (!status) {
3348 DEBUG(printk(KERN_INFO
3349 "qla2x00_abort_isp(%ld): succeeded.\n",
3350 vha->host_no));
3351 list_for_each_entry(vp, &ha->vp_list, list) {
3352 if (vp->vp_idx)
3353 qla2x00_vp_abort_isp(vp);
3354 }
3355 } else {
3354 qla_printk(KERN_INFO, ha, 3356 qla_printk(KERN_INFO, ha,
3355 "qla2x00_abort_isp: **** FAILED ****\n"); 3357 "qla2x00_abort_isp: **** FAILED ****\n");
3356 } else {
3357 DEBUG(printk(KERN_INFO
3358 "qla2x00_abort_isp(%ld): exiting.\n",
3359 ha->host_no));
3360 } 3358 }
3361 3359
3362 return(status); 3360 return(status);
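The hunk above shows the conversion pattern that runs through the whole commit: each routine now takes the (possibly virtual) host vha and reaches shared adapter state through vha->hw, so per-host fields such as flags.online, dpc_flags and the loop timers stay on scsi_qla_host_t, while adapter-wide state such as isp_abort_cnt, the FCE/EFT buffers and the isp_ops table lives in struct qla_hw_data. A minimal sketch of the pattern, assuming the driver's own headers; example_reset_path() is a made-up helper, not code from the patch:

	static void example_reset_path(scsi_qla_host_t *vha)
	{
		struct qla_hw_data *ha = vha->hw;	/* shared hardware data */

		vha->flags.online = 0;			/* per-host flag */
		ha->isp_abort_cnt = 0;			/* adapter-wide counter */
		ha->isp_ops->reset_chip(vha);		/* isp_ops callbacks now take vha */
	}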
@@ -3373,42 +3371,45 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3373* 0 = success 3371* 0 = success
3374*/ 3372*/
3375static int 3373static int
3376qla2x00_restart_isp(scsi_qla_host_t *ha) 3374qla2x00_restart_isp(scsi_qla_host_t *vha)
3377{ 3375{
3378 uint8_t status = 0; 3376 uint8_t status = 0;
3379 uint32_t wait_time; 3377 uint32_t wait_time;
3378 struct qla_hw_data *ha = vha->hw;
3380 3379
3381 /* If firmware needs to be loaded */ 3380 /* If firmware needs to be loaded */
3382 if (qla2x00_isp_firmware(ha)) { 3381 if (qla2x00_isp_firmware(vha)) {
3383 ha->flags.online = 0; 3382 vha->flags.online = 0;
3384 if (!(status = ha->isp_ops->chip_diag(ha))) 3383 status = ha->isp_ops->chip_diag(vha);
3385 status = qla2x00_setup_chip(ha); 3384 if (!status)
3385 status = qla2x00_setup_chip(vha);
3386 } 3386 }
3387 3387
3388 if (!status && !(status = qla2x00_init_rings(ha))) { 3388 if (!status && !(status = qla2x00_init_rings(vha))) {
3389 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 3389 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
3390 if (!(status = qla2x00_fw_ready(ha))) { 3390 status = qla2x00_fw_ready(vha);
3391 if (!status) {
3391 DEBUG(printk("%s(): Start configure loop, " 3392 DEBUG(printk("%s(): Start configure loop, "
3392 "status = %d\n", __func__, status)); 3393 "status = %d\n", __func__, status));
3393 3394
3394 /* Issue a marker after FW becomes ready. */ 3395 /* Issue a marker after FW becomes ready. */
3395 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL); 3396 qla2x00_marker(vha, 0, 0, MK_SYNC_ALL);
3396 3397
3397 ha->flags.online = 1; 3398 vha->flags.online = 1;
3398 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 3399 /* Wait at most MAX_TARGET RSCNs for a stable link. */
3399 wait_time = 256; 3400 wait_time = 256;
3400 do { 3401 do {
3401 clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 3402 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3402 qla2x00_configure_loop(ha); 3403 qla2x00_configure_loop(vha);
3403 wait_time--; 3404 wait_time--;
3404 } while (!atomic_read(&ha->loop_down_timer) && 3405 } while (!atomic_read(&vha->loop_down_timer) &&
3405 !(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) && 3406 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3406 wait_time && 3407 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
3407 (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))); 3408 &vha->dpc_flags)));
3408 } 3409 }
3409 3410
3410 /* if no cable then assume it's good */ 3411 /* if no cable then assume it's good */
3411 if ((ha->device_flags & DFLG_NO_CABLE)) 3412 if ((vha->device_flags & DFLG_NO_CABLE))
3412 status = 0; 3413 status = 0;
3413 3414
3414 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n", 3415 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n",
@@ -3426,12 +3427,13 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
3426* ha = adapter block pointer. 3427* ha = adapter block pointer.
3427*/ 3428*/
3428void 3429void
3429qla2x00_reset_adapter(scsi_qla_host_t *ha) 3430qla2x00_reset_adapter(scsi_qla_host_t *vha)
3430{ 3431{
3431 unsigned long flags = 0; 3432 unsigned long flags = 0;
3433 struct qla_hw_data *ha = vha->hw;
3432 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 3434 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3433 3435
3434 ha->flags.online = 0; 3436 vha->flags.online = 0;
3435 ha->isp_ops->disable_intrs(ha); 3437 ha->isp_ops->disable_intrs(ha);
3436 3438
3437 spin_lock_irqsave(&ha->hardware_lock, flags); 3439 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -3443,12 +3445,13 @@ qla2x00_reset_adapter(scsi_qla_host_t *ha)
3443} 3445}
3444 3446
3445void 3447void
3446qla24xx_reset_adapter(scsi_qla_host_t *ha) 3448qla24xx_reset_adapter(scsi_qla_host_t *vha)
3447{ 3449{
3448 unsigned long flags = 0; 3450 unsigned long flags = 0;
3451 struct qla_hw_data *ha = vha->hw;
3449 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3452 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3450 3453
3451 ha->flags.online = 0; 3454 vha->flags.online = 0;
3452 ha->isp_ops->disable_intrs(ha); 3455 ha->isp_ops->disable_intrs(ha);
3453 3456
3454 spin_lock_irqsave(&ha->hardware_lock, flags); 3457 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -3462,9 +3465,11 @@ qla24xx_reset_adapter(scsi_qla_host_t *ha)
3462/* On sparc systems, obtain port and node WWN from firmware 3465/* On sparc systems, obtain port and node WWN from firmware
3463 * properties. 3466 * properties.
3464 */ 3467 */
3465static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, struct nvram_24xx *nv) 3468static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
3469 struct nvram_24xx *nv)
3466{ 3470{
3467#ifdef CONFIG_SPARC 3471#ifdef CONFIG_SPARC
3472 struct qla_hw_data *ha = vha->hw;
3468 struct pci_dev *pdev = ha->pdev; 3473 struct pci_dev *pdev = ha->pdev;
3469 struct device_node *dp = pci_device_to_OF_node(pdev); 3474 struct device_node *dp = pci_device_to_OF_node(pdev);
3470 const u8 *val; 3475 const u8 *val;
@@ -3481,7 +3486,7 @@ static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, struct nvram_24xx *n
3481} 3486}
3482 3487
3483int 3488int
3484qla24xx_nvram_config(scsi_qla_host_t *ha) 3489qla24xx_nvram_config(scsi_qla_host_t *vha)
3485{ 3490{
3486 int rval; 3491 int rval;
3487 struct init_cb_24xx *icb; 3492 struct init_cb_24xx *icb;
@@ -3490,6 +3495,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3490 uint8_t *dptr1, *dptr2; 3495 uint8_t *dptr1, *dptr2;
3491 uint32_t chksum; 3496 uint32_t chksum;
3492 uint16_t cnt; 3497 uint16_t cnt;
3498 struct qla_hw_data *ha = vha->hw;
3493 3499
3494 rval = QLA_SUCCESS; 3500 rval = QLA_SUCCESS;
3495 icb = (struct init_cb_24xx *)ha->init_cb; 3501 icb = (struct init_cb_24xx *)ha->init_cb;
@@ -3507,12 +3513,12 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3507 3513
3508 /* Get VPD data into cache */ 3514 /* Get VPD data into cache */
3509 ha->vpd = ha->nvram + VPD_OFFSET; 3515 ha->vpd = ha->nvram + VPD_OFFSET;
3510 ha->isp_ops->read_nvram(ha, (uint8_t *)ha->vpd, 3516 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
3511 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); 3517 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
3512 3518
3513 /* Get NVRAM data into cache and calculate checksum. */ 3519 /* Get NVRAM data into cache and calculate checksum. */
3514 dptr = (uint32_t *)nv; 3520 dptr = (uint32_t *)nv;
3515 ha->isp_ops->read_nvram(ha, (uint8_t *)dptr, ha->nvram_base, 3521 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
3516 ha->nvram_size); 3522 ha->nvram_size);
3517 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 3523 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
3518 chksum += le32_to_cpu(*dptr++); 3524 chksum += le32_to_cpu(*dptr++);
@@ -3557,7 +3563,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3557 nv->node_name[5] = 0x1c; 3563 nv->node_name[5] = 0x1c;
3558 nv->node_name[6] = 0x55; 3564 nv->node_name[6] = 0x55;
3559 nv->node_name[7] = 0x86; 3565 nv->node_name[7] = 0x86;
3560 qla24xx_nvram_wwn_from_ofw(ha, nv); 3566 qla24xx_nvram_wwn_from_ofw(vha, nv);
3561 nv->login_retry_count = __constant_cpu_to_le16(8); 3567 nv->login_retry_count = __constant_cpu_to_le16(8);
3562 nv->interrupt_delay_timer = __constant_cpu_to_le16(0); 3568 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
3563 nv->login_timeout = __constant_cpu_to_le16(0); 3569 nv->login_timeout = __constant_cpu_to_le16(0);
@@ -3577,7 +3583,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3577 } 3583 }
3578 3584
3579 /* Reset Initialization control block */ 3585 /* Reset Initialization control block */
3580 memset(icb, 0, sizeof(struct init_cb_24xx)); 3586 memset(icb, 0, ha->init_cb_size);
3581 3587
3582 /* Copy 1st segment. */ 3588 /* Copy 1st segment. */
3583 dptr1 = (uint8_t *)icb; 3589 dptr1 = (uint8_t *)icb;
@@ -3600,7 +3606,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3600 /* 3606 /*
3601 * Setup driver NVRAM options. 3607 * Setup driver NVRAM options.
3602 */ 3608 */
3603 qla2x00_set_model_info(ha, nv->model_name, sizeof(nv->model_name), 3609 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
3604 "QLA2462"); 3610 "QLA2462");
3605 3611
3606 /* Use alternate WWN? */ 3612 /* Use alternate WWN? */
@@ -3639,8 +3645,8 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3639 ha->serial0 = icb->port_name[5]; 3645 ha->serial0 = icb->port_name[5];
3640 ha->serial1 = icb->port_name[6]; 3646 ha->serial1 = icb->port_name[6];
3641 ha->serial2 = icb->port_name[7]; 3647 ha->serial2 = icb->port_name[7];
3642 ha->node_name = icb->node_name; 3648 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
3643 ha->port_name = icb->port_name; 3649 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
3644 3650
3645 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 3651 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
3646 3652
@@ -3695,7 +3701,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3695 ha->login_retry_count = ql2xloginretrycount; 3701 ha->login_retry_count = ql2xloginretrycount;
3696 3702
3697 /* Enable ZIO. */ 3703 /* Enable ZIO. */
3698 if (!ha->flags.init_done) { 3704 if (!vha->flags.init_done) {
3699 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 3705 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
3700 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 3706 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
3701 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 3707 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
@@ -3703,12 +3709,12 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3703 } 3709 }
3704 icb->firmware_options_2 &= __constant_cpu_to_le32( 3710 icb->firmware_options_2 &= __constant_cpu_to_le32(
3705 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 3711 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
3706 ha->flags.process_response_queue = 0; 3712 vha->flags.process_response_queue = 0;
3707 if (ha->zio_mode != QLA_ZIO_DISABLED) { 3713 if (ha->zio_mode != QLA_ZIO_DISABLED) {
3708 ha->zio_mode = QLA_ZIO_MODE_6; 3714 ha->zio_mode = QLA_ZIO_MODE_6;
3709 3715
3710 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " 3716 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
3711 "(%d us).\n", ha->host_no, ha->zio_mode, 3717 "(%d us).\n", vha->host_no, ha->zio_mode,
3712 ha->zio_timer * 100)); 3718 ha->zio_timer * 100));
3713 qla_printk(KERN_INFO, ha, 3719 qla_printk(KERN_INFO, ha,
3714 "ZIO mode %d enabled; timer delay (%d us).\n", 3720 "ZIO mode %d enabled; timer delay (%d us).\n",
@@ -3717,18 +3723,18 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3717 icb->firmware_options_2 |= cpu_to_le32( 3723 icb->firmware_options_2 |= cpu_to_le32(
3718 (uint32_t)ha->zio_mode); 3724 (uint32_t)ha->zio_mode);
3719 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); 3725 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
3720 ha->flags.process_response_queue = 1; 3726 vha->flags.process_response_queue = 1;
3721 } 3727 }
3722 3728
3723 if (rval) { 3729 if (rval) {
3724 DEBUG2_3(printk(KERN_WARNING 3730 DEBUG2_3(printk(KERN_WARNING
3725 "scsi(%ld): NVRAM configuration failed!\n", ha->host_no)); 3731 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
3726 } 3732 }
3727 return (rval); 3733 return (rval);
3728} 3734}
3729 3735
3730static int 3736static int
3731qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr) 3737qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3732{ 3738{
3733 int rval; 3739 int rval;
3734 int segments, fragment; 3740 int segments, fragment;
@@ -3737,16 +3743,16 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3737 uint32_t risc_addr; 3743 uint32_t risc_addr;
3738 uint32_t risc_size; 3744 uint32_t risc_size;
3739 uint32_t i; 3745 uint32_t i;
3740 3746 struct qla_hw_data *ha = vha->hw;
3741 rval = QLA_SUCCESS; 3747 rval = QLA_SUCCESS;
3742 3748
3743 segments = FA_RISC_CODE_SEGMENTS; 3749 segments = FA_RISC_CODE_SEGMENTS;
3744 faddr = ha->flt_region_fw; 3750 faddr = ha->flt_region_fw;
3745 dcode = (uint32_t *)ha->request_ring; 3751 dcode = (uint32_t *)ha->req->ring;
3746 *srisc_addr = 0; 3752 *srisc_addr = 0;
3747 3753
3748 /* Validate firmware image by checking version. */ 3754 /* Validate firmware image by checking version. */
3749 qla24xx_read_flash_data(ha, dcode, faddr + 4, 4); 3755 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
3750 for (i = 0; i < 4; i++) 3756 for (i = 0; i < 4; i++)
3751 dcode[i] = be32_to_cpu(dcode[i]); 3757 dcode[i] = be32_to_cpu(dcode[i]);
3752 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff && 3758 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
@@ -3764,7 +3770,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3764 3770
3765 while (segments && rval == QLA_SUCCESS) { 3771 while (segments && rval == QLA_SUCCESS) {
3766 /* Read segment's load information. */ 3772 /* Read segment's load information. */
3767 qla24xx_read_flash_data(ha, dcode, faddr, 4); 3773 qla24xx_read_flash_data(vha, dcode, faddr, 4);
3768 3774
3769 risc_addr = be32_to_cpu(dcode[2]); 3775 risc_addr = be32_to_cpu(dcode[2]);
3770 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr; 3776 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
@@ -3778,17 +3784,17 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3778 3784
3779 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 3785 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
3780 "addr %x, number of dwords 0x%x, offset 0x%x.\n", 3786 "addr %x, number of dwords 0x%x, offset 0x%x.\n",
3781 ha->host_no, risc_addr, dlen, faddr)); 3787 vha->host_no, risc_addr, dlen, faddr));
3782 3788
3783 qla24xx_read_flash_data(ha, dcode, faddr, dlen); 3789 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
3784 for (i = 0; i < dlen; i++) 3790 for (i = 0; i < dlen; i++)
3785 dcode[i] = swab32(dcode[i]); 3791 dcode[i] = swab32(dcode[i]);
3786 3792
3787 rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr, 3793 rval = qla2x00_load_ram(vha, ha->req->dma, risc_addr,
3788 dlen); 3794 dlen);
3789 if (rval) { 3795 if (rval) {
3790 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 3796 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
3791 "segment %d of firmware\n", ha->host_no, 3797 "segment %d of firmware\n", vha->host_no,
3792 fragment)); 3798 fragment));
3793 qla_printk(KERN_WARNING, ha, 3799 qla_printk(KERN_WARNING, ha,
3794 "[ERROR] Failed to load segment %d of " 3800 "[ERROR] Failed to load segment %d of "
@@ -3812,16 +3818,17 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3812#define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/" 3818#define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
3813 3819
3814int 3820int
3815qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr) 3821qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3816{ 3822{
3817 int rval; 3823 int rval;
3818 int i, fragment; 3824 int i, fragment;
3819 uint16_t *wcode, *fwcode; 3825 uint16_t *wcode, *fwcode;
3820 uint32_t risc_addr, risc_size, fwclen, wlen, *seg; 3826 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
3821 struct fw_blob *blob; 3827 struct fw_blob *blob;
3828 struct qla_hw_data *ha = vha->hw;
3822 3829
3823 /* Load firmware blob. */ 3830 /* Load firmware blob. */
3824 blob = qla2x00_request_firmware(ha); 3831 blob = qla2x00_request_firmware(vha);
3825 if (!blob) { 3832 if (!blob) {
3826 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 3833 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
3827 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 3834 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
@@ -3831,7 +3838,7 @@ qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3831 3838
3832 rval = QLA_SUCCESS; 3839 rval = QLA_SUCCESS;
3833 3840
3834 wcode = (uint16_t *)ha->request_ring; 3841 wcode = (uint16_t *)ha->req->ring;
3835 *srisc_addr = 0; 3842 *srisc_addr = 0;
3836 fwcode = (uint16_t *)blob->fw->data; 3843 fwcode = (uint16_t *)blob->fw->data;
3837 fwclen = 0; 3844 fwclen = 0;
@@ -3878,17 +3885,17 @@ qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3878 wlen = risc_size; 3885 wlen = risc_size;
3879 3886
3880 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 3887 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
3881 "addr %x, number of words 0x%x.\n", ha->host_no, 3888 "addr %x, number of words 0x%x.\n", vha->host_no,
3882 risc_addr, wlen)); 3889 risc_addr, wlen));
3883 3890
3884 for (i = 0; i < wlen; i++) 3891 for (i = 0; i < wlen; i++)
3885 wcode[i] = swab16(fwcode[i]); 3892 wcode[i] = swab16(fwcode[i]);
3886 3893
3887 rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr, 3894 rval = qla2x00_load_ram(vha, ha->req->dma, risc_addr,
3888 wlen); 3895 wlen);
3889 if (rval) { 3896 if (rval) {
3890 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 3897 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
3891 "segment %d of firmware\n", ha->host_no, 3898 "segment %d of firmware\n", vha->host_no,
3892 fragment)); 3899 fragment));
3893 qla_printk(KERN_WARNING, ha, 3900 qla_printk(KERN_WARNING, ha,
3894 "[ERROR] Failed to load segment %d of " 3901 "[ERROR] Failed to load segment %d of "
@@ -3912,7 +3919,7 @@ fail_fw_integrity:
3912} 3919}
3913 3920
3914int 3921int
3915qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr) 3922qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3916{ 3923{
3917 int rval; 3924 int rval;
3918 int segments, fragment; 3925 int segments, fragment;
@@ -3922,9 +3929,10 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3922 uint32_t i; 3929 uint32_t i;
3923 struct fw_blob *blob; 3930 struct fw_blob *blob;
3924 uint32_t *fwcode, fwclen; 3931 uint32_t *fwcode, fwclen;
3932 struct qla_hw_data *ha = vha->hw;
3925 3933
3926 /* Load firmware blob. */ 3934 /* Load firmware blob. */
3927 blob = qla2x00_request_firmware(ha); 3935 blob = qla2x00_request_firmware(vha);
3928 if (!blob) { 3936 if (!blob) {
3929 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 3937 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
3930 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 3938 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
@@ -3933,13 +3941,13 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3933 /* Try to load RISC code from flash. */ 3941 /* Try to load RISC code from flash. */
3934 qla_printk(KERN_ERR, ha, "Attempting to load (potentially " 3942 qla_printk(KERN_ERR, ha, "Attempting to load (potentially "
3935 "outdated) firmware from flash.\n"); 3943 "outdated) firmware from flash.\n");
3936 return qla24xx_load_risc_flash(ha, srisc_addr); 3944 return qla24xx_load_risc_flash(vha, srisc_addr);
3937 } 3945 }
3938 3946
3939 rval = QLA_SUCCESS; 3947 rval = QLA_SUCCESS;
3940 3948
3941 segments = FA_RISC_CODE_SEGMENTS; 3949 segments = FA_RISC_CODE_SEGMENTS;
3942 dcode = (uint32_t *)ha->request_ring; 3950 dcode = (uint32_t *)ha->req->ring;
3943 *srisc_addr = 0; 3951 *srisc_addr = 0;
3944 fwcode = (uint32_t *)blob->fw->data; 3952 fwcode = (uint32_t *)blob->fw->data;
3945 fwclen = 0; 3953 fwclen = 0;
@@ -3987,17 +3995,17 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3987 dlen = risc_size; 3995 dlen = risc_size;
3988 3996
3989 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 3997 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
3990 "addr %x, number of dwords 0x%x.\n", ha->host_no, 3998 "addr %x, number of dwords 0x%x.\n", vha->host_no,
3991 risc_addr, dlen)); 3999 risc_addr, dlen));
3992 4000
3993 for (i = 0; i < dlen; i++) 4001 for (i = 0; i < dlen; i++)
3994 dcode[i] = swab32(fwcode[i]); 4002 dcode[i] = swab32(fwcode[i]);
3995 4003
3996 rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr, 4004 rval = qla2x00_load_ram(vha, ha->req->dma, risc_addr,
3997 dlen); 4005 dlen);
3998 if (rval) { 4006 if (rval) {
3999 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4007 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4000 "segment %d of firmware\n", ha->host_no, 4008 "segment %d of firmware\n", vha->host_no,
4001 fragment)); 4009 fragment));
4002 qla_printk(KERN_WARNING, ha, 4010 qla_printk(KERN_WARNING, ha,
4003 "[ERROR] Failed to load segment %d of " 4011 "[ERROR] Failed to load segment %d of "
@@ -4021,49 +4029,51 @@ fail_fw_integrity:
4021} 4029}
4022 4030
4023void 4031void
4024qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha) 4032qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4025{ 4033{
4026 int ret, retries; 4034 int ret, retries;
4035 struct qla_hw_data *ha = vha->hw;
4027 4036
4028 if (!IS_FWI2_CAPABLE(ha)) 4037 if (!IS_FWI2_CAPABLE(ha))
4029 return; 4038 return;
4030 if (!ha->fw_major_version) 4039 if (!ha->fw_major_version)
4031 return; 4040 return;
4032 4041
4033 ret = qla2x00_stop_firmware(ha); 4042 ret = qla2x00_stop_firmware(vha);
4034 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && 4043 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4035 retries ; retries--) { 4044 retries ; retries--) {
4036 ha->isp_ops->reset_chip(ha); 4045 ha->isp_ops->reset_chip(vha);
4037 if (ha->isp_ops->chip_diag(ha) != QLA_SUCCESS) 4046 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
4038 continue; 4047 continue;
4039 if (qla2x00_setup_chip(ha) != QLA_SUCCESS) 4048 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
4040 continue; 4049 continue;
4041 qla_printk(KERN_INFO, ha, 4050 qla_printk(KERN_INFO, ha,
4042 "Attempting retry of stop-firmware command...\n"); 4051 "Attempting retry of stop-firmware command...\n");
4043 ret = qla2x00_stop_firmware(ha); 4052 ret = qla2x00_stop_firmware(vha);
4044 } 4053 }
4045} 4054}
4046 4055
4047int 4056int
4048qla24xx_configure_vhba(scsi_qla_host_t *ha) 4057qla24xx_configure_vhba(scsi_qla_host_t *vha)
4049{ 4058{
4050 int rval = QLA_SUCCESS; 4059 int rval = QLA_SUCCESS;
4051 uint16_t mb[MAILBOX_REGISTER_COUNT]; 4060 uint16_t mb[MAILBOX_REGISTER_COUNT];
4061 struct qla_hw_data *ha = vha->hw;
4062 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4052 4063
4053 if (!ha->parent) 4064 if (!vha->vp_idx)
4054 return -EINVAL; 4065 return -EINVAL;
4055 4066
4056 rval = qla2x00_fw_ready(ha->parent); 4067 rval = qla2x00_fw_ready(base_vha);
4057 if (rval == QLA_SUCCESS) { 4068 if (rval == QLA_SUCCESS) {
4058 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 4069 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4059 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL); 4070 qla2x00_marker(vha, 0, 0, MK_SYNC_ALL);
4060 } 4071 }
4061 4072
4062 ha->flags.management_server_logged_in = 0; 4073 vha->flags.management_server_logged_in = 0;
4063 4074
4064 /* Login to SNS first */ 4075 /* Login to SNS first */
4065 qla24xx_login_fabric(ha->parent, NPH_SNS, 0xff, 0xff, 0xfc, 4076 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
4066 mb, BIT_1);
4067 if (mb[0] != MBS_COMMAND_COMPLETE) { 4077 if (mb[0] != MBS_COMMAND_COMPLETE) {
4068 DEBUG15(qla_printk(KERN_INFO, ha, 4078 DEBUG15(qla_printk(KERN_INFO, ha,
4069 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 4079 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
@@ -4072,11 +4082,11 @@ qla24xx_configure_vhba(scsi_qla_host_t *ha)
4072 return (QLA_FUNCTION_FAILED); 4082 return (QLA_FUNCTION_FAILED);
4073 } 4083 }
4074 4084
4075 atomic_set(&ha->loop_down_timer, 0); 4085 atomic_set(&vha->loop_down_timer, 0);
4076 atomic_set(&ha->loop_state, LOOP_UP); 4086 atomic_set(&vha->loop_state, LOOP_UP);
4077 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 4087 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4078 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 4088 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4079 rval = qla2x00_loop_resync(ha->parent); 4089 rval = qla2x00_loop_resync(base_vha);
4080 4090
4081 return rval; 4091 return rval;
4082} 4092}
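With the old ha->parent pointer gone, vport code such as qla24xx_configure_vhba above finds the physical HBA through the PCI driver data and treats vp_idx == 0 as the physical port. A rough sketch of that lookup; find_base_host() is a hypothetical helper, not part of the patch:

	static scsi_qla_host_t *find_base_host(scsi_qla_host_t *vha)
	{
		struct qla_hw_data *ha = vha->hw;

		if (!vha->vp_idx)			/* vp_idx 0: already the physical port */
			return vha;

		return pci_get_drvdata(ha->pdev);	/* base host stored at probe time */
	}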
@@ -4087,9 +4097,10 @@ static LIST_HEAD(qla_cs84xx_list);
4087static DEFINE_MUTEX(qla_cs84xx_mutex); 4097static DEFINE_MUTEX(qla_cs84xx_mutex);
4088 4098
4089static struct qla_chip_state_84xx * 4099static struct qla_chip_state_84xx *
4090qla84xx_get_chip(struct scsi_qla_host *ha) 4100qla84xx_get_chip(struct scsi_qla_host *vha)
4091{ 4101{
4092 struct qla_chip_state_84xx *cs84xx; 4102 struct qla_chip_state_84xx *cs84xx;
4103 struct qla_hw_data *ha = vha->hw;
4093 4104
4094 mutex_lock(&qla_cs84xx_mutex); 4105 mutex_lock(&qla_cs84xx_mutex);
4095 4106
@@ -4129,21 +4140,23 @@ __qla84xx_chip_release(struct kref *kref)
4129} 4140}
4130 4141
4131void 4142void
4132qla84xx_put_chip(struct scsi_qla_host *ha) 4143qla84xx_put_chip(struct scsi_qla_host *vha)
4133{ 4144{
4145 struct qla_hw_data *ha = vha->hw;
4134 if (ha->cs84xx) 4146 if (ha->cs84xx)
4135 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release); 4147 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
4136} 4148}
4137 4149
4138static int 4150static int
4139qla84xx_init_chip(scsi_qla_host_t *ha) 4151qla84xx_init_chip(scsi_qla_host_t *vha)
4140{ 4152{
4141 int rval; 4153 int rval;
4142 uint16_t status[2]; 4154 uint16_t status[2];
4155 struct qla_hw_data *ha = vha->hw;
4143 4156
4144 mutex_lock(&ha->cs84xx->fw_update_mutex); 4157 mutex_lock(&ha->cs84xx->fw_update_mutex);
4145 4158
4146 rval = qla84xx_verify_chip(ha, status); 4159 rval = qla84xx_verify_chip(vha, status);
4147 4160
4148 mutex_unlock(&ha->cs84xx->fw_update_mutex); 4161 mutex_unlock(&ha->cs84xx->fw_update_mutex);
4149 4162
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index e90afad120ee..8ce354720680 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -32,21 +32,15 @@ qla2x00_debounce_register(volatile uint16_t __iomem *addr)
32} 32}
33 33
34static inline void 34static inline void
35qla2x00_poll(scsi_qla_host_t *ha) 35qla2x00_poll(struct rsp_que *rsp)
36{ 36{
37 unsigned long flags; 37 unsigned long flags;
38 38 struct qla_hw_data *ha = rsp->hw;
39 local_irq_save(flags); 39 local_irq_save(flags);
40 ha->isp_ops->intr_handler(0, ha); 40 ha->isp_ops->intr_handler(0, rsp);
41 local_irq_restore(flags); 41 local_irq_restore(flags);
42} 42}
43 43
44static __inline__ scsi_qla_host_t *
45to_qla_parent(scsi_qla_host_t *ha)
46{
47 return ha->parent ? ha->parent : ha;
48}
49
50/** 44/**
51 * qla2x00_issue_marker() - Issue a Marker IOCB if necessary. 45 * qla2x00_issue_marker() - Issue a Marker IOCB if necessary.
52 * @ha: HA context 46 * @ha: HA context
@@ -55,20 +49,20 @@ to_qla_parent(scsi_qla_host_t *ha)
55 * Returns non-zero if a failure occurred, else zero. 49 * Returns non-zero if a failure occurred, else zero.
56 */ 50 */
57static inline int 51static inline int
58qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked) 52qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
59{ 53{
60 /* Send marker if required */ 54 /* Send marker if required */
61 if (ha->marker_needed != 0) { 55 if (vha->marker_needed != 0) {
62 if (ha_locked) { 56 if (ha_locked) {
63 if (__qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != 57 if (__qla2x00_marker(vha, 0, 0, MK_SYNC_ALL) !=
64 QLA_SUCCESS) 58 QLA_SUCCESS)
65 return (QLA_FUNCTION_FAILED); 59 return (QLA_FUNCTION_FAILED);
66 } else { 60 } else {
67 if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != 61 if (qla2x00_marker(vha, 0, 0, MK_SYNC_ALL) !=
68 QLA_SUCCESS) 62 QLA_SUCCESS)
69 return (QLA_FUNCTION_FAILED); 63 return (QLA_FUNCTION_FAILED);
70 } 64 }
71 ha->marker_needed = 0; 65 vha->marker_needed = 0;
72 } 66 }
73 return (QLA_SUCCESS); 67 return (QLA_SUCCESS);
74} 68}
@@ -87,11 +81,12 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
87} 81}
88 82
89static inline int 83static inline int
90qla2x00_is_reserved_id(scsi_qla_host_t *ha, uint16_t loop_id) 84qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
91{ 85{
86 struct qla_hw_data *ha = vha->hw;
92 if (IS_FWI2_CAPABLE(ha)) 87 if (IS_FWI2_CAPABLE(ha))
93 return (loop_id > NPH_LAST_HANDLE); 88 return (loop_id > NPH_LAST_HANDLE);
94 89
95 return ((loop_id > ha->last_loop_id && loop_id < SNS_FIRST_LOOP_ID) || 90 return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
96 loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST); 91 loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
97}; 92};
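That closes the qla_inline.h changes: to_qla_parent() disappears because shared state is now reached through vha->hw, and qla2x00_poll() is keyed to a response queue rather than a host, matching the interrupt-registration change this commit makes. A sketch of the new call shape, assuming ha->rsp is the default response queue; example_flush_response_queue() is a made-up name:

	static void example_flush_response_queue(struct qla_hw_data *ha)
	{
		struct rsp_que *rsp = ha->rsp;	/* default response queue */

		/* Ends up in ha->isp_ops->intr_handler(0, rsp); the handler
		 * recovers the hardware data again through rsp->hw. */
		qla2x00_poll(rsp);
	}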
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 85bc0a48598b..0c145c9e0cd9 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,8 +11,8 @@
11 11
12#include <scsi/scsi_tcq.h> 12#include <scsi/scsi_tcq.h>
13 13
14static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha); 14static request_t *qla2x00_req_pkt(scsi_qla_host_t *);
15static void qla2x00_isp_cmd(scsi_qla_host_t *ha); 15static void qla2x00_isp_cmd(scsi_qla_host_t *);
16 16
17/** 17/**
18 * qla2x00_get_cmd_direction() - Determine control_flag data direction. 18 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -30,11 +30,11 @@ qla2x00_get_cmd_direction(srb_t *sp)
30 /* Set transfer direction */ 30 /* Set transfer direction */
31 if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) { 31 if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
32 cflags = CF_WRITE; 32 cflags = CF_WRITE;
33 sp->fcport->ha->qla_stats.output_bytes += 33 sp->fcport->vha->hw->qla_stats.output_bytes +=
34 scsi_bufflen(sp->cmd); 34 scsi_bufflen(sp->cmd);
35 } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) { 35 } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
36 cflags = CF_READ; 36 cflags = CF_READ;
37 sp->fcport->ha->qla_stats.input_bytes += 37 sp->fcport->vha->hw->qla_stats.input_bytes +=
38 scsi_bufflen(sp->cmd); 38 scsi_bufflen(sp->cmd);
39 } 39 }
40 return (cflags); 40 return (cflags);
@@ -91,20 +91,20 @@ qla2x00_calc_iocbs_64(uint16_t dsds)
91 * Returns a pointer to the Continuation Type 0 IOCB packet. 91 * Returns a pointer to the Continuation Type 0 IOCB packet.
92 */ 92 */
93static inline cont_entry_t * 93static inline cont_entry_t *
94qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha) 94qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *vha)
95{ 95{
96 cont_entry_t *cont_pkt; 96 cont_entry_t *cont_pkt;
97 97 struct req_que *req = vha->hw->req;
98 /* Adjust ring index. */ 98 /* Adjust ring index. */
99 ha->req_ring_index++; 99 req->ring_index++;
100 if (ha->req_ring_index == ha->request_q_length) { 100 if (req->ring_index == req->length) {
101 ha->req_ring_index = 0; 101 req->ring_index = 0;
102 ha->request_ring_ptr = ha->request_ring; 102 req->ring_ptr = req->ring;
103 } else { 103 } else {
104 ha->request_ring_ptr++; 104 req->ring_ptr++;
105 } 105 }
106 106
107 cont_pkt = (cont_entry_t *)ha->request_ring_ptr; 107 cont_pkt = (cont_entry_t *)req->ring_ptr;
108 108
109 /* Load packet defaults. */ 109 /* Load packet defaults. */
110 *((uint32_t *)(&cont_pkt->entry_type)) = 110 *((uint32_t *)(&cont_pkt->entry_type)) =
@@ -120,20 +120,21 @@ qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
120 * Returns a pointer to the continuation type 1 IOCB packet. 120 * Returns a pointer to the continuation type 1 IOCB packet.
121 */ 121 */
122static inline cont_a64_entry_t * 122static inline cont_a64_entry_t *
123qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha) 123qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
124{ 124{
125 cont_a64_entry_t *cont_pkt; 125 cont_a64_entry_t *cont_pkt;
126 struct req_que *req = vha->hw->req;
126 127
127 /* Adjust ring index. */ 128 /* Adjust ring index. */
128 ha->req_ring_index++; 129 req->ring_index++;
129 if (ha->req_ring_index == ha->request_q_length) { 130 if (req->ring_index == req->length) {
130 ha->req_ring_index = 0; 131 req->ring_index = 0;
131 ha->request_ring_ptr = ha->request_ring; 132 req->ring_ptr = req->ring;
132 } else { 133 } else {
133 ha->request_ring_ptr++; 134 req->ring_ptr++;
134 } 135 }
135 136
136 cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr; 137 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
137 138
138 /* Load packet defaults. */ 139 /* Load packet defaults. */
139 *((uint32_t *)(&cont_pkt->entry_type)) = 140 *((uint32_t *)(&cont_pkt->entry_type)) =
@@ -155,7 +156,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
155{ 156{
156 uint16_t avail_dsds; 157 uint16_t avail_dsds;
157 uint32_t *cur_dsd; 158 uint32_t *cur_dsd;
158 scsi_qla_host_t *ha; 159 scsi_qla_host_t *vha;
159 struct scsi_cmnd *cmd; 160 struct scsi_cmnd *cmd;
160 struct scatterlist *sg; 161 struct scatterlist *sg;
161 int i; 162 int i;
@@ -172,7 +173,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
172 return; 173 return;
173 } 174 }
174 175
175 ha = sp->ha; 176 vha = sp->vha;
176 177
177 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 178 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
178 179
@@ -190,7 +191,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
190 * Seven DSDs are available in the Continuation 191 * Seven DSDs are available in the Continuation
191 * Type 0 IOCB. 192 * Type 0 IOCB.
192 */ 193 */
193 cont_pkt = qla2x00_prep_cont_type0_iocb(ha); 194 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
194 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address; 195 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
195 avail_dsds = 7; 196 avail_dsds = 7;
196 } 197 }
@@ -214,7 +215,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
214{ 215{
215 uint16_t avail_dsds; 216 uint16_t avail_dsds;
216 uint32_t *cur_dsd; 217 uint32_t *cur_dsd;
217 scsi_qla_host_t *ha; 218 scsi_qla_host_t *vha;
218 struct scsi_cmnd *cmd; 219 struct scsi_cmnd *cmd;
219 struct scatterlist *sg; 220 struct scatterlist *sg;
220 int i; 221 int i;
@@ -231,7 +232,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
231 return; 232 return;
232 } 233 }
233 234
234 ha = sp->ha; 235 vha = sp->vha;
235 236
236 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 237 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
237 238
@@ -250,7 +251,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
250 * Five DSDs are available in the Continuation 251 * Five DSDs are available in the Continuation
251 * Type 1 IOCB. 252 * Type 1 IOCB.
252 */ 253 */
253 cont_pkt = qla2x00_prep_cont_type1_iocb(ha); 254 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
254 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 255 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
255 avail_dsds = 5; 256 avail_dsds = 5;
256 } 257 }
@@ -274,7 +275,7 @@ qla2x00_start_scsi(srb_t *sp)
274{ 275{
275 int ret, nseg; 276 int ret, nseg;
276 unsigned long flags; 277 unsigned long flags;
277 scsi_qla_host_t *ha; 278 scsi_qla_host_t *vha;
278 struct scsi_cmnd *cmd; 279 struct scsi_cmnd *cmd;
279 uint32_t *clr_ptr; 280 uint32_t *clr_ptr;
280 uint32_t index; 281 uint32_t index;
@@ -284,33 +285,36 @@ qla2x00_start_scsi(srb_t *sp)
284 uint16_t req_cnt; 285 uint16_t req_cnt;
285 uint16_t tot_dsds; 286 uint16_t tot_dsds;
286 struct device_reg_2xxx __iomem *reg; 287 struct device_reg_2xxx __iomem *reg;
288 struct qla_hw_data *ha;
289 struct req_que *req;
287 290
288 /* Setup device pointers. */ 291 /* Setup device pointers. */
289 ret = 0; 292 ret = 0;
290 ha = sp->ha; 293 vha = sp->vha;
294 ha = vha->hw;
291 reg = &ha->iobase->isp; 295 reg = &ha->iobase->isp;
292 cmd = sp->cmd; 296 cmd = sp->cmd;
297 req = ha->req;
293 /* So we know we haven't pci_map'ed anything yet */ 298 /* So we know we haven't pci_map'ed anything yet */
294 tot_dsds = 0; 299 tot_dsds = 0;
295 300
296 /* Send marker if required */ 301 /* Send marker if required */
297 if (ha->marker_needed != 0) { 302 if (vha->marker_needed != 0) {
298 if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { 303 if (qla2x00_marker(vha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
299 return (QLA_FUNCTION_FAILED); 304 return (QLA_FUNCTION_FAILED);
300 } 305 vha->marker_needed = 0;
301 ha->marker_needed = 0;
302 } 306 }
303 307
304 /* Acquire ring specific lock */ 308 /* Acquire ring specific lock */
305 spin_lock_irqsave(&ha->hardware_lock, flags); 309 spin_lock_irqsave(&ha->hardware_lock, flags);
306 310
307 /* Check for room in outstanding command list. */ 311 /* Check for room in outstanding command list. */
308 handle = ha->current_outstanding_cmd; 312 handle = req->current_outstanding_cmd;
309 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { 313 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
310 handle++; 314 handle++;
311 if (handle == MAX_OUTSTANDING_COMMANDS) 315 if (handle == MAX_OUTSTANDING_COMMANDS)
312 handle = 1; 316 handle = 1;
313 if (!ha->outstanding_cmds[handle]) 317 if (!req->outstanding_cmds[handle])
314 break; 318 break;
315 } 319 }
316 if (index == MAX_OUTSTANDING_COMMANDS) 320 if (index == MAX_OUTSTANDING_COMMANDS)
@@ -329,25 +333,25 @@ qla2x00_start_scsi(srb_t *sp)
329 333
330 /* Calculate the number of request entries needed. */ 334 /* Calculate the number of request entries needed. */
331 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds); 335 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
332 if (ha->req_q_cnt < (req_cnt + 2)) { 336 if (req->cnt < (req_cnt + 2)) {
333 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg)); 337 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
334 if (ha->req_ring_index < cnt) 338 if (req->ring_index < cnt)
335 ha->req_q_cnt = cnt - ha->req_ring_index; 339 req->cnt = cnt - req->ring_index;
336 else 340 else
337 ha->req_q_cnt = ha->request_q_length - 341 req->cnt = req->length -
338 (ha->req_ring_index - cnt); 342 (req->ring_index - cnt);
339 } 343 }
340 if (ha->req_q_cnt < (req_cnt + 2)) 344 if (req->cnt < (req_cnt + 2))
341 goto queuing_error; 345 goto queuing_error;
342 346
343 /* Build command packet */ 347 /* Build command packet */
344 ha->current_outstanding_cmd = handle; 348 req->current_outstanding_cmd = handle;
345 ha->outstanding_cmds[handle] = sp; 349 req->outstanding_cmds[handle] = sp;
346 sp->ha = ha; 350 sp->vha = vha;
347 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 351 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
348 ha->req_q_cnt -= req_cnt; 352 req->cnt -= req_cnt;
349 353
350 cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr; 354 cmd_pkt = (cmd_entry_t *)req->ring_ptr;
351 cmd_pkt->handle = handle; 355 cmd_pkt->handle = handle;
352 /* Zero out remaining portion of packet. */ 356 /* Zero out remaining portion of packet. */
353 clr_ptr = (uint32_t *)cmd_pkt + 2; 357 clr_ptr = (uint32_t *)cmd_pkt + 2;
@@ -373,23 +377,23 @@ qla2x00_start_scsi(srb_t *sp)
373 wmb(); 377 wmb();
374 378
375 /* Adjust ring index. */ 379 /* Adjust ring index. */
376 ha->req_ring_index++; 380 req->ring_index++;
377 if (ha->req_ring_index == ha->request_q_length) { 381 if (req->ring_index == req->length) {
378 ha->req_ring_index = 0; 382 req->ring_index = 0;
379 ha->request_ring_ptr = ha->request_ring; 383 req->ring_ptr = req->ring;
380 } else 384 } else
381 ha->request_ring_ptr++; 385 req->ring_ptr++;
382 386
383 sp->flags |= SRB_DMA_VALID; 387 sp->flags |= SRB_DMA_VALID;
384 388
385 /* Set chip new ring index. */ 389 /* Set chip new ring index. */
386 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index); 390 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
387 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */ 391 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
388 392
389 /* Manage unprocessed RIO/ZIO commands in response queue. */ 393 /* Manage unprocessed RIO/ZIO commands in response queue. */
390 if (ha->flags.process_response_queue && 394 if (vha->flags.process_response_queue &&
391 ha->response_ring_ptr->signature != RESPONSE_PROCESSED) 395 ha->rsp->ring_ptr->signature != RESPONSE_PROCESSED)
392 qla2x00_process_response_queue(ha); 396 qla2x00_process_response_queue(vha);
393 397
394 spin_unlock_irqrestore(&ha->hardware_lock, flags); 398 spin_unlock_irqrestore(&ha->hardware_lock, flags);
395 return (QLA_SUCCESS); 399 return (QLA_SUCCESS);
@@ -415,18 +419,19 @@ queuing_error:
415 * Returns non-zero if a failure occurred, else zero. 419 * Returns non-zero if a failure occurred, else zero.
416 */ 420 */
417int 421int
418__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun, 422__qla2x00_marker(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t lun,
419 uint8_t type) 423 uint8_t type)
420{ 424{
421 mrk_entry_t *mrk; 425 mrk_entry_t *mrk;
422 struct mrk_entry_24xx *mrk24; 426 struct mrk_entry_24xx *mrk24;
423 scsi_qla_host_t *pha = to_qla_parent(ha); 427 struct qla_hw_data *ha = vha->hw;
428 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
424 429
425 mrk24 = NULL; 430 mrk24 = NULL;
426 mrk = (mrk_entry_t *)qla2x00_req_pkt(pha); 431 mrk = (mrk_entry_t *)qla2x00_req_pkt(base_vha);
427 if (mrk == NULL) { 432 if (mrk == NULL) {
428 DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n", 433 DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
429 __func__, ha->host_no)); 434 __func__, base_vha->host_no));
430 435
431 return (QLA_FUNCTION_FAILED); 436 return (QLA_FUNCTION_FAILED);
432 } 437 }
@@ -440,7 +445,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
440 mrk24->lun[1] = LSB(lun); 445 mrk24->lun[1] = LSB(lun);
441 mrk24->lun[2] = MSB(lun); 446 mrk24->lun[2] = MSB(lun);
442 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); 447 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
443 mrk24->vp_index = ha->vp_idx; 448 mrk24->vp_index = vha->vp_idx;
444 } else { 449 } else {
445 SET_TARGET_ID(ha, mrk->target, loop_id); 450 SET_TARGET_ID(ha, mrk->target, loop_id);
446 mrk->lun = cpu_to_le16(lun); 451 mrk->lun = cpu_to_le16(lun);
@@ -448,22 +453,22 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
448 } 453 }
449 wmb(); 454 wmb();
450 455
451 qla2x00_isp_cmd(pha); 456 qla2x00_isp_cmd(base_vha);
452 457
453 return (QLA_SUCCESS); 458 return (QLA_SUCCESS);
454} 459}
455 460
456int 461int
457qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun, 462qla2x00_marker(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t lun,
458 uint8_t type) 463 uint8_t type)
459{ 464{
460 int ret; 465 int ret;
461 unsigned long flags = 0; 466 unsigned long flags = 0;
462 scsi_qla_host_t *pha = to_qla_parent(ha); 467 struct qla_hw_data *ha = vha->hw;
463 468
464 spin_lock_irqsave(&pha->hardware_lock, flags); 469 spin_lock_irqsave(&ha->hardware_lock, flags);
465 ret = __qla2x00_marker(ha, loop_id, lun, type); 470 ret = __qla2x00_marker(vha, loop_id, lun, type);
466 spin_unlock_irqrestore(&pha->hardware_lock, flags); 471 spin_unlock_irqrestore(&ha->hardware_lock, flags);
467 472
468 return (ret); 473 return (ret);
469} 474}
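The locked/unlocked marker split is unchanged; what changes is that the IOCB is built on the base host's request ring (base_vha from pci_get_drvdata) while the vp_index comes from the issuing vha. The start_scsi paths in this patch guard it the same way, roughly (caller-side sketch only):

	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}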
@@ -477,18 +482,20 @@ qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
477 * Returns NULL if function failed, else, a pointer to the request packet. 482 * Returns NULL if function failed, else, a pointer to the request packet.
478 */ 483 */
479static request_t * 484static request_t *
480qla2x00_req_pkt(scsi_qla_host_t *ha) 485qla2x00_req_pkt(scsi_qla_host_t *vha)
481{ 486{
487 struct qla_hw_data *ha = vha->hw;
482 device_reg_t __iomem *reg = ha->iobase; 488 device_reg_t __iomem *reg = ha->iobase;
483 request_t *pkt = NULL; 489 request_t *pkt = NULL;
484 uint16_t cnt; 490 uint16_t cnt;
485 uint32_t *dword_ptr; 491 uint32_t *dword_ptr;
486 uint32_t timer; 492 uint32_t timer;
487 uint16_t req_cnt = 1; 493 uint16_t req_cnt = 1;
494 struct req_que *req = ha->req;
488 495
489 /* Wait 1 second for slot. */ 496 /* Wait 1 second for slot. */
490 for (timer = HZ; timer; timer--) { 497 for (timer = HZ; timer; timer--) {
491 if ((req_cnt + 2) >= ha->req_q_cnt) { 498 if ((req_cnt + 2) >= req->cnt) {
492 /* Calculate number of free request entries. */ 499 /* Calculate number of free request entries. */
493 if (IS_FWI2_CAPABLE(ha)) 500 if (IS_FWI2_CAPABLE(ha))
494 cnt = (uint16_t)RD_REG_DWORD( 501 cnt = (uint16_t)RD_REG_DWORD(
@@ -496,16 +503,16 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
496 else 503 else
497 cnt = qla2x00_debounce_register( 504 cnt = qla2x00_debounce_register(
498 ISP_REQ_Q_OUT(ha, &reg->isp)); 505 ISP_REQ_Q_OUT(ha, &reg->isp));
499 if (ha->req_ring_index < cnt) 506 if (req->ring_index < cnt)
500 ha->req_q_cnt = cnt - ha->req_ring_index; 507 req->cnt = cnt - req->ring_index;
501 else 508 else
502 ha->req_q_cnt = ha->request_q_length - 509 req->cnt = req->length -
503 (ha->req_ring_index - cnt); 510 (req->ring_index - cnt);
504 } 511 }
505 /* If room for request in request ring. */ 512 /* If room for request in request ring. */
506 if ((req_cnt + 2) < ha->req_q_cnt) { 513 if ((req_cnt + 2) < req->cnt) {
507 ha->req_q_cnt--; 514 req->cnt--;
508 pkt = ha->request_ring_ptr; 515 pkt = req->ring_ptr;
509 516
510 /* Zero out packet. */ 517 /* Zero out packet. */
511 dword_ptr = (uint32_t *)pkt; 518 dword_ptr = (uint32_t *)pkt;
@@ -513,7 +520,7 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
513 *dword_ptr++ = 0; 520 *dword_ptr++ = 0;
514 521
515 /* Set system defined field. */ 522 /* Set system defined field. */
516 pkt->sys_define = (uint8_t)ha->req_ring_index; 523 pkt->sys_define = (uint8_t)req->ring_index;
517 524
518 /* Set entry count. */ 525 /* Set entry count. */
519 pkt->entry_count = 1; 526 pkt->entry_count = 1;
@@ -522,15 +529,14 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
522 } 529 }
523 530
524 /* Release ring specific lock */ 531 /* Release ring specific lock */
525 spin_unlock(&ha->hardware_lock); 532 spin_unlock_irq(&ha->hardware_lock);
526 533
527 udelay(2); /* 2 us */ 534 udelay(2); /* 2 us */
528 535
529 /* Check for pending interrupts. */ 536 /* Check for pending interrupts. */
530 /* During init we issue marker directly */ 537 /* During init we issue marker directly */
531 if (!ha->marker_needed && !ha->flags.init_done) 538 if (!vha->marker_needed && !vha->flags.init_done)
532 qla2x00_poll(ha); 539 qla2x00_poll(ha->rsp);
533
534 spin_lock_irq(&ha->hardware_lock); 540 spin_lock_irq(&ha->hardware_lock);
535 } 541 }
536 if (!pkt) { 542 if (!pkt) {
@@ -547,28 +553,30 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
547 * Note: The caller must hold the hardware lock before calling this routine. 553 * Note: The caller must hold the hardware lock before calling this routine.
548 */ 554 */
549static void 555static void
550qla2x00_isp_cmd(scsi_qla_host_t *ha) 556qla2x00_isp_cmd(scsi_qla_host_t *vha)
551{ 557{
558 struct qla_hw_data *ha = vha->hw;
552 device_reg_t __iomem *reg = ha->iobase; 559 device_reg_t __iomem *reg = ha->iobase;
560 struct req_que *req = ha->req;
553 561
554 DEBUG5(printk("%s(): IOCB data:\n", __func__)); 562 DEBUG5(printk("%s(): IOCB data:\n", __func__));
555 DEBUG5(qla2x00_dump_buffer( 563 DEBUG5(qla2x00_dump_buffer(
556 (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE)); 564 (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));
557 565
558 /* Adjust ring index. */ 566 /* Adjust ring index. */
559 ha->req_ring_index++; 567 req->ring_index++;
560 if (ha->req_ring_index == ha->request_q_length) { 568 if (req->ring_index == req->length) {
561 ha->req_ring_index = 0; 569 req->ring_index = 0;
562 ha->request_ring_ptr = ha->request_ring; 570 req->ring_ptr = req->ring;
563 } else 571 } else
564 ha->request_ring_ptr++; 572 req->ring_ptr++;
565 573
566 /* Set chip new ring index. */ 574 /* Set chip new ring index. */
567 if (IS_FWI2_CAPABLE(ha)) { 575 if (IS_FWI2_CAPABLE(ha)) {
568 WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index); 576 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
569 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in); 577 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
570 } else { 578 } else {
571 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index); 579 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), req->ring_index);
572 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp)); 580 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
573 } 581 }
574 582
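The bookkeeping above is the same circular-buffer advance every request path in this patch uses, only moved from the ha->req_ring_* fields onto the req_que. In isolation, and using the 24xx doorbell register as qla2x00_isp_cmd() does, it is roughly the following; example_ring_advance() is a made-up wrapper, not code from the patch:

	static void example_ring_advance(struct qla_hw_data *ha, struct req_que *req)
	{
		req->ring_index++;
		if (req->ring_index == req->length) {	/* wrap to the start of the ring */
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else {
			req->ring_ptr++;
		}

		/* Tell the ISP where the new in-pointer is and flush PCI posting. */
		WRT_REG_DWORD(&ha->iobase->isp24.req_q_in, req->ring_index);
		RD_REG_DWORD_RELAXED(&ha->iobase->isp24.req_q_in);
	}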
@@ -610,7 +618,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
610{ 618{
611 uint16_t avail_dsds; 619 uint16_t avail_dsds;
612 uint32_t *cur_dsd; 620 uint32_t *cur_dsd;
613 scsi_qla_host_t *ha; 621 scsi_qla_host_t *vha;
614 struct scsi_cmnd *cmd; 622 struct scsi_cmnd *cmd;
615 struct scatterlist *sg; 623 struct scatterlist *sg;
616 int i; 624 int i;
@@ -627,18 +635,18 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
627 return; 635 return;
628 } 636 }
629 637
630 ha = sp->ha; 638 vha = sp->vha;
631 639
632 /* Set transfer direction */ 640 /* Set transfer direction */
633 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 641 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
634 cmd_pkt->task_mgmt_flags = 642 cmd_pkt->task_mgmt_flags =
635 __constant_cpu_to_le16(TMF_WRITE_DATA); 643 __constant_cpu_to_le16(TMF_WRITE_DATA);
636 sp->fcport->ha->qla_stats.output_bytes += 644 sp->fcport->vha->hw->qla_stats.output_bytes +=
637 scsi_bufflen(sp->cmd); 645 scsi_bufflen(sp->cmd);
638 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 646 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
639 cmd_pkt->task_mgmt_flags = 647 cmd_pkt->task_mgmt_flags =
640 __constant_cpu_to_le16(TMF_READ_DATA); 648 __constant_cpu_to_le16(TMF_READ_DATA);
641 sp->fcport->ha->qla_stats.input_bytes += 649 sp->fcport->vha->hw->qla_stats.input_bytes +=
642 scsi_bufflen(sp->cmd); 650 scsi_bufflen(sp->cmd);
643 } 651 }
644 652
@@ -658,7 +666,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
658 * Five DSDs are available in the Continuation 666 * Five DSDs are available in the Continuation
659 * Type 1 IOCB. 667 * Type 1 IOCB.
660 */ 668 */
661 cont_pkt = qla2x00_prep_cont_type1_iocb(ha); 669 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
662 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 670 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
663 avail_dsds = 5; 671 avail_dsds = 5;
664 } 672 }
@@ -683,7 +691,7 @@ qla24xx_start_scsi(srb_t *sp)
683{ 691{
684 int ret, nseg; 692 int ret, nseg;
685 unsigned long flags; 693 unsigned long flags;
686 scsi_qla_host_t *ha, *pha; 694 scsi_qla_host_t *vha;
687 struct scsi_cmnd *cmd; 695 struct scsi_cmnd *cmd;
688 uint32_t *clr_ptr; 696 uint32_t *clr_ptr;
689 uint32_t index; 697 uint32_t index;
@@ -693,34 +701,36 @@ qla24xx_start_scsi(srb_t *sp)
693 uint16_t req_cnt; 701 uint16_t req_cnt;
694 uint16_t tot_dsds; 702 uint16_t tot_dsds;
695 struct device_reg_24xx __iomem *reg; 703 struct device_reg_24xx __iomem *reg;
704 struct qla_hw_data *ha;
705 struct req_que *req;
696 706
697 /* Setup device pointers. */ 707 /* Setup device pointers. */
698 ret = 0; 708 ret = 0;
699 ha = sp->ha; 709 vha = sp->vha;
700 pha = to_qla_parent(ha); 710 ha = vha->hw;
701 reg = &ha->iobase->isp24; 711 reg = &ha->iobase->isp24;
702 cmd = sp->cmd; 712 cmd = sp->cmd;
713 req = ha->req;
703 /* So we know we haven't pci_map'ed anything yet */ 714 /* So we know we haven't pci_map'ed anything yet */
704 tot_dsds = 0; 715 tot_dsds = 0;
705 716
706 /* Send marker if required */ 717 /* Send marker if required */
707 if (ha->marker_needed != 0) { 718 if (vha->marker_needed != 0) {
708 if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { 719 if (qla2x00_marker(vha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
709 return QLA_FUNCTION_FAILED; 720 return QLA_FUNCTION_FAILED;
710 } 721 vha->marker_needed = 0;
711 ha->marker_needed = 0;
712 } 722 }
713 723
714 /* Acquire ring specific lock */ 724 /* Acquire ring specific lock */
715 spin_lock_irqsave(&pha->hardware_lock, flags); 725 spin_lock_irqsave(&ha->hardware_lock, flags);
716 726
717 /* Check for room in outstanding command list. */ 727 /* Check for room in outstanding command list. */
718 handle = ha->current_outstanding_cmd; 728 handle = req->current_outstanding_cmd;
719 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { 729 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
720 handle++; 730 handle++;
721 if (handle == MAX_OUTSTANDING_COMMANDS) 731 if (handle == MAX_OUTSTANDING_COMMANDS)
722 handle = 1; 732 handle = 1;
723 if (!ha->outstanding_cmds[handle]) 733 if (!req->outstanding_cmds[handle])
724 break; 734 break;
725 } 735 }
726 if (index == MAX_OUTSTANDING_COMMANDS) 736 if (index == MAX_OUTSTANDING_COMMANDS)
@@ -738,25 +748,25 @@ qla24xx_start_scsi(srb_t *sp)
738 tot_dsds = nseg; 748 tot_dsds = nseg;
739 749
740 req_cnt = qla24xx_calc_iocbs(tot_dsds); 750 req_cnt = qla24xx_calc_iocbs(tot_dsds);
741 if (ha->req_q_cnt < (req_cnt + 2)) { 751 if (req->cnt < (req_cnt + 2)) {
742 cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out); 752 cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
743 if (ha->req_ring_index < cnt) 753 if (req->ring_index < cnt)
744 ha->req_q_cnt = cnt - ha->req_ring_index; 754 req->cnt = cnt - req->ring_index;
745 else 755 else
746 ha->req_q_cnt = ha->request_q_length - 756 req->cnt = req->length -
747 (ha->req_ring_index - cnt); 757 (req->ring_index - cnt);
748 } 758 }
749 if (ha->req_q_cnt < (req_cnt + 2)) 759 if (req->cnt < (req_cnt + 2))
750 goto queuing_error; 760 goto queuing_error;
751 761
752 /* Build command packet. */ 762 /* Build command packet. */
753 ha->current_outstanding_cmd = handle; 763 req->current_outstanding_cmd = handle;
754 ha->outstanding_cmds[handle] = sp; 764 req->outstanding_cmds[handle] = sp;
755 sp->ha = ha; 765 sp->vha = vha;
756 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 766 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
757 ha->req_q_cnt -= req_cnt; 767 req->cnt -= req_cnt;
758 768
759 cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr; 769 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
760 cmd_pkt->handle = handle; 770 cmd_pkt->handle = handle;
761 771
762 /* Zero out remaining portion of packet. */ 772 /* Zero out remaining portion of packet. */
@@ -789,32 +799,32 @@ qla24xx_start_scsi(srb_t *sp)
789 wmb(); 799 wmb();
790 800
791 /* Adjust ring index. */ 801 /* Adjust ring index. */
792 ha->req_ring_index++; 802 req->ring_index++;
793 if (ha->req_ring_index == ha->request_q_length) { 803 if (req->ring_index == req->length) {
794 ha->req_ring_index = 0; 804 req->ring_index = 0;
795 ha->request_ring_ptr = ha->request_ring; 805 req->ring_ptr = req->ring;
796 } else 806 } else
797 ha->request_ring_ptr++; 807 req->ring_ptr++;
798 808
799 sp->flags |= SRB_DMA_VALID; 809 sp->flags |= SRB_DMA_VALID;
800 810
801 /* Set chip new ring index. */ 811 /* Set chip new ring index. */
802 WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index); 812 WRT_REG_DWORD(&reg->req_q_in, req->ring_index);
803 RD_REG_DWORD_RELAXED(&reg->req_q_in); /* PCI Posting. */ 813 RD_REG_DWORD_RELAXED(&reg->req_q_in); /* PCI Posting. */
804 814
805 /* Manage unprocessed RIO/ZIO commands in response queue. */ 815 /* Manage unprocessed RIO/ZIO commands in response queue. */
806 if (ha->flags.process_response_queue && 816 if (vha->flags.process_response_queue &&
807 ha->response_ring_ptr->signature != RESPONSE_PROCESSED) 817 ha->rsp->ring_ptr->signature != RESPONSE_PROCESSED)
808 qla24xx_process_response_queue(ha); 818 qla24xx_process_response_queue(vha);
809 819
810 spin_unlock_irqrestore(&pha->hardware_lock, flags); 820 spin_unlock_irqrestore(&ha->hardware_lock, flags);
811 return QLA_SUCCESS; 821 return QLA_SUCCESS;
812 822
813queuing_error: 823queuing_error:
814 if (tot_dsds) 824 if (tot_dsds)
815 scsi_dma_unmap(cmd); 825 scsi_dma_unmap(cmd);
816 826
817 spin_unlock_irqrestore(&pha->hardware_lock, flags); 827 spin_unlock_irqrestore(&ha->hardware_lock, flags);
818 828
819 return QLA_FUNCTION_FAILED; 829 return QLA_FUNCTION_FAILED;
820} 830}
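
(Illustrative aside, not part of the patch.) The qla24xx_start_scsi hunks above move all submission bookkeeping onto the request queue: the free-handle search walks req->outstanding_cmds and the ring-space check is recomputed from req->ring_index and req->length. The standalone C sketch below condenses that logic with simplified types; the struct layout, the array size, and the helper names are assumptions made only for illustration.

/* Simplified model of the per-queue bookkeeping above; field names
 * follow the patch, sizes and helper names are hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define MAX_OUTSTANDING_COMMANDS 1024

struct req_que {
	uint32_t current_outstanding_cmd;       /* last handle handed out */
	void *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
	uint16_t cnt;                           /* free slots left in the ring */
	uint16_t ring_index;                    /* driver's producer index */
	uint16_t length;                        /* total ring entries */
};

/* Mirror of the handle-search loop: scan circularly, skip slot 0,
 * return 0 when every slot is busy. */
static uint32_t find_free_handle(struct req_que *req)
{
	uint32_t handle = req->current_outstanding_cmd;
	uint32_t index;

	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			return handle;
	}
	return 0;
}

/* Mirror of the ring-space recalculation: 'out' is the consumer index
 * the firmware last reported (req_q_out in the real driver). */
static void refresh_ring_space(struct req_que *req, uint16_t out)
{
	if (req->ring_index < out)
		req->cnt = out - req->ring_index;
	else
		req->cnt = req->length - (req->ring_index - out);
}

int main(void)
{
	struct req_que req = { .ring_index = 10, .length = 128 };

	refresh_ring_space(&req, 4);
	printf("free handle %u, ring space %u\n",
	    find_free_handle(&req), (unsigned int)req.cnt);
	return 0;
}

Under these assumptions the program reports handle 1 and 122 free ring entries, the same arithmetic the driver performs before building the command packet.
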
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index a76efd99d007..89d327117aa8 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -14,6 +14,7 @@ static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t);
14static void qla2x00_status_entry(scsi_qla_host_t *, void *); 14static void qla2x00_status_entry(scsi_qla_host_t *, void *);
15static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); 15static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
16static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *); 16static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
17static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
17 18
18/** 19/**
19 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 20 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -27,24 +28,28 @@ static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
27irqreturn_t 28irqreturn_t
28qla2100_intr_handler(int irq, void *dev_id) 29qla2100_intr_handler(int irq, void *dev_id)
29{ 30{
30 scsi_qla_host_t *ha; 31 scsi_qla_host_t *vha;
32 struct qla_hw_data *ha;
31 struct device_reg_2xxx __iomem *reg; 33 struct device_reg_2xxx __iomem *reg;
32 int status; 34 int status;
33 unsigned long iter; 35 unsigned long iter;
34 uint16_t hccr; 36 uint16_t hccr;
35 uint16_t mb[4]; 37 uint16_t mb[4];
38 struct rsp_que *rsp;
36 39
37 ha = (scsi_qla_host_t *) dev_id; 40 rsp = (struct rsp_que *) dev_id;
38 if (!ha) { 41 if (!rsp) {
39 printk(KERN_INFO 42 printk(KERN_INFO
40 "%s(): NULL host pointer\n", __func__); 43 "%s(): NULL response queue pointer\n", __func__);
41 return (IRQ_NONE); 44 return (IRQ_NONE);
42 } 45 }
43 46
47 ha = rsp->hw;
44 reg = &ha->iobase->isp; 48 reg = &ha->iobase->isp;
45 status = 0; 49 status = 0;
46 50
47 spin_lock(&ha->hardware_lock); 51 spin_lock(&ha->hardware_lock);
52 vha = qla2x00_get_rsp_host(rsp);
48 for (iter = 50; iter--; ) { 53 for (iter = 50; iter--; ) {
49 hccr = RD_REG_WORD(&reg->hccr); 54 hccr = RD_REG_WORD(&reg->hccr);
50 if (hccr & HCCR_RISC_PAUSE) { 55 if (hccr & HCCR_RISC_PAUSE) {
@@ -59,8 +64,8 @@ qla2100_intr_handler(int irq, void *dev_id)
59 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 64 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
60 RD_REG_WORD(&reg->hccr); 65 RD_REG_WORD(&reg->hccr);
61 66
62 ha->isp_ops->fw_dump(ha, 1); 67 ha->isp_ops->fw_dump(vha, 1);
63 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 68 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
64 break; 69 break;
65 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0) 70 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
66 break; 71 break;
@@ -72,24 +77,24 @@ qla2100_intr_handler(int irq, void *dev_id)
72 /* Get mailbox data. */ 77 /* Get mailbox data. */
73 mb[0] = RD_MAILBOX_REG(ha, reg, 0); 78 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
74 if (mb[0] > 0x3fff && mb[0] < 0x8000) { 79 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
75 qla2x00_mbx_completion(ha, mb[0]); 80 qla2x00_mbx_completion(vha, mb[0]);
76 status |= MBX_INTERRUPT; 81 status |= MBX_INTERRUPT;
77 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { 82 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
78 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 83 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
79 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 84 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
80 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 85 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
81 qla2x00_async_event(ha, mb); 86 qla2x00_async_event(vha, mb);
82 } else { 87 } else {
83 /*EMPTY*/ 88 /*EMPTY*/
84 DEBUG2(printk("scsi(%ld): Unrecognized " 89 DEBUG2(printk("scsi(%ld): Unrecognized "
85 "interrupt type (%d).\n", 90 "interrupt type (%d).\n",
86 ha->host_no, mb[0])); 91 vha->host_no, mb[0]));
87 } 92 }
88 /* Release mailbox registers. */ 93 /* Release mailbox registers. */
89 WRT_REG_WORD(&reg->semaphore, 0); 94 WRT_REG_WORD(&reg->semaphore, 0);
90 RD_REG_WORD(&reg->semaphore); 95 RD_REG_WORD(&reg->semaphore);
91 } else { 96 } else {
92 qla2x00_process_response_queue(ha); 97 qla2x00_process_response_queue(vha);
93 98
94 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 99 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
95 RD_REG_WORD(&reg->hccr); 100 RD_REG_WORD(&reg->hccr);
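
(Illustrative aside, not part of the patch.) From here on, every interrupt handler in qla_isr.c follows the same conversion: the registered dev_id is a struct rsp_que pointer, the shared hardware context comes from rsp->hw, and the owning host is recovered through qla2x00_get_rsp_host(). The sketch below restates that skeleton as userspace C; the types and the get_rsp_host stub are simplified stand-ins and no real register access is modeled.

/* Condensed, hypothetical skeleton of the new dev_id convention;
 * types are stand-ins, not the driver's real definitions. */
#include <stdio.h>

typedef enum { IRQ_NONE, IRQ_HANDLED } irqreturn_t;

struct qla_hw_data { int hw_state; };
struct scsi_qla_host { unsigned long host_no; };

struct rsp_que {
	struct qla_hw_data *hw;          /* shared, adapter-wide state */
	struct scsi_qla_host *base_vha;  /* illustrative back-pointer */
};

/* Stand-in for qla2x00_get_rsp_host(): map a response queue back to
 * the (virtual) host that owns it. */
static struct scsi_qla_host *get_rsp_host(struct rsp_que *rsp)
{
	return rsp->base_vha;
}

static irqreturn_t intr_handler(int irq, void *dev_id)
{
	struct rsp_que *rsp = dev_id;
	struct qla_hw_data *ha;
	struct scsi_qla_host *vha;

	if (!rsp) {
		printf("%s: NULL response queue pointer\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;                 /* hardware-wide context */
	vha = get_rsp_host(rsp);      /* per-host, vport-aware context */
	(void)ha; (void)vha; (void)irq;
	/* ... poll status, dispatch mailbox/async/response work ... */
	return IRQ_HANDLED;
}

int main(void)
{
	struct qla_hw_data hw = { 0 };
	struct scsi_qla_host host = { .host_no = 0 };
	struct rsp_que rsp = { .hw = &hw, .base_vha = &host };

	return intr_handler(0, &rsp) == IRQ_HANDLED ? 0 : 1;
}
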
@@ -118,25 +123,29 @@ qla2100_intr_handler(int irq, void *dev_id)
118irqreturn_t 123irqreturn_t
119qla2300_intr_handler(int irq, void *dev_id) 124qla2300_intr_handler(int irq, void *dev_id)
120{ 125{
121 scsi_qla_host_t *ha; 126 scsi_qla_host_t *vha;
122 struct device_reg_2xxx __iomem *reg; 127 struct device_reg_2xxx __iomem *reg;
123 int status; 128 int status;
124 unsigned long iter; 129 unsigned long iter;
125 uint32_t stat; 130 uint32_t stat;
126 uint16_t hccr; 131 uint16_t hccr;
127 uint16_t mb[4]; 132 uint16_t mb[4];
133 struct rsp_que *rsp;
134 struct qla_hw_data *ha;
128 135
129 ha = (scsi_qla_host_t *) dev_id; 136 rsp = (struct rsp_que *) dev_id;
130 if (!ha) { 137 if (!rsp) {
131 printk(KERN_INFO 138 printk(KERN_INFO
132 "%s(): NULL host pointer\n", __func__); 139 "%s(): NULL response queue pointer\n", __func__);
133 return (IRQ_NONE); 140 return (IRQ_NONE);
134 } 141 }
135 142
143 ha = rsp->hw;
136 reg = &ha->iobase->isp; 144 reg = &ha->iobase->isp;
137 status = 0; 145 status = 0;
138 146
139 spin_lock(&ha->hardware_lock); 147 spin_lock(&ha->hardware_lock);
148 vha = qla2x00_get_rsp_host(rsp);
140 for (iter = 50; iter--; ) { 149 for (iter = 50; iter--; ) {
141 stat = RD_REG_DWORD(&reg->u.isp2300.host_status); 150 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
142 if (stat & HSR_RISC_PAUSED) { 151 if (stat & HSR_RISC_PAUSED) {
@@ -159,8 +168,8 @@ qla2300_intr_handler(int irq, void *dev_id)
159 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 168 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
160 RD_REG_WORD(&reg->hccr); 169 RD_REG_WORD(&reg->hccr);
161 170
162 ha->isp_ops->fw_dump(ha, 1); 171 ha->isp_ops->fw_dump(vha, 1);
163 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 172 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
164 break; 173 break;
165 } else if ((stat & HSR_RISC_INT) == 0) 174 } else if ((stat & HSR_RISC_INT) == 0)
166 break; 175 break;
@@ -170,7 +179,7 @@ qla2300_intr_handler(int irq, void *dev_id)
170 case 0x2: 179 case 0x2:
171 case 0x10: 180 case 0x10:
172 case 0x11: 181 case 0x11:
173 qla2x00_mbx_completion(ha, MSW(stat)); 182 qla2x00_mbx_completion(vha, MSW(stat));
174 status |= MBX_INTERRUPT; 183 status |= MBX_INTERRUPT;
175 184
176 /* Release mailbox registers. */ 185 /* Release mailbox registers. */
@@ -181,26 +190,26 @@ qla2300_intr_handler(int irq, void *dev_id)
181 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 190 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
182 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 191 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
183 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 192 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
184 qla2x00_async_event(ha, mb); 193 qla2x00_async_event(vha, mb);
185 break; 194 break;
186 case 0x13: 195 case 0x13:
187 qla2x00_process_response_queue(ha); 196 qla2x00_process_response_queue(vha);
188 break; 197 break;
189 case 0x15: 198 case 0x15:
190 mb[0] = MBA_CMPLT_1_16BIT; 199 mb[0] = MBA_CMPLT_1_16BIT;
191 mb[1] = MSW(stat); 200 mb[1] = MSW(stat);
192 qla2x00_async_event(ha, mb); 201 qla2x00_async_event(vha, mb);
193 break; 202 break;
194 case 0x16: 203 case 0x16:
195 mb[0] = MBA_SCSI_COMPLETION; 204 mb[0] = MBA_SCSI_COMPLETION;
196 mb[1] = MSW(stat); 205 mb[1] = MSW(stat);
197 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 206 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
198 qla2x00_async_event(ha, mb); 207 qla2x00_async_event(vha, mb);
199 break; 208 break;
200 default: 209 default:
201 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 210 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
202 "(%d).\n", 211 "(%d).\n",
203 ha->host_no, stat & 0xff)); 212 vha->host_no, stat & 0xff));
204 break; 213 break;
205 } 214 }
206 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 215 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
@@ -223,10 +232,11 @@ qla2300_intr_handler(int irq, void *dev_id)
223 * @mb0: Mailbox0 register 232 * @mb0: Mailbox0 register
224 */ 233 */
225static void 234static void
226qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0) 235qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
227{ 236{
228 uint16_t cnt; 237 uint16_t cnt;
229 uint16_t __iomem *wptr; 238 uint16_t __iomem *wptr;
239 struct qla_hw_data *ha = vha->hw;
230 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 240 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
231 241
232 /* Load return mailbox registers. */ 242 /* Load return mailbox registers. */
@@ -247,10 +257,10 @@ qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
247 257
248 if (ha->mcp) { 258 if (ha->mcp) {
249 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", 259 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
250 __func__, ha->host_no, ha->mcp->mb[0])); 260 __func__, vha->host_no, ha->mcp->mb[0]));
251 } else { 261 } else {
252 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", 262 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
253 __func__, ha->host_no)); 263 __func__, vha->host_no));
254 } 264 }
255} 265}
256 266
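
(Illustrative aside, not part of the patch.) qla2x00_mbx_completion shows the access pattern that recurs in every converted routine: per-host fields stay on scsi_qla_host (vha), while mailbox and register state lives on qla_hw_data and is reached through vha->hw. A minimal sketch of that split follows; the struct bodies are placeholders.

/* Minimal sketch of the vha->hw split; struct bodies are placeholders. */
#include <stdint.h>
#include <stdio.h>

struct qla_hw_data {
	uint16_t mailbox_out[32];   /* hardware-wide mailbox snapshot */
	void *mcp;                  /* in-flight mailbox command, if any */
};

struct scsi_qla_host {
	unsigned long host_no;      /* per-(virtual-)host identity */
	struct qla_hw_data *hw;     /* shared hardware context */
};

static void mbx_completion(struct scsi_qla_host *vha, uint16_t mb0)
{
	struct qla_hw_data *ha = vha->hw;   /* the split used by the patch */

	ha->mailbox_out[0] = mb0;           /* hardware-wide state on ha */
	if (!ha->mcp)
		printf("scsi(%lu): MBX pointer ERROR!\n", vha->host_no);
}

int main(void)
{
	struct qla_hw_data hw = { .mcp = NULL };
	struct scsi_qla_host vha = { .host_no = 0, .hw = &hw };

	mbx_completion(&vha, 0x4000);   /* no mailbox command pending */
	return 0;
}
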
@@ -260,7 +270,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
260 * @mb: Mailbox registers (0 - 3) 270 * @mb: Mailbox registers (0 - 3)
261 */ 271 */
262void 272void
263qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) 273qla2x00_async_event(scsi_qla_host_t *vha, uint16_t *mb)
264{ 274{
265#define LS_UNKNOWN 2 275#define LS_UNKNOWN 2
266 static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; 276 static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
@@ -268,6 +278,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
268 uint16_t handle_cnt; 278 uint16_t handle_cnt;
269 uint16_t cnt; 279 uint16_t cnt;
270 uint32_t handles[5]; 280 uint32_t handles[5];
281 struct qla_hw_data *ha = vha->hw;
271 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 282 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
272 uint32_t rscn_entry, host_pid; 283 uint32_t rscn_entry, host_pid;
273 uint8_t rscn_queue_index; 284 uint8_t rscn_queue_index;
@@ -329,17 +340,18 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
329 340
330 switch (mb[0]) { 341 switch (mb[0]) {
331 case MBA_SCSI_COMPLETION: /* Fast Post */ 342 case MBA_SCSI_COMPLETION: /* Fast Post */
332 if (!ha->flags.online) 343 if (!vha->flags.online)
333 break; 344 break;
334 345
335 for (cnt = 0; cnt < handle_cnt; cnt++) 346 for (cnt = 0; cnt < handle_cnt; cnt++)
336 qla2x00_process_completed_request(ha, handles[cnt]); 347 qla2x00_process_completed_request(vha, handles[cnt]);
337 break; 348 break;
338 349
339 case MBA_RESET: /* Reset */ 350 case MBA_RESET: /* Reset */
340 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no)); 351 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
352 vha->host_no));
341 353
342 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 354 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
343 break; 355 break;
344 356
345 case MBA_SYSTEM_ERR: /* System Error */ 357 case MBA_SYSTEM_ERR: /* System Error */
@@ -347,70 +359,70 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
347 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", 359 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
348 mb[1], mb[2], mb[3]); 360 mb[1], mb[2], mb[3]);
349 361
350 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); 362 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
351 ha->isp_ops->fw_dump(ha, 1); 363 ha->isp_ops->fw_dump(vha, 1);
352 364
353 if (IS_FWI2_CAPABLE(ha)) { 365 if (IS_FWI2_CAPABLE(ha)) {
354 if (mb[1] == 0 && mb[2] == 0) { 366 if (mb[1] == 0 && mb[2] == 0) {
355 qla_printk(KERN_ERR, ha, 367 qla_printk(KERN_ERR, ha,
356 "Unrecoverable Hardware Error: adapter " 368 "Unrecoverable Hardware Error: adapter "
357 "marked OFFLINE!\n"); 369 "marked OFFLINE!\n");
358 ha->flags.online = 0; 370 vha->flags.online = 0;
359 } else 371 } else
360 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 372 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
361 } else if (mb[1] == 0) { 373 } else if (mb[1] == 0) {
362 qla_printk(KERN_INFO, ha, 374 qla_printk(KERN_INFO, ha,
363 "Unrecoverable Hardware Error: adapter marked " 375 "Unrecoverable Hardware Error: adapter marked "
364 "OFFLINE!\n"); 376 "OFFLINE!\n");
365 ha->flags.online = 0; 377 vha->flags.online = 0;
366 } else 378 } else
367 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 379 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
368 break; 380 break;
369 381
370 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 382 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
371 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n", 383 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
372 ha->host_no)); 384 vha->host_no));
373 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n"); 385 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
374 386
375 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); 387 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
376 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 388 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
377 break; 389 break;
378 390
379 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 391 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
380 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n", 392 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
381 ha->host_no)); 393 vha->host_no));
382 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n"); 394 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
383 395
384 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); 396 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
385 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 397 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
386 break; 398 break;
387 399
388 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 400 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
389 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n", 401 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
390 ha->host_no)); 402 vha->host_no));
391 break; 403 break;
392 404
393 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 405 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
394 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no, 406 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
395 mb[1])); 407 mb[1]));
396 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]); 408 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
397 409
398 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 410 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
399 atomic_set(&ha->loop_state, LOOP_DOWN); 411 atomic_set(&vha->loop_state, LOOP_DOWN);
400 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 412 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
401 qla2x00_mark_all_devices_lost(ha, 1); 413 qla2x00_mark_all_devices_lost(vha, 1);
402 } 414 }
403 415
404 if (ha->parent) { 416 if (vha->vp_idx) {
405 atomic_set(&ha->vp_state, VP_FAILED); 417 atomic_set(&vha->vp_state, VP_FAILED);
406 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 418 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
407 } 419 }
408 420
409 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); 421 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
410 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 422 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
411 423
412 ha->flags.management_server_logged_in = 0; 424 vha->flags.management_server_logged_in = 0;
413 qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]); 425 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
414 break; 426 break;
415 427
416 case MBA_LOOP_UP: /* Loop Up Event */ 428 case MBA_LOOP_UP: /* Loop Up Event */
@@ -425,59 +437,59 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
425 } 437 }
426 438
427 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n", 439 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
428 ha->host_no, link_speed)); 440 vha->host_no, link_speed));
429 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n", 441 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
430 link_speed); 442 link_speed);
431 443
432 ha->flags.management_server_logged_in = 0; 444 vha->flags.management_server_logged_in = 0;
433 qla2x00_post_aen_work(ha, FCH_EVT_LINKUP, ha->link_data_rate); 445 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
434 break; 446 break;
435 447
436 case MBA_LOOP_DOWN: /* Loop Down Event */ 448 case MBA_LOOP_DOWN: /* Loop Down Event */
437 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " 449 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
438 "(%x %x %x).\n", ha->host_no, mb[1], mb[2], mb[3])); 450 "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
439 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n", 451 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
440 mb[1], mb[2], mb[3]); 452 mb[1], mb[2], mb[3]);
441 453
442 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 454 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
443 atomic_set(&ha->loop_state, LOOP_DOWN); 455 atomic_set(&vha->loop_state, LOOP_DOWN);
444 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 456 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
445 ha->device_flags |= DFLG_NO_CABLE; 457 vha->device_flags |= DFLG_NO_CABLE;
446 qla2x00_mark_all_devices_lost(ha, 1); 458 qla2x00_mark_all_devices_lost(vha, 1);
447 } 459 }
448 460
449 if (ha->parent) { 461 if (vha->vp_idx) {
450 atomic_set(&ha->vp_state, VP_FAILED); 462 atomic_set(&vha->vp_state, VP_FAILED);
451 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 463 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
452 } 464 }
453 465
454 ha->flags.management_server_logged_in = 0; 466 vha->flags.management_server_logged_in = 0;
455 ha->link_data_rate = PORT_SPEED_UNKNOWN; 467 ha->link_data_rate = PORT_SPEED_UNKNOWN;
456 qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0); 468 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
457 break; 469 break;
458 470
459 case MBA_LIP_RESET: /* LIP reset occurred */ 471 case MBA_LIP_RESET: /* LIP reset occurred */
460 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", 472 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
461 ha->host_no, mb[1])); 473 vha->host_no, mb[1]));
462 qla_printk(KERN_INFO, ha, 474 qla_printk(KERN_INFO, ha,
463 "LIP reset occurred (%x).\n", mb[1]); 475 "LIP reset occurred (%x).\n", mb[1]);
464 476
465 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 477 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
466 atomic_set(&ha->loop_state, LOOP_DOWN); 478 atomic_set(&vha->loop_state, LOOP_DOWN);
467 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 479 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
468 qla2x00_mark_all_devices_lost(ha, 1); 480 qla2x00_mark_all_devices_lost(vha, 1);
469 } 481 }
470 482
471 if (ha->parent) { 483 if (vha->vp_idx) {
472 atomic_set(&ha->vp_state, VP_FAILED); 484 atomic_set(&vha->vp_state, VP_FAILED);
473 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 485 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
474 } 486 }
475 487
476 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 488 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
477 489
478 ha->operating_mode = LOOP; 490 ha->operating_mode = LOOP;
479 ha->flags.management_server_logged_in = 0; 491 vha->flags.management_server_logged_in = 0;
480 qla2x00_post_aen_work(ha, FCH_EVT_LIPRESET, mb[1]); 492 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
481 break; 493 break;
482 494
483 case MBA_POINT_TO_POINT: /* Point-to-Point */ 495 case MBA_POINT_TO_POINT: /* Point-to-Point */
@@ -485,33 +497,33 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
485 break; 497 break;
486 498
487 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n", 499 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
488 ha->host_no)); 500 vha->host_no));
489 501
490 /* 502 /*
491 * Until there's a transition from loop down to loop up, treat 503 * Until there's a transition from loop down to loop up, treat
492 * this as loop down only. 504 * this as loop down only.
493 */ 505 */
494 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 506 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
495 atomic_set(&ha->loop_state, LOOP_DOWN); 507 atomic_set(&vha->loop_state, LOOP_DOWN);
496 if (!atomic_read(&ha->loop_down_timer)) 508 if (!atomic_read(&vha->loop_down_timer))
497 atomic_set(&ha->loop_down_timer, 509 atomic_set(&vha->loop_down_timer,
498 LOOP_DOWN_TIME); 510 LOOP_DOWN_TIME);
499 qla2x00_mark_all_devices_lost(ha, 1); 511 qla2x00_mark_all_devices_lost(vha, 1);
500 } 512 }
501 513
502 if (ha->parent) { 514 if (vha->vp_idx) {
503 atomic_set(&ha->vp_state, VP_FAILED); 515 atomic_set(&vha->vp_state, VP_FAILED);
504 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 516 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
505 } 517 }
506 518
507 if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) { 519 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
508 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 520 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
509 } 521
510 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); 522 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
511 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 523 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
512 524
513 ha->flags.gpsc_supported = 1; 525 ha->flags.gpsc_supported = 1;
514 ha->flags.management_server_logged_in = 0; 526 vha->flags.management_server_logged_in = 0;
515 break; 527 break;
516 528
517 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 529 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
@@ -520,25 +532,25 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
520 532
521 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection " 533 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
522 "received.\n", 534 "received.\n",
523 ha->host_no)); 535 vha->host_no));
524 qla_printk(KERN_INFO, ha, 536 qla_printk(KERN_INFO, ha,
525 "Configuration change detected: value=%x.\n", mb[1]); 537 "Configuration change detected: value=%x.\n", mb[1]);
526 538
527 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 539 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
528 atomic_set(&ha->loop_state, LOOP_DOWN); 540 atomic_set(&vha->loop_state, LOOP_DOWN);
529 if (!atomic_read(&ha->loop_down_timer)) 541 if (!atomic_read(&vha->loop_down_timer))
530 atomic_set(&ha->loop_down_timer, 542 atomic_set(&vha->loop_down_timer,
531 LOOP_DOWN_TIME); 543 LOOP_DOWN_TIME);
532 qla2x00_mark_all_devices_lost(ha, 1); 544 qla2x00_mark_all_devices_lost(vha, 1);
533 } 545 }
534 546
535 if (ha->parent) { 547 if (vha->vp_idx) {
536 atomic_set(&ha->vp_state, VP_FAILED); 548 atomic_set(&vha->vp_state, VP_FAILED);
537 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 549 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
538 } 550 }
539 551
540 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 552 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
541 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 553 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
542 break; 554 break;
543 555
544 case MBA_PORT_UPDATE: /* Port database update */ 556 case MBA_PORT_UPDATE: /* Port database update */
@@ -547,107 +559,106 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
547 * event etc. earlier indicating loop is down) then process 559 * event etc. earlier indicating loop is down) then process
548 * it. Otherwise ignore it and Wait for RSCN to come in. 560 * it. Otherwise ignore it and Wait for RSCN to come in.
549 */ 561 */
550 atomic_set(&ha->loop_down_timer, 0); 562 atomic_set(&vha->loop_down_timer, 0);
551 if (atomic_read(&ha->loop_state) != LOOP_DOWN && 563 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
552 atomic_read(&ha->loop_state) != LOOP_DEAD) { 564 atomic_read(&vha->loop_state) != LOOP_DEAD) {
553 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE " 565 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
554 "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1], 566 "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
555 mb[2], mb[3])); 567 mb[2], mb[3]));
556 break; 568 break;
557 } 569 }
558 570
559 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", 571 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
560 ha->host_no)); 572 vha->host_no));
561 DEBUG(printk(KERN_INFO 573 DEBUG(printk(KERN_INFO
562 "scsi(%ld): Port database changed %04x %04x %04x.\n", 574 "scsi(%ld): Port database changed %04x %04x %04x.\n",
563 ha->host_no, mb[1], mb[2], mb[3])); 575 vha->host_no, mb[1], mb[2], mb[3]));
564 576
565 /* 577 /*
566 * Mark all devices as missing so we will login again. 578 * Mark all devices as missing so we will login again.
567 */ 579 */
568 atomic_set(&ha->loop_state, LOOP_UP); 580 atomic_set(&vha->loop_state, LOOP_UP);
569 581
570 qla2x00_mark_all_devices_lost(ha, 1); 582 qla2x00_mark_all_devices_lost(vha, 1);
571 583
572 ha->flags.rscn_queue_overflow = 1; 584 vha->flags.rscn_queue_overflow = 1;
573 585
574 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 586 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
575 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 587 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
576 break; 588 break;
577 589
578 case MBA_RSCN_UPDATE: /* State Change Registration */ 590 case MBA_RSCN_UPDATE: /* State Change Registration */
579 /* Check if the Vport has issued a SCR */ 591 /* Check if the Vport has issued a SCR */
580 if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags)) 592 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
581 break; 593 break;
582 /* Only handle SCNs for our Vport index. */ 594 /* Only handle SCNs for our Vport index. */
583 if (ha->parent && ha->vp_idx != (mb[3] & 0xff)) 595 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
584 break; 596 break;
585
586 DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", 597 DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
587 ha->host_no)); 598 vha->host_no));
588 DEBUG(printk(KERN_INFO 599 DEBUG(printk(KERN_INFO
589 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n", 600 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
590 ha->host_no, mb[1], mb[2], mb[3])); 601 vha->host_no, mb[1], mb[2], mb[3]));
591 602
592 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 603 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
593 host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) | 604 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
594 ha->d_id.b.al_pa; 605 | vha->d_id.b.al_pa;
595 if (rscn_entry == host_pid) { 606 if (rscn_entry == host_pid) {
596 DEBUG(printk(KERN_INFO 607 DEBUG(printk(KERN_INFO
597 "scsi(%ld): Ignoring RSCN update to local host " 608 "scsi(%ld): Ignoring RSCN update to local host "
598 "port ID (%06x)\n", 609 "port ID (%06x)\n",
599 ha->host_no, host_pid)); 610 vha->host_no, host_pid));
600 break; 611 break;
601 } 612 }
602 613
603 /* Ignore reserved bits from RSCN-payload. */ 614 /* Ignore reserved bits from RSCN-payload. */
604 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 615 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
605 rscn_queue_index = ha->rscn_in_ptr + 1; 616 rscn_queue_index = vha->rscn_in_ptr + 1;
606 if (rscn_queue_index == MAX_RSCN_COUNT) 617 if (rscn_queue_index == MAX_RSCN_COUNT)
607 rscn_queue_index = 0; 618 rscn_queue_index = 0;
608 if (rscn_queue_index != ha->rscn_out_ptr) { 619 if (rscn_queue_index != vha->rscn_out_ptr) {
609 ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry; 620 vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
610 ha->rscn_in_ptr = rscn_queue_index; 621 vha->rscn_in_ptr = rscn_queue_index;
611 } else { 622 } else {
612 ha->flags.rscn_queue_overflow = 1; 623 vha->flags.rscn_queue_overflow = 1;
613 } 624 }
614 625
615 atomic_set(&ha->loop_state, LOOP_UPDATE); 626 atomic_set(&vha->loop_state, LOOP_UPDATE);
616 atomic_set(&ha->loop_down_timer, 0); 627 atomic_set(&vha->loop_down_timer, 0);
617 ha->flags.management_server_logged_in = 0; 628 vha->flags.management_server_logged_in = 0;
618 629
619 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 630 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
620 set_bit(RSCN_UPDATE, &ha->dpc_flags); 631 set_bit(RSCN_UPDATE, &vha->dpc_flags);
621 qla2x00_post_aen_work(ha, FCH_EVT_RSCN, rscn_entry); 632 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
622 break; 633 break;
623 634
624 /* case MBA_RIO_RESPONSE: */ 635 /* case MBA_RIO_RESPONSE: */
625 case MBA_ZIO_RESPONSE: 636 case MBA_ZIO_RESPONSE:
626 DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n", 637 DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
627 ha->host_no)); 638 vha->host_no));
628 DEBUG(printk(KERN_INFO 639 DEBUG(printk(KERN_INFO
629 "scsi(%ld): [R|Z]IO update completion.\n", 640 "scsi(%ld): [R|Z]IO update completion.\n",
630 ha->host_no)); 641 vha->host_no));
631 642
632 if (IS_FWI2_CAPABLE(ha)) 643 if (IS_FWI2_CAPABLE(ha))
633 qla24xx_process_response_queue(ha); 644 qla24xx_process_response_queue(vha);
634 else 645 else
635 qla2x00_process_response_queue(ha); 646 qla2x00_process_response_queue(vha);
636 break; 647 break;
637 648
638 case MBA_DISCARD_RND_FRAME: 649 case MBA_DISCARD_RND_FRAME:
639 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " 650 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
640 "%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); 651 "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
641 break; 652 break;
642 653
643 case MBA_TRACE_NOTIFICATION: 654 case MBA_TRACE_NOTIFICATION:
644 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", 655 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
645 ha->host_no, mb[1], mb[2])); 656 vha->host_no, mb[1], mb[2]));
646 break; 657 break;
647 658
648 case MBA_ISP84XX_ALERT: 659 case MBA_ISP84XX_ALERT:
649 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- " 660 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
650 "%04x %04x %04x\n", ha->host_no, mb[1], mb[2], mb[3])); 661 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
651 662
652 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 663 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
653 switch (mb[1]) { 664 switch (mb[1]) {
@@ -682,7 +693,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
682 break; 693 break;
683 } 694 }
684 695
685 if (!ha->parent && ha->num_vhosts) 696 if (!vha->vp_idx && ha->num_vhosts)
686 qla2x00_alert_all_vps(ha, mb); 697 qla2x00_alert_all_vps(ha, mb);
687} 698}
688 699
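
(Illustrative aside, not part of the patch.) Throughout qla2x00_async_event the virtual-port test changes from ha->parent to vha->vp_idx, where index 0 denotes the physical port. A tiny check capturing that convention, with a made-up surrounding type, is sketched below.

/* Illustrative only: vp_idx 0 denotes the physical port, non-zero a vport. */
#include <stdio.h>

struct scsi_qla_host {
	unsigned int vp_idx;
};

static int is_virtual_port(const struct scsi_qla_host *vha)
{
	return vha->vp_idx != 0;
}

int main(void)
{
	struct scsi_qla_host phys = { .vp_idx = 0 }, vport = { .vp_idx = 2 };

	printf("phys: %d, vport: %d\n",
	    is_virtual_port(&phys), is_virtual_port(&vport));
	return 0;
}
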
@@ -690,8 +701,8 @@ static void
690qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data) 701qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
691{ 702{
692 fc_port_t *fcport = data; 703 fc_port_t *fcport = data;
693 704 struct qla_hw_data *ha = fcport->vha->hw;
694 if (fcport->ha->max_q_depth <= sdev->queue_depth) 705 if (ha->req->max_q_depth <= sdev->queue_depth)
695 return; 706 return;
696 707
697 if (sdev->ordered_tags) 708 if (sdev->ordered_tags)
@@ -703,9 +714,9 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
703 714
704 fcport->last_ramp_up = jiffies; 715 fcport->last_ramp_up = jiffies;
705 716
706 DEBUG2(qla_printk(KERN_INFO, fcport->ha, 717 DEBUG2(qla_printk(KERN_INFO, ha,
707 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n", 718 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
708 fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun, 719 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
709 sdev->queue_depth)); 720 sdev->queue_depth));
710} 721}
711 722
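
(Illustrative aside, not part of the patch.) The queue-depth helpers now reach hardware-wide limits through the fc port's back-pointer, fcport->vha->hw, and the request queue's max_q_depth. The fragment below traces only that pointer chain; every struct body is a placeholder.

/* Placeholder types; only the pointer chain mirrors the patch. */
#include <stdio.h>

struct req_que { int max_q_depth; };
struct qla_hw_data { struct req_que *req; };
struct scsi_qla_host { struct qla_hw_data *hw; };
struct fc_port { struct scsi_qla_host *vha; };

/* Ramp up only while the device sits below the queue's cap. */
static int may_ramp_up(const struct fc_port *fcport, int current_depth)
{
	const struct qla_hw_data *ha = fcport->vha->hw;

	return current_depth < ha->req->max_q_depth;
}

int main(void)
{
	struct req_que req = { .max_q_depth = 32 };
	struct qla_hw_data hw = { .req = &req };
	struct scsi_qla_host vha = { .hw = &hw };
	struct fc_port port = { .vha = &vha };

	printf("%d %d\n", may_ramp_up(&port, 16), may_ramp_up(&port, 32));
	return 0;
}
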
@@ -717,20 +728,21 @@ qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
717 if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1)) 728 if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
718 return; 729 return;
719 730
720 DEBUG2(qla_printk(KERN_INFO, fcport->ha, 731 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
721 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n", 732 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
722 fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun, 733 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
723 sdev->queue_depth)); 734 sdev->queue_depth));
724} 735}
725 736
726static inline void 737static inline void
727qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp) 738qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, srb_t *sp)
728{ 739{
729 fc_port_t *fcport; 740 fc_port_t *fcport;
730 struct scsi_device *sdev; 741 struct scsi_device *sdev;
742 struct qla_hw_data *ha = vha->hw;
731 743
732 sdev = sp->cmd->device; 744 sdev = sp->cmd->device;
733 if (sdev->queue_depth >= ha->max_q_depth) 745 if (sdev->queue_depth >= ha->req->max_q_depth)
734 return; 746 return;
735 747
736 fcport = sp->fcport; 748 fcport = sp->fcport;
@@ -751,25 +763,27 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
751 * @index: SRB index 763 * @index: SRB index
752 */ 764 */
753static void 765static void
754qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index) 766qla2x00_process_completed_request(struct scsi_qla_host *vha, uint32_t index)
755{ 767{
756 srb_t *sp; 768 srb_t *sp;
769 struct qla_hw_data *ha = vha->hw;
770 struct req_que *req = ha->req;
757 771
758 /* Validate handle. */ 772 /* Validate handle. */
759 if (index >= MAX_OUTSTANDING_COMMANDS) { 773 if (index >= MAX_OUTSTANDING_COMMANDS) {
760 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n", 774 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
761 ha->host_no, index)); 775 vha->host_no, index));
762 qla_printk(KERN_WARNING, ha, 776 qla_printk(KERN_WARNING, ha,
763 "Invalid SCSI completion handle %d.\n", index); 777 "Invalid SCSI completion handle %d.\n", index);
764 778
765 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 779 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
766 return; 780 return;
767 } 781 }
768 782
769 sp = ha->outstanding_cmds[index]; 783 sp = req->outstanding_cmds[index];
770 if (sp) { 784 if (sp) {
771 /* Free outstanding command slot. */ 785 /* Free outstanding command slot. */
772 ha->outstanding_cmds[index] = NULL; 786 req->outstanding_cmds[index] = NULL;
773 787
774 CMD_COMPL_STATUS(sp->cmd) = 0L; 788 CMD_COMPL_STATUS(sp->cmd) = 0L;
775 CMD_SCSI_STATUS(sp->cmd) = 0L; 789 CMD_SCSI_STATUS(sp->cmd) = 0L;
@@ -777,15 +791,15 @@ qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
777 /* Save ISP completion status */ 791 /* Save ISP completion status */
778 sp->cmd->result = DID_OK << 16; 792 sp->cmd->result = DID_OK << 16;
779 793
780 qla2x00_ramp_up_queue_depth(ha, sp); 794 qla2x00_ramp_up_queue_depth(vha, sp);
781 qla2x00_sp_compl(ha, sp); 795 qla2x00_sp_compl(vha, sp);
782 } else { 796 } else {
783 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n", 797 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
784 ha->host_no)); 798 vha->host_no));
785 qla_printk(KERN_WARNING, ha, 799 qla_printk(KERN_WARNING, ha,
786 "Invalid ISP SCSI completion handle\n"); 800 "Invalid ISP SCSI completion handle\n");
787 801
788 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 802 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
789 } 803 }
790} 804}
791 805
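
(Illustrative aside, not part of the patch.) Completion mirrors submission: a status handle indexes req->outstanding_cmds, so both the range check and the slot release act on the request queue. The fragment below restates that flow in isolation; the SRB type and the midlayer completion call are stubbed.

/* Isolated restatement of the completion lookup; stubs replace the
 * real SRB type and the midlayer completion call. */
#include <stdio.h>

#define MAX_OUTSTANDING_COMMANDS 1024

struct srb { int done; };

struct req_que {
	struct srb *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
};

/* Returns 0 on success, -1 when the handle is stale or out of range
 * (the driver escalates that to an ISP abort). */
static int complete_request(struct req_que *req, unsigned int handle)
{
	struct srb *sp;

	if (handle >= MAX_OUTSTANDING_COMMANDS)
		return -1;
	sp = req->outstanding_cmds[handle];
	if (!sp)
		return -1;
	req->outstanding_cmds[handle] = NULL;   /* free the slot first */
	sp->done = 1;                           /* stand-in for sp_compl() */
	return 0;
}

int main(void)
{
	static struct req_que req;
	struct srb sp = { 0 };

	req.outstanding_cmds[5] = &sp;
	printf("%d %d\n", complete_request(&req, 5), complete_request(&req, 5));
	return 0;
}

The second call fails because the slot was already released, which is the "invalid handle" case the driver reports before scheduling an ISP abort.
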
@@ -794,32 +808,34 @@ qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
794 * @ha: SCSI driver HA context 808 * @ha: SCSI driver HA context
795 */ 809 */
796void 810void
797qla2x00_process_response_queue(struct scsi_qla_host *ha) 811qla2x00_process_response_queue(struct scsi_qla_host *vha)
798{ 812{
813 struct qla_hw_data *ha = vha->hw;
799 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 814 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
800 sts_entry_t *pkt; 815 sts_entry_t *pkt;
801 uint16_t handle_cnt; 816 uint16_t handle_cnt;
802 uint16_t cnt; 817 uint16_t cnt;
818 struct rsp_que *rsp = ha->rsp;
803 819
804 if (!ha->flags.online) 820 if (!vha->flags.online)
805 return; 821 return;
806 822
807 while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) { 823 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
808 pkt = (sts_entry_t *)ha->response_ring_ptr; 824 pkt = (sts_entry_t *)rsp->ring_ptr;
809 825
810 ha->rsp_ring_index++; 826 rsp->ring_index++;
811 if (ha->rsp_ring_index == ha->response_q_length) { 827 if (rsp->ring_index == rsp->length) {
812 ha->rsp_ring_index = 0; 828 rsp->ring_index = 0;
813 ha->response_ring_ptr = ha->response_ring; 829 rsp->ring_ptr = rsp->ring;
814 } else { 830 } else {
815 ha->response_ring_ptr++; 831 rsp->ring_ptr++;
816 } 832 }
817 833
818 if (pkt->entry_status != 0) { 834 if (pkt->entry_status != 0) {
819 DEBUG3(printk(KERN_INFO 835 DEBUG3(printk(KERN_INFO
820 "scsi(%ld): Process error entry.\n", ha->host_no)); 836 "scsi(%ld): Process error entry.\n", vha->host_no));
821 837
822 qla2x00_error_entry(ha, pkt); 838 qla2x00_error_entry(vha, pkt);
823 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 839 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
824 wmb(); 840 wmb();
825 continue; 841 continue;
@@ -827,31 +843,31 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha)
827 843
828 switch (pkt->entry_type) { 844 switch (pkt->entry_type) {
829 case STATUS_TYPE: 845 case STATUS_TYPE:
830 qla2x00_status_entry(ha, pkt); 846 qla2x00_status_entry(vha, pkt);
831 break; 847 break;
832 case STATUS_TYPE_21: 848 case STATUS_TYPE_21:
833 handle_cnt = ((sts21_entry_t *)pkt)->handle_count; 849 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
834 for (cnt = 0; cnt < handle_cnt; cnt++) { 850 for (cnt = 0; cnt < handle_cnt; cnt++) {
835 qla2x00_process_completed_request(ha, 851 qla2x00_process_completed_request(vha,
836 ((sts21_entry_t *)pkt)->handle[cnt]); 852 ((sts21_entry_t *)pkt)->handle[cnt]);
837 } 853 }
838 break; 854 break;
839 case STATUS_TYPE_22: 855 case STATUS_TYPE_22:
840 handle_cnt = ((sts22_entry_t *)pkt)->handle_count; 856 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
841 for (cnt = 0; cnt < handle_cnt; cnt++) { 857 for (cnt = 0; cnt < handle_cnt; cnt++) {
842 qla2x00_process_completed_request(ha, 858 qla2x00_process_completed_request(vha,
843 ((sts22_entry_t *)pkt)->handle[cnt]); 859 ((sts22_entry_t *)pkt)->handle[cnt]);
844 } 860 }
845 break; 861 break;
846 case STATUS_CONT_TYPE: 862 case STATUS_CONT_TYPE:
847 qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt); 863 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
848 break; 864 break;
849 default: 865 default:
850 /* Type Not Supported. */ 866 /* Type Not Supported. */
851 DEBUG4(printk(KERN_WARNING 867 DEBUG4(printk(KERN_WARNING
852 "scsi(%ld): Received unknown response pkt type %x " 868 "scsi(%ld): Received unknown response pkt type %x "
853 "entry status=%x.\n", 869 "entry status=%x.\n",
854 ha->host_no, pkt->entry_type, pkt->entry_status)); 870 vha->host_no, pkt->entry_type, pkt->entry_status));
855 break; 871 break;
856 } 872 }
857 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 873 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -859,7 +875,7 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha)
859 } 875 }
860 876
861 /* Adjust ring index */ 877 /* Adjust ring index */
862 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index); 878 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
863} 879}
864 880
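
(Illustrative aside, not part of the patch.) Once the ring state lives on struct rsp_que, the 2xxx and 24xx response processing loops become structurally identical: advance ring_index, wrap at length, stamp each consumed entry RESPONSE_PROCESSED, then publish the new out-pointer to the chip. The self-contained model below replaces the DMA ring and the register write with an ordinary array and a returned index; the sentinel value is assumed to match the driver's definition and is otherwise illustrative.

/* Compact model of the rsp_que consumer loop; DMA ring and register
 * writes are replaced by an ordinary array and a returned index. */
#include <stdint.h>
#include <stdio.h>

#define RESPONSE_PROCESSED 0xDEADDEAD   /* assumed driver sentinel */

struct response { uint32_t signature; };

struct rsp_que {
	struct response *ring;      /* base of the ring */
	struct response *ring_ptr;  /* current consumer position */
	uint16_t ring_index;
	uint16_t length;
};

/* Drain everything the firmware has posted; returns the out-index the
 * real driver would write to ISP_RSP_Q_OUT / req_q registers. */
static uint16_t drain_response_queue(struct rsp_que *rsp)
{
	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		struct response *pkt = rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		/* ... dispatch on pkt->entry_type in the real code ... */
		pkt->signature = RESPONSE_PROCESSED;
	}
	return rsp->ring_index;
}

int main(void)
{
	struct response ring[4] = {
		{ 1 }, { 2 }, { RESPONSE_PROCESSED }, { RESPONSE_PROCESSED }
	};
	struct rsp_que rsp = { .ring = ring, .ring_ptr = ring, .length = 4 };

	printf("out index %u\n", (unsigned int)drain_response_queue(&rsp));
	return 0;
}
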
865static inline void 881static inline void
@@ -881,10 +897,10 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
881 sp->request_sense_ptr += sense_len; 897 sp->request_sense_ptr += sense_len;
882 sp->request_sense_length -= sense_len; 898 sp->request_sense_length -= sense_len;
883 if (sp->request_sense_length != 0) 899 if (sp->request_sense_length != 0)
884 sp->fcport->ha->status_srb = sp; 900 sp->fcport->vha->status_srb = sp;
885 901
886 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " 902 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
887 "cmd=%p pid=%ld\n", __func__, sp->fcport->ha->host_no, 903 "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
888 cp->device->channel, cp->device->id, cp->device->lun, cp, 904 cp->device->channel, cp->device->id, cp->device->lun, cp,
889 cp->serial_number)); 905 cp->serial_number));
890 if (sense_len) 906 if (sense_len)
@@ -898,7 +914,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
898 * @pkt: Entry pointer 914 * @pkt: Entry pointer
899 */ 915 */
900static void 916static void
901qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) 917qla2x00_status_entry(scsi_qla_host_t *vha, void *pkt)
902{ 918{
903 srb_t *sp; 919 srb_t *sp;
904 fc_port_t *fcport; 920 fc_port_t *fcport;
@@ -911,6 +927,8 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
911 int32_t resid; 927 int32_t resid;
912 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; 928 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
913 uint8_t *rsp_info, *sense_data; 929 uint8_t *rsp_info, *sense_data;
930 struct qla_hw_data *ha = vha->hw;
931 struct req_que *req = ha->req;
914 932
915 sts = (sts_entry_t *) pkt; 933 sts = (sts_entry_t *) pkt;
916 sts24 = (struct sts_entry_24xx *) pkt; 934 sts24 = (struct sts_entry_24xx *) pkt;
@@ -924,31 +942,31 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
924 942
925 /* Fast path completion. */ 943 /* Fast path completion. */
926 if (comp_status == CS_COMPLETE && scsi_status == 0) { 944 if (comp_status == CS_COMPLETE && scsi_status == 0) {
927 qla2x00_process_completed_request(ha, sts->handle); 945 qla2x00_process_completed_request(vha, sts->handle);
928 946
929 return; 947 return;
930 } 948 }
931 949
932 /* Validate handle. */ 950 /* Validate handle. */
933 if (sts->handle < MAX_OUTSTANDING_COMMANDS) { 951 if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
934 sp = ha->outstanding_cmds[sts->handle]; 952 sp = req->outstanding_cmds[sts->handle];
935 ha->outstanding_cmds[sts->handle] = NULL; 953 req->outstanding_cmds[sts->handle] = NULL;
936 } else 954 } else
937 sp = NULL; 955 sp = NULL;
938 956
939 if (sp == NULL) { 957 if (sp == NULL) {
940 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n", 958 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
941 ha->host_no)); 959 vha->host_no));
942 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n"); 960 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
943 961
944 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 962 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
945 qla2xxx_wake_dpc(ha); 963 qla2xxx_wake_dpc(vha);
946 return; 964 return;
947 } 965 }
948 cp = sp->cmd; 966 cp = sp->cmd;
949 if (cp == NULL) { 967 if (cp == NULL) {
950 DEBUG2(printk("scsi(%ld): Command already returned back to OS " 968 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
951 "pkt->handle=%d sp=%p.\n", ha->host_no, sts->handle, sp)); 969 "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
952 qla_printk(KERN_WARNING, ha, 970 qla_printk(KERN_WARNING, ha,
953 "Command is NULL: already returned to OS (sp=%p)\n", sp); 971 "Command is NULL: already returned to OS (sp=%p)\n", sp);
954 972
@@ -987,14 +1005,14 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
987 if (rsp_info_len > 3 && rsp_info[3]) { 1005 if (rsp_info_len > 3 && rsp_info[3]) {
988 DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol " 1006 DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
989 "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..." 1007 "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
990 "retrying command\n", ha->host_no, 1008 "retrying command\n", vha->host_no,
991 cp->device->channel, cp->device->id, 1009 cp->device->channel, cp->device->id,
992 cp->device->lun, rsp_info_len, rsp_info[0], 1010 cp->device->lun, rsp_info_len, rsp_info[0],
993 rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4], 1011 rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
994 rsp_info[5], rsp_info[6], rsp_info[7])); 1012 rsp_info[5], rsp_info[6], rsp_info[7]));
995 1013
996 cp->result = DID_BUS_BUSY << 16; 1014 cp->result = DID_BUS_BUSY << 16;
997 qla2x00_sp_compl(ha, sp); 1015 qla2x00_sp_compl(vha, sp);
998 return; 1016 return;
999 } 1017 }
1000 } 1018 }
@@ -1025,7 +1043,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1025 qla_printk(KERN_INFO, ha, 1043 qla_printk(KERN_INFO, ha,
1026 "scsi(%ld:%d:%d:%d): Mid-layer underflow " 1044 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1027 "detected (%x of %x bytes)...returning " 1045 "detected (%x of %x bytes)...returning "
1028 "error status.\n", ha->host_no, 1046 "error status.\n", vha->host_no,
1029 cp->device->channel, cp->device->id, 1047 cp->device->channel, cp->device->id,
1030 cp->device->lun, resid, 1048 cp->device->lun, resid,
1031 scsi_bufflen(cp)); 1049 scsi_bufflen(cp));
@@ -1039,7 +1057,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1039 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1057 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1040 DEBUG2(printk(KERN_INFO 1058 DEBUG2(printk(KERN_INFO
1041 "scsi(%ld): QUEUE FULL status detected " 1059 "scsi(%ld): QUEUE FULL status detected "
1042 "0x%x-0x%x.\n", ha->host_no, comp_status, 1060 "0x%x-0x%x.\n", vha->host_no, comp_status,
1043 scsi_status)); 1061 scsi_status));
1044 1062
1045 /* Adjust queue depth for all luns on the port. */ 1063 /* Adjust queue depth for all luns on the port. */
@@ -1078,7 +1096,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1078 DEBUG2(printk(KERN_INFO 1096 DEBUG2(printk(KERN_INFO
1079 "scsi(%ld:%d:%d) UNDERRUN status detected " 1097 "scsi(%ld:%d:%d) UNDERRUN status detected "
1080 "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x " 1098 "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1081 "os_underflow=0x%x\n", ha->host_no, 1099 "os_underflow=0x%x\n", vha->host_no,
1082 cp->device->id, cp->device->lun, comp_status, 1100 cp->device->id, cp->device->lun, comp_status,
1083 scsi_status, resid_len, resid, cp->cmnd[0], 1101 scsi_status, resid_len, resid, cp->cmnd[0],
1084 cp->underflow)); 1102 cp->underflow));
@@ -1095,7 +1113,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1095 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1113 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1096 DEBUG2(printk(KERN_INFO 1114 DEBUG2(printk(KERN_INFO
1097 "scsi(%ld): QUEUE FULL status detected " 1115 "scsi(%ld): QUEUE FULL status detected "
1098 "0x%x-0x%x.\n", ha->host_no, comp_status, 1116 "0x%x-0x%x.\n", vha->host_no, comp_status,
1099 scsi_status)); 1117 scsi_status));
1100 1118
1101 /* 1119 /*
@@ -1125,10 +1143,10 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1125 if (!(scsi_status & SS_RESIDUAL_UNDER)) { 1143 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1126 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " 1144 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1127 "frame(s) detected (%x of %x bytes)..." 1145 "frame(s) detected (%x of %x bytes)..."
1128 "retrying command.\n", ha->host_no, 1146 "retrying command.\n",
1129 cp->device->channel, cp->device->id, 1147 vha->host_no, cp->device->channel,
1130 cp->device->lun, resid, 1148 cp->device->id, cp->device->lun, resid,
1131 scsi_bufflen(cp))); 1149 scsi_bufflen(cp)));
1132 1150
1133 cp->result = DID_BUS_BUSY << 16; 1151 cp->result = DID_BUS_BUSY << 16;
1134 break; 1152 break;
@@ -1140,7 +1158,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1140 qla_printk(KERN_INFO, ha, 1158 qla_printk(KERN_INFO, ha,
1141 "scsi(%ld:%d:%d:%d): Mid-layer underflow " 1159 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1142 "detected (%x of %x bytes)...returning " 1160 "detected (%x of %x bytes)...returning "
1143 "error status.\n", ha->host_no, 1161 "error status.\n", vha->host_no,
1144 cp->device->channel, cp->device->id, 1162 cp->device->channel, cp->device->id,
1145 cp->device->lun, resid, 1163 cp->device->lun, resid,
1146 scsi_bufflen(cp)); 1164 scsi_bufflen(cp));
@@ -1157,7 +1175,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1157 case CS_DATA_OVERRUN: 1175 case CS_DATA_OVERRUN:
1158 DEBUG2(printk(KERN_INFO 1176 DEBUG2(printk(KERN_INFO
1159 "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n", 1177 "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1160 ha->host_no, cp->device->id, cp->device->lun, comp_status, 1178 vha->host_no, cp->device->id, cp->device->lun, comp_status,
1161 scsi_status)); 1179 scsi_status));
1162 DEBUG2(printk(KERN_INFO 1180 DEBUG2(printk(KERN_INFO
1163 "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", 1181 "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
@@ -1183,7 +1201,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1183 */ 1201 */
1184 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down " 1202 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1185 "pid=%ld, compl status=0x%x, port state=0x%x\n", 1203 "pid=%ld, compl status=0x%x, port state=0x%x\n",
1186 ha->host_no, cp->device->id, cp->device->lun, 1204 vha->host_no, cp->device->id, cp->device->lun,
1187 cp->serial_number, comp_status, 1205 cp->serial_number, comp_status,
1188 atomic_read(&fcport->state))); 1206 atomic_read(&fcport->state)));
1189 1207
@@ -1194,13 +1212,13 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1194 */ 1212 */
1195 cp->result = DID_TRANSPORT_DISRUPTED << 16; 1213 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1196 if (atomic_read(&fcport->state) == FCS_ONLINE) 1214 if (atomic_read(&fcport->state) == FCS_ONLINE)
1197 qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1); 1215 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1198 break; 1216 break;
1199 1217
1200 case CS_RESET: 1218 case CS_RESET:
1201 DEBUG2(printk(KERN_INFO 1219 DEBUG2(printk(KERN_INFO
1202 "scsi(%ld): RESET status detected 0x%x-0x%x.\n", 1220 "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1203 ha->host_no, comp_status, scsi_status)); 1221 vha->host_no, comp_status, scsi_status));
1204 1222
1205 cp->result = DID_RESET << 16; 1223 cp->result = DID_RESET << 16;
1206 break; 1224 break;
@@ -1213,7 +1231,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1213 */ 1231 */
1214 DEBUG2(printk(KERN_INFO 1232 DEBUG2(printk(KERN_INFO
1215 "scsi(%ld): ABORT status detected 0x%x-0x%x.\n", 1233 "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1216 ha->host_no, comp_status, scsi_status)); 1234 vha->host_no, comp_status, scsi_status));
1217 1235
1218 cp->result = DID_RESET << 16; 1236 cp->result = DID_RESET << 16;
1219 break; 1237 break;
@@ -1229,25 +1247,25 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1229 if (IS_FWI2_CAPABLE(ha)) { 1247 if (IS_FWI2_CAPABLE(ha)) {
1230 DEBUG2(printk(KERN_INFO 1248 DEBUG2(printk(KERN_INFO
1231 "scsi(%ld:%d:%d:%d): TIMEOUT status detected " 1249 "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1232 "0x%x-0x%x\n", ha->host_no, cp->device->channel, 1250 "0x%x-0x%x\n", vha->host_no, cp->device->channel,
1233 cp->device->id, cp->device->lun, comp_status, 1251 cp->device->id, cp->device->lun, comp_status,
1234 scsi_status)); 1252 scsi_status));
1235 break; 1253 break;
1236 } 1254 }
1237 DEBUG2(printk(KERN_INFO 1255 DEBUG2(printk(KERN_INFO
1238 "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x " 1256 "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1239 "sflags=%x.\n", ha->host_no, cp->device->channel, 1257 "sflags=%x.\n", vha->host_no, cp->device->channel,
1240 cp->device->id, cp->device->lun, comp_status, scsi_status, 1258 cp->device->id, cp->device->lun, comp_status, scsi_status,
1241 le16_to_cpu(sts->status_flags))); 1259 le16_to_cpu(sts->status_flags)));
1242 1260
1243 /* Check to see if logout occurred. */ 1261 /* Check to see if logout occurred. */
1244 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT)) 1262 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1245 qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1); 1263 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1246 break; 1264 break;
1247 1265
1248 default: 1266 default:
1249 DEBUG3(printk("scsi(%ld): Error detected (unknown status) " 1267 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1250 "0x%x-0x%x.\n", ha->host_no, comp_status, scsi_status)); 1268 "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
1251 qla_printk(KERN_INFO, ha, 1269 qla_printk(KERN_INFO, ha,
1252 "Unknown status detected 0x%x-0x%x.\n", 1270 "Unknown status detected 0x%x-0x%x.\n",
1253 comp_status, scsi_status); 1271 comp_status, scsi_status);
@@ -1257,8 +1275,8 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1257 } 1275 }
1258 1276
1259 /* Place command on done queue. */ 1277 /* Place command on done queue. */
1260 if (ha->status_srb == NULL) 1278 if (vha->status_srb == NULL)
1261 qla2x00_sp_compl(ha, sp); 1279 qla2x00_sp_compl(vha, sp);
1262} 1280}
1263 1281
1264/** 1282/**
@@ -1269,10 +1287,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1269 * Extended sense data. 1287 * Extended sense data.
1270 */ 1288 */
1271static void 1289static void
1272qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt) 1290qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1273{ 1291{
1274 uint8_t sense_sz = 0; 1292 uint8_t sense_sz = 0;
1275 srb_t *sp = ha->status_srb; 1293 struct qla_hw_data *ha = vha->hw;
1294 srb_t *sp = vha->status_srb;
1276 struct scsi_cmnd *cp; 1295 struct scsi_cmnd *cp;
1277 1296
1278 if (sp != NULL && sp->request_sense_length != 0) { 1297 if (sp != NULL && sp->request_sense_length != 0) {
@@ -1284,7 +1303,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1284 "cmd is NULL: already returned to OS (sp=%p)\n", 1303 "cmd is NULL: already returned to OS (sp=%p)\n",
1285 sp); 1304 sp);
1286 1305
1287 ha->status_srb = NULL; 1306 vha->status_srb = NULL;
1288 return; 1307 return;
1289 } 1308 }
1290 1309
@@ -1305,8 +1324,8 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1305 1324
1306 /* Place command on done queue. */ 1325 /* Place command on done queue. */
1307 if (sp->request_sense_length == 0) { 1326 if (sp->request_sense_length == 0) {
1308 ha->status_srb = NULL; 1327 vha->status_srb = NULL;
1309 qla2x00_sp_compl(ha, sp); 1328 qla2x00_sp_compl(vha, sp);
1310 } 1329 }
1311 } 1330 }
1312} 1331}
@@ -1317,10 +1336,11 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1317 * @pkt: Entry pointer 1336 * @pkt: Entry pointer
1318 */ 1337 */
1319static void 1338static void
1320qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt) 1339qla2x00_error_entry(scsi_qla_host_t *vha, sts_entry_t *pkt)
1321{ 1340{
1322 srb_t *sp; 1341 srb_t *sp;
1323 1342 struct qla_hw_data *ha = vha->hw;
1343 struct req_que *req = ha->req;
1324#if defined(QL_DEBUG_LEVEL_2) 1344#if defined(QL_DEBUG_LEVEL_2)
1325 if (pkt->entry_status & RF_INV_E_ORDER) 1345 if (pkt->entry_status & RF_INV_E_ORDER)
1326 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); 1346 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
@@ -1339,13 +1359,13 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1339 1359
1340 /* Validate handle. */ 1360 /* Validate handle. */
1341 if (pkt->handle < MAX_OUTSTANDING_COMMANDS) 1361 if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
1342 sp = ha->outstanding_cmds[pkt->handle]; 1362 sp = req->outstanding_cmds[pkt->handle];
1343 else 1363 else
1344 sp = NULL; 1364 sp = NULL;
1345 1365
1346 if (sp) { 1366 if (sp) {
1347 /* Free outstanding command slot. */ 1367 /* Free outstanding command slot. */
1348 ha->outstanding_cmds[pkt->handle] = NULL; 1368 req->outstanding_cmds[pkt->handle] = NULL;
1349 1369
1350 /* Bad payload or header */ 1370 /* Bad payload or header */
1351 if (pkt->entry_status & 1371 if (pkt->entry_status &
@@ -1357,17 +1377,17 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1357 } else { 1377 } else {
1358 sp->cmd->result = DID_ERROR << 16; 1378 sp->cmd->result = DID_ERROR << 16;
1359 } 1379 }
1360 qla2x00_sp_compl(ha, sp); 1380 qla2x00_sp_compl(vha, sp);
1361 1381
1362 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == 1382 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1363 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) { 1383 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1364 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n", 1384 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1365 ha->host_no)); 1385 vha->host_no));
1366 qla_printk(KERN_WARNING, ha, 1386 qla_printk(KERN_WARNING, ha,
1367 "Error entry - invalid handle\n"); 1387 "Error entry - invalid handle\n");
1368 1388
1369 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1389 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1370 qla2xxx_wake_dpc(ha); 1390 qla2xxx_wake_dpc(vha);
1371 } 1391 }
1372} 1392}
1373 1393
@@ -1377,10 +1397,11 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1377 * @mb0: Mailbox0 register 1397 * @mb0: Mailbox0 register
1378 */ 1398 */
1379static void 1399static void
1380qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0) 1400qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1381{ 1401{
1382 uint16_t cnt; 1402 uint16_t cnt;
1383 uint16_t __iomem *wptr; 1403 uint16_t __iomem *wptr;
1404 struct qla_hw_data *ha = vha->hw;
1384 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1405 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1385 1406
1386 /* Load return mailbox registers. */ 1407 /* Load return mailbox registers. */
@@ -1395,10 +1416,10 @@ qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
1395 1416
1396 if (ha->mcp) { 1417 if (ha->mcp) {
1397 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", 1418 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1398 __func__, ha->host_no, ha->mcp->mb[0])); 1419 __func__, vha->host_no, ha->mcp->mb[0]));
1399 } else { 1420 } else {
1400 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", 1421 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1401 __func__, ha->host_no)); 1422 __func__, vha->host_no));
1402 } 1423 }
1403} 1424}
1404 1425
@@ -1407,30 +1428,32 @@ qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
1407 * @ha: SCSI driver HA context 1428 * @ha: SCSI driver HA context
1408 */ 1429 */
1409void 1430void
1410qla24xx_process_response_queue(struct scsi_qla_host *ha) 1431qla24xx_process_response_queue(struct scsi_qla_host *vha)
1411{ 1432{
1433 struct qla_hw_data *ha = vha->hw;
1412 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1434 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1413 struct sts_entry_24xx *pkt; 1435 struct sts_entry_24xx *pkt;
1436 struct rsp_que *rsp = ha->rsp;
1414 1437
1415 if (!ha->flags.online) 1438 if (!vha->flags.online)
1416 return; 1439 return;
1417 1440
1418 while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) { 1441 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1419 pkt = (struct sts_entry_24xx *)ha->response_ring_ptr; 1442 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
1420 1443
1421 ha->rsp_ring_index++; 1444 rsp->ring_index++;
1422 if (ha->rsp_ring_index == ha->response_q_length) { 1445 if (rsp->ring_index == rsp->length) {
1423 ha->rsp_ring_index = 0; 1446 rsp->ring_index = 0;
1424 ha->response_ring_ptr = ha->response_ring; 1447 rsp->ring_ptr = rsp->ring;
1425 } else { 1448 } else {
1426 ha->response_ring_ptr++; 1449 rsp->ring_ptr++;
1427 } 1450 }
1428 1451
1429 if (pkt->entry_status != 0) { 1452 if (pkt->entry_status != 0) {
1430 DEBUG3(printk(KERN_INFO 1453 DEBUG3(printk(KERN_INFO
1431 "scsi(%ld): Process error entry.\n", ha->host_no)); 1454 "scsi(%ld): Process error entry.\n", vha->host_no));
1432 1455
1433 qla2x00_error_entry(ha, (sts_entry_t *) pkt); 1456 qla2x00_error_entry(vha, (sts_entry_t *) pkt);
1434 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1457 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1435 wmb(); 1458 wmb();
1436 continue; 1459 continue;
@@ -1438,13 +1461,13 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
1438 1461
1439 switch (pkt->entry_type) { 1462 switch (pkt->entry_type) {
1440 case STATUS_TYPE: 1463 case STATUS_TYPE:
1441 qla2x00_status_entry(ha, pkt); 1464 qla2x00_status_entry(vha, pkt);
1442 break; 1465 break;
1443 case STATUS_CONT_TYPE: 1466 case STATUS_CONT_TYPE:
1444 qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt); 1467 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
1445 break; 1468 break;
1446 case VP_RPT_ID_IOCB_TYPE: 1469 case VP_RPT_ID_IOCB_TYPE:
1447 qla24xx_report_id_acquisition(ha, 1470 qla24xx_report_id_acquisition(vha,
1448 (struct vp_rpt_id_entry_24xx *)pkt); 1471 (struct vp_rpt_id_entry_24xx *)pkt);
1449 break; 1472 break;
1450 default: 1473 default:
@@ -1452,7 +1475,7 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
1452 DEBUG4(printk(KERN_WARNING 1475 DEBUG4(printk(KERN_WARNING
1453 "scsi(%ld): Received unknown response pkt type %x " 1476 "scsi(%ld): Received unknown response pkt type %x "
1454 "entry status=%x.\n", 1477 "entry status=%x.\n",
1455 ha->host_no, pkt->entry_type, pkt->entry_status)); 1478 vha->host_no, pkt->entry_type, pkt->entry_status));
1456 break; 1479 break;
1457 } 1480 }
1458 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1481 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1460,14 +1483,15 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
1460 } 1483 }
1461 1484
1462 /* Adjust ring index */ 1485 /* Adjust ring index */
1463 WRT_REG_DWORD(&reg->rsp_q_out, ha->rsp_ring_index); 1486 WRT_REG_DWORD(&reg->rsp_q_out, rsp->ring_index);
1464} 1487}
1465 1488
1466static void 1489static void
1467qla2xxx_check_risc_status(scsi_qla_host_t *ha) 1490qla2xxx_check_risc_status(scsi_qla_host_t *vha)
1468{ 1491{
1469 int rval; 1492 int rval;
1470 uint32_t cnt; 1493 uint32_t cnt;
1494 struct qla_hw_data *ha = vha->hw;
1471 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1495 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1472 1496
1473 if (!IS_QLA25XX(ha)) 1497 if (!IS_QLA25XX(ha))
@@ -1521,25 +1545,29 @@ done:
1521irqreturn_t 1545irqreturn_t
1522qla24xx_intr_handler(int irq, void *dev_id) 1546qla24xx_intr_handler(int irq, void *dev_id)
1523{ 1547{
1524 scsi_qla_host_t *ha; 1548 scsi_qla_host_t *vha;
1549 struct qla_hw_data *ha;
1525 struct device_reg_24xx __iomem *reg; 1550 struct device_reg_24xx __iomem *reg;
1526 int status; 1551 int status;
1527 unsigned long iter; 1552 unsigned long iter;
1528 uint32_t stat; 1553 uint32_t stat;
1529 uint32_t hccr; 1554 uint32_t hccr;
1530 uint16_t mb[4]; 1555 uint16_t mb[4];
1556 struct rsp_que *rsp;
1531 1557
1532 ha = (scsi_qla_host_t *) dev_id; 1558 rsp = (struct rsp_que *) dev_id;
1533 if (!ha) { 1559 if (!rsp) {
1534 printk(KERN_INFO 1560 printk(KERN_INFO
1535 "%s(): NULL host pointer\n", __func__); 1561 "%s(): NULL response queue pointer\n", __func__);
1536 return IRQ_NONE; 1562 return IRQ_NONE;
1537 } 1563 }
1538 1564
1565 ha = rsp->hw;
1539 reg = &ha->iobase->isp24; 1566 reg = &ha->iobase->isp24;
1540 status = 0; 1567 status = 0;
1541 1568
1542 spin_lock(&ha->hardware_lock); 1569 spin_lock(&ha->hardware_lock);
1570 vha = qla2x00_get_rsp_host(rsp);
1543 for (iter = 50; iter--; ) { 1571 for (iter = 50; iter--; ) {
1544 stat = RD_REG_DWORD(&reg->host_status); 1572 stat = RD_REG_DWORD(&reg->host_status);
1545 if (stat & HSRX_RISC_PAUSED) { 1573 if (stat & HSRX_RISC_PAUSED) {
@@ -1547,7 +1575,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1547 break; 1575 break;
1548 1576
1549 if (ha->hw_event_pause_errors == 0) 1577 if (ha->hw_event_pause_errors == 0)
1550 qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR, 1578 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
1551 0, MSW(stat), LSW(stat)); 1579 0, MSW(stat), LSW(stat));
1552 else if (ha->hw_event_pause_errors < 0xffffffff) 1580 else if (ha->hw_event_pause_errors < 0xffffffff)
1553 ha->hw_event_pause_errors++; 1581 ha->hw_event_pause_errors++;
@@ -1557,10 +1585,10 @@ qla24xx_intr_handler(int irq, void *dev_id)
1557 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 1585 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1558 "Dumping firmware!\n", hccr); 1586 "Dumping firmware!\n", hccr);
1559 1587
1560 qla2xxx_check_risc_status(ha); 1588 qla2xxx_check_risc_status(vha);
1561 1589
1562 ha->isp_ops->fw_dump(ha, 1); 1590 ha->isp_ops->fw_dump(vha, 1);
1563 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1591 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1564 break; 1592 break;
1565 } else if ((stat & HSRX_RISC_INT) == 0) 1593 } else if ((stat & HSRX_RISC_INT) == 0)
1566 break; 1594 break;
@@ -1570,7 +1598,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1570 case 0x2: 1598 case 0x2:
1571 case 0x10: 1599 case 0x10:
1572 case 0x11: 1600 case 0x11:
1573 qla24xx_mbx_completion(ha, MSW(stat)); 1601 qla24xx_mbx_completion(vha, MSW(stat));
1574 status |= MBX_INTERRUPT; 1602 status |= MBX_INTERRUPT;
1575 1603
1576 break; 1604 break;
@@ -1579,15 +1607,15 @@ qla24xx_intr_handler(int irq, void *dev_id)
1579 mb[1] = RD_REG_WORD(&reg->mailbox1); 1607 mb[1] = RD_REG_WORD(&reg->mailbox1);
1580 mb[2] = RD_REG_WORD(&reg->mailbox2); 1608 mb[2] = RD_REG_WORD(&reg->mailbox2);
1581 mb[3] = RD_REG_WORD(&reg->mailbox3); 1609 mb[3] = RD_REG_WORD(&reg->mailbox3);
1582 qla2x00_async_event(ha, mb); 1610 qla2x00_async_event(vha, mb);
1583 break; 1611 break;
1584 case 0x13: 1612 case 0x13:
1585 qla24xx_process_response_queue(ha); 1613 qla24xx_process_response_queue(vha);
1586 break; 1614 break;
1587 default: 1615 default:
1588 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1616 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1589 "(%d).\n", 1617 "(%d).\n",
1590 ha->host_no, stat & 0xff)); 1618 vha->host_no, stat & 0xff));
1591 break; 1619 break;
1592 } 1620 }
1593 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1621 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -1607,15 +1635,24 @@ qla24xx_intr_handler(int irq, void *dev_id)
1607static irqreturn_t 1635static irqreturn_t
1608qla24xx_msix_rsp_q(int irq, void *dev_id) 1636qla24xx_msix_rsp_q(int irq, void *dev_id)
1609{ 1637{
1610 scsi_qla_host_t *ha; 1638 scsi_qla_host_t *vha;
1639 struct qla_hw_data *ha;
1640 struct rsp_que *rsp;
1611 struct device_reg_24xx __iomem *reg; 1641 struct device_reg_24xx __iomem *reg;
1612 1642
1613 ha = dev_id; 1643 rsp = (struct rsp_que *) dev_id;
1644 if (!rsp) {
1645 printk(KERN_INFO
1646 "%s(): NULL response queue pointer\n", __func__);
1647 return IRQ_NONE;
1648 }
1649 ha = rsp->hw;
1614 reg = &ha->iobase->isp24; 1650 reg = &ha->iobase->isp24;
1615 1651
1616 spin_lock_irq(&ha->hardware_lock); 1652 spin_lock_irq(&ha->hardware_lock);
1617 1653
1618 qla24xx_process_response_queue(ha); 1654 vha = qla2x00_get_rsp_host(rsp);
1655 qla24xx_process_response_queue(vha);
1619 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1656 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1620 1657
1621 spin_unlock_irq(&ha->hardware_lock); 1658 spin_unlock_irq(&ha->hardware_lock);
@@ -1626,18 +1663,27 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1626static irqreturn_t 1663static irqreturn_t
1627qla24xx_msix_default(int irq, void *dev_id) 1664qla24xx_msix_default(int irq, void *dev_id)
1628{ 1665{
1629 scsi_qla_host_t *ha; 1666 scsi_qla_host_t *vha;
1667 struct qla_hw_data *ha;
1668 struct rsp_que *rsp;
1630 struct device_reg_24xx __iomem *reg; 1669 struct device_reg_24xx __iomem *reg;
1631 int status; 1670 int status;
1632 uint32_t stat; 1671 uint32_t stat;
1633 uint32_t hccr; 1672 uint32_t hccr;
1634 uint16_t mb[4]; 1673 uint16_t mb[4];
1635 1674
1636 ha = dev_id; 1675 rsp = (struct rsp_que *) dev_id;
1676 if (!rsp) {
1677 DEBUG(printk(
1678 "%s(): NULL response queue pointer\n", __func__));
1679 return IRQ_NONE;
1680 }
1681 ha = rsp->hw;
1637 reg = &ha->iobase->isp24; 1682 reg = &ha->iobase->isp24;
1638 status = 0; 1683 status = 0;
1639 1684
1640 spin_lock_irq(&ha->hardware_lock); 1685 spin_lock_irq(&ha->hardware_lock);
1686 vha = qla2x00_get_rsp_host(rsp);
1641 do { 1687 do {
1642 stat = RD_REG_DWORD(&reg->host_status); 1688 stat = RD_REG_DWORD(&reg->host_status);
1643 if (stat & HSRX_RISC_PAUSED) { 1689 if (stat & HSRX_RISC_PAUSED) {
@@ -1645,7 +1691,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1645 break; 1691 break;
1646 1692
1647 if (ha->hw_event_pause_errors == 0) 1693 if (ha->hw_event_pause_errors == 0)
1648 qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR, 1694 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
1649 0, MSW(stat), LSW(stat)); 1695 0, MSW(stat), LSW(stat));
1650 else if (ha->hw_event_pause_errors < 0xffffffff) 1696 else if (ha->hw_event_pause_errors < 0xffffffff)
1651 ha->hw_event_pause_errors++; 1697 ha->hw_event_pause_errors++;
@@ -1655,10 +1701,10 @@ qla24xx_msix_default(int irq, void *dev_id)
1655 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 1701 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1656 "Dumping firmware!\n", hccr); 1702 "Dumping firmware!\n", hccr);
1657 1703
1658 qla2xxx_check_risc_status(ha); 1704 qla2xxx_check_risc_status(vha);
1659 1705
1660 ha->isp_ops->fw_dump(ha, 1); 1706 ha->isp_ops->fw_dump(vha, 1);
1661 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1707 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1662 break; 1708 break;
1663 } else if ((stat & HSRX_RISC_INT) == 0) 1709 } else if ((stat & HSRX_RISC_INT) == 0)
1664 break; 1710 break;
@@ -1668,7 +1714,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1668 case 0x2: 1714 case 0x2:
1669 case 0x10: 1715 case 0x10:
1670 case 0x11: 1716 case 0x11:
1671 qla24xx_mbx_completion(ha, MSW(stat)); 1717 qla24xx_mbx_completion(vha, MSW(stat));
1672 status |= MBX_INTERRUPT; 1718 status |= MBX_INTERRUPT;
1673 1719
1674 break; 1720 break;
@@ -1677,15 +1723,15 @@ qla24xx_msix_default(int irq, void *dev_id)
1677 mb[1] = RD_REG_WORD(&reg->mailbox1); 1723 mb[1] = RD_REG_WORD(&reg->mailbox1);
1678 mb[2] = RD_REG_WORD(&reg->mailbox2); 1724 mb[2] = RD_REG_WORD(&reg->mailbox2);
1679 mb[3] = RD_REG_WORD(&reg->mailbox3); 1725 mb[3] = RD_REG_WORD(&reg->mailbox3);
1680 qla2x00_async_event(ha, mb); 1726 qla2x00_async_event(vha, mb);
1681 break; 1727 break;
1682 case 0x13: 1728 case 0x13:
1683 qla24xx_process_response_queue(ha); 1729 qla24xx_process_response_queue(vha);
1684 break; 1730 break;
1685 default: 1731 default:
1686 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1732 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1687 "(%d).\n", 1733 "(%d).\n",
1688 ha->host_no, stat & 0xff)); 1734 vha->host_no, stat & 0xff));
1689 break; 1735 break;
1690 } 1736 }
1691 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1737 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -1719,23 +1765,25 @@ static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
1719}; 1765};
1720 1766
1721static void 1767static void
1722qla24xx_disable_msix(scsi_qla_host_t *ha) 1768qla24xx_disable_msix(struct qla_hw_data *ha)
1723{ 1769{
1724 int i; 1770 int i;
1725 struct qla_msix_entry *qentry; 1771 struct qla_msix_entry *qentry;
1772 struct rsp_que *rsp = ha->rsp;
1726 1773
1727 for (i = 0; i < QLA_MSIX_ENTRIES; i++) { 1774 for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
1728 qentry = &ha->msix_entries[imsix_entries[i].index]; 1775 qentry = &ha->msix_entries[imsix_entries[i].index];
1729 if (qentry->have_irq) 1776 if (qentry->have_irq)
1730 free_irq(qentry->msix_vector, ha); 1777 free_irq(qentry->msix_vector, rsp);
1731 } 1778 }
1732 pci_disable_msix(ha->pdev); 1779 pci_disable_msix(ha->pdev);
1733} 1780}
1734 1781
1735static int 1782static int
1736qla24xx_enable_msix(scsi_qla_host_t *ha) 1783qla24xx_enable_msix(struct qla_hw_data *ha)
1737{ 1784{
1738 int i, ret; 1785 int i, ret;
1786 struct rsp_que *rsp = ha->rsp;
1739 struct msix_entry entries[QLA_MSIX_ENTRIES]; 1787 struct msix_entry entries[QLA_MSIX_ENTRIES];
1740 struct qla_msix_entry *qentry; 1788 struct qla_msix_entry *qentry;
1741 1789
@@ -1757,7 +1805,7 @@ qla24xx_enable_msix(scsi_qla_host_t *ha)
1757 qentry->msix_entry = entries[i].entry; 1805 qentry->msix_entry = entries[i].entry;
1758 qentry->have_irq = 0; 1806 qentry->have_irq = 0;
1759 ret = request_irq(qentry->msix_vector, 1807 ret = request_irq(qentry->msix_vector,
1760 imsix_entries[i].handler, 0, imsix_entries[i].name, ha); 1808 imsix_entries[i].handler, 0, imsix_entries[i].name, rsp);
1761 if (ret) { 1809 if (ret) {
1762 qla_printk(KERN_WARNING, ha, 1810 qla_printk(KERN_WARNING, ha,
1763 "MSI-X: Unable to register handler -- %x/%d.\n", 1811 "MSI-X: Unable to register handler -- %x/%d.\n",
@@ -1773,20 +1821,21 @@ msix_out:
1773} 1821}
1774 1822
1775int 1823int
1776qla2x00_request_irqs(scsi_qla_host_t *ha) 1824qla2x00_request_irqs(struct qla_hw_data *ha)
1777{ 1825{
1778 int ret; 1826 int ret;
1779 device_reg_t __iomem *reg = ha->iobase; 1827 device_reg_t __iomem *reg = ha->iobase;
1828 struct rsp_que *rsp = ha->rsp;
1780 1829
1781 /* If possible, enable MSI-X. */ 1830 /* If possible, enable MSI-X. */
1782 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha)) 1831 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
1783 goto skip_msix; 1832 goto skip_msix;
1784 1833
1785 if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX || 1834 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
1786 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { 1835 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
1787 DEBUG2(qla_printk(KERN_WARNING, ha, 1836 DEBUG2(qla_printk(KERN_WARNING, ha,
1788 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", 1837 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
1789 ha->chip_revision, ha->fw_attributes)); 1838 ha->pdev->revision, ha->fw_attributes));
1790 1839
1791 goto skip_msix; 1840 goto skip_msix;
1792 } 1841 }
@@ -1825,7 +1874,7 @@ skip_msix:
1825skip_msi: 1874skip_msi:
1826 1875
1827 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 1876 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1828 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha); 1877 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
1829 if (ret) { 1878 if (ret) {
1830 qla_printk(KERN_WARNING, ha, 1879 qla_printk(KERN_WARNING, ha,
1831 "Failed to reserve interrupt %d already in use.\n", 1880 "Failed to reserve interrupt %d already in use.\n",
@@ -1833,10 +1882,8 @@ skip_msi:
1833 goto fail; 1882 goto fail;
1834 } 1883 }
1835 ha->flags.inta_enabled = 1; 1884 ha->flags.inta_enabled = 1;
1836 ha->host->irq = ha->pdev->irq;
1837clear_risc_ints: 1885clear_risc_ints:
1838 1886
1839 ha->isp_ops->disable_intrs(ha);
1840 spin_lock_irq(&ha->hardware_lock); 1887 spin_lock_irq(&ha->hardware_lock);
1841 if (IS_FWI2_CAPABLE(ha)) { 1888 if (IS_FWI2_CAPABLE(ha)) {
1842 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT); 1889 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
@@ -1853,13 +1900,35 @@ fail:
1853} 1900}
1854 1901
1855void 1902void
1856qla2x00_free_irqs(scsi_qla_host_t *ha) 1903qla2x00_free_irqs(scsi_qla_host_t *vha)
1857{ 1904{
1905 struct qla_hw_data *ha = vha->hw;
1906 struct rsp_que *rsp = ha->rsp;
1858 1907
1859 if (ha->flags.msix_enabled) 1908 if (ha->flags.msix_enabled)
1860 qla24xx_disable_msix(ha); 1909 qla24xx_disable_msix(ha);
1861 else if (ha->flags.inta_enabled) { 1910 else if (ha->flags.inta_enabled) {
1862 free_irq(ha->host->irq, ha); 1911 free_irq(ha->pdev->irq, rsp);
1863 pci_disable_msi(ha->pdev); 1912 pci_disable_msi(ha->pdev);
1864 } 1913 }
1865} 1914}
1915
1916static struct scsi_qla_host *
1917qla2x00_get_rsp_host(struct rsp_que *rsp)
1918{
1919 srb_t *sp;
1920 struct qla_hw_data *ha = rsp->hw;
1921 struct scsi_qla_host *vha = NULL;
1922 struct sts_entry_24xx *pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
1923
1924 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
1925 sp = ha->req->outstanding_cmds[pkt->handle];
1926 if (sp)
1927 vha = sp->vha;
1928 }
1929 if (!vha)
1930 /* Invalid entry, handle it in base queue */
1931 vha = pci_get_drvdata(ha->pdev);
1932
1933 return vha;
1934}
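
The hunks above change the IRQ cookie: request_irq() is now passed the rsp_que, and each handler recovers the owning scsi_qla_host through qla2x00_get_rsp_host() before servicing the queue. Below is a stand-alone, user-space sketch of that lookup (handle from the next status entry, to the outstanding_cmds slot, to sp->vha, falling back to the base host); the struct layouts and the MAX_OUTSTANDING_COMMANDS value here are simplified stand-ins, not the driver's definitions.

#include <stddef.h>
#include <stdio.h>

#define MAX_OUTSTANDING_COMMANDS 1024	/* stand-in for the driver constant */

struct scsi_qla_host { long host_no; };
struct srb { struct scsi_qla_host *vha; };

struct req_que {
	struct srb *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
};

struct qla_hw_data {
	struct req_que *req;
	struct scsi_qla_host *base_vha;	/* stands in for pci_get_drvdata(ha->pdev) */
};

struct sts_entry { unsigned int handle; };

struct rsp_que {
	struct qla_hw_data *hw;
	struct sts_entry *ring_ptr;	/* next unprocessed response entry */
};

/* Same shape as qla2x00_get_rsp_host(): map a response queue to its vha. */
static struct scsi_qla_host *get_rsp_host(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = NULL;
	struct sts_entry *pkt = rsp->ring_ptr;

	if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
		struct srb *sp = ha->req->outstanding_cmds[pkt->handle];
		if (sp)
			vha = sp->vha;
	}
	if (!vha)			/* invalid entry: fall back to the base host */
		vha = ha->base_vha;
	return vha;
}

/* The handlers above now receive the rsp_que, not the host, as dev_id. */
static int fake_intr_handler(void *dev_id)
{
	struct rsp_que *rsp = dev_id;
	struct scsi_qla_host *vha;

	if (!rsp)
		return 0;		/* IRQ_NONE in the real handler */
	vha = get_rsp_host(rsp);
	printf("servicing responses for scsi(%ld)\n", vha->host_no);
	return 1;			/* IRQ_HANDLED */
}

int main(void)
{
	struct scsi_qla_host base = { 0 }, vport = { 2 };
	struct srb sp = { &vport };
	struct req_que req = { { NULL } };
	struct qla_hw_data hw = { &req, &base };
	struct sts_entry sts = { 7 };
	struct rsp_que rsp = { &hw, &sts };

	req.outstanding_cmds[7] = &sp;
	fake_intr_handler(&rsp);	/* resolves to the vport (host_no 2) */
	return 0;
}
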
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 35567203ef61..05db1660855e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -183,42 +183,42 @@ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
183 */ 183 */
184 184
185__inline__ void 185__inline__ void
186qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval) 186qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
187{ 187{
188 init_timer(&ha->timer); 188 init_timer(&vha->timer);
189 ha->timer.expires = jiffies + interval * HZ; 189 vha->timer.expires = jiffies + interval * HZ;
190 ha->timer.data = (unsigned long)ha; 190 vha->timer.data = (unsigned long)vha;
191 ha->timer.function = (void (*)(unsigned long))func; 191 vha->timer.function = (void (*)(unsigned long))func;
192 add_timer(&ha->timer); 192 add_timer(&vha->timer);
193 ha->timer_active = 1; 193 vha->timer_active = 1;
194} 194}
195 195
196static inline void 196static inline void
197qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval) 197qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
198{ 198{
199 mod_timer(&ha->timer, jiffies + interval * HZ); 199 mod_timer(&vha->timer, jiffies + interval * HZ);
200} 200}
201 201
202static __inline__ void 202static __inline__ void
203qla2x00_stop_timer(scsi_qla_host_t *ha) 203qla2x00_stop_timer(scsi_qla_host_t *vha)
204{ 204{
205 del_timer_sync(&ha->timer); 205 del_timer_sync(&vha->timer);
206 ha->timer_active = 0; 206 vha->timer_active = 0;
207} 207}
208 208
209static int qla2x00_do_dpc(void *data); 209static int qla2x00_do_dpc(void *data);
210 210
211static void qla2x00_rst_aen(scsi_qla_host_t *); 211static void qla2x00_rst_aen(scsi_qla_host_t *);
212 212
213static int qla2x00_mem_alloc(scsi_qla_host_t *); 213static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t);
214static void qla2x00_mem_free(scsi_qla_host_t *ha); 214static void qla2x00_mem_free(struct qla_hw_data *);
215static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *); 215static void qla2x00_sp_free_dma(srb_t *);
216 216
217/* -------------------------------------------------------------------------- */ 217/* -------------------------------------------------------------------------- */
218
219static char * 218static char *
220qla2x00_pci_info_str(struct scsi_qla_host *ha, char *str) 219qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
221{ 220{
221 struct qla_hw_data *ha = vha->hw;
222 static char *pci_bus_modes[] = { 222 static char *pci_bus_modes[] = {
223 "33", "66", "100", "133", 223 "33", "66", "100", "133",
224 }; 224 };
@@ -240,9 +240,10 @@ qla2x00_pci_info_str(struct scsi_qla_host *ha, char *str)
240} 240}
241 241
242static char * 242static char *
243qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str) 243qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
244{ 244{
245 static char *pci_bus_modes[] = { "33", "66", "100", "133", }; 245 static char *pci_bus_modes[] = { "33", "66", "100", "133", };
246 struct qla_hw_data *ha = vha->hw;
246 uint32_t pci_bus; 247 uint32_t pci_bus;
247 int pcie_reg; 248 int pcie_reg;
248 249
@@ -290,9 +291,10 @@ qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
290} 291}
291 292
292static char * 293static char *
293qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str) 294qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
294{ 295{
295 char un_str[10]; 296 char un_str[10];
297 struct qla_hw_data *ha = vha->hw;
296 298
297 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, 299 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
298 ha->fw_minor_version, 300 ha->fw_minor_version,
@@ -328,8 +330,9 @@ qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
328} 330}
329 331
330static char * 332static char *
331qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str) 333qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
332{ 334{
335 struct qla_hw_data *ha = vha->hw;
333 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, 336 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
334 ha->fw_minor_version, 337 ha->fw_minor_version,
335 ha->fw_subminor_version); 338 ha->fw_subminor_version);
@@ -354,16 +357,17 @@ qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
354} 357}
355 358
356static inline srb_t * 359static inline srb_t *
357qla2x00_get_new_sp(scsi_qla_host_t *ha, fc_port_t *fcport, 360qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
358 struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 361 struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
359{ 362{
360 srb_t *sp; 363 srb_t *sp;
364 struct qla_hw_data *ha = vha->hw;
361 365
362 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 366 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
363 if (!sp) 367 if (!sp)
364 return sp; 368 return sp;
365 369
366 sp->ha = ha; 370 sp->vha = vha;
367 sp->fcport = fcport; 371 sp->fcport = fcport;
368 sp->cmd = cmd; 372 sp->cmd = cmd;
369 sp->flags = 0; 373 sp->flags = 0;
@@ -376,9 +380,10 @@ qla2x00_get_new_sp(scsi_qla_host_t *ha, fc_port_t *fcport,
376static int 380static int
377qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 381qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
378{ 382{
379 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 383 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
380 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 384 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
381 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 385 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
386 struct qla_hw_data *ha = vha->hw;
382 srb_t *sp; 387 srb_t *sp;
383 int rval; 388 int rval;
384 389
@@ -399,33 +404,33 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
399 404
400 if (atomic_read(&fcport->state) != FCS_ONLINE) { 405 if (atomic_read(&fcport->state) != FCS_ONLINE) {
401 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 406 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
402 atomic_read(&ha->loop_state) == LOOP_DEAD) { 407 atomic_read(&vha->loop_state) == LOOP_DEAD) {
403 cmd->result = DID_NO_CONNECT << 16; 408 cmd->result = DID_NO_CONNECT << 16;
404 goto qc_fail_command; 409 goto qc_fail_command;
405 } 410 }
406 goto qc_target_busy; 411 goto qc_target_busy;
407 } 412 }
408 413
409 spin_unlock_irq(ha->host->host_lock); 414 spin_unlock_irq(vha->host->host_lock);
410 415
411 sp = qla2x00_get_new_sp(ha, fcport, cmd, done); 416 sp = qla2x00_get_new_sp(vha, fcport, cmd, done);
412 if (!sp) 417 if (!sp)
413 goto qc_host_busy_lock; 418 goto qc_host_busy_lock;
414 419
415 rval = qla2x00_start_scsi(sp); 420 rval = ha->isp_ops->start_scsi(sp);
416 if (rval != QLA_SUCCESS) 421 if (rval != QLA_SUCCESS)
417 goto qc_host_busy_free_sp; 422 goto qc_host_busy_free_sp;
418 423
419 spin_lock_irq(ha->host->host_lock); 424 spin_lock_irq(vha->host->host_lock);
420 425
421 return 0; 426 return 0;
422 427
423qc_host_busy_free_sp: 428qc_host_busy_free_sp:
424 qla2x00_sp_free_dma(ha, sp); 429 qla2x00_sp_free_dma(sp);
425 mempool_free(sp, ha->srb_mempool); 430 mempool_free(sp, ha->srb_mempool);
426 431
427qc_host_busy_lock: 432qc_host_busy_lock:
428 spin_lock_irq(ha->host->host_lock); 433 spin_lock_irq(vha->host->host_lock);
429 return SCSI_MLQUEUE_HOST_BUSY; 434 return SCSI_MLQUEUE_HOST_BUSY;
430 435
431qc_target_busy: 436qc_target_busy:
@@ -441,14 +446,15 @@ qc_fail_command:
441static int 446static int
442qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 447qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
443{ 448{
444 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 449 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
445 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 450 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
446 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 451 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
452 struct qla_hw_data *ha = vha->hw;
453 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
447 srb_t *sp; 454 srb_t *sp;
448 int rval; 455 int rval;
449 scsi_qla_host_t *pha = to_qla_parent(ha);
450 456
451 if (unlikely(pci_channel_offline(pha->pdev))) { 457 if (unlikely(pci_channel_offline(ha->pdev))) {
452 cmd->result = DID_REQUEUE << 16; 458 cmd->result = DID_REQUEUE << 16;
453 goto qc24_fail_command; 459 goto qc24_fail_command;
454 } 460 }
@@ -465,33 +471,33 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
465 471
466 if (atomic_read(&fcport->state) != FCS_ONLINE) { 472 if (atomic_read(&fcport->state) != FCS_ONLINE) {
467 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 473 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
468 atomic_read(&pha->loop_state) == LOOP_DEAD) { 474 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
469 cmd->result = DID_NO_CONNECT << 16; 475 cmd->result = DID_NO_CONNECT << 16;
470 goto qc24_fail_command; 476 goto qc24_fail_command;
471 } 477 }
472 goto qc24_target_busy; 478 goto qc24_target_busy;
473 } 479 }
474 480
475 spin_unlock_irq(ha->host->host_lock); 481 spin_unlock_irq(vha->host->host_lock);
476 482
477 sp = qla2x00_get_new_sp(pha, fcport, cmd, done); 483 sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
478 if (!sp) 484 if (!sp)
479 goto qc24_host_busy_lock; 485 goto qc24_host_busy_lock;
480 486
481 rval = qla24xx_start_scsi(sp); 487 rval = ha->isp_ops->start_scsi(sp);
482 if (rval != QLA_SUCCESS) 488 if (rval != QLA_SUCCESS)
483 goto qc24_host_busy_free_sp; 489 goto qc24_host_busy_free_sp;
484 490
485 spin_lock_irq(ha->host->host_lock); 491 spin_lock_irq(vha->host->host_lock);
486 492
487 return 0; 493 return 0;
488 494
489qc24_host_busy_free_sp: 495qc24_host_busy_free_sp:
490 qla2x00_sp_free_dma(pha, sp); 496 qla2x00_sp_free_dma(sp);
491 mempool_free(sp, pha->srb_mempool); 497 mempool_free(sp, ha->srb_mempool);
492 498
493qc24_host_busy_lock: 499qc24_host_busy_lock:
494 spin_lock_irq(ha->host->host_lock); 500 spin_lock_irq(vha->host->host_lock);
495 return SCSI_MLQUEUE_HOST_BUSY; 501 return SCSI_MLQUEUE_HOST_BUSY;
496 502
497qc24_target_busy: 503qc24_target_busy:
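
Both queuecommand hunks above replace the direct qla2x00_start_scsi()/qla24xx_start_scsi() calls with ha->isp_ops->start_scsi(sp), and the isp_operations tables further down in this diff gain a matching .start_scsi member. The stand-alone sketch below shows that ops-table dispatch; the type and table names are simplified stand-ins for the driver's, and the two start routines are dummies.

#include <stdio.h>

struct srb;

struct isp_operations {
	int (*start_scsi)(struct srb *);
};

struct qla_hw_data {
	struct isp_operations *isp_ops;
};

struct scsi_qla_host {
	struct qla_hw_data *hw;
};

struct srb {
	struct scsi_qla_host *vha;
	int tag;
};

/* Dummy chip-specific start routines; the driver wires up
 * qla2x00_start_scsi() and qla24xx_start_scsi() here. */
static int start_scsi_2x00(struct srb *sp)
{
	printf("2x00 ring code, tag %d\n", sp->tag);
	return 0;			/* QLA_SUCCESS */
}

static int start_scsi_24xx(struct srb *sp)
{
	printf("24xx ring code, tag %d\n", sp->tag);
	return 0;
}

static struct isp_operations ops_2x00 = { .start_scsi = start_scsi_2x00 };
static struct isp_operations ops_24xx = { .start_scsi = start_scsi_24xx };

/* One queuecommand body for every chip family: no per-ISP branching,
 * just a call through the ops table hung off qla_hw_data. */
static int queue_cmd(struct scsi_qla_host *vha, struct srb *sp)
{
	return vha->hw->isp_ops->start_scsi(sp);
}

int main(void)
{
	struct qla_hw_data hw = { &ops_24xx };
	struct scsi_qla_host vha = { &hw };
	struct srb sp = { &vha, 11 };

	queue_cmd(&vha, &sp);		/* dispatches to the 24xx routine */
	hw.isp_ops = &ops_2x00;		/* same call site, older chip ops */
	queue_cmd(&vha, &sp);
	return 0;
}
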
@@ -510,17 +516,14 @@ qc24_fail_command:
510 * max time. 516 * max time.
511 * 517 *
512 * Input: 518 * Input:
513 * ha = actual ha whose done queue will contain the command
514 * returned by firmware.
515 * cmd = Scsi Command to wait on. 519 * cmd = Scsi Command to wait on.
516 * flag = Abort/Reset(Bus or Device Reset)
517 * 520 *
518 * Return: 521 * Return:
519 * Not Found : 0 522 * Not Found : 0
520 * Found : 1 523 * Found : 1
521 */ 524 */
522static int 525static int
523qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd) 526qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
524{ 527{
525#define ABORT_POLLING_PERIOD 1000 528#define ABORT_POLLING_PERIOD 1000
526#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD)) 529#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
@@ -557,21 +560,22 @@ qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
557 * Failed (Adapter is offline/disabled) : 1 560 * Failed (Adapter is offline/disabled) : 1
558 */ 561 */
559int 562int
560qla2x00_wait_for_hba_online(scsi_qla_host_t *ha) 563qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
561{ 564{
562 int return_status; 565 int return_status;
563 unsigned long wait_online; 566 unsigned long wait_online;
564 scsi_qla_host_t *pha = to_qla_parent(ha); 567 struct qla_hw_data *ha = vha->hw;
568 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
565 569
566 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); 570 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
567 while (((test_bit(ISP_ABORT_NEEDED, &pha->dpc_flags)) || 571 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
568 test_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags) || 572 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
569 test_bit(ISP_ABORT_RETRY, &pha->dpc_flags) || 573 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
570 pha->dpc_active) && time_before(jiffies, wait_online)) { 574 ha->dpc_active) && time_before(jiffies, wait_online)) {
571 575
572 msleep(1000); 576 msleep(1000);
573 } 577 }
574 if (pha->flags.online) 578 if (base_vha->flags.online)
575 return_status = QLA_SUCCESS; 579 return_status = QLA_SUCCESS;
576 else 580 else
577 return_status = QLA_FUNCTION_FAILED; 581 return_status = QLA_FUNCTION_FAILED;
@@ -596,19 +600,20 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *ha)
596 * Failed (LOOP_NOT_READY) : 1 600 * Failed (LOOP_NOT_READY) : 1
597 */ 601 */
598static inline int 602static inline int
599qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha) 603qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
600{ 604{
601 int return_status = QLA_SUCCESS; 605 int return_status = QLA_SUCCESS;
602 unsigned long loop_timeout ; 606 unsigned long loop_timeout ;
603 scsi_qla_host_t *pha = to_qla_parent(ha); 607 struct qla_hw_data *ha = vha->hw;
608 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
604 609
605 /* wait for 5 min at the max for loop to be ready */ 610 /* wait for 5 min at the max for loop to be ready */
606 loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ); 611 loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
607 612
608 while ((!atomic_read(&pha->loop_down_timer) && 613 while ((!atomic_read(&base_vha->loop_down_timer) &&
609 atomic_read(&pha->loop_state) == LOOP_DOWN) || 614 atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
610 atomic_read(&pha->loop_state) != LOOP_READY) { 615 atomic_read(&base_vha->loop_state) != LOOP_READY) {
611 if (atomic_read(&pha->loop_state) == LOOP_DEAD) { 616 if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
612 return_status = QLA_FUNCTION_FAILED; 617 return_status = QLA_FUNCTION_FAILED;
613 break; 618 break;
614 } 619 }
@@ -627,32 +632,33 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport)
627 int cnt; 632 int cnt;
628 unsigned long flags; 633 unsigned long flags;
629 srb_t *sp; 634 srb_t *sp;
630 scsi_qla_host_t *ha = fcport->ha; 635 scsi_qla_host_t *vha = fcport->vha;
631 scsi_qla_host_t *pha = to_qla_parent(ha); 636 struct qla_hw_data *ha = vha->hw;
637 struct req_que *req = ha->req;
632 638
633 spin_lock_irqsave(&pha->hardware_lock, flags); 639 spin_lock_irqsave(&ha->hardware_lock, flags);
634 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 640 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
635 sp = pha->outstanding_cmds[cnt]; 641 sp = req->outstanding_cmds[cnt];
636 if (!sp) 642 if (!sp)
637 continue; 643 continue;
638 if (sp->fcport != fcport) 644 if (sp->fcport != fcport)
639 continue; 645 continue;
640 646
641 spin_unlock_irqrestore(&pha->hardware_lock, flags); 647 spin_unlock_irqrestore(&ha->hardware_lock, flags);
642 if (ha->isp_ops->abort_command(ha, sp)) { 648 if (ha->isp_ops->abort_command(vha, sp)) {
643 DEBUG2(qla_printk(KERN_WARNING, ha, 649 DEBUG2(qla_printk(KERN_WARNING, ha,
644 "Abort failed -- %lx\n", sp->cmd->serial_number)); 650 "Abort failed -- %lx\n", sp->cmd->serial_number));
645 } else { 651 } else {
646 if (qla2x00_eh_wait_on_command(ha, sp->cmd) != 652 if (qla2x00_eh_wait_on_command(sp->cmd) !=
647 QLA_SUCCESS) 653 QLA_SUCCESS)
648 DEBUG2(qla_printk(KERN_WARNING, ha, 654 DEBUG2(qla_printk(KERN_WARNING, ha,
649 "Abort failed while waiting -- %lx\n", 655 "Abort failed while waiting -- %lx\n",
650 sp->cmd->serial_number)); 656 sp->cmd->serial_number));
651 657
652 } 658 }
653 spin_lock_irqsave(&pha->hardware_lock, flags); 659 spin_lock_irqsave(&ha->hardware_lock, flags);
654 } 660 }
655 spin_unlock_irqrestore(&pha->hardware_lock, flags); 661 spin_unlock_irqrestore(&ha->hardware_lock, flags);
656} 662}
657 663
658static void 664static void
@@ -690,14 +696,15 @@ qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
690static int 696static int
691qla2xxx_eh_abort(struct scsi_cmnd *cmd) 697qla2xxx_eh_abort(struct scsi_cmnd *cmd)
692{ 698{
693 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 699 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
694 srb_t *sp; 700 srb_t *sp;
695 int ret, i; 701 int ret, i;
696 unsigned int id, lun; 702 unsigned int id, lun;
697 unsigned long serial; 703 unsigned long serial;
698 unsigned long flags; 704 unsigned long flags;
699 int wait = 0; 705 int wait = 0;
700 scsi_qla_host_t *pha = to_qla_parent(ha); 706 struct qla_hw_data *ha = vha->hw;
707 struct req_que *req = ha->req;
701 708
702 qla2x00_block_error_handler(cmd); 709 qla2x00_block_error_handler(cmd);
703 710
@@ -711,9 +718,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
711 serial = cmd->serial_number; 718 serial = cmd->serial_number;
712 719
713 /* Check active list for command command. */ 720 /* Check active list for command command. */
714 spin_lock_irqsave(&pha->hardware_lock, flags); 721 spin_lock_irqsave(&ha->hardware_lock, flags);
715 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) { 722 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
716 sp = pha->outstanding_cmds[i]; 723 sp = req->outstanding_cmds[i];
717 724
718 if (sp == NULL) 725 if (sp == NULL)
719 continue; 726 continue;
@@ -722,37 +729,37 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
722 continue; 729 continue;
723 730
724 DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n", 731 DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n",
725 __func__, ha->host_no, sp, serial)); 732 __func__, vha->host_no, sp, serial));
726 733
727 spin_unlock_irqrestore(&pha->hardware_lock, flags); 734 spin_unlock_irqrestore(&ha->hardware_lock, flags);
728 if (ha->isp_ops->abort_command(ha, sp)) { 735 if (ha->isp_ops->abort_command(vha, sp)) {
729 DEBUG2(printk("%s(%ld): abort_command " 736 DEBUG2(printk("%s(%ld): abort_command "
730 "mbx failed.\n", __func__, ha->host_no)); 737 "mbx failed.\n", __func__, vha->host_no));
731 ret = FAILED; 738 ret = FAILED;
732 } else { 739 } else {
733 DEBUG3(printk("%s(%ld): abort_command " 740 DEBUG3(printk("%s(%ld): abort_command "
734 "mbx success.\n", __func__, ha->host_no)); 741 "mbx success.\n", __func__, vha->host_no));
735 wait = 1; 742 wait = 1;
736 } 743 }
737 spin_lock_irqsave(&pha->hardware_lock, flags); 744 spin_lock_irqsave(&ha->hardware_lock, flags);
738 745
739 break; 746 break;
740 } 747 }
741 spin_unlock_irqrestore(&pha->hardware_lock, flags); 748 spin_unlock_irqrestore(&ha->hardware_lock, flags);
742 749
743 /* Wait for the command to be returned. */ 750 /* Wait for the command to be returned. */
744 if (wait) { 751 if (wait) {
745 if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) { 752 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
746 qla_printk(KERN_ERR, ha, 753 qla_printk(KERN_ERR, ha,
747 "scsi(%ld:%d:%d): Abort handler timed out -- %lx " 754 "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
748 "%x.\n", ha->host_no, id, lun, serial, ret); 755 "%x.\n", vha->host_no, id, lun, serial, ret);
749 ret = FAILED; 756 ret = FAILED;
750 } 757 }
751 } 758 }
752 759
753 qla_printk(KERN_INFO, ha, 760 qla_printk(KERN_INFO, ha,
754 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n", 761 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
755 ha->host_no, id, lun, wait, serial, ret); 762 vha->host_no, id, lun, wait, serial, ret);
756 763
757 return ret; 764 return ret;
758} 765}
@@ -764,23 +771,24 @@ enum nexus_wait_type {
764}; 771};
765 772
766static int 773static int
767qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t, 774qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
768 unsigned int l, enum nexus_wait_type type) 775 unsigned int l, enum nexus_wait_type type)
769{ 776{
770 int cnt, match, status; 777 int cnt, match, status;
771 srb_t *sp; 778 srb_t *sp;
772 unsigned long flags; 779 unsigned long flags;
773 scsi_qla_host_t *pha = to_qla_parent(ha); 780 struct qla_hw_data *ha = vha->hw;
781 struct req_que *req = ha->req;
774 782
775 status = QLA_SUCCESS; 783 status = QLA_SUCCESS;
776 spin_lock_irqsave(&pha->hardware_lock, flags); 784 spin_lock_irqsave(&ha->hardware_lock, flags);
777 for (cnt = 1; status == QLA_SUCCESS && cnt < MAX_OUTSTANDING_COMMANDS; 785 for (cnt = 1; status == QLA_SUCCESS && cnt < MAX_OUTSTANDING_COMMANDS;
778 cnt++) { 786 cnt++) {
779 sp = pha->outstanding_cmds[cnt]; 787 sp = req->outstanding_cmds[cnt];
780 if (!sp) 788 if (!sp)
781 continue; 789 continue;
782 790
783 if (ha->vp_idx != sp->fcport->ha->vp_idx) 791 if (vha->vp_idx != sp->fcport->vha->vp_idx)
784 continue; 792 continue;
785 match = 0; 793 match = 0;
786 switch (type) { 794 switch (type) {
@@ -798,11 +806,11 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t,
798 if (!match) 806 if (!match)
799 continue; 807 continue;
800 808
801 spin_unlock_irqrestore(&pha->hardware_lock, flags); 809 spin_unlock_irqrestore(&ha->hardware_lock, flags);
802 status = qla2x00_eh_wait_on_command(ha, sp->cmd); 810 status = qla2x00_eh_wait_on_command(sp->cmd);
803 spin_lock_irqsave(&pha->hardware_lock, flags); 811 spin_lock_irqsave(&ha->hardware_lock, flags);
804 } 812 }
805 spin_unlock_irqrestore(&pha->hardware_lock, flags); 813 spin_unlock_irqrestore(&ha->hardware_lock, flags);
806 814
807 return status; 815 return status;
808} 816}
@@ -818,7 +826,7 @@ static int
818__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, 826__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
819 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int)) 827 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int))
820{ 828{
821 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 829 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
822 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 830 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
823 int err; 831 int err;
824 832
@@ -827,31 +835,31 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
827 if (!fcport) 835 if (!fcport)
828 return FAILED; 836 return FAILED;
829 837
830 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET ISSUED.\n", 838 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
831 ha->host_no, cmd->device->id, cmd->device->lun, name); 839 vha->host_no, cmd->device->id, cmd->device->lun, name);
832 840
833 err = 0; 841 err = 0;
834 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 842 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
835 goto eh_reset_failed; 843 goto eh_reset_failed;
836 err = 1; 844 err = 1;
837 if (qla2x00_wait_for_loop_ready(ha) != QLA_SUCCESS) 845 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
838 goto eh_reset_failed; 846 goto eh_reset_failed;
839 err = 2; 847 err = 2;
840 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS) 848 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS)
841 goto eh_reset_failed; 849 goto eh_reset_failed;
842 err = 3; 850 err = 3;
843 if (qla2x00_eh_wait_for_pending_commands(ha, cmd->device->id, 851 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
844 cmd->device->lun, type) != QLA_SUCCESS) 852 cmd->device->lun, type) != QLA_SUCCESS)
845 goto eh_reset_failed; 853 goto eh_reset_failed;
846 854
847 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n", 855 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
848 ha->host_no, cmd->device->id, cmd->device->lun, name); 856 vha->host_no, cmd->device->id, cmd->device->lun, name);
849 857
850 return SUCCESS; 858 return SUCCESS;
851 859
852 eh_reset_failed: 860 eh_reset_failed:
853 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n", 861 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n"
854 ha->host_no, cmd->device->id, cmd->device->lun, name, 862 , vha->host_no, cmd->device->id, cmd->device->lun, name,
855 reset_errors[err]); 863 reset_errors[err]);
856 return FAILED; 864 return FAILED;
857} 865}
@@ -859,7 +867,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
859static int 867static int
860qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) 868qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
861{ 869{
862 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 870 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
871 struct qla_hw_data *ha = vha->hw;
863 872
864 return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd, 873 return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
865 ha->isp_ops->lun_reset); 874 ha->isp_ops->lun_reset);
@@ -868,7 +877,8 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
868static int 877static int
869qla2xxx_eh_target_reset(struct scsi_cmnd *cmd) 878qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
870{ 879{
871 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 880 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
881 struct qla_hw_data *ha = vha->hw;
872 882
873 return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd, 883 return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
874 ha->isp_ops->target_reset); 884 ha->isp_ops->target_reset);
@@ -892,8 +902,7 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
892static int 902static int
893qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) 903qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
894{ 904{
895 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 905 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
896 scsi_qla_host_t *pha = to_qla_parent(ha);
897 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 906 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
898 int ret = FAILED; 907 int ret = FAILED;
899 unsigned int id, lun; 908 unsigned int id, lun;
@@ -908,28 +917,28 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
908 if (!fcport) 917 if (!fcport)
909 return ret; 918 return ret;
910 919
911 qla_printk(KERN_INFO, ha, 920 qla_printk(KERN_INFO, vha->hw,
912 "scsi(%ld:%d:%d): LOOP RESET ISSUED.\n", ha->host_no, id, lun); 921 "scsi(%ld:%d:%d): LOOP RESET ISSUED.\n", vha->host_no, id, lun);
913 922
914 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) { 923 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
915 DEBUG2(printk("%s failed:board disabled\n",__func__)); 924 DEBUG2(printk("%s failed:board disabled\n",__func__));
916 goto eh_bus_reset_done; 925 goto eh_bus_reset_done;
917 } 926 }
918 927
919 if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) { 928 if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
920 if (qla2x00_loop_reset(ha) == QLA_SUCCESS) 929 if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
921 ret = SUCCESS; 930 ret = SUCCESS;
922 } 931 }
923 if (ret == FAILED) 932 if (ret == FAILED)
924 goto eh_bus_reset_done; 933 goto eh_bus_reset_done;
925 934
926 /* Flush outstanding commands. */ 935 /* Flush outstanding commands. */
927 if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) != 936 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
928 QLA_SUCCESS) 937 QLA_SUCCESS)
929 ret = FAILED; 938 ret = FAILED;
930 939
931eh_bus_reset_done: 940eh_bus_reset_done:
932 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 941 qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__,
933 (ret == FAILED) ? "failed" : "succeded"); 942 (ret == FAILED) ? "failed" : "succeded");
934 943
935 return ret; 944 return ret;
@@ -953,12 +962,13 @@ eh_bus_reset_done:
953static int 962static int
954qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) 963qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
955{ 964{
956 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 965 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
957 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 966 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
967 struct qla_hw_data *ha = vha->hw;
958 int ret = FAILED; 968 int ret = FAILED;
959 unsigned int id, lun; 969 unsigned int id, lun;
960 unsigned long serial; 970 unsigned long serial;
961 scsi_qla_host_t *pha = to_qla_parent(ha); 971 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
962 972
963 qla2x00_block_error_handler(cmd); 973 qla2x00_block_error_handler(cmd);
964 974
@@ -970,9 +980,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
970 return ret; 980 return ret;
971 981
972 qla_printk(KERN_INFO, ha, 982 qla_printk(KERN_INFO, ha,
973 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no, id, lun); 983 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
974 984
975 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 985 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
976 goto eh_host_reset_lock; 986 goto eh_host_reset_lock;
977 987
978 /* 988 /*
@@ -983,26 +993,28 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
983 * devices as lost kicking of the port_down_timer 993 * devices as lost kicking of the port_down_timer
984 * while dpc is stuck for the mailbox to complete. 994 * while dpc is stuck for the mailbox to complete.
985 */ 995 */
986 qla2x00_wait_for_loop_ready(ha); 996 qla2x00_wait_for_loop_ready(vha);
987 set_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags); 997 if (vha != base_vha) {
988 if (qla2x00_abort_isp(pha)) { 998 if (qla2x00_vp_abort_isp(vha))
989 clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
990 /* failed. schedule dpc to try */
991 set_bit(ISP_ABORT_NEEDED, &pha->dpc_flags);
992
993 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
994 goto eh_host_reset_lock; 999 goto eh_host_reset_lock;
1000 } else {
1001 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1002 if (qla2x00_abort_isp(base_vha)) {
1003 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1004 /* failed. schedule dpc to try */
1005 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1006
1007 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
1008 goto eh_host_reset_lock;
1009 }
1010 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
995 } 1011 }
996 clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
997 1012
998 /* Waiting for our command in done_queue to be returned to OS.*/ 1013 /* Waiting for command to be returned to OS.*/
999 if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) == 1014 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
1000 QLA_SUCCESS) 1015 QLA_SUCCESS)
1001 ret = SUCCESS; 1016 ret = SUCCESS;
1002 1017
1003 if (ha->parent)
1004 qla2x00_vp_abort_isp(ha);
1005
1006eh_host_reset_lock: 1018eh_host_reset_lock:
1007 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 1019 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
1008 (ret == FAILED) ? "failed" : "succeded"); 1020 (ret == FAILED) ? "failed" : "succeded");
@@ -1021,35 +1033,33 @@ eh_host_reset_lock:
1021* 0 = success 1033* 0 = success
1022*/ 1034*/
1023int 1035int
1024qla2x00_loop_reset(scsi_qla_host_t *ha) 1036qla2x00_loop_reset(scsi_qla_host_t *vha)
1025{ 1037{
1026 int ret; 1038 int ret;
1027 struct fc_port *fcport; 1039 struct fc_port *fcport;
1040 struct qla_hw_data *ha = vha->hw;
1028 1041
1029 if (ha->flags.enable_lip_full_login) { 1042 if (ha->flags.enable_lip_full_login) {
1030 ret = qla2x00_full_login_lip(ha); 1043 ret = qla2x00_full_login_lip(vha);
1031 if (ret != QLA_SUCCESS) { 1044 if (ret != QLA_SUCCESS) {
1032 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1045 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1033 "full_login_lip=%d.\n", __func__, ha->host_no, 1046 "full_login_lip=%d.\n", __func__, vha->host_no,
1034 ret)); 1047 ret));
1035 } 1048 } else
1036 atomic_set(&ha->loop_state, LOOP_DOWN); 1049 qla2x00_wait_for_loop_ready(vha);
1037 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
1038 qla2x00_mark_all_devices_lost(ha, 0);
1039 qla2x00_wait_for_loop_ready(ha);
1040 } 1050 }
1041 1051
1042 if (ha->flags.enable_lip_reset) { 1052 if (ha->flags.enable_lip_reset) {
1043 ret = qla2x00_lip_reset(ha); 1053 ret = qla2x00_lip_reset(vha);
1044 if (ret != QLA_SUCCESS) { 1054 if (ret != QLA_SUCCESS) {
1045 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1055 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1046 "lip_reset=%d.\n", __func__, ha->host_no, ret)); 1056 "lip_reset=%d.\n", __func__, vha->host_no, ret));
1047 } 1057 } else
1048 qla2x00_wait_for_loop_ready(ha); 1058 qla2x00_wait_for_loop_ready(vha);
1049 } 1059 }
1050 1060
1051 if (ha->flags.enable_target_reset) { 1061 if (ha->flags.enable_target_reset) {
1052 list_for_each_entry(fcport, &ha->fcports, list) { 1062 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1053 if (fcport->port_type != FCT_TARGET) 1063 if (fcport->port_type != FCT_TARGET)
1054 continue; 1064 continue;
1055 1065
@@ -1057,31 +1067,33 @@ qla2x00_loop_reset(scsi_qla_host_t *ha)
1057 if (ret != QLA_SUCCESS) { 1067 if (ret != QLA_SUCCESS) {
1058 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1068 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1059 "target_reset=%d d_id=%x.\n", __func__, 1069 "target_reset=%d d_id=%x.\n", __func__,
1060 ha->host_no, ret, fcport->d_id.b24)); 1070 vha->host_no, ret, fcport->d_id.b24));
1061 } 1071 }
1062 } 1072 }
1063 } 1073 }
1064 1074
1065 /* Issue marker command only when we are going to start the I/O */ 1075 /* Issue marker command only when we are going to start the I/O */
1066 ha->marker_needed = 1; 1076 vha->marker_needed = 1;
1067 1077
1068 return QLA_SUCCESS; 1078 return QLA_SUCCESS;
1069} 1079}
1070 1080
1071void 1081void
1072qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res) 1082qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1073{ 1083{
1074 int cnt; 1084 int cnt;
1075 unsigned long flags; 1085 unsigned long flags;
1076 srb_t *sp; 1086 srb_t *sp;
1087 struct qla_hw_data *ha = vha->hw;
1088 struct req_que *req = ha->req;
1077 1089
1078 spin_lock_irqsave(&ha->hardware_lock, flags); 1090 spin_lock_irqsave(&ha->hardware_lock, flags);
1079 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 1091 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1080 sp = ha->outstanding_cmds[cnt]; 1092 sp = req->outstanding_cmds[cnt];
1081 if (sp) { 1093 if (sp) {
1082 ha->outstanding_cmds[cnt] = NULL; 1094 req->outstanding_cmds[cnt] = NULL;
1083 sp->cmd->result = res; 1095 sp->cmd->result = res;
1084 qla2x00_sp_compl(ha, sp); 1096 qla2x00_sp_compl(vha, sp);
1085 } 1097 }
1086 } 1098 }
1087 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1099 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1103,13 +1115,14 @@ qla2xxx_slave_alloc(struct scsi_device *sdev)
1103static int 1115static int
1104qla2xxx_slave_configure(struct scsi_device *sdev) 1116qla2xxx_slave_configure(struct scsi_device *sdev)
1105{ 1117{
1106 scsi_qla_host_t *ha = shost_priv(sdev->host); 1118 scsi_qla_host_t *vha = shost_priv(sdev->host);
1119 struct qla_hw_data *ha = vha->hw;
1107 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1120 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1108 1121
1109 if (sdev->tagged_supported) 1122 if (sdev->tagged_supported)
1110 scsi_activate_tcq(sdev, ha->max_q_depth); 1123 scsi_activate_tcq(sdev, ha->req->max_q_depth);
1111 else 1124 else
1112 scsi_deactivate_tcq(sdev, ha->max_q_depth); 1125 scsi_deactivate_tcq(sdev, ha->req->max_q_depth);
1113 1126
1114 rport->dev_loss_tmo = ha->port_down_retry_count; 1127 rport->dev_loss_tmo = ha->port_down_retry_count;
1115 1128
@@ -1152,8 +1165,9 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
1152 * supported addressing method. 1165 * supported addressing method.
1153 */ 1166 */
1154static void 1167static void
1155qla2x00_config_dma_addressing(scsi_qla_host_t *ha) 1168qla2x00_config_dma_addressing(scsi_qla_host_t *vha)
1156{ 1169{
1170 struct qla_hw_data *ha = vha->hw;
1157 /* Assume a 32bit DMA mask. */ 1171 /* Assume a 32bit DMA mask. */
1158 ha->flags.enable_64bit_addressing = 0; 1172 ha->flags.enable_64bit_addressing = 0;
1159 1173
@@ -1174,7 +1188,7 @@ qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
1174} 1188}
1175 1189
1176static void 1190static void
1177qla2x00_enable_intrs(scsi_qla_host_t *ha) 1191qla2x00_enable_intrs(struct qla_hw_data *ha)
1178{ 1192{
1179 unsigned long flags = 0; 1193 unsigned long flags = 0;
1180 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1194 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1189,7 +1203,7 @@ qla2x00_enable_intrs(scsi_qla_host_t *ha)
1189} 1203}
1190 1204
1191static void 1205static void
1192qla2x00_disable_intrs(scsi_qla_host_t *ha) 1206qla2x00_disable_intrs(struct qla_hw_data *ha)
1193{ 1207{
1194 unsigned long flags = 0; 1208 unsigned long flags = 0;
1195 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1209 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1203,7 +1217,7 @@ qla2x00_disable_intrs(scsi_qla_host_t *ha)
1203} 1217}
1204 1218
1205static void 1219static void
1206qla24xx_enable_intrs(scsi_qla_host_t *ha) 1220qla24xx_enable_intrs(struct qla_hw_data *ha)
1207{ 1221{
1208 unsigned long flags = 0; 1222 unsigned long flags = 0;
1209 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1223 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -1216,7 +1230,7 @@ qla24xx_enable_intrs(scsi_qla_host_t *ha)
1216} 1230}
1217 1231
1218static void 1232static void
1219qla24xx_disable_intrs(scsi_qla_host_t *ha) 1233qla24xx_disable_intrs(struct qla_hw_data *ha)
1220{ 1234{
1221 unsigned long flags = 0; 1235 unsigned long flags = 0;
1222 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1236 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
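In the four interrupt hunks above, the enable/disable helpers switch from taking scsi_qla_host_t to struct qla_hw_data, since interrupt masking is a property of the physical function rather than of any one port. A hedged toy illustration of that split follows: register-level helpers take the hardware block, port-level code reaches it through vha->hw. The field names are simplified stand-ins.

struct qla_hw_data {
        int interrupts_on;
};

struct scsi_qla_host {
        struct qla_hw_data *hw;
        int online;
};

static void enable_intrs(struct qla_hw_data *ha)
{
        ha->interrupts_on = 1;          /* would poke ha->iobase registers */
}

static void disable_intrs(struct qla_hw_data *ha)
{
        ha->interrupts_on = 0;
}

static void port_online(struct scsi_qla_host *vha)
{
        vha->online = 1;
        enable_intrs(vha->hw);          /* port code goes through vha->hw */
}

int main(void)
{
        struct qla_hw_data hw = { 0 };
        struct scsi_qla_host vha = { &hw, 0 };

        port_online(&vha);
        disable_intrs(&hw);
        return hw.interrupts_on;        /* 0 */
}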
@@ -1260,6 +1274,7 @@ static struct isp_operations qla2100_isp_ops = {
1260 .read_optrom = qla2x00_read_optrom_data, 1274 .read_optrom = qla2x00_read_optrom_data,
1261 .write_optrom = qla2x00_write_optrom_data, 1275 .write_optrom = qla2x00_write_optrom_data,
1262 .get_flash_version = qla2x00_get_flash_version, 1276 .get_flash_version = qla2x00_get_flash_version,
1277 .start_scsi = qla2x00_start_scsi,
1263}; 1278};
1264 1279
1265static struct isp_operations qla2300_isp_ops = { 1280static struct isp_operations qla2300_isp_ops = {
@@ -1294,6 +1309,7 @@ static struct isp_operations qla2300_isp_ops = {
1294 .read_optrom = qla2x00_read_optrom_data, 1309 .read_optrom = qla2x00_read_optrom_data,
1295 .write_optrom = qla2x00_write_optrom_data, 1310 .write_optrom = qla2x00_write_optrom_data,
1296 .get_flash_version = qla2x00_get_flash_version, 1311 .get_flash_version = qla2x00_get_flash_version,
1312 .start_scsi = qla2x00_start_scsi,
1297}; 1313};
1298 1314
1299static struct isp_operations qla24xx_isp_ops = { 1315static struct isp_operations qla24xx_isp_ops = {
@@ -1328,6 +1344,7 @@ static struct isp_operations qla24xx_isp_ops = {
1328 .read_optrom = qla24xx_read_optrom_data, 1344 .read_optrom = qla24xx_read_optrom_data,
1329 .write_optrom = qla24xx_write_optrom_data, 1345 .write_optrom = qla24xx_write_optrom_data,
1330 .get_flash_version = qla24xx_get_flash_version, 1346 .get_flash_version = qla24xx_get_flash_version,
1347 .start_scsi = qla24xx_start_scsi,
1331}; 1348};
1332 1349
1333static struct isp_operations qla25xx_isp_ops = { 1350static struct isp_operations qla25xx_isp_ops = {
@@ -1362,10 +1379,11 @@ static struct isp_operations qla25xx_isp_ops = {
1362 .read_optrom = qla25xx_read_optrom_data, 1379 .read_optrom = qla25xx_read_optrom_data,
1363 .write_optrom = qla24xx_write_optrom_data, 1380 .write_optrom = qla24xx_write_optrom_data,
1364 .get_flash_version = qla24xx_get_flash_version, 1381 .get_flash_version = qla24xx_get_flash_version,
1382 .start_scsi = qla24xx_start_scsi,
1365}; 1383};
1366 1384
1367static inline void 1385static inline void
1368qla2x00_set_isp_flags(scsi_qla_host_t *ha) 1386qla2x00_set_isp_flags(struct qla_hw_data *ha)
1369{ 1387{
1370 ha->device_type = DT_EXTENDED_IDS; 1388 ha->device_type = DT_EXTENDED_IDS;
1371 switch (ha->pdev->device) { 1389 switch (ha->pdev->device) {
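Each isp_operations table above gains a .start_scsi member (qla2x00_start_scsi for the 2100/2300-class entries, qla24xx_start_scsi for the 24xx/25xx ones), so I/O submission can be dispatched through the ops table instead of a chip-specific call. A small stand-alone sketch of that dispatch pattern, with stub functions in place of the real ring-builders:

#include <stdio.h>

struct srb;                             /* opaque command wrapper */

struct isp_operations {                 /* only the new member shown */
        int (*start_scsi)(struct srb *sp);
};

static int start_scsi_2x00(struct srb *sp)
{
        (void)sp;
        puts("legacy request-ring format");
        return 0;
}

static int start_scsi_24xx(struct srb *sp)
{
        (void)sp;
        puts("24xx/25xx request-ring format");
        return 0;
}

static const struct isp_operations ops_2300 = { .start_scsi = start_scsi_2x00 };
static const struct isp_operations ops_24xx = { .start_scsi = start_scsi_24xx };

int main(void)
{
        /* the caller no longer knows or cares which chip it drives */
        const struct isp_operations *ops = &ops_24xx;

        (void)ops_2300;
        return ops->start_scsi(NULL);
}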
@@ -1447,7 +1465,7 @@ qla2x00_set_isp_flags(scsi_qla_host_t *ha)
1447} 1465}
1448 1466
1449static int 1467static int
1450qla2x00_iospace_config(scsi_qla_host_t *ha) 1468qla2x00_iospace_config(struct qla_hw_data *ha)
1451{ 1469{
1452 resource_size_t pio; 1470 resource_size_t pio;
1453 1471
@@ -1511,25 +1529,25 @@ iospace_error_exit:
1511static void 1529static void
1512qla2xxx_scan_start(struct Scsi_Host *shost) 1530qla2xxx_scan_start(struct Scsi_Host *shost)
1513{ 1531{
1514 scsi_qla_host_t *ha = shost_priv(shost); 1532 scsi_qla_host_t *vha = shost_priv(shost);
1515 1533
1516 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 1534 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1517 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 1535 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1518 set_bit(RSCN_UPDATE, &ha->dpc_flags); 1536 set_bit(RSCN_UPDATE, &vha->dpc_flags);
1519 set_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags); 1537 set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
1520} 1538}
1521 1539
1522static int 1540static int
1523qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) 1541qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
1524{ 1542{
1525 scsi_qla_host_t *ha = shost_priv(shost); 1543 scsi_qla_host_t *vha = shost_priv(shost);
1526 1544
1527 if (!ha->host) 1545 if (!vha->host)
1528 return 1; 1546 return 1;
1529 if (time > ha->loop_reset_delay * HZ) 1547 if (time > vha->hw->loop_reset_delay * HZ)
1530 return 1; 1548 return 1;
1531 1549
1532 return atomic_read(&ha->loop_state) == LOOP_READY; 1550 return atomic_read(&vha->loop_state) == LOOP_READY;
1533} 1551}
1534 1552
1535/* 1553/*
@@ -1540,11 +1558,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1540{ 1558{
1541 int ret = -ENODEV; 1559 int ret = -ENODEV;
1542 struct Scsi_Host *host; 1560 struct Scsi_Host *host;
1543 scsi_qla_host_t *ha; 1561 scsi_qla_host_t *base_vha = NULL;
1562 struct qla_hw_data *ha;
1544 char pci_info[30]; 1563 char pci_info[30];
1545 char fw_str[30]; 1564 char fw_str[30];
1546 struct scsi_host_template *sht; 1565 struct scsi_host_template *sht;
1547 int bars, mem_only = 0; 1566 int bars, mem_only, max_id = 0;
1567 uint16_t req_length = 0, rsp_length = 0;
1548 1568
1549 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 1569 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
1550 sht = &qla2x00_driver_template; 1570 sht = &qla2x00_driver_template;
@@ -1570,33 +1590,24 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1570 /* This may fail but that's ok */ 1590 /* This may fail but that's ok */
1571 pci_enable_pcie_error_reporting(pdev); 1591 pci_enable_pcie_error_reporting(pdev);
1572 1592
1573 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 1593 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1574 if (host == NULL) { 1594 if (!ha) {
1575 printk(KERN_WARNING 1595 DEBUG(printk("Unable to allocate memory for ha\n"));
1576 "qla2xxx: Couldn't allocate host from scsi layer!\n"); 1596 goto probe_out;
1577 goto probe_disable_device;
1578 } 1597 }
1598 ha->pdev = pdev;
1579 1599
1580 /* Clear our data area */ 1600 /* Clear our data area */
1581 ha = shost_priv(host);
1582 memset(ha, 0, sizeof(scsi_qla_host_t));
1583
1584 ha->pdev = pdev;
1585 ha->host = host;
1586 ha->host_no = host->host_no;
1587 sprintf(ha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, ha->host_no);
1588 ha->parent = NULL;
1589 ha->bars = bars; 1601 ha->bars = bars;
1590 ha->mem_only = mem_only; 1602 ha->mem_only = mem_only;
1591 spin_lock_init(&ha->hardware_lock); 1603 spin_lock_init(&ha->hardware_lock);
1592 1604
1593 /* Set ISP-type information. */ 1605 /* Set ISP-type information. */
1594 qla2x00_set_isp_flags(ha); 1606 qla2x00_set_isp_flags(ha);
1595
1596 /* Configure PCI I/O space */ 1607 /* Configure PCI I/O space */
1597 ret = qla2x00_iospace_config(ha); 1608 ret = qla2x00_iospace_config(ha);
1598 if (ret) 1609 if (ret)
1599 goto probe_failed; 1610 goto probe_hw_failed;
1600 1611
1601 qla_printk(KERN_INFO, ha, 1612 qla_printk(KERN_INFO, ha,
1602 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, 1613 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
@@ -1604,105 +1615,128 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1604 1615
1605 ha->prev_topology = 0; 1616 ha->prev_topology = 0;
1606 ha->init_cb_size = sizeof(init_cb_t); 1617 ha->init_cb_size = sizeof(init_cb_t);
1607 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx;
1608 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1618 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1609 ha->optrom_size = OPTROM_SIZE_2300; 1619 ha->optrom_size = OPTROM_SIZE_2300;
1610 1620
1611 ha->max_q_depth = MAX_Q_DEPTH;
1612 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
1613 ha->max_q_depth = ql2xmaxqdepth;
1614
1615 /* Assign ISP specific operations. */ 1621 /* Assign ISP specific operations. */
1622 max_id = MAX_TARGETS_2200;
1616 if (IS_QLA2100(ha)) { 1623 if (IS_QLA2100(ha)) {
1617 host->max_id = MAX_TARGETS_2100; 1624 max_id = MAX_TARGETS_2100;
1618 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 1625 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
1619 ha->request_q_length = REQUEST_ENTRY_CNT_2100; 1626 req_length = REQUEST_ENTRY_CNT_2100;
1620 ha->response_q_length = RESPONSE_ENTRY_CNT_2100; 1627 rsp_length = RESPONSE_ENTRY_CNT_2100;
1621 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1628 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
1622 host->sg_tablesize = 32;
1623 ha->gid_list_info_size = 4; 1629 ha->gid_list_info_size = 4;
1624 ha->isp_ops = &qla2100_isp_ops; 1630 ha->isp_ops = &qla2100_isp_ops;
1625 } else if (IS_QLA2200(ha)) { 1631 } else if (IS_QLA2200(ha)) {
1626 host->max_id = MAX_TARGETS_2200;
1627 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1632 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1628 ha->request_q_length = REQUEST_ENTRY_CNT_2200; 1633 req_length = REQUEST_ENTRY_CNT_2200;
1629 ha->response_q_length = RESPONSE_ENTRY_CNT_2100; 1634 rsp_length = RESPONSE_ENTRY_CNT_2100;
1630 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1635 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
1631 ha->gid_list_info_size = 4; 1636 ha->gid_list_info_size = 4;
1632 ha->isp_ops = &qla2100_isp_ops; 1637 ha->isp_ops = &qla2100_isp_ops;
1633 } else if (IS_QLA23XX(ha)) { 1638 } else if (IS_QLA23XX(ha)) {
1634 host->max_id = MAX_TARGETS_2200;
1635 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1639 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1636 ha->request_q_length = REQUEST_ENTRY_CNT_2200; 1640 req_length = REQUEST_ENTRY_CNT_2200;
1637 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1641 rsp_length = RESPONSE_ENTRY_CNT_2300;
1638 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1642 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1639 ha->gid_list_info_size = 6; 1643 ha->gid_list_info_size = 6;
1640 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 1644 if (IS_QLA2322(ha) || IS_QLA6322(ha))
1641 ha->optrom_size = OPTROM_SIZE_2322; 1645 ha->optrom_size = OPTROM_SIZE_2322;
1642 ha->isp_ops = &qla2300_isp_ops; 1646 ha->isp_ops = &qla2300_isp_ops;
1643 } else if (IS_QLA24XX_TYPE(ha)) { 1647 } else if (IS_QLA24XX_TYPE(ha)) {
1644 host->max_id = MAX_TARGETS_2200;
1645 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1648 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1646 ha->request_q_length = REQUEST_ENTRY_CNT_24XX; 1649 req_length = REQUEST_ENTRY_CNT_24XX;
1647 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1650 rsp_length = RESPONSE_ENTRY_CNT_2300;
1648 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1651 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1649 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1652 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1650 ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
1651 ha->gid_list_info_size = 8; 1653 ha->gid_list_info_size = 8;
1652 ha->optrom_size = OPTROM_SIZE_24XX; 1654 ha->optrom_size = OPTROM_SIZE_24XX;
1653 ha->isp_ops = &qla24xx_isp_ops; 1655 ha->isp_ops = &qla24xx_isp_ops;
1654 } else if (IS_QLA25XX(ha)) { 1656 } else if (IS_QLA25XX(ha)) {
1655 host->max_id = MAX_TARGETS_2200;
1656 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1657 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1657 ha->request_q_length = REQUEST_ENTRY_CNT_24XX; 1658 req_length = REQUEST_ENTRY_CNT_24XX;
1658 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1659 rsp_length = RESPONSE_ENTRY_CNT_2300;
1659 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1660 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1660 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1661 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1661 ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
1662 ha->gid_list_info_size = 8; 1662 ha->gid_list_info_size = 8;
1663 ha->optrom_size = OPTROM_SIZE_25XX; 1663 ha->optrom_size = OPTROM_SIZE_25XX;
1664 ha->isp_ops = &qla25xx_isp_ops; 1664 ha->isp_ops = &qla25xx_isp_ops;
1665 } 1665 }
1666 host->can_queue = ha->request_q_length + 128;
1667 1666
1668 mutex_init(&ha->vport_lock); 1667 mutex_init(&ha->vport_lock);
1669 init_completion(&ha->mbx_cmd_comp); 1668 init_completion(&ha->mbx_cmd_comp);
1670 complete(&ha->mbx_cmd_comp); 1669 complete(&ha->mbx_cmd_comp);
1671 init_completion(&ha->mbx_intr_comp); 1670 init_completion(&ha->mbx_intr_comp);
1672 1671
1673 INIT_LIST_HEAD(&ha->list);
1674 INIT_LIST_HEAD(&ha->fcports);
1675 INIT_LIST_HEAD(&ha->vp_list);
1676 INIT_LIST_HEAD(&ha->work_list);
1677
1678 set_bit(0, (unsigned long *) ha->vp_idx_map); 1672 set_bit(0, (unsigned long *) ha->vp_idx_map);
1679 1673
1680 qla2x00_config_dma_addressing(ha); 1674 ret = qla2x00_mem_alloc(ha, req_length, rsp_length);
1681 if (qla2x00_mem_alloc(ha)) { 1675 if (!ret) {
1682 qla_printk(KERN_WARNING, ha, 1676 qla_printk(KERN_WARNING, ha,
1683 "[ERROR] Failed to allocate memory for adapter\n"); 1677 "[ERROR] Failed to allocate memory for adapter\n");
1684 1678
1679 goto probe_hw_failed;
1680 }
1681
1682 ha->req->max_q_depth = MAX_Q_DEPTH;
1683 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
1684 ha->req->max_q_depth = ql2xmaxqdepth;
1685
1686 base_vha = qla2x00_create_host(sht, ha);
1687 if (!base_vha) {
1688 qla_printk(KERN_WARNING, ha,
1689 "[ERROR] Failed to allocate memory for scsi_host\n");
1690
1685 ret = -ENOMEM; 1691 ret = -ENOMEM;
1686 goto probe_failed; 1692 goto probe_hw_failed;
1687 } 1693 }
1688 1694
1689 if (qla2x00_initialize_adapter(ha)) { 1695 pci_set_drvdata(pdev, base_vha);
1696
1697 qla2x00_config_dma_addressing(base_vha);
1698
1699 host = base_vha->host;
1700 host->can_queue = ha->req->length + 128;
1701 if (IS_QLA2XXX_MIDTYPE(ha)) {
1702 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
1703 } else {
1704 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
1705 base_vha->vp_idx;
1706 }
1707 if (IS_QLA2100(ha))
1708 host->sg_tablesize = 32;
1709 host->max_id = max_id;
1710 host->this_id = 255;
1711 host->cmd_per_lun = 3;
1712 host->unique_id = host->host_no;
1713 host->max_cmd_len = MAX_CMDSZ;
1714 host->max_channel = MAX_BUSES - 1;
1715 host->max_lun = MAX_LUNS;
1716 host->transportt = qla2xxx_transport_template;
1717
1718 if (qla2x00_initialize_adapter(base_vha)) {
1690 qla_printk(KERN_WARNING, ha, 1719 qla_printk(KERN_WARNING, ha,
1691 "Failed to initialize adapter\n"); 1720 "Failed to initialize adapter\n");
1692 1721
1693 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - " 1722 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
1694 "Adapter flags %x.\n", 1723 "Adapter flags %x.\n",
1695 ha->host_no, ha->device_flags)); 1724 base_vha->host_no, base_vha->device_flags));
1696 1725
1697 ret = -ENODEV; 1726 ret = -ENODEV;
1698 goto probe_failed; 1727 goto probe_failed;
1699 } 1728 }
1700 1729
1730 /* Set up the irqs */
1731 ret = qla2x00_request_irqs(ha);
1732 if (ret)
1733 goto probe_failed;
1734
1701 /* 1735 /*
1702 * Startup the kernel thread for this host adapter 1736 * Startup the kernel thread for this host adapter
1703 */ 1737 */
1704 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, 1738 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
1705 "%s_dpc", ha->host_str); 1739 "%s_dpc", base_vha->host_str);
1706 if (IS_ERR(ha->dpc_thread)) { 1740 if (IS_ERR(ha->dpc_thread)) {
1707 qla_printk(KERN_WARNING, ha, 1741 qla_printk(KERN_WARNING, ha,
1708 "Unable to start DPC thread!\n"); 1742 "Unable to start DPC thread!\n");
@@ -1710,28 +1744,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1710 goto probe_failed; 1744 goto probe_failed;
1711 } 1745 }
1712 1746
1713 host->this_id = 255; 1747 list_add_tail(&base_vha->list, &ha->vp_list);
1714 host->cmd_per_lun = 3; 1748 base_vha->host->irq = ha->pdev->irq;
1715 host->unique_id = host->host_no;
1716 host->max_cmd_len = MAX_CMDSZ;
1717 host->max_channel = MAX_BUSES - 1;
1718 host->max_lun = MAX_LUNS;
1719 host->transportt = qla2xxx_transport_template;
1720
1721 ret = qla2x00_request_irqs(ha);
1722 if (ret)
1723 goto probe_failed;
1724 1749
1725 /* Initialized the timer */ 1750 /* Initialized the timer */
1726 qla2x00_start_timer(ha, qla2x00_timer, WATCH_INTERVAL); 1751 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
1727 1752
1728 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 1753 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
1729 ha->host_no, ha)); 1754 base_vha->host_no, ha));
1730
1731 pci_set_drvdata(pdev, ha);
1732 1755
1733 ha->flags.init_done = 1; 1756 base_vha->flags.init_done = 1;
1734 ha->flags.online = 1; 1757 base_vha->flags.online = 1;
1735 1758
1736 ret = scsi_add_host(host, &pdev->dev); 1759 ret = scsi_add_host(host, &pdev->dev);
1737 if (ret) 1760 if (ret)
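The probe hunks above split what used to be one shost_priv() area into two allocations: struct qla_hw_data is kzalloc'd as soon as the PCI function is known, and the base scsi_qla_host is created later by qla2x00_create_host() and points back at it (can_queue, for instance, is now derived from ha->req->length). The sketch below models only that two-object ordering with libc allocations; none of the stubs are real driver calls, and the ring length is an arbitrary placeholder.

#include <stdio.h>
#include <stdlib.h>

struct req_que {
        int length;
};

struct qla_hw_data {                    /* allocated before any SCSI host */
        struct req_que *req;
};

struct scsi_qla_host {                  /* created afterwards, points back */
        struct qla_hw_data *hw;
        int can_queue;
};

static struct scsi_qla_host *create_host(struct qla_hw_data *ha)
{
        struct scsi_qla_host *vha = calloc(1, sizeof(*vha));

        if (vha)
                vha->hw = ha;
        return vha;
}

int main(void)
{
        struct qla_hw_data *ha = calloc(1, sizeof(*ha));
        struct scsi_qla_host *base_vha;

        if (!ha)
                return 1;
        ha->req = calloc(1, sizeof(*ha->req));      /* ring setup elided */
        if (!ha->req)
                return 1;
        ha->req->length = 2048;                     /* placeholder entry count */

        base_vha = create_host(ha);
        if (!base_vha)
                return 1;
        base_vha->can_queue = ha->req->length + 128;
        printf("can_queue = %d\n", base_vha->can_queue);
        return 0;
}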
@@ -1741,76 +1764,94 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1741 1764
1742 scsi_scan_host(host); 1765 scsi_scan_host(host);
1743 1766
1744 qla2x00_alloc_sysfs_attr(ha); 1767 qla2x00_alloc_sysfs_attr(base_vha);
1745 1768
1746 qla2x00_init_host_attr(ha); 1769 qla2x00_init_host_attr(base_vha);
1747 1770
1748 qla2x00_dfs_setup(ha); 1771 qla2x00_dfs_setup(base_vha);
1749 1772
1750 qla_printk(KERN_INFO, ha, "\n" 1773 qla_printk(KERN_INFO, ha, "\n"
1751 " QLogic Fibre Channel HBA Driver: %s\n" 1774 " QLogic Fibre Channel HBA Driver: %s\n"
1752 " QLogic %s - %s\n" 1775 " QLogic %s - %s\n"
1753 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n", 1776 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
1754 qla2x00_version_str, ha->model_number, 1777 qla2x00_version_str, ha->model_number,
1755 ha->model_desc ? ha->model_desc: "", pdev->device, 1778 ha->model_desc ? ha->model_desc : "", pdev->device,
1756 ha->isp_ops->pci_info_str(ha, pci_info), pci_name(pdev), 1779 ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev),
1757 ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no, 1780 ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no,
1758 ha->isp_ops->fw_version_str(ha, fw_str)); 1781 ha->isp_ops->fw_version_str(base_vha, fw_str));
1759 1782
1760 return 0; 1783 return 0;
1761 1784
1762probe_failed: 1785probe_failed:
1763 qla2x00_free_device(ha); 1786 qla2x00_free_device(base_vha);
1764 1787
1765 scsi_host_put(host); 1788 scsi_host_put(base_vha->host);
1766 1789
1767probe_disable_device: 1790probe_hw_failed:
1768 pci_disable_device(pdev); 1791 if (ha->iobase)
1792 iounmap(ha->iobase);
1793
1794 pci_release_selected_regions(ha->pdev, ha->bars);
1795 kfree(ha);
1796 ha = NULL;
1769 1797
1770probe_out: 1798probe_out:
1799 pci_disable_device(pdev);
1771 return ret; 1800 return ret;
1772} 1801}
1773 1802
1774static void 1803static void
1775qla2x00_remove_one(struct pci_dev *pdev) 1804qla2x00_remove_one(struct pci_dev *pdev)
1776{ 1805{
1777 scsi_qla_host_t *ha, *vha, *temp; 1806 scsi_qla_host_t *base_vha, *vha, *temp;
1807 struct qla_hw_data *ha;
1808
1809 base_vha = pci_get_drvdata(pdev);
1810 ha = base_vha->hw;
1811
1812 list_for_each_entry_safe(vha, temp, &ha->vp_list, list) {
1813 if (vha && vha->fc_vport)
1814 fc_vport_terminate(vha->fc_vport);
1815 }
1778 1816
1779 ha = pci_get_drvdata(pdev); 1817 set_bit(UNLOADING, &base_vha->dpc_flags);
1780 1818
1781 list_for_each_entry_safe(vha, temp, &ha->vp_list, vp_list) 1819 qla2x00_dfs_remove(base_vha);
1782 fc_vport_terminate(vha->fc_vport);
1783 1820
1784 set_bit(UNLOADING, &ha->dpc_flags); 1821 qla84xx_put_chip(base_vha);
1785 1822
1786 qla2x00_dfs_remove(ha); 1823 qla2x00_free_sysfs_attr(base_vha);
1787 1824
1788 qla84xx_put_chip(ha); 1825 fc_remove_host(base_vha->host);
1789 1826
1790 qla2x00_free_sysfs_attr(ha); 1827 scsi_remove_host(base_vha->host);
1791 1828
1792 fc_remove_host(ha->host); 1829 qla2x00_free_device(base_vha);
1793 1830
1794 scsi_remove_host(ha->host); 1831 scsi_host_put(base_vha->host);
1795 1832
1796 qla2x00_free_device(ha); 1833 if (ha->iobase)
1834 iounmap(ha->iobase);
1797 1835
1798 scsi_host_put(ha->host); 1836 pci_release_selected_regions(ha->pdev, ha->bars);
1837 kfree(ha);
1838 ha = NULL;
1799 1839
1800 pci_disable_device(pdev); 1840 pci_disable_device(pdev);
1801 pci_set_drvdata(pdev, NULL); 1841 pci_set_drvdata(pdev, NULL);
1802} 1842}
1803 1843
1804static void 1844static void
1805qla2x00_free_device(scsi_qla_host_t *ha) 1845qla2x00_free_device(scsi_qla_host_t *vha)
1806{ 1846{
1807 qla2x00_abort_all_cmds(ha, DID_NO_CONNECT << 16); 1847 struct qla_hw_data *ha = vha->hw;
1848 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
1808 1849
1809 /* Disable timer */ 1850 /* Disable timer */
1810 if (ha->timer_active) 1851 if (vha->timer_active)
1811 qla2x00_stop_timer(ha); 1852 qla2x00_stop_timer(vha);
1812 1853
1813 ha->flags.online = 0; 1854 vha->flags.online = 0;
1814 1855
1815 /* Kill the kernel thread for this host */ 1856 /* Kill the kernel thread for this host */
1816 if (ha->dpc_thread) { 1857 if (ha->dpc_thread) {
@@ -1825,45 +1866,39 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1825 } 1866 }
1826 1867
1827 if (ha->flags.fce_enabled) 1868 if (ha->flags.fce_enabled)
1828 qla2x00_disable_fce_trace(ha, NULL, NULL); 1869 qla2x00_disable_fce_trace(vha, NULL, NULL);
1829 1870
1830 if (ha->eft) 1871 if (ha->eft)
1831 qla2x00_disable_eft_trace(ha); 1872 qla2x00_disable_eft_trace(vha);
1832 1873
1833 /* Stop currently executing firmware. */ 1874 /* Stop currently executing firmware. */
1834 qla2x00_try_to_stop_firmware(ha); 1875 qla2x00_try_to_stop_firmware(vha);
1835 1876
1836 /* turn-off interrupts on the card */ 1877 /* turn-off interrupts on the card */
1837 if (ha->interrupts_on) 1878 if (ha->interrupts_on)
1838 ha->isp_ops->disable_intrs(ha); 1879 ha->isp_ops->disable_intrs(ha);
1839 1880
1840 qla2x00_mem_free(ha); 1881 qla2x00_free_irqs(vha);
1841
1842 qla2x00_free_irqs(ha);
1843 1882
1844 /* release io space registers */ 1883 qla2x00_mem_free(ha);
1845 if (ha->iobase)
1846 iounmap(ha->iobase);
1847 pci_release_selected_regions(ha->pdev, ha->bars);
1848} 1884}
1849 1885
1850static inline void 1886static inline void
1851qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport, 1887qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
1852 int defer) 1888 int defer)
1853{ 1889{
1854 struct fc_rport *rport; 1890 struct fc_rport *rport;
1855 scsi_qla_host_t *pha = to_qla_parent(ha);
1856 1891
1857 if (!fcport->rport) 1892 if (!fcport->rport)
1858 return; 1893 return;
1859 1894
1860 rport = fcport->rport; 1895 rport = fcport->rport;
1861 if (defer) { 1896 if (defer) {
1862 spin_lock_irq(ha->host->host_lock); 1897 spin_lock_irq(vha->host->host_lock);
1863 fcport->drport = rport; 1898 fcport->drport = rport;
1864 spin_unlock_irq(ha->host->host_lock); 1899 spin_unlock_irq(vha->host->host_lock);
1865 set_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags); 1900 set_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
1866 qla2xxx_wake_dpc(pha); 1901 qla2xxx_wake_dpc(vha);
1867 } else 1902 } else
1868 fc_remote_port_delete(rport); 1903 fc_remote_port_delete(rport);
1869} 1904}
@@ -1877,13 +1912,14 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
1877 * 1912 *
1878 * Context: 1913 * Context:
1879 */ 1914 */
1880void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport, 1915void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
1881 int do_login, int defer) 1916 int do_login, int defer)
1882{ 1917{
1883 if (atomic_read(&fcport->state) == FCS_ONLINE && 1918 if (atomic_read(&fcport->state) == FCS_ONLINE &&
1884 ha->vp_idx == fcport->vp_idx) 1919 vha->vp_idx == fcport->vp_idx) {
1885 qla2x00_schedule_rport_del(ha, fcport, defer); 1920 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1886 1921 qla2x00_schedule_rport_del(vha, fcport, defer);
1922 }
1887 /* 1923 /*
1888 * We may need to retry the login, so don't change the state of the 1924 * We may need to retry the login, so don't change the state of the
1889 * port but do the retries. 1925 * port but do the retries.
@@ -1895,13 +1931,13 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
1895 return; 1931 return;
1896 1932
1897 if (fcport->login_retry == 0) { 1933 if (fcport->login_retry == 0) {
1898 fcport->login_retry = ha->login_retry_count; 1934 fcport->login_retry = vha->hw->login_retry_count;
1899 set_bit(RELOGIN_NEEDED, &ha->dpc_flags); 1935 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1900 1936
1901 DEBUG(printk("scsi(%ld): Port login retry: " 1937 DEBUG(printk("scsi(%ld): Port login retry: "
1902 "%02x%02x%02x%02x%02x%02x%02x%02x, " 1938 "%02x%02x%02x%02x%02x%02x%02x%02x, "
1903 "id = 0x%04x retry cnt=%d\n", 1939 "id = 0x%04x retry cnt=%d\n",
1904 ha->host_no, 1940 vha->host_no,
1905 fcport->port_name[0], 1941 fcport->port_name[0],
1906 fcport->port_name[1], 1942 fcport->port_name[1],
1907 fcport->port_name[2], 1943 fcport->port_name[2],
@@ -1929,13 +1965,12 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
1929 * Context: 1965 * Context:
1930 */ 1966 */
1931void 1967void
1932qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer) 1968qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
1933{ 1969{
1934 fc_port_t *fcport; 1970 fc_port_t *fcport;
1935 scsi_qla_host_t *pha = to_qla_parent(ha);
1936 1971
1937 list_for_each_entry(fcport, &pha->fcports, list) { 1972 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1938 if (ha->vp_idx != fcport->vp_idx) 1973 if (vha->vp_idx != fcport->vp_idx)
1939 continue; 1974 continue;
1940 /* 1975 /*
1941 * No point in marking the device as lost, if the device is 1976 * No point in marking the device as lost, if the device is
@@ -1943,9 +1978,11 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
1943 */ 1978 */
1944 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 1979 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
1945 continue; 1980 continue;
1946 if (atomic_read(&fcport->state) == FCS_ONLINE) 1981 if (atomic_read(&fcport->state) == FCS_ONLINE) {
1947 qla2x00_schedule_rport_del(ha, fcport, defer); 1982 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1948 atomic_set(&fcport->state, FCS_DEVICE_LOST); 1983 qla2x00_schedule_rport_del(vha, fcport, defer);
1984 } else
1985 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1949 } 1986 }
1950} 1987}
1951 1988
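qla2x00_mark_all_devices_lost() above now walks the port's own vp_fcports list instead of a shared parent list, and flips ports to FCS_DEVICE_LOST before scheduling the deferred rport removal for those that were online. A toy model of that walk; an array stands in for the fcport list purely to keep the fragment self-contained.

#include <stdio.h>

enum fc_state { FCS_ONLINE, FCS_DEVICE_LOST, FCS_DEVICE_DEAD };

struct fc_port {
        enum fc_state state;
        int vp_idx;
};

struct scsi_qla_host {
        int vp_idx;
        struct fc_port *vp_fcports;     /* per-port list, modelled as array */
        int nports;
};

static void mark_all_devices_lost(struct scsi_qla_host *vha)
{
        int i;

        for (i = 0; i < vha->nports; i++) {
                struct fc_port *fcport = &vha->vp_fcports[i];

                if (vha->vp_idx != fcport->vp_idx)
                        continue;
                if (fcport->state == FCS_DEVICE_DEAD)
                        continue;               /* already gone for good */
                /* flip the state first; the driver then schedules the
                 * deferred rport delete for ports that were online */
                fcport->state = FCS_DEVICE_LOST;
        }
}

int main(void)
{
        struct fc_port ports[2] = { { FCS_ONLINE, 0 }, { FCS_DEVICE_DEAD, 0 } };
        struct scsi_qla_host vha = { 0, ports, 2 };

        mark_all_devices_lost(&vha);
        printf("%d %d\n", ports[0].state, ports[1].state);      /* 1 2 */
        return 0;
}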
@@ -1958,105 +1995,139 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
1958* !0 = failure. 1995* !0 = failure.
1959*/ 1996*/
1960static int 1997static int
1961qla2x00_mem_alloc(scsi_qla_host_t *ha) 1998qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len)
1962{ 1999{
1963 char name[16]; 2000 char name[16];
2001 struct req_que *req = NULL;
2002 struct rsp_que *rsp = NULL;
1964 2003
1965 ha->request_ring = dma_alloc_coherent(&ha->pdev->dev, 2004 ha->init_cb_size = sizeof(init_cb_t);
1966 (ha->request_q_length + 1) * sizeof(request_t), &ha->request_dma, 2005 if (IS_QLA2XXX_MIDTYPE(ha))
1967 GFP_KERNEL); 2006 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1968 if (!ha->request_ring)
1969 goto fail;
1970
1971 ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
1972 (ha->response_q_length + 1) * sizeof(response_t),
1973 &ha->response_dma, GFP_KERNEL);
1974 if (!ha->response_ring)
1975 goto fail_free_request_ring;
1976
1977 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
1978 &ha->gid_list_dma, GFP_KERNEL);
1979 if (!ha->gid_list)
1980 goto fail_free_response_ring;
1981 2007
1982 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, 2008 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
1983 &ha->init_cb_dma, GFP_KERNEL); 2009 &ha->init_cb_dma, GFP_KERNEL);
1984 if (!ha->init_cb) 2010 if (!ha->init_cb)
1985 goto fail_free_gid_list; 2011 goto fail;
1986 2012
1987 snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME, 2013 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
1988 ha->host_no); 2014 &ha->gid_list_dma, GFP_KERNEL);
1989 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, 2015 if (!ha->gid_list)
1990 DMA_POOL_SIZE, 8, 0);
1991 if (!ha->s_dma_pool)
1992 goto fail_free_init_cb; 2016 goto fail_free_init_cb;
1993 2017
1994 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 2018 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
1995 if (!ha->srb_mempool) 2019 if (!ha->srb_mempool)
1996 goto fail_free_s_dma_pool; 2020 goto fail_free_gid_list;
1997 2021
1998 /* Get memory for cached NVRAM */ 2022 /* Get memory for cached NVRAM */
1999 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); 2023 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
2000 if (!ha->nvram) 2024 if (!ha->nvram)
2001 goto fail_free_srb_mempool; 2025 goto fail_free_srb_mempool;
2002 2026
2027 snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
2028 ha->pdev->device);
2029 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2030 DMA_POOL_SIZE, 8, 0);
2031 if (!ha->s_dma_pool)
2032 goto fail_free_nvram;
2033
2003 /* Allocate memory for SNS commands */ 2034 /* Allocate memory for SNS commands */
2004 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 2035 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2005 /* Get consistent memory allocated for SNS commands */ 2036 /* Get consistent memory allocated for SNS commands */
2006 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, 2037 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
2007 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 2038 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
2008 if (!ha->sns_cmd) 2039 if (!ha->sns_cmd)
2009 goto fail_free_nvram; 2040 goto fail_dma_pool;
2010 } else { 2041 } else {
2011 /* Get consistent memory allocated for MS IOCB */ 2042 /* Get consistent memory allocated for MS IOCB */
2012 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 2043 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2013 &ha->ms_iocb_dma); 2044 &ha->ms_iocb_dma);
2014 if (!ha->ms_iocb) 2045 if (!ha->ms_iocb)
2015 goto fail_free_nvram; 2046 goto fail_dma_pool;
2016 2047 /* Get consistent memory allocated for CT SNS commands */
2017 /* Get consistent memory allocated for CT SNS commands */
2018 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev, 2048 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
2019 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 2049 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
2020 if (!ha->ct_sns) 2050 if (!ha->ct_sns)
2021 goto fail_free_ms_iocb; 2051 goto fail_free_ms_iocb;
2022 } 2052 }
2023 2053
2024 return 0; 2054 /* Allocate memory for request ring */
2055 req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
2056 if (!req) {
2057 DEBUG(printk("Unable to allocate memory for req\n"));
2058 goto fail_req;
2059 }
2060 ha->req = req;
2061 req->length = req_len;
2062 req->ring = dma_alloc_coherent(&ha->pdev->dev,
2063 (req->length + 1) * sizeof(request_t),
2064 &req->dma, GFP_KERNEL);
2065 if (!req->ring) {
2066 DEBUG(printk("Unable to allocate memory for req_ring\n"));
2067 goto fail_req_ring;
2068 }
2069 /* Allocate memory for response ring */
2070 rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
2071 if (!rsp) {
2072 DEBUG(printk("Unable to allocate memory for rsp\n"));
2073 goto fail_rsp;
2074 }
2075 ha->rsp = rsp;
2076 rsp->hw = ha;
2077 rsp->length = rsp_len;
2078
2079 rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
2080 (rsp->length + 1) * sizeof(response_t),
2081 &rsp->dma, GFP_KERNEL);
2082 if (!rsp->ring) {
2083 DEBUG(printk("Unable to allocate memory for rsp_ring\n"));
2084 goto fail_rsp_ring;
2085 }
2025 2086
2087 INIT_LIST_HEAD(&ha->vp_list);
2088 return 1;
2089
2090fail_rsp_ring:
2091 kfree(rsp);
2092 ha->rsp = NULL;
2093fail_rsp:
2094 dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
2095 sizeof(request_t), req->ring, req->dma);
2096 req->ring = NULL;
2097 req->dma = 0;
2098fail_req_ring:
2099 kfree(req);
2100 ha->req = NULL;
2101fail_req:
2102 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2103 ha->ct_sns, ha->ct_sns_dma);
2104 ha->ct_sns = NULL;
2105 ha->ct_sns_dma = 0;
2026fail_free_ms_iocb: 2106fail_free_ms_iocb:
2027 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 2107 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2028 ha->ms_iocb = NULL; 2108 ha->ms_iocb = NULL;
2029 ha->ms_iocb_dma = 0; 2109 ha->ms_iocb_dma = 0;
2110fail_dma_pool:
2111 dma_pool_destroy(ha->s_dma_pool);
2112 ha->s_dma_pool = NULL;
2030fail_free_nvram: 2113fail_free_nvram:
2031 kfree(ha->nvram); 2114 kfree(ha->nvram);
2032 ha->nvram = NULL; 2115 ha->nvram = NULL;
2033fail_free_srb_mempool: 2116fail_free_srb_mempool:
2034 mempool_destroy(ha->srb_mempool); 2117 mempool_destroy(ha->srb_mempool);
2035 ha->srb_mempool = NULL; 2118 ha->srb_mempool = NULL;
2036fail_free_s_dma_pool:
2037 dma_pool_destroy(ha->s_dma_pool);
2038 ha->s_dma_pool = NULL;
2039fail_free_init_cb:
2040 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
2041 ha->init_cb_dma);
2042 ha->init_cb = NULL;
2043 ha->init_cb_dma = 0;
2044fail_free_gid_list: 2119fail_free_gid_list:
2045 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, 2120 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2046 ha->gid_list_dma); 2121 ha->gid_list_dma);
2047 ha->gid_list = NULL; 2122 ha->gid_list = NULL;
2048 ha->gid_list_dma = 0; 2123 ha->gid_list_dma = 0;
2049fail_free_response_ring: 2124fail_free_init_cb:
2050 dma_free_coherent(&ha->pdev->dev, (ha->response_q_length + 1) * 2125 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
2051 sizeof(response_t), ha->response_ring, ha->response_dma); 2126 ha->init_cb_dma);
2052 ha->response_ring = NULL; 2127 ha->init_cb = NULL;
2053 ha->response_dma = 0; 2128 ha->init_cb_dma = 0;
2054fail_free_request_ring:
2055 dma_free_coherent(&ha->pdev->dev, (ha->request_q_length + 1) *
2056 sizeof(request_t), ha->request_ring, ha->request_dma);
2057 ha->request_ring = NULL;
2058 ha->request_dma = 0;
2059fail: 2129fail:
2130 DEBUG(printk("%s: Memory allocation failure\n", __func__));
2060 return -ENOMEM; 2131 return -ENOMEM;
2061} 2132}
2062 2133
@@ -2068,32 +2139,32 @@ fail:
2068* ha = adapter block pointer. 2139* ha = adapter block pointer.
2069*/ 2140*/
2070static void 2141static void
2071qla2x00_mem_free(scsi_qla_host_t *ha) 2142qla2x00_mem_free(struct qla_hw_data *ha)
2072{ 2143{
2073 struct list_head *fcpl, *fcptemp; 2144 struct req_que *req = ha->req;
2074 fc_port_t *fcport; 2145 struct rsp_que *rsp = ha->rsp;
2075 2146
2076 if (ha->srb_mempool) 2147 if (ha->srb_mempool)
2077 mempool_destroy(ha->srb_mempool); 2148 mempool_destroy(ha->srb_mempool);
2078 2149
2079 if (ha->fce) 2150 if (ha->fce)
2080 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 2151 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
2081 ha->fce_dma); 2152 ha->fce_dma);
2082 2153
2083 if (ha->fw_dump) { 2154 if (ha->fw_dump) {
2084 if (ha->eft) 2155 if (ha->eft)
2085 dma_free_coherent(&ha->pdev->dev, 2156 dma_free_coherent(&ha->pdev->dev,
2086 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma); 2157 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
2087 vfree(ha->fw_dump); 2158 vfree(ha->fw_dump);
2088 } 2159 }
2089 2160
2090 if (ha->sns_cmd) 2161 if (ha->sns_cmd)
2091 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 2162 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2092 ha->sns_cmd, ha->sns_cmd_dma); 2163 ha->sns_cmd, ha->sns_cmd_dma);
2093 2164
2094 if (ha->ct_sns) 2165 if (ha->ct_sns)
2095 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 2166 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2096 ha->ct_sns, ha->ct_sns_dma); 2167 ha->ct_sns, ha->ct_sns_dma);
2097 2168
2098 if (ha->sfp_data) 2169 if (ha->sfp_data)
2099 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma); 2170 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
@@ -2104,23 +2175,17 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2104 if (ha->s_dma_pool) 2175 if (ha->s_dma_pool)
2105 dma_pool_destroy(ha->s_dma_pool); 2176 dma_pool_destroy(ha->s_dma_pool);
2106 2177
2107 if (ha->init_cb)
2108 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
2109 ha->init_cb, ha->init_cb_dma);
2110 2178
2111 if (ha->gid_list) 2179 if (ha->gid_list)
2112 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, 2180 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2113 ha->gid_list_dma); 2181 ha->gid_list_dma);
2114 2182
2115 if (ha->response_ring)
2116 dma_free_coherent(&ha->pdev->dev,
2117 (ha->response_q_length + 1) * sizeof(response_t),
2118 ha->response_ring, ha->response_dma);
2119 2183
2120 if (ha->request_ring) 2184 if (ha->init_cb)
2121 dma_free_coherent(&ha->pdev->dev, 2185 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
2122 (ha->request_q_length + 1) * sizeof(request_t), 2186 ha->init_cb, ha->init_cb_dma);
2123 ha->request_ring, ha->request_dma); 2187 vfree(ha->optrom_buffer);
2188 kfree(ha->nvram);
2124 2189
2125 ha->srb_mempool = NULL; 2190 ha->srb_mempool = NULL;
2126 ha->eft = NULL; 2191 ha->eft = NULL;
@@ -2139,30 +2204,65 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2139 ha->gid_list = NULL; 2204 ha->gid_list = NULL;
2140 ha->gid_list_dma = 0; 2205 ha->gid_list_dma = 0;
2141 2206
2142 ha->response_ring = NULL; 2207 ha->fw_dump = NULL;
2143 ha->response_dma = 0; 2208 ha->fw_dumped = 0;
2144 ha->request_ring = NULL; 2209 ha->fw_dump_reading = 0;
2145 ha->request_dma = 0; 2210
2211 if (rsp) {
2212 if (rsp->ring)
2213 dma_free_coherent(&ha->pdev->dev,
2214 (rsp->length + 1) * sizeof(response_t),
2215 rsp->ring, rsp->dma);
2216
2217 kfree(rsp);
2218 rsp = NULL;
2219 }
2146 2220
2147 list_for_each_safe(fcpl, fcptemp, &ha->fcports) { 2221 if (req) {
2148 fcport = list_entry(fcpl, fc_port_t, list); 2222 if (req->ring)
2223 dma_free_coherent(&ha->pdev->dev,
2224 (req->length + 1) * sizeof(request_t),
2225 req->ring, req->dma);
2149 2226
2150 /* fc ports */ 2227 kfree(req);
2151 list_del_init(&fcport->list); 2228 req = NULL;
2152 kfree(fcport);
2153 } 2229 }
2154 INIT_LIST_HEAD(&ha->fcports); 2230}
2155 2231
2156 ha->fw_dump = NULL; 2232struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
2157 ha->fw_dumped = 0; 2233 struct qla_hw_data *ha)
2158 ha->fw_dump_reading = 0; 2234{
2235 struct Scsi_Host *host;
2236 struct scsi_qla_host *vha = NULL;
2159 2237
2160 vfree(ha->optrom_buffer); 2238 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
2161 kfree(ha->nvram); 2239 if (host == NULL) {
2240 printk(KERN_WARNING
2241 "qla2xxx: Couldn't allocate host from scsi layer!\n");
2242 goto fail;
2243 }
2244
2245 /* Clear our data area */
2246 vha = shost_priv(host);
2247 memset(vha, 0, sizeof(scsi_qla_host_t));
2248
2249 vha->host = host;
2250 vha->host_no = host->host_no;
2251 vha->hw = ha;
2252
2253 INIT_LIST_HEAD(&vha->vp_fcports);
2254 INIT_LIST_HEAD(&vha->work_list);
2255 INIT_LIST_HEAD(&vha->list);
2256
2257 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
2258 return vha;
2259
2260fail:
2261 return vha;
2162} 2262}
2163 2263
2164static struct qla_work_evt * 2264static struct qla_work_evt *
2165qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type, 2265qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type,
2166 int locked) 2266 int locked)
2167{ 2267{
2168 struct qla_work_evt *e; 2268 struct qla_work_evt *e;
@@ -2179,42 +2279,42 @@ qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
2179} 2279}
2180 2280
2181static int 2281static int
2182qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked) 2282qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e, int locked)
2183{ 2283{
2184 unsigned long uninitialized_var(flags); 2284 unsigned long uninitialized_var(flags);
2185 scsi_qla_host_t *pha = to_qla_parent(ha); 2285 struct qla_hw_data *ha = vha->hw;
2186 2286
2187 if (!locked) 2287 if (!locked)
2188 spin_lock_irqsave(&pha->hardware_lock, flags); 2288 spin_lock_irqsave(&ha->hardware_lock, flags);
2189 list_add_tail(&e->list, &ha->work_list); 2289 list_add_tail(&e->list, &vha->work_list);
2190 qla2xxx_wake_dpc(ha); 2290 qla2xxx_wake_dpc(vha);
2191 if (!locked) 2291 if (!locked)
2192 spin_unlock_irqrestore(&pha->hardware_lock, flags); 2292 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2193 return QLA_SUCCESS; 2293 return QLA_SUCCESS;
2194} 2294}
2195 2295
2196int 2296int
2197qla2x00_post_aen_work(struct scsi_qla_host *ha, enum fc_host_event_code code, 2297qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
2198 u32 data) 2298 u32 data)
2199{ 2299{
2200 struct qla_work_evt *e; 2300 struct qla_work_evt *e;
2201 2301
2202 e = qla2x00_alloc_work(ha, QLA_EVT_AEN, 1); 2302 e = qla2x00_alloc_work(vha, QLA_EVT_AEN, 1);
2203 if (!e) 2303 if (!e)
2204 return QLA_FUNCTION_FAILED; 2304 return QLA_FUNCTION_FAILED;
2205 2305
2206 e->u.aen.code = code; 2306 e->u.aen.code = code;
2207 e->u.aen.data = data; 2307 e->u.aen.data = data;
2208 return qla2x00_post_work(ha, e, 1); 2308 return qla2x00_post_work(vha, e, 1);
2209} 2309}
2210 2310
2211int 2311int
2212qla2x00_post_hwe_work(struct scsi_qla_host *ha, uint16_t code, uint16_t d1, 2312qla2x00_post_hwe_work(struct scsi_qla_host *vha, uint16_t code, uint16_t d1,
2213 uint16_t d2, uint16_t d3) 2313 uint16_t d2, uint16_t d3)
2214{ 2314{
2215 struct qla_work_evt *e; 2315 struct qla_work_evt *e;
2216 2316
2217 e = qla2x00_alloc_work(ha, QLA_EVT_HWE_LOG, 1); 2317 e = qla2x00_alloc_work(vha, QLA_EVT_HWE_LOG, 1);
2218 if (!e) 2318 if (!e)
2219 return QLA_FUNCTION_FAILED; 2319 return QLA_FUNCTION_FAILED;
2220 2320
@@ -2222,36 +2322,95 @@ qla2x00_post_hwe_work(struct scsi_qla_host *ha, uint16_t code, uint16_t d1,
2222 e->u.hwe.d1 = d1; 2322 e->u.hwe.d1 = d1;
2223 e->u.hwe.d2 = d2; 2323 e->u.hwe.d2 = d2;
2224 e->u.hwe.d3 = d3; 2324 e->u.hwe.d3 = d3;
2225 return qla2x00_post_work(ha, e, 1); 2325 return qla2x00_post_work(vha, e, 1);
2226} 2326}
2227 2327
2228static void 2328static void
2229qla2x00_do_work(struct scsi_qla_host *ha) 2329qla2x00_do_work(struct scsi_qla_host *vha)
2230{ 2330{
2231 struct qla_work_evt *e; 2331 struct qla_work_evt *e;
2232 scsi_qla_host_t *pha = to_qla_parent(ha); 2332 struct qla_hw_data *ha = vha->hw;
2233 2333
2234 spin_lock_irq(&pha->hardware_lock); 2334 spin_lock_irq(&ha->hardware_lock);
2235 while (!list_empty(&ha->work_list)) { 2335 while (!list_empty(&vha->work_list)) {
2236 e = list_entry(ha->work_list.next, struct qla_work_evt, list); 2336 e = list_entry(vha->work_list.next, struct qla_work_evt, list);
2237 list_del_init(&e->list); 2337 list_del_init(&e->list);
2238 spin_unlock_irq(&pha->hardware_lock); 2338 spin_unlock_irq(&ha->hardware_lock);
2239 2339
2240 switch (e->type) { 2340 switch (e->type) {
2241 case QLA_EVT_AEN: 2341 case QLA_EVT_AEN:
2242 fc_host_post_event(ha->host, fc_get_event_number(), 2342 fc_host_post_event(vha->host, fc_get_event_number(),
2243 e->u.aen.code, e->u.aen.data); 2343 e->u.aen.code, e->u.aen.data);
2244 break; 2344 break;
2245 case QLA_EVT_HWE_LOG: 2345 case QLA_EVT_HWE_LOG:
2246 qla2xxx_hw_event_log(ha, e->u.hwe.code, e->u.hwe.d1, 2346 qla2xxx_hw_event_log(vha, e->u.hwe.code, e->u.hwe.d1,
2247 e->u.hwe.d2, e->u.hwe.d3); 2347 e->u.hwe.d2, e->u.hwe.d3);
2248 break; 2348 break;
2249 } 2349 }
2250 if (e->flags & QLA_EVT_FLAG_FREE) 2350 if (e->flags & QLA_EVT_FLAG_FREE)
2251 kfree(e); 2351 kfree(e);
2252 spin_lock_irq(&pha->hardware_lock); 2352 spin_lock_irq(&ha->hardware_lock);
2353 }
2354 spin_unlock_irq(&ha->hardware_lock);
2355}
2356/* Relogins all the fcports of a vport
2357 * Context: dpc thread
2358 */
2359void qla2x00_relogin(struct scsi_qla_host *vha)
2360{
2361 fc_port_t *fcport;
2362 uint8_t status;
2363 uint16_t next_loopid = 0;
2364 struct qla_hw_data *ha = vha->hw;
2365
2366 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2367 /*
2368 * If the port is not ONLINE then try to login
2369 * to it if we haven't run out of retries.
2370 */
2371 if (atomic_read(&fcport->state) !=
2372 FCS_ONLINE && fcport->login_retry) {
2373
2374 if (fcport->flags & FCF_FABRIC_DEVICE) {
2375 if (fcport->flags & FCF_TAPE_PRESENT)
2376 ha->isp_ops->fabric_logout(vha,
2377 fcport->loop_id,
2378 fcport->d_id.b.domain,
2379 fcport->d_id.b.area,
2380 fcport->d_id.b.al_pa);
2381
2382 status = qla2x00_fabric_login(vha, fcport,
2383 &next_loopid);
2384 } else
2385 status = qla2x00_local_device_login(vha,
2386 fcport);
2387
2388 fcport->login_retry--;
2389 if (status == QLA_SUCCESS) {
2390 fcport->old_loop_id = fcport->loop_id;
2391
2392 DEBUG(printk("scsi(%ld): port login OK: logged "
2393 "in ID 0x%x\n", vha->host_no, fcport->loop_id));
2394
2395 qla2x00_update_fcport(vha, fcport);
2396
2397 } else if (status == 1) {
2398 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2399 /* retry the login again */
2400 DEBUG(printk("scsi(%ld): Retrying"
2401 " %d login again loop_id 0x%x\n",
2402 vha->host_no, fcport->login_retry,
2403 fcport->loop_id));
2404 } else {
2405 fcport->login_retry = 0;
2406 }
2407
2408 if (fcport->login_retry == 0 && status != QLA_SUCCESS)
2409 fcport->loop_id = FC_NO_LOOP_ID;
2410 }
2411 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2412 break;
2253 } 2413 }
2254 spin_unlock_irq(&pha->hardware_lock);
2255} 2414}
2256 2415
2257/************************************************************************** 2416/**************************************************************************
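The relogin pass that the DPC thread used to run inline is now the standalone qla2x00_relogin() above, iterating the port's vp_fcports list. The sketch below models just the per-port retry bookkeeping it performs (decrement the retry budget, ask for another RELOGIN pass on a transient failure, drop the loop ID once retries are exhausted); the login result is faked by a parameter instead of real fabric traffic.

#include <stdio.h>

#define FC_NO_LOOP_ID 0xFFFF
#define QLA_SUCCESS   0

struct fc_port {
        int state_online;       /* 1 = FCS_ONLINE          */
        int login_retry;        /* retries left            */
        unsigned int loop_id;
};

/* One pass of the relogin bookkeeping for a single port.
 * 'status' imitates the login attempt's outcome: QLA_SUCCESS,
 * 1 (retry later) or anything else (hard failure).
 * Returns 1 when another RELOGIN pass should be requested. */
static int relogin_once(struct fc_port *fcport, int status)
{
        int relogin_needed = 0;

        if (fcport->state_online || !fcport->login_retry)
                return 0;                       /* nothing to do */

        fcport->login_retry--;
        if (status == QLA_SUCCESS)
                fcport->state_online = 1;       /* update the fcport */
        else if (status == 1)
                relogin_needed = 1;             /* set RELOGIN_NEEDED again */
        else
                fcport->login_retry = 0;

        if (fcport->login_retry == 0 && status != QLA_SUCCESS)
                fcport->loop_id = FC_NO_LOOP_ID;

        return relogin_needed;
}

int main(void)
{
        struct fc_port p = { 0, 1, 0x81 };

        relogin_once(&p, 2);                    /* hard failure */
        printf("retries=%d loop_id=0x%x\n", p.login_retry, p.loop_id);
        return 0;
}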
@@ -2271,15 +2430,11 @@ static int
2271qla2x00_do_dpc(void *data) 2430qla2x00_do_dpc(void *data)
2272{ 2431{
2273 int rval; 2432 int rval;
2274 scsi_qla_host_t *ha; 2433 scsi_qla_host_t *base_vha;
2275 fc_port_t *fcport; 2434 struct qla_hw_data *ha;
2276 uint8_t status;
2277 uint16_t next_loopid;
2278 struct scsi_qla_host *vha;
2279 int i;
2280
2281 2435
2282 ha = (scsi_qla_host_t *)data; 2436 ha = (struct qla_hw_data *)data;
2437 base_vha = pci_get_drvdata(ha->pdev);
2283 2438
2284 set_user_nice(current, -20); 2439 set_user_nice(current, -20);
2285 2440
@@ -2293,10 +2448,10 @@ qla2x00_do_dpc(void *data)
2293 DEBUG3(printk("qla2x00: DPC handler waking up\n")); 2448 DEBUG3(printk("qla2x00: DPC handler waking up\n"));
2294 2449
2295 /* Initialization not yet finished. Don't do anything yet. */ 2450 /* Initialization not yet finished. Don't do anything yet. */
2296 if (!ha->flags.init_done) 2451 if (!base_vha->flags.init_done)
2297 continue; 2452 continue;
2298 2453
2299 DEBUG3(printk("scsi(%ld): DPC handler\n", ha->host_no)); 2454 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
2300 2455
2301 ha->dpc_active = 1; 2456 ha->dpc_active = 1;
2302 2457
@@ -2305,149 +2460,98 @@ qla2x00_do_dpc(void *data)
2305 continue; 2460 continue;
2306 } 2461 }
2307 2462
2308 qla2x00_do_work(ha); 2463 qla2x00_do_work(base_vha);
2309 2464
2310 if (test_and_clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) { 2465 if (test_and_clear_bit(ISP_ABORT_NEEDED,
2466 &base_vha->dpc_flags)) {
2311 2467
2312 DEBUG(printk("scsi(%ld): dpc: sched " 2468 DEBUG(printk("scsi(%ld): dpc: sched "
2313 "qla2x00_abort_isp ha = %p\n", 2469 "qla2x00_abort_isp ha = %p\n",
2314 ha->host_no, ha)); 2470 base_vha->host_no, ha));
2315 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 2471 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
2316 &ha->dpc_flags))) { 2472 &base_vha->dpc_flags))) {
2317 2473
2318 if (qla2x00_abort_isp(ha)) { 2474 if (qla2x00_abort_isp(base_vha)) {
2319 /* failed. retry later */ 2475 /* failed. retry later */
2320 set_bit(ISP_ABORT_NEEDED, 2476 set_bit(ISP_ABORT_NEEDED,
2321 &ha->dpc_flags); 2477 &base_vha->dpc_flags);
2322 }
2323 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
2324 }
2325
2326 for_each_mapped_vp_idx(ha, i) {
2327 list_for_each_entry(vha, &ha->vp_list,
2328 vp_list) {
2329 if (i == vha->vp_idx) {
2330 set_bit(ISP_ABORT_NEEDED,
2331 &vha->dpc_flags);
2332 break;
2333 }
2334 } 2478 }
2479 clear_bit(ABORT_ISP_ACTIVE,
2480 &base_vha->dpc_flags);
2335 } 2481 }
2336 2482
2337 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n", 2483 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
2338 ha->host_no)); 2484 base_vha->host_no));
2339 } 2485 }
2340 2486
2341 if (test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) { 2487 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
2342 qla2x00_update_fcports(ha); 2488 qla2x00_update_fcports(base_vha);
2343 clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags); 2489 clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2344 } 2490 }
2345 2491
2346 if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) && 2492 if (test_and_clear_bit(RESET_MARKER_NEEDED,
2347 (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) { 2493 &base_vha->dpc_flags) &&
2494 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
2348 2495
2349 DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n", 2496 DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
2350 ha->host_no)); 2497 base_vha->host_no));
2351 2498
2352 qla2x00_rst_aen(ha); 2499 qla2x00_rst_aen(base_vha);
2353 clear_bit(RESET_ACTIVE, &ha->dpc_flags); 2500 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
2354 } 2501 }
2355 2502
2356 /* Retry each device up to login retry count */ 2503 /* Retry each device up to login retry count */
2357 if ((test_and_clear_bit(RELOGIN_NEEDED, &ha->dpc_flags)) && 2504 if ((test_and_clear_bit(RELOGIN_NEEDED,
2358 !test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) && 2505 &base_vha->dpc_flags)) &&
2359 atomic_read(&ha->loop_state) != LOOP_DOWN) { 2506 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
2507 atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
2360 2508
2361 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n", 2509 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
2362 ha->host_no)); 2510 base_vha->host_no));
2363 2511 qla2x00_relogin(base_vha);
2364 next_loopid = 0; 2512
2365 list_for_each_entry(fcport, &ha->fcports, list) {
2366 /*
2367 * If the port is not ONLINE then try to login
2368 * to it if we haven't run out of retries.
2369 */
2370 if (atomic_read(&fcport->state) != FCS_ONLINE &&
2371 fcport->login_retry) {
2372
2373 if (fcport->flags & FCF_FABRIC_DEVICE) {
2374 if (fcport->flags &
2375 FCF_TAPE_PRESENT)
2376 ha->isp_ops->fabric_logout(
2377 ha, fcport->loop_id,
2378 fcport->d_id.b.domain,
2379 fcport->d_id.b.area,
2380 fcport->d_id.b.al_pa);
2381 status = qla2x00_fabric_login(
2382 ha, fcport, &next_loopid);
2383 } else
2384 status =
2385 qla2x00_local_device_login(
2386 ha, fcport);
2387
2388 fcport->login_retry--;
2389 if (status == QLA_SUCCESS) {
2390 fcport->old_loop_id = fcport->loop_id;
2391
2392 DEBUG(printk("scsi(%ld): port login OK: logged in ID 0x%x\n",
2393 ha->host_no, fcport->loop_id));
2394
2395 qla2x00_update_fcport(ha,
2396 fcport);
2397 } else if (status == 1) {
2398 set_bit(RELOGIN_NEEDED, &ha->dpc_flags);
2399 /* retry the login again */
2400 DEBUG(printk("scsi(%ld): Retrying %d login again loop_id 0x%x\n",
2401 ha->host_no,
2402 fcport->login_retry, fcport->loop_id));
2403 } else {
2404 fcport->login_retry = 0;
2405 }
2406 if (fcport->login_retry == 0 && status != QLA_SUCCESS)
2407 fcport->loop_id = FC_NO_LOOP_ID;
2408 }
2409 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
2410 break;
2411 }
2412 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n", 2513 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
2413 ha->host_no)); 2514 base_vha->host_no));
2414 } 2515 }
2415 2516
2416 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2517 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
2518 &base_vha->dpc_flags)) {
2417 2519
2418 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n", 2520 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
2419 ha->host_no)); 2521 base_vha->host_no));
2420 2522
2421 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 2523 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
2422 &ha->dpc_flags))) { 2524 &base_vha->dpc_flags))) {
2423 2525
2424 rval = qla2x00_loop_resync(ha); 2526 rval = qla2x00_loop_resync(base_vha);
2425 2527
2426 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags); 2528 clear_bit(LOOP_RESYNC_ACTIVE,
2529 &base_vha->dpc_flags);
2427 } 2530 }
2428 2531
2429 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n", 2532 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
2430 ha->host_no)); 2533 base_vha->host_no));
2431 } 2534 }
2432 2535
2433 if (test_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags) && 2536 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
2434 atomic_read(&ha->loop_state) == LOOP_READY) { 2537 atomic_read(&base_vha->loop_state) == LOOP_READY) {
2435 clear_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags); 2538 clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
2436 qla2xxx_flash_npiv_conf(ha); 2539 qla2xxx_flash_npiv_conf(base_vha);
2437 } 2540 }
2438 2541
2439 if (!ha->interrupts_on) 2542 if (!ha->interrupts_on)
2440 ha->isp_ops->enable_intrs(ha); 2543 ha->isp_ops->enable_intrs(ha);
2441 2544
2442 if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags)) 2545 if (test_and_clear_bit(BEACON_BLINK_NEEDED,
2443 ha->isp_ops->beacon_blink(ha); 2546 &base_vha->dpc_flags))
2547 ha->isp_ops->beacon_blink(base_vha);
2444 2548
2445 qla2x00_do_dpc_all_vps(ha); 2549 qla2x00_do_dpc_all_vps(base_vha);
2446 2550
2447 ha->dpc_active = 0; 2551 ha->dpc_active = 0;
2448 } /* End of while(1) */ 2552 } /* End of while(1) */
2449 2553
2450 DEBUG(printk("scsi(%ld): DPC handler exiting\n", ha->host_no)); 2554 DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
2451 2555
2452 /* 2556 /*
2453 * Make sure that nobody tries to wake us up again. 2557 * Make sure that nobody tries to wake us up again.
@@ -2458,11 +2562,12 @@ qla2x00_do_dpc(void *data)
2458} 2562}
2459 2563
2460void 2564void
2461qla2xxx_wake_dpc(scsi_qla_host_t *ha) 2565qla2xxx_wake_dpc(struct scsi_qla_host *vha)
2462{ 2566{
2567 struct qla_hw_data *ha = vha->hw;
2463 struct task_struct *t = ha->dpc_thread; 2568 struct task_struct *t = ha->dpc_thread;
2464 2569
2465 if (!test_bit(UNLOADING, &ha->dpc_flags) && t) 2570 if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
2466 wake_up_process(t); 2571 wake_up_process(t);
2467} 2572}
2468 2573
@@ -2474,26 +2579,26 @@ qla2xxx_wake_dpc(scsi_qla_host_t *ha)
2474* ha = adapter block pointer. 2579* ha = adapter block pointer.
2475*/ 2580*/
2476static void 2581static void
2477qla2x00_rst_aen(scsi_qla_host_t *ha) 2582qla2x00_rst_aen(scsi_qla_host_t *vha)
2478{ 2583{
2479 if (ha->flags.online && !ha->flags.reset_active && 2584 if (vha->flags.online && !vha->flags.reset_active &&
2480 !atomic_read(&ha->loop_down_timer) && 2585 !atomic_read(&vha->loop_down_timer) &&
2481 !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) { 2586 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
2482 do { 2587 do {
2483 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 2588 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
2484 2589
2485 /* 2590 /*
2486 * Issue marker command only when we are going to start 2591 * Issue marker command only when we are going to start
2487 * the I/O. 2592 * the I/O.
2488 */ 2593 */
2489 ha->marker_needed = 1; 2594 vha->marker_needed = 1;
2490 } while (!atomic_read(&ha->loop_down_timer) && 2595 } while (!atomic_read(&vha->loop_down_timer) &&
2491 (test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags))); 2596 (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
2492 } 2597 }
2493} 2598}
2494 2599
2495static void 2600static void
2496qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp) 2601qla2x00_sp_free_dma(srb_t *sp)
2497{ 2602{
2498 struct scsi_cmnd *cmd = sp->cmd; 2603 struct scsi_cmnd *cmd = sp->cmd;
2499 2604
@@ -2505,11 +2610,12 @@ qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp)
2505} 2610}
2506 2611
2507void 2612void
2508qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp) 2613qla2x00_sp_compl(scsi_qla_host_t *vha, srb_t *sp)
2509{ 2614{
2615 struct qla_hw_data *ha = vha->hw;
2510 struct scsi_cmnd *cmd = sp->cmd; 2616 struct scsi_cmnd *cmd = sp->cmd;
2511 2617
2512 qla2x00_sp_free_dma(ha, sp); 2618 qla2x00_sp_free_dma(sp);
2513 2619
2514 mempool_free(sp, ha->srb_mempool); 2620 mempool_free(sp, ha->srb_mempool);
2515 2621
@@ -2525,7 +2631,7 @@ qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp)
2525* Context: Interrupt 2631* Context: Interrupt
2526***************************************************************************/ 2632***************************************************************************/
2527void 2633void
2528qla2x00_timer(scsi_qla_host_t *ha) 2634qla2x00_timer(scsi_qla_host_t *vha)
2529{ 2635{
2530 unsigned long cpu_flags = 0; 2636 unsigned long cpu_flags = 0;
2531 fc_port_t *fcport; 2637 fc_port_t *fcport;
@@ -2533,8 +2639,8 @@ qla2x00_timer(scsi_qla_host_t *ha)
2533 int index; 2639 int index;
2534 srb_t *sp; 2640 srb_t *sp;
2535 int t; 2641 int t;
2536 scsi_qla_host_t *pha = to_qla_parent(ha); 2642 struct qla_hw_data *ha = vha->hw;
2537 2643 struct req_que *req = ha->req;
2538 /* 2644 /*
2539 * Ports - Port down timer. 2645 * Ports - Port down timer.
2540 * 2646 *
@@ -2543,7 +2649,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
2543 * the port it marked DEAD. 2649 * the port it marked DEAD.
2544 */ 2650 */
2545 t = 0; 2651 t = 0;
2546 list_for_each_entry(fcport, &ha->fcports, list) { 2652 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2547 if (fcport->port_type != FCT_TARGET) 2653 if (fcport->port_type != FCT_TARGET)
2548 continue; 2654 continue;
2549 2655
@@ -2557,7 +2663,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
2557 2663
2558 DEBUG(printk("scsi(%ld): fcport-%d - port retry count: " 2664 DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
2559 "%d remaining\n", 2665 "%d remaining\n",
2560 ha->host_no, 2666 vha->host_no,
2561 t, atomic_read(&fcport->port_down_timer))); 2667 t, atomic_read(&fcport->port_down_timer)));
2562 } 2668 }
2563 t++; 2669 t++;
@@ -2565,22 +2671,23 @@ qla2x00_timer(scsi_qla_host_t *ha)
2565 2671
2566 2672
2567 /* Loop down handler. */ 2673 /* Loop down handler. */
2568 if (atomic_read(&ha->loop_down_timer) > 0 && 2674 if (atomic_read(&vha->loop_down_timer) > 0 &&
2569 !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) && ha->flags.online) { 2675 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
2676 && vha->flags.online) {
2570 2677
2571 if (atomic_read(&ha->loop_down_timer) == 2678 if (atomic_read(&vha->loop_down_timer) ==
2572 ha->loop_down_abort_time) { 2679 vha->loop_down_abort_time) {
2573 2680
2574 DEBUG(printk("scsi(%ld): Loop Down - aborting the " 2681 DEBUG(printk("scsi(%ld): Loop Down - aborting the "
2575 "queues before time expire\n", 2682 "queues before time expire\n",
2576 ha->host_no)); 2683 vha->host_no));
2577 2684
2578 if (!IS_QLA2100(ha) && ha->link_down_timeout) 2685 if (!IS_QLA2100(ha) && vha->link_down_timeout)
2579 atomic_set(&ha->loop_state, LOOP_DEAD); 2686 atomic_set(&vha->loop_state, LOOP_DEAD);
2580 2687
2581 /* Schedule an ISP abort to return any tape commands. */ 2688 /* Schedule an ISP abort to return any tape commands. */
2582 /* NPIV - scan physical port only */ 2689 /* NPIV - scan physical port only */
2583 if (!ha->parent) { 2690 if (!vha->vp_idx) {
2584 spin_lock_irqsave(&ha->hardware_lock, 2691 spin_lock_irqsave(&ha->hardware_lock,
2585 cpu_flags); 2692 cpu_flags);
2586 for (index = 1; 2693 for (index = 1;
@@ -2588,7 +2695,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
2588 index++) { 2695 index++) {
2589 fc_port_t *sfcp; 2696 fc_port_t *sfcp;
2590 2697
2591 sp = ha->outstanding_cmds[index]; 2698 sp = req->outstanding_cmds[index];
2592 if (!sp) 2699 if (!sp)
2593 continue; 2700 continue;
2594 sfcp = sp->fcport; 2701 sfcp = sp->fcport;
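
This hunk is point 1 of the commit message in action: the array of outstanding commands now hangs off the request queue (reached as ha->req in this commit) instead of living in scsi_qla_host, so the timer indexes req->outstanding_cmds[]. A rough userspace sketch of that lookup, with hypothetical simplified types and an illustrative array size:

#include <stdio.h>

#define MAX_OUTSTANDING_COMMANDS 32          /* illustrative size only */

struct srb;                                  /* opaque command wrapper */

struct req_que {                             /* request queue owns the array now */
	struct srb *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
};

struct qla_hw_data {
	struct req_que *req;
};

struct scsi_qla_host {
	struct qla_hw_data *hw;
};

/* Walk the outstanding commands the way the loop above does:
 * start at index 1 and skip empty slots. */
static int any_commands_outstanding(struct scsi_qla_host *vha)
{
	struct req_que *req = vha->hw->req;
	int index;

	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++)
		if (req->outstanding_cmds[index])
			return 1;
	return 0;
}

int main(void)
{
	struct req_que req = { { NULL } };
	struct qla_hw_data hw = { .req = &req };
	struct scsi_qla_host vha = { .hw = &hw };

	printf("outstanding commands: %s\n",
	       any_commands_outstanding(&vha) ? "yes" : "no");
	return 0;
}
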
@@ -2596,63 +2703,63 @@ qla2x00_timer(scsi_qla_host_t *ha)
2596 continue; 2703 continue;
2597 2704
2598 set_bit(ISP_ABORT_NEEDED, 2705 set_bit(ISP_ABORT_NEEDED,
2599 &ha->dpc_flags); 2706 &vha->dpc_flags);
2600 break; 2707 break;
2601 } 2708 }
2602 spin_unlock_irqrestore(&ha->hardware_lock, 2709 spin_unlock_irqrestore(&ha->hardware_lock,
2603 cpu_flags); 2710 cpu_flags);
2604 } 2711 }
2605 set_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags); 2712 set_bit(ABORT_QUEUES_NEEDED, &vha->dpc_flags);
2606 start_dpc++; 2713 start_dpc++;
2607 } 2714 }
2608 2715
2609 /* if the loop has been down for 4 minutes, reinit adapter */ 2716 /* if the loop has been down for 4 minutes, reinit adapter */
2610 if (atomic_dec_and_test(&ha->loop_down_timer) != 0) { 2717 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
2611 DEBUG(printk("scsi(%ld): Loop down exceed 4 mins - " 2718 DEBUG(printk("scsi(%ld): Loop down exceed 4 mins - "
2612 "restarting queues.\n", 2719 "restarting queues.\n",
2613 ha->host_no)); 2720 vha->host_no));
2614 2721
2615 set_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags); 2722 set_bit(RESTART_QUEUES_NEEDED, &vha->dpc_flags);
2616 start_dpc++; 2723 start_dpc++;
2617 2724
2618 if (!(ha->device_flags & DFLG_NO_CABLE) && 2725 if (!(vha->device_flags & DFLG_NO_CABLE) &&
2619 !ha->parent) { 2726 !vha->vp_idx) {
2620 DEBUG(printk("scsi(%ld): Loop down - " 2727 DEBUG(printk("scsi(%ld): Loop down - "
2621 "aborting ISP.\n", 2728 "aborting ISP.\n",
2622 ha->host_no)); 2729 vha->host_no));
2623 qla_printk(KERN_WARNING, ha, 2730 qla_printk(KERN_WARNING, ha,
2624 "Loop down - aborting ISP.\n"); 2731 "Loop down - aborting ISP.\n");
2625 2732
2626 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 2733 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2627 } 2734 }
2628 } 2735 }
2629 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n", 2736 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
2630 ha->host_no, 2737 vha->host_no,
2631 atomic_read(&ha->loop_down_timer))); 2738 atomic_read(&vha->loop_down_timer)));
2632 } 2739 }
2633 2740
2634 /* Check if beacon LED needs to be blinked */ 2741 /* Check if beacon LED needs to be blinked */
2635 if (ha->beacon_blink_led == 1) { 2742 if (ha->beacon_blink_led == 1) {
2636 set_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags); 2743 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
2637 start_dpc++; 2744 start_dpc++;
2638 } 2745 }
2639 2746
2640 /* Process any deferred work. */ 2747 /* Process any deferred work. */
2641 if (!list_empty(&ha->work_list)) 2748 if (!list_empty(&vha->work_list))
2642 start_dpc++; 2749 start_dpc++;
2643 2750
2644 /* Schedule the DPC routine if needed */ 2751 /* Schedule the DPC routine if needed */
2645 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || 2752 if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
2646 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || 2753 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
2647 test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags) || 2754 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
2648 start_dpc || 2755 start_dpc ||
2649 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) || 2756 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
2650 test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) || 2757 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
2651 test_bit(VP_DPC_NEEDED, &ha->dpc_flags) || 2758 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
2652 test_bit(RELOGIN_NEEDED, &ha->dpc_flags))) 2759 test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
2653 qla2xxx_wake_dpc(pha); 2760 qla2xxx_wake_dpc(vha);
2654 2761
2655 qla2x00_restart_timer(ha, WATCH_INTERVAL); 2762 qla2x00_restart_timer(vha, WATCH_INTERVAL);
2656} 2763}
2657 2764
2658/* Firmware interface routines. */ 2765/* Firmware interface routines. */
@@ -2684,8 +2791,9 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
2684}; 2791};
2685 2792
2686struct fw_blob * 2793struct fw_blob *
2687qla2x00_request_firmware(scsi_qla_host_t *ha) 2794qla2x00_request_firmware(scsi_qla_host_t *vha)
2688{ 2795{
2796 struct qla_hw_data *ha = vha->hw;
2689 struct fw_blob *blob; 2797 struct fw_blob *blob;
2690 2798
2691 blob = NULL; 2799 blob = NULL;
@@ -2709,7 +2817,7 @@ qla2x00_request_firmware(scsi_qla_host_t *ha)
2709 2817
2710 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { 2818 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
2711 DEBUG2(printk("scsi(%ld): Failed to load firmware image " 2819 DEBUG2(printk("scsi(%ld): Failed to load firmware image "
2712 "(%s).\n", ha->host_no, blob->name)); 2820 "(%s).\n", vha->host_no, blob->name));
2713 blob->fw = NULL; 2821 blob->fw = NULL;
2714 blob = NULL; 2822 blob = NULL;
2715 goto out; 2823 goto out;
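
The firmware hunks keep the blob table but fetch the image against the hardware's PCI device (ha->pdev, taken from vha->hw) while logging with the port's host_no; request_firmware() is the stock kernel API used for the fetch. A loose userspace sketch of a lookup-and-load-once blob cache in the same spirit: fetch_image() is a hypothetical stand-in for request_firmware(), the load-once check is an assumption for illustration, and only the failure handling (clear blob->fw and report no blob) mirrors the hunk above:

#include <stdio.h>
#include <string.h>

/* Simplified blob cache: one entry per firmware image name
 * (names modeled on the driver's ql2xxx firmware files). */
struct fw_blob {
	const char *name;
	const void *fw;                       /* NULL until fetched */
};

static struct fw_blob fw_blobs[] = {
	{ .name = "ql2100_fw.bin" },
	{ .name = "ql2300_fw.bin" },
	{ .name = "ql2400_fw.bin" },
};

/* Hypothetical stand-in for request_firmware(); returns 0 on success. */
static int fetch_image(const void **fw, const char *name)
{
	static const char image[] = "firmware bytes";

	(void)name;
	*fw = image;
	return 0;
}

/* Find the named blob and fetch its image on first use; on a failed
 * fetch, clear blob->fw and return no blob, as the hunk above does. */
static struct fw_blob *request_blob(const char *name)
{
	size_t i;

	for (i = 0; i < sizeof(fw_blobs) / sizeof(fw_blobs[0]); i++) {
		struct fw_blob *blob = &fw_blobs[i];

		if (strcmp(blob->name, name))
			continue;
		if (blob->fw)                 /* assumed load-once caching */
			return blob;
		if (fetch_image(&blob->fw, blob->name)) {
			printf("Failed to load firmware image (%s).\n", name);
			blob->fw = NULL;
			return NULL;
		}
		return blob;
	}
	return NULL;
}

int main(void)
{
	struct fw_blob *blob = request_blob("ql2300_fw.bin");

	printf("%s -> %s\n", blob ? blob->name : "(none)",
	       blob && blob->fw ? "loaded" : "missing");
	return 0;
}
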
@@ -2754,7 +2862,8 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
2754 int risc_paused = 0; 2862 int risc_paused = 0;
2755 uint32_t stat; 2863 uint32_t stat;
2756 unsigned long flags; 2864 unsigned long flags;
2757 scsi_qla_host_t *ha = pci_get_drvdata(pdev); 2865 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
2866 struct qla_hw_data *ha = base_vha->hw;
2758 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 2867 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2759 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 2868 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
2760 2869
@@ -2777,7 +2886,7 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
2777 if (risc_paused) { 2886 if (risc_paused) {
2778 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, " 2887 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
2779 "Dumping firmware!\n"); 2888 "Dumping firmware!\n");
2780 ha->isp_ops->fw_dump(ha, 0); 2889 ha->isp_ops->fw_dump(base_vha, 0);
2781 2890
2782 return PCI_ERS_RESULT_NEED_RESET; 2891 return PCI_ERS_RESULT_NEED_RESET;
2783 } else 2892 } else
@@ -2788,7 +2897,8 @@ static pci_ers_result_t
2788qla2xxx_pci_slot_reset(struct pci_dev *pdev) 2897qla2xxx_pci_slot_reset(struct pci_dev *pdev)
2789{ 2898{
2790 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 2899 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
2791 scsi_qla_host_t *ha = pci_get_drvdata(pdev); 2900 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
2901 struct qla_hw_data *ha = base_vha->hw;
2792 int rc; 2902 int rc;
2793 2903
2794 if (ha->mem_only) 2904 if (ha->mem_only)
@@ -2804,13 +2914,13 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
2804 } 2914 }
2805 pci_set_master(pdev); 2915 pci_set_master(pdev);
2806 2916
2807 if (ha->isp_ops->pci_config(ha)) 2917 if (ha->isp_ops->pci_config(base_vha))
2808 return ret; 2918 return ret;
2809 2919
2810 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 2920 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
2811 if (qla2x00_abort_isp(ha)== QLA_SUCCESS) 2921 if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
2812 ret = PCI_ERS_RESULT_RECOVERED; 2922 ret = PCI_ERS_RESULT_RECOVERED;
2813 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 2923 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
2814 2924
2815 return ret; 2925 return ret;
2816} 2926}
@@ -2818,10 +2928,11 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
2818static void 2928static void
2819qla2xxx_pci_resume(struct pci_dev *pdev) 2929qla2xxx_pci_resume(struct pci_dev *pdev)
2820{ 2930{
2821 scsi_qla_host_t *ha = pci_get_drvdata(pdev); 2931 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
2932 struct qla_hw_data *ha = base_vha->hw;
2822 int ret; 2933 int ret;
2823 2934
2824 ret = qla2x00_wait_for_hba_online(ha); 2935 ret = qla2x00_wait_for_hba_online(base_vha);
2825 if (ret != QLA_SUCCESS) { 2936 if (ret != QLA_SUCCESS) {
2826 qla_printk(KERN_ERR, ha, 2937 qla_printk(KERN_ERR, ha,
2827 "the device failed to resume I/O " 2938 "the device failed to resume I/O "
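
The PCI error-recovery callbacks show the third piece of the refactoring: pci_get_drvdata(pdev) now yields the base (physical-port) scsi_qla_host, and hardware-specific pieces such as the isp_ops method table are reached through base_vha->hw, while qla2x00_abort_isp() is still called directly on the base port. A compact sketch of the slot-reset sequence with stand-in types and stubbed hardware methods:

#include <stdio.h>

enum ers_result { ERS_RECOVERED, ERS_DISCONNECT };   /* models pci_ers_result_t */

struct scsi_qla_host;

struct isp_ops {                     /* hardware-specific method table */
	int (*pci_config)(struct scsi_qla_host *);
};

struct qla_hw_data {
	struct isp_ops *isp_ops;
};

struct scsi_qla_host {               /* base (physical port) host */
	struct qla_hw_data *hw;
	unsigned long dpc_flags;
};

#define ABORT_ISP_ACTIVE (1UL << 0)

/* Stubbed hardware methods; a real driver reprograms the chip here. */
static int stub_pci_config(struct scsi_qla_host *vha) { (void)vha; return 0; }
static int stub_abort_isp(struct scsi_qla_host *vha)  { (void)vha; return 0; }

/* Rough shape of qla2xxx_pci_slot_reset() after the refactoring:
 * reconfigure PCI through isp_ops, then abort/reinit the ISP while
 * ABORT_ISP_ACTIVE is held on the base port. */
static enum ers_result slot_reset(struct scsi_qla_host *base_vha)
{
	struct qla_hw_data *ha = base_vha->hw;
	enum ers_result ret = ERS_DISCONNECT;

	if (ha->isp_ops->pci_config(base_vha))
		return ret;

	base_vha->dpc_flags |= ABORT_ISP_ACTIVE;
	if (stub_abort_isp(base_vha) == 0)   /* qla2x00_abort_isp() in the driver */
		ret = ERS_RECOVERED;
	base_vha->dpc_flags &= ~ABORT_ISP_ACTIVE;

	return ret;
}

int main(void)
{
	struct isp_ops ops = { .pci_config = stub_pci_config };
	struct qla_hw_data hw = { .isp_ops = &ops };
	struct scsi_qla_host base_vha = { .hw = &hw };

	printf("slot reset -> %s\n",
	       slot_reset(&base_vha) == ERS_RECOVERED ? "recovered" : "disconnect");
	return 0;
}
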
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index eea6720adf16..54b1100810b4 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.01-k9" 10#define QLA2XXX_VERSION "8.02.02-k1"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
14#define QLA_DRIVER_PATCH_VER 1 14#define QLA_DRIVER_PATCH_VER 2
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 0