aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/qla2xxx/qla_os.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/scsi/qla2xxx/qla_os.c')
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c770
1 files changed, 741 insertions, 29 deletions
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index fb8cd3847d4b..c7dd29876836 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -113,11 +113,11 @@ MODULE_PARM_DESC(ql2xfdmienable,
113static int ql2xmaxqdepth = MAX_Q_DEPTH; 113static int ql2xmaxqdepth = MAX_Q_DEPTH;
114module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR); 114module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
115MODULE_PARM_DESC(ql2xmaxqdepth, 115MODULE_PARM_DESC(ql2xmaxqdepth,
116 "Maximum queue depth to report for target devices."); 116 "Maximum queue depth to set for each LUN. "
117 "Default is 32.");
117 118
118/* Do not change the value of this after module load */ 119int ql2xenabledif = 2;
119int ql2xenabledif = 0; 120module_param(ql2xenabledif, int, S_IRUGO);
120module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
121MODULE_PARM_DESC(ql2xenabledif, 121MODULE_PARM_DESC(ql2xenabledif,
122 " Enable T10-CRC-DIF " 122 " Enable T10-CRC-DIF "
123 " Default is 0 - No DIF Support. 1 - Enable it" 123 " Default is 0 - No DIF Support. 1 - Enable it"
@@ -1078,7 +1078,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
1078 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, 1078 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
1079 cmd->device->lun, type) != QLA_SUCCESS) { 1079 cmd->device->lun, type) != QLA_SUCCESS) {
1080 ql_log(ql_log_warn, vha, 0x800d, 1080 ql_log(ql_log_warn, vha, 0x800d,
1081 "wait for peding cmds failed for cmd=%p.\n", cmd); 1081 "wait for pending cmds failed for cmd=%p.\n", cmd);
1082 goto eh_reset_failed; 1082 goto eh_reset_failed;
1083 } 1083 }
1084 1084
@@ -1177,7 +1177,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1177eh_bus_reset_done: 1177eh_bus_reset_done:
1178 ql_log(ql_log_warn, vha, 0x802b, 1178 ql_log(ql_log_warn, vha, 0x802b,
1179 "BUS RESET %s nexus=%ld:%d:%d.\n", 1179 "BUS RESET %s nexus=%ld:%d:%d.\n",
1180 (ret == FAILED) ? "FAILED" : "SUCCEDED", vha->host_no, id, lun); 1180 (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
1181 1181
1182 return ret; 1182 return ret;
1183} 1183}
@@ -1357,6 +1357,9 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1357 scsi_qla_host_t *vha = shost_priv(sdev->host); 1357 scsi_qla_host_t *vha = shost_priv(sdev->host);
1358 struct req_que *req = vha->req; 1358 struct req_que *req = vha->req;
1359 1359
1360 if (IS_T10_PI_CAPABLE(vha->hw))
1361 blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1362
1360 if (sdev->tagged_supported) 1363 if (sdev->tagged_supported)
1361 scsi_activate_tcq(sdev, req->max_q_depth); 1364 scsi_activate_tcq(sdev, req->max_q_depth);
1362 else 1365 else
@@ -1919,7 +1922,7 @@ static struct isp_operations qla82xx_isp_ops = {
1919 .nvram_config = qla81xx_nvram_config, 1922 .nvram_config = qla81xx_nvram_config,
1920 .update_fw_options = qla24xx_update_fw_options, 1923 .update_fw_options = qla24xx_update_fw_options,
1921 .load_risc = qla82xx_load_risc, 1924 .load_risc = qla82xx_load_risc,
1922 .pci_info_str = qla82xx_pci_info_str, 1925 .pci_info_str = qla24xx_pci_info_str,
1923 .fw_version_str = qla24xx_fw_version_str, 1926 .fw_version_str = qla24xx_fw_version_str,
1924 .intr_handler = qla82xx_intr_handler, 1927 .intr_handler = qla82xx_intr_handler,
1925 .enable_intrs = qla82xx_enable_intrs, 1928 .enable_intrs = qla82xx_enable_intrs,
@@ -2149,7 +2152,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2149 scsi_qla_host_t *base_vha = NULL; 2152 scsi_qla_host_t *base_vha = NULL;
2150 struct qla_hw_data *ha; 2153 struct qla_hw_data *ha;
2151 char pci_info[30]; 2154 char pci_info[30];
2152 char fw_str[30]; 2155 char fw_str[30], wq_name[30];
2153 struct scsi_host_template *sht; 2156 struct scsi_host_template *sht;
2154 int bars, mem_only = 0; 2157 int bars, mem_only = 0;
2155 uint16_t req_length = 0, rsp_length = 0; 2158 uint16_t req_length = 0, rsp_length = 0;
@@ -2203,12 +2206,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2203 ha->mem_only = mem_only; 2206 ha->mem_only = mem_only;
2204 spin_lock_init(&ha->hardware_lock); 2207 spin_lock_init(&ha->hardware_lock);
2205 spin_lock_init(&ha->vport_slock); 2208 spin_lock_init(&ha->vport_slock);
2209 mutex_init(&ha->selflogin_lock);
2206 2210
2207 /* Set ISP-type information. */ 2211 /* Set ISP-type information. */
2208 qla2x00_set_isp_flags(ha); 2212 qla2x00_set_isp_flags(ha);
2209 2213
2210 /* Set EEH reset type to fundamental if required by hba */ 2214 /* Set EEH reset type to fundamental if required by hba */
2211 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) 2215 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
2216 IS_QLA83XX(ha))
2212 pdev->needs_freset = 1; 2217 pdev->needs_freset = 1;
2213 2218
2214 ha->prev_topology = 0; 2219 ha->prev_topology = 0;
@@ -2318,6 +2323,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2318 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2323 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2319 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2324 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2320 } else if (IS_QLA83XX(ha)) { 2325 } else if (IS_QLA83XX(ha)) {
2326 ha->portnum = PCI_FUNC(ha->pdev->devfn);
2321 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2327 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2322 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2328 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2323 req_length = REQUEST_ENTRY_CNT_24XX; 2329 req_length = REQUEST_ENTRY_CNT_24XX;
@@ -2416,7 +2422,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2416 host->can_queue, base_vha->req, 2422 host->can_queue, base_vha->req,
2417 base_vha->mgmt_svr_loop_id, host->sg_tablesize); 2423 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
2418 host->max_id = ha->max_fibre_devices; 2424 host->max_id = ha->max_fibre_devices;
2419 host->this_id = 255;
2420 host->cmd_per_lun = 3; 2425 host->cmd_per_lun = 3;
2421 host->unique_id = host->host_no; 2426 host->unique_id = host->host_no;
2422 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) 2427 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
@@ -2499,7 +2504,7 @@ que_init:
2499 if (IS_QLA82XX(ha)) { 2504 if (IS_QLA82XX(ha)) {
2500 qla82xx_idc_lock(ha); 2505 qla82xx_idc_lock(ha);
2501 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2506 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2502 QLA82XX_DEV_FAILED); 2507 QLA8XXX_DEV_FAILED);
2503 qla82xx_idc_unlock(ha); 2508 qla82xx_idc_unlock(ha);
2504 ql_log(ql_log_fatal, base_vha, 0x00d7, 2509 ql_log(ql_log_fatal, base_vha, 0x00d7,
2505 "HW State: FAILED.\n"); 2510 "HW State: FAILED.\n");
@@ -2542,6 +2547,20 @@ que_init:
2542 */ 2547 */
2543 qla2xxx_wake_dpc(base_vha); 2548 qla2xxx_wake_dpc(base_vha);
2544 2549
2550 if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
2551 sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
2552 ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
2553 INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);
2554
2555 sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
2556 ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
2557 INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
2558 INIT_WORK(&ha->idc_state_handler,
2559 qla83xx_idc_state_handler_work);
2560 INIT_WORK(&ha->nic_core_unrecoverable,
2561 qla83xx_nic_core_unrecoverable_work);
2562 }
2563
2545skip_dpc: 2564skip_dpc:
2546 list_add_tail(&base_vha->list, &ha->vp_list); 2565 list_add_tail(&base_vha->list, &ha->vp_list);
2547 base_vha->host->irq = ha->pdev->irq; 2566 base_vha->host->irq = ha->pdev->irq;
@@ -2557,7 +2576,7 @@ skip_dpc:
2557 2576
2558 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 2577 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
2559 if (ha->fw_attributes & BIT_4) { 2578 if (ha->fw_attributes & BIT_4) {
2560 int prot = 0; 2579 int prot = 0, guard;
2561 base_vha->flags.difdix_supported = 1; 2580 base_vha->flags.difdix_supported = 1;
2562 ql_dbg(ql_dbg_init, base_vha, 0x00f1, 2581 ql_dbg(ql_dbg_init, base_vha, 0x00f1,
2563 "Registering for DIF/DIX type 1 and 3 protection.\n"); 2582 "Registering for DIF/DIX type 1 and 3 protection.\n");
@@ -2570,7 +2589,14 @@ skip_dpc:
2570 | SHOST_DIX_TYPE1_PROTECTION 2589 | SHOST_DIX_TYPE1_PROTECTION
2571 | SHOST_DIX_TYPE2_PROTECTION 2590 | SHOST_DIX_TYPE2_PROTECTION
2572 | SHOST_DIX_TYPE3_PROTECTION); 2591 | SHOST_DIX_TYPE3_PROTECTION);
2573 scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC); 2592
2593 guard = SHOST_DIX_GUARD_CRC;
2594
2595 if (IS_PI_IPGUARD_CAPABLE(ha) &&
2596 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
2597 guard |= SHOST_DIX_GUARD_IP;
2598
2599 scsi_host_set_guard(host, guard);
2574 } else 2600 } else
2575 base_vha->flags.difdix_supported = 0; 2601 base_vha->flags.difdix_supported = 0;
2576 } 2602 }
@@ -2750,6 +2776,14 @@ qla2x00_remove_one(struct pci_dev *pdev)
2750 } 2776 }
2751 mutex_unlock(&ha->vport_lock); 2777 mutex_unlock(&ha->vport_lock);
2752 2778
2779 if (IS_QLA8031(ha)) {
2780 ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
2781 "Clearing fcoe driver presence.\n");
2782 if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
2783 ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
2784 "Error while clearing DRV-Presence.\n");
2785 }
2786
2753 set_bit(UNLOADING, &base_vha->dpc_flags); 2787 set_bit(UNLOADING, &base_vha->dpc_flags);
2754 2788
2755 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 2789 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
@@ -2771,6 +2805,21 @@ qla2x00_remove_one(struct pci_dev *pdev)
2771 ha->wq = NULL; 2805 ha->wq = NULL;
2772 } 2806 }
2773 2807
2808 /* Cancel all work and destroy DPC workqueues */
2809 if (ha->dpc_lp_wq) {
2810 cancel_work_sync(&ha->idc_aen);
2811 destroy_workqueue(ha->dpc_lp_wq);
2812 ha->dpc_lp_wq = NULL;
2813 }
2814
2815 if (ha->dpc_hp_wq) {
2816 cancel_work_sync(&ha->nic_core_reset);
2817 cancel_work_sync(&ha->idc_state_handler);
2818 cancel_work_sync(&ha->nic_core_unrecoverable);
2819 destroy_workqueue(ha->dpc_hp_wq);
2820 ha->dpc_hp_wq = NULL;
2821 }
2822
2774 /* Kill the kernel thread for this host */ 2823 /* Kill the kernel thread for this host */
2775 if (ha->dpc_thread) { 2824 if (ha->dpc_thread) {
2776 struct task_struct *t = ha->dpc_thread; 2825 struct task_struct *t = ha->dpc_thread;
@@ -2837,7 +2886,6 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2837 qla2x00_stop_dpc_thread(vha); 2886 qla2x00_stop_dpc_thread(vha);
2838 2887
2839 qla25xx_delete_queues(vha); 2888 qla25xx_delete_queues(vha);
2840
2841 if (ha->flags.fce_enabled) 2889 if (ha->flags.fce_enabled)
2842 qla2x00_disable_fce_trace(vha, NULL, NULL); 2890 qla2x00_disable_fce_trace(vha, NULL, NULL);
2843 2891
@@ -2872,6 +2920,7 @@ void qla2x00_free_fcports(struct scsi_qla_host *vha)
2872 2920
2873 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) { 2921 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
2874 list_del(&fcport->list); 2922 list_del(&fcport->list);
2923 qla2x00_clear_loop_id(fcport);
2875 kfree(fcport); 2924 kfree(fcport);
2876 fcport = NULL; 2925 fcport = NULL;
2877 } 2926 }
@@ -3169,6 +3218,18 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3169 } 3218 }
3170 3219
3171 INIT_LIST_HEAD(&ha->vp_list); 3220 INIT_LIST_HEAD(&ha->vp_list);
3221
3222 /* Allocate memory for our loop_id bitmap */
3223 ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
3224 GFP_KERNEL);
3225 if (!ha->loop_id_map)
3226 goto fail_async_pd;
3227 else {
3228 qla2x00_set_reserved_loop_ids(ha);
3229 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
3230 "loop_id_map=%p. \n", ha->loop_id_map);
3231 }
3232
3172 return 1; 3233 return 1;
3173 3234
3174fail_async_pd: 3235fail_async_pd:
@@ -3280,6 +3341,10 @@ qla2x00_mem_free(struct qla_hw_data *ha)
3280{ 3341{
3281 qla2x00_free_fw_dump(ha); 3342 qla2x00_free_fw_dump(ha);
3282 3343
3344 if (ha->mctp_dump)
3345 dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
3346 ha->mctp_dump_dma);
3347
3283 if (ha->srb_mempool) 3348 if (ha->srb_mempool)
3284 mempool_destroy(ha->srb_mempool); 3349 mempool_destroy(ha->srb_mempool);
3285 3350
@@ -3352,6 +3417,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
3352 kfree(ha->nvram); 3417 kfree(ha->nvram);
3353 kfree(ha->npiv_info); 3418 kfree(ha->npiv_info);
3354 kfree(ha->swl); 3419 kfree(ha->swl);
3420 kfree(ha->loop_id_map);
3355 3421
3356 ha->srb_mempool = NULL; 3422 ha->srb_mempool = NULL;
3357 ha->ctx_mempool = NULL; 3423 ha->ctx_mempool = NULL;
@@ -3687,13 +3753,651 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
3687 } 3753 }
3688 3754
3689 if (fcport->login_retry == 0 && status != QLA_SUCCESS) 3755 if (fcport->login_retry == 0 && status != QLA_SUCCESS)
3690 fcport->loop_id = FC_NO_LOOP_ID; 3756 qla2x00_clear_loop_id(fcport);
3691 } 3757 }
3692 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 3758 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3693 break; 3759 break;
3694 } 3760 }
3695} 3761}
3696 3762
3763/* Schedule work on any of the dpc-workqueues */
3764void
3765qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code)
3766{
3767 struct qla_hw_data *ha = base_vha->hw;
3768
3769 switch (work_code) {
3770 case MBA_IDC_AEN: /* 0x8200 */
3771 if (ha->dpc_lp_wq)
3772 queue_work(ha->dpc_lp_wq, &ha->idc_aen);
3773 break;
3774
3775 case QLA83XX_NIC_CORE_RESET: /* 0x1 */
3776 if (!ha->flags.nic_core_reset_hdlr_active) {
3777 if (ha->dpc_hp_wq)
3778 queue_work(ha->dpc_hp_wq, &ha->nic_core_reset);
3779 } else
3780 ql_dbg(ql_dbg_p3p, base_vha, 0xb05e,
3781 "NIC Core reset is already active. Skip "
3782 "scheduling it again.\n");
3783 break;
3784 case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */
3785 if (ha->dpc_hp_wq)
3786 queue_work(ha->dpc_hp_wq, &ha->idc_state_handler);
3787 break;
3788 case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */
3789 if (ha->dpc_hp_wq)
3790 queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable);
3791 break;
3792 default:
3793 ql_log(ql_log_warn, base_vha, 0xb05f,
3794 "Unknow work-code=0x%x.\n", work_code);
3795 }
3796
3797 return;
3798}
3799
/* Work: Perform NIC Core Unrecoverable state handling */
void
qla83xx_nic_core_unrecoverable_work(struct work_struct *work)
{
	struct qla_hw_data *ha =
		container_of(work, struct qla_hw_data, nic_core_unrecoverable);
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	uint32_t dev_state = 0;

	qla83xx_idc_lock(base_vha, 0);
	/* NOTE(review): dev_state is read but never used afterwards --
	 * presumably the register read is intentional; confirm before
	 * removing. */
	qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
	qla83xx_reset_ownership(base_vha);
	if (ha->flags.nic_core_reset_owner) {
		/* We owned the reset: publish FAILED state and hand off to
		 * the IDC state handler for failed-state processing. */
		ha->flags.nic_core_reset_owner = 0;
		qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
		    QLA8XXX_DEV_FAILED);
		ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n");
		qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
	}
	qla83xx_idc_unlock(base_vha, 0);
}
3821
/* Work: Execute IDC state handler */
void
qla83xx_idc_state_handler_work(struct work_struct *work)
{
	struct qla_hw_data *ha =
		container_of(work, struct qla_hw_data, idc_state_handler);
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	uint32_t dev_state = 0;

	/* Run the IDC state machine only for the states that require it
	 * (FAILED / NEED-QUIESCENT); other states are handled elsewhere. */
	qla83xx_idc_lock(base_vha, 0);
	qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
	if (dev_state == QLA8XXX_DEV_FAILED ||
	    dev_state == QLA8XXX_DEV_NEED_QUIESCENT)
		qla83xx_idc_state_handler(base_vha);
	qla83xx_idc_unlock(base_vha, 0);
}
3838
3839int
3840qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
3841{
3842 int rval = QLA_SUCCESS;
3843 unsigned long heart_beat_wait = jiffies + (1 * HZ);
3844 uint32_t heart_beat_counter1, heart_beat_counter2;
3845
3846 do {
3847 if (time_after(jiffies, heart_beat_wait)) {
3848 ql_dbg(ql_dbg_p3p, base_vha, 0xb07c,
3849 "Nic Core f/w is not alive.\n");
3850 rval = QLA_FUNCTION_FAILED;
3851 break;
3852 }
3853
3854 qla83xx_idc_lock(base_vha, 0);
3855 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
3856 &heart_beat_counter1);
3857 qla83xx_idc_unlock(base_vha, 0);
3858 msleep(100);
3859 qla83xx_idc_lock(base_vha, 0);
3860 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
3861 &heart_beat_counter2);
3862 qla83xx_idc_unlock(base_vha, 0);
3863 } while (heart_beat_counter1 == heart_beat_counter2);
3864
3865 return rval;
3866}
3867
/* Work: Perform NIC Core Reset handling */
void
qla83xx_nic_core_reset_work(struct work_struct *work)
{
	struct qla_hw_data *ha =
		container_of(work, struct qla_hw_data, nic_core_reset);
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	uint32_t dev_state = 0;

	/* ISP2031 has no NIC core to reset; only capture an MCTP dump. */
	if (IS_QLA2031(ha)) {
		if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS)
			ql_log(ql_log_warn, base_vha, 0xb081,
			    "Failed to dump mctp\n");
		return;
	}

	if (!ha->flags.nic_core_reset_hdlr_active) {
		/* Skip the reset if the firmware heartbeat is alive and no
		 * reset has been requested via the IDC device-state. */
		if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) {
			qla83xx_idc_lock(base_vha, 0);
			qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE,
			    &dev_state);
			qla83xx_idc_unlock(base_vha, 0);
			if (dev_state != QLA8XXX_DEV_NEED_RESET) {
				ql_dbg(ql_dbg_p3p, base_vha, 0xb07a,
				    "Nic Core f/w is alive.\n");
				return;
			}
		}

		/* Guard against concurrent scheduling of the reset handler
		 * (checked by qla83xx_schedule_work). */
		ha->flags.nic_core_reset_hdlr_active = 1;
		if (qla83xx_nic_core_reset(base_vha)) {
			/* NIC Core reset failed. */
			ql_dbg(ql_dbg_p3p, base_vha, 0xb061,
			    "NIC Core reset failed.\n");
		}
		ha->flags.nic_core_reset_hdlr_active = 0;
	}
}
3906
/* Work: Handle 8200 IDC aens */
void
qla83xx_service_idc_aen(struct work_struct *work)
{
	struct qla_hw_data *ha =
		container_of(work, struct qla_hw_data, idc_aen);
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	uint32_t dev_state, idc_control;

	/* Snapshot device-state and IDC-control under the IDC lock, then
	 * decide which follow-up work to schedule. */
	qla83xx_idc_lock(base_vha, 0);
	qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
	qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control);
	qla83xx_idc_unlock(base_vha, 0);
	if (dev_state == QLA8XXX_DEV_NEED_RESET) {
		if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) {
			/* Application-initiated (graceful) reset request. */
			ql_dbg(ql_dbg_p3p, base_vha, 0xb062,
			    "Application requested NIC Core Reset.\n");
			qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
		} else if (qla83xx_check_nic_core_fw_alive(base_vha) ==
		    QLA_SUCCESS) {
			/* Peer protocol driver asked for a reset while the
			 * NIC core firmware is still alive. */
			ql_dbg(ql_dbg_p3p, base_vha, 0xb07b,
			    "Other protocol driver requested NIC Core Reset.\n");
			qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
		}
	} else if (dev_state == QLA8XXX_DEV_FAILED ||
	    dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
		qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
	}
}
3936
/*
 * Back-off helper used between IDC lock/unlock retries: sleep when the
 * context allows it, otherwise spin briefly.
 */
static void
qla83xx_wait_logic(void)
{
	int cnt;

	if (in_interrupt()) {
		/* Cannot sleep here; burn a few cycles instead. */
		for (cnt = 0; cnt < 20; cnt++)
			cpu_relax(); /* This a nop instr on i386 */
	} else {
		/*
		 * Wait about 200ms before retrying again.
		 * This controls the number of retries for single
		 * lock operation.
		 */
		msleep(100);
		schedule();
	}
}
3956
/*
 * Forcefully recover a stuck IDC lock via the lock-recovery register.
 *
 * Protocol (as implemented below): if another function is already in a
 * recovery stage, back off and report success; otherwise advertise
 * STAGE1 with our port number, wait 200ms, and if we are still the
 * recovery owner, move to STAGE2, force an IDC unlock, clear the
 * lock-id and the recovery register.
 *
 * Returns QLA_SUCCESS, or the failing register-access status.
 */
int
qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
{
	int rval;
	uint32_t data;
	uint32_t idc_lck_rcvry_stage_mask = 0x3;	/* bits [1:0]: stage */
	uint32_t idc_lck_rcvry_owner_mask = 0x3c;	/* bits [5:2]: owner */
	struct qla_hw_data *ha = base_vha->hw;

	rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
	if (rval)
		return rval;

	if ((data & idc_lck_rcvry_stage_mask) > 0) {
		/* Recovery already in progress by some function. */
		return QLA_SUCCESS;
	} else {
		/* Claim STAGE1 with our port number in the owner field. */
		data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2);
		rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
		    data);
		if (rval)
			return rval;

		msleep(200);

		rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
		    &data);
		if (rval)
			return rval;

		if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) {
			/* NOTE(review): this mask expression ORs STAGE2 into
			 * the complement of the stage mask before ANDing --
			 * verify it yields the intended stage transition. */
			data &= (IDC_LOCK_RECOVERY_STAGE2 |
			    ~(idc_lck_rcvry_stage_mask));
			rval = qla83xx_wr_reg(base_vha,
			    QLA83XX_IDC_LOCK_RECOVERY, data);
			if (rval)
				return rval;

			/* Forcefully perform IDC UnLock */
			rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK,
			    &data);
			if (rval)
				return rval;
			/* Clear lock-id by setting 0xff */
			rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
			    0xff);
			if (rval)
				return rval;
			/* Clear lock-recovery by setting 0x0 */
			rval = qla83xx_wr_reg(base_vha,
			    QLA83XX_IDC_LOCK_RECOVERY, 0x0);
			if (rval)
				return rval;
		} else
			/* Another function won ownership of the recovery. */
			return QLA_SUCCESS;
	}

	return rval;
}
4015
4016int
4017qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
4018{
4019 int rval = QLA_SUCCESS;
4020 uint32_t o_drv_lockid, n_drv_lockid;
4021 unsigned long lock_recovery_timeout;
4022
4023 lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT;
4024retry_lockid:
4025 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid);
4026 if (rval)
4027 goto exit;
4028
4029 /* MAX wait time before forcing IDC Lock recovery = 2 secs */
4030 if (time_after_eq(jiffies, lock_recovery_timeout)) {
4031 if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS)
4032 return QLA_SUCCESS;
4033 else
4034 return QLA_FUNCTION_FAILED;
4035 }
4036
4037 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid);
4038 if (rval)
4039 goto exit;
4040
4041 if (o_drv_lockid == n_drv_lockid) {
4042 qla83xx_wait_logic();
4043 goto retry_lockid;
4044 } else
4045 return QLA_SUCCESS;
4046
4047exit:
4048 return rval;
4049}
4050
/*
 * Acquire the inter-driver-communication (IDC) lock for this adapter.
 *
 * Uses the driver-lock/lock-id remote registers: a read of DRIVER_LOCK
 * returning non-zero means we won the lock, and we record our port
 * number in DRIVER_LOCKID.  On contention, IDC-lock recovery is
 * attempted and the acquisition retried.
 *
 * The code after the first "return" is an alternate, mailbox-based
 * implementation that is intentionally unreachable (kept for reference,
 * see the XXX marker); `options` is only used by that variant.
 */
void
qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
	uint16_t options = (requester_id << 15) | BIT_6;
	uint32_t data;
	struct qla_hw_data *ha = base_vha->hw;

	/* IDC-lock implementation using driver-lock/lock-id remote registers */
retry_lock:
	if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
	    == QLA_SUCCESS) {
		if (data) {
			/* Setting lock-id to our function-number */
			qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
			    ha->portnum);
		} else {
			ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
			    "Failed to acquire IDC lock. retrying...\n");

			/* Retry/Perform IDC-Lock recovery */
			if (qla83xx_idc_lock_recovery(base_vha)
			    == QLA_SUCCESS) {
				qla83xx_wait_logic();
				goto retry_lock;
			} else
				ql_log(ql_log_warn, base_vha, 0xb075,
				    "IDC Lock recovery FAILED.\n");
		}

	}

	return;

	/* XXX: IDC-lock implementation using access-control mbx */
retry_lock2:
	if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
		ql_dbg(ql_dbg_p3p, base_vha, 0xb072,
		    "Failed to acquire IDC lock. retrying...\n");
		/* Retry/Perform IDC-Lock recovery */
		if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) {
			qla83xx_wait_logic();
			goto retry_lock2;
		} else
			ql_log(ql_log_warn, base_vha, 0xb076,
			    "IDC Lock recovery FAILED.\n");
	}

	return;
}
4100
4101void
4102qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
4103{
4104 uint16_t options = (requester_id << 15) | BIT_7, retry;
4105 uint32_t data;
4106 struct qla_hw_data *ha = base_vha->hw;
4107
4108 /* IDC-unlock implementation using driver-unlock/lock-id
4109 * remote registers
4110 */
4111 retry = 0;
4112retry_unlock:
4113 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
4114 == QLA_SUCCESS) {
4115 if (data == ha->portnum) {
4116 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
4117 /* Clearing lock-id by setting 0xff */
4118 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
4119 } else if (retry < 10) {
4120 /* SV: XXX: IDC unlock retrying needed here? */
4121
4122 /* Retry for IDC-unlock */
4123 qla83xx_wait_logic();
4124 retry++;
4125 ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
4126 "Failed to release IDC lock, retyring=%d\n", retry);
4127 goto retry_unlock;
4128 }
4129 } else if (retry < 10) {
4130 /* Retry for IDC-unlock */
4131 qla83xx_wait_logic();
4132 retry++;
4133 ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
4134 "Failed to read drv-lockid, retyring=%d\n", retry);
4135 goto retry_unlock;
4136 }
4137
4138 return;
4139
4140 /* XXX: IDC-unlock implementation using access-control mbx */
4141 retry = 0;
4142retry_unlock2:
4143 if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
4144 if (retry < 10) {
4145 /* Retry for IDC-unlock */
4146 qla83xx_wait_logic();
4147 retry++;
4148 ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
4149 "Failed to release IDC lock, retyring=%d\n", retry);
4150 goto retry_unlock2;
4151 }
4152 }
4153
4154 return;
4155}
4156
4157int
4158__qla83xx_set_drv_presence(scsi_qla_host_t *vha)
4159{
4160 int rval = QLA_SUCCESS;
4161 struct qla_hw_data *ha = vha->hw;
4162 uint32_t drv_presence;
4163
4164 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4165 if (rval == QLA_SUCCESS) {
4166 drv_presence |= (1 << ha->portnum);
4167 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
4168 drv_presence);
4169 }
4170
4171 return rval;
4172}
4173
4174int
4175qla83xx_set_drv_presence(scsi_qla_host_t *vha)
4176{
4177 int rval = QLA_SUCCESS;
4178
4179 qla83xx_idc_lock(vha, 0);
4180 rval = __qla83xx_set_drv_presence(vha);
4181 qla83xx_idc_unlock(vha, 0);
4182
4183 return rval;
4184}
4185
4186int
4187__qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
4188{
4189 int rval = QLA_SUCCESS;
4190 struct qla_hw_data *ha = vha->hw;
4191 uint32_t drv_presence;
4192
4193 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4194 if (rval == QLA_SUCCESS) {
4195 drv_presence &= ~(1 << ha->portnum);
4196 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
4197 drv_presence);
4198 }
4199
4200 return rval;
4201}
4202
4203int
4204qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
4205{
4206 int rval = QLA_SUCCESS;
4207
4208 qla83xx_idc_lock(vha, 0);
4209 rval = __qla83xx_clear_drv_presence(vha);
4210 qla83xx_idc_unlock(vha, 0);
4211
4212 return rval;
4213}
4214
/*
 * Handle the IDC NEED-RESET state: wait for every present function to
 * acknowledge the reset (DRV-ACK == DRV-PRESENCE), evicting laggards
 * from DRV-PRESENCE on timeout, then move the device to COLD.
 * Caller must hold the IDC lock; it is dropped while sleeping.
 */
void
qla83xx_need_reset_handler(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_ack, drv_presence;
	unsigned long ack_timeout;

	/* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
	ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
	while (1) {
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
		if (drv_ack == drv_presence)
			break;

		if (time_after_eq(jiffies, ack_timeout)) {
			ql_log(ql_log_warn, vha, 0xb067,
			    "RESET ACK TIMEOUT! drv_presence=0x%x "
			    "drv_ack=0x%x\n", drv_presence, drv_ack);
			/*
			 * The function(s) which did not ack in time are forced
			 * to withdraw any further participation in the IDC
			 * reset.
			 */
			if (drv_ack != drv_presence)
				qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
				    drv_ack);
			break;
		}

		/* Drop the IDC lock while sleeping so peers can ack. */
		qla83xx_idc_unlock(vha, 0);
		msleep(1000);
		qla83xx_idc_lock(vha, 0);
	}

	qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
	ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
}
4253
4254int
4255qla83xx_device_bootstrap(scsi_qla_host_t *vha)
4256{
4257 int rval = QLA_SUCCESS;
4258 uint32_t idc_control;
4259
4260 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
4261 ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");
4262
4263 /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
4264 __qla83xx_get_idc_control(vha, &idc_control);
4265 idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
4266 __qla83xx_set_idc_control(vha, 0);
4267
4268 qla83xx_idc_unlock(vha, 0);
4269 rval = qla83xx_restart_nic_firmware(vha);
4270 qla83xx_idc_lock(vha, 0);
4271
4272 if (rval != QLA_SUCCESS) {
4273 ql_log(ql_log_fatal, vha, 0xb06a,
4274 "Failed to restart NIC f/w.\n");
4275 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
4276 ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
4277 } else {
4278 ql_dbg(ql_dbg_p3p, vha, 0xb06c,
4279 "Success in restarting nic f/w.\n");
4280 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
4281 ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
4282 }
4283
4284 return rval;
4285}
4286
4287/* Assumes idc_lock always held on entry */
4288int
4289qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
4290{
4291 struct qla_hw_data *ha = base_vha->hw;
4292 int rval = QLA_SUCCESS;
4293 unsigned long dev_init_timeout;
4294 uint32_t dev_state;
4295
4296 /* Wait for MAX-INIT-TIMEOUT for the device to go ready */
4297 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
4298
4299 while (1) {
4300
4301 if (time_after_eq(jiffies, dev_init_timeout)) {
4302 ql_log(ql_log_warn, base_vha, 0xb06e,
4303 "Initialization TIMEOUT!\n");
4304 /* Init timeout. Disable further NIC Core
4305 * communication.
4306 */
4307 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
4308 QLA8XXX_DEV_FAILED);
4309 ql_log(ql_log_info, base_vha, 0xb06f,
4310 "HW State: FAILED.\n");
4311 }
4312
4313 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
4314 switch (dev_state) {
4315 case QLA8XXX_DEV_READY:
4316 if (ha->flags.nic_core_reset_owner)
4317 qla83xx_idc_audit(base_vha,
4318 IDC_AUDIT_COMPLETION);
4319 ha->flags.nic_core_reset_owner = 0;
4320 ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
4321 "Reset_owner reset by 0x%x.\n",
4322 ha->portnum);
4323 goto exit;
4324 case QLA8XXX_DEV_COLD:
4325 if (ha->flags.nic_core_reset_owner)
4326 rval = qla83xx_device_bootstrap(base_vha);
4327 else {
4328 /* Wait for AEN to change device-state */
4329 qla83xx_idc_unlock(base_vha, 0);
4330 msleep(1000);
4331 qla83xx_idc_lock(base_vha, 0);
4332 }
4333 break;
4334 case QLA8XXX_DEV_INITIALIZING:
4335 /* Wait for AEN to change device-state */
4336 qla83xx_idc_unlock(base_vha, 0);
4337 msleep(1000);
4338 qla83xx_idc_lock(base_vha, 0);
4339 break;
4340 case QLA8XXX_DEV_NEED_RESET:
4341 if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
4342 qla83xx_need_reset_handler(base_vha);
4343 else {
4344 /* Wait for AEN to change device-state */
4345 qla83xx_idc_unlock(base_vha, 0);
4346 msleep(1000);
4347 qla83xx_idc_lock(base_vha, 0);
4348 }
4349 /* reset timeout value after need reset handler */
4350 dev_init_timeout = jiffies +
4351 (ha->fcoe_dev_init_timeout * HZ);
4352 break;
4353 case QLA8XXX_DEV_NEED_QUIESCENT:
4354 /* XXX: DEBUG for now */
4355 qla83xx_idc_unlock(base_vha, 0);
4356 msleep(1000);
4357 qla83xx_idc_lock(base_vha, 0);
4358 break;
4359 case QLA8XXX_DEV_QUIESCENT:
4360 /* XXX: DEBUG for now */
4361 if (ha->flags.quiesce_owner)
4362 goto exit;
4363
4364 qla83xx_idc_unlock(base_vha, 0);
4365 msleep(1000);
4366 qla83xx_idc_lock(base_vha, 0);
4367 dev_init_timeout = jiffies +
4368 (ha->fcoe_dev_init_timeout * HZ);
4369 break;
4370 case QLA8XXX_DEV_FAILED:
4371 if (ha->flags.nic_core_reset_owner)
4372 qla83xx_idc_audit(base_vha,
4373 IDC_AUDIT_COMPLETION);
4374 ha->flags.nic_core_reset_owner = 0;
4375 __qla83xx_clear_drv_presence(base_vha);
4376 qla83xx_idc_unlock(base_vha, 0);
4377 qla8xxx_dev_failed_handler(base_vha);
4378 rval = QLA_FUNCTION_FAILED;
4379 qla83xx_idc_lock(base_vha, 0);
4380 goto exit;
4381 case QLA8XXX_BAD_VALUE:
4382 qla83xx_idc_unlock(base_vha, 0);
4383 msleep(1000);
4384 qla83xx_idc_lock(base_vha, 0);
4385 break;
4386 default:
4387 ql_log(ql_log_warn, base_vha, 0xb071,
4388 "Unknow Device State: %x.\n", dev_state);
4389 qla83xx_idc_unlock(base_vha, 0);
4390 qla8xxx_dev_failed_handler(base_vha);
4391 rval = QLA_FUNCTION_FAILED;
4392 qla83xx_idc_lock(base_vha, 0);
4393 goto exit;
4394 }
4395 }
4396
4397exit:
4398 return rval;
4399}
4400
3697/************************************************************************** 4401/**************************************************************************
3698* qla2x00_do_dpc 4402* qla2x00_do_dpc
3699* This kernel thread is a task that is schedule by the interrupt handler 4403* This kernel thread is a task that is schedule by the interrupt handler
@@ -3749,7 +4453,7 @@ qla2x00_do_dpc(void *data)
3749 &base_vha->dpc_flags)) { 4453 &base_vha->dpc_flags)) {
3750 qla82xx_idc_lock(ha); 4454 qla82xx_idc_lock(ha);
3751 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4455 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3752 QLA82XX_DEV_FAILED); 4456 QLA8XXX_DEV_FAILED);
3753 qla82xx_idc_unlock(ha); 4457 qla82xx_idc_unlock(ha);
3754 ql_log(ql_log_info, base_vha, 0x4004, 4458 ql_log(ql_log_info, base_vha, 0x4004,
3755 "HW State: FAILED.\n"); 4459 "HW State: FAILED.\n");
@@ -3819,14 +4523,21 @@ qla2x00_do_dpc(void *data)
3819 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 4523 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
3820 ql_dbg(ql_dbg_dpc, base_vha, 0x4009, 4524 ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
3821 "Quiescence mode scheduled.\n"); 4525 "Quiescence mode scheduled.\n");
3822 qla82xx_device_state_handler(base_vha); 4526 if (IS_QLA82XX(ha)) {
3823 clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags); 4527 qla82xx_device_state_handler(base_vha);
3824 if (!ha->flags.quiesce_owner) { 4528 clear_bit(ISP_QUIESCE_NEEDED,
3825 qla2x00_perform_loop_resync(base_vha); 4529 &base_vha->dpc_flags);
3826 4530 if (!ha->flags.quiesce_owner) {
3827 qla82xx_idc_lock(ha); 4531 qla2x00_perform_loop_resync(base_vha);
3828 qla82xx_clear_qsnt_ready(base_vha); 4532
3829 qla82xx_idc_unlock(ha); 4533 qla82xx_idc_lock(ha);
4534 qla82xx_clear_qsnt_ready(base_vha);
4535 qla82xx_idc_unlock(ha);
4536 }
4537 } else {
4538 clear_bit(ISP_QUIESCE_NEEDED,
4539 &base_vha->dpc_flags);
4540 qla2x00_quiesce_io(base_vha);
3830 } 4541 }
3831 ql_dbg(ql_dbg_dpc, base_vha, 0x400a, 4542 ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
3832 "Quiescence mode end.\n"); 4543 "Quiescence mode end.\n");
@@ -4326,7 +5037,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
4326 qla82xx_idc_lock(ha); 5037 qla82xx_idc_lock(ha);
4327 5038
4328 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 5039 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4329 QLA82XX_DEV_INITIALIZING); 5040 QLA8XXX_DEV_INITIALIZING);
4330 5041
4331 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, 5042 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
4332 QLA82XX_IDC_VERSION); 5043 QLA82XX_IDC_VERSION);
@@ -4350,12 +5061,12 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
4350 "HW State: FAILED.\n"); 5061 "HW State: FAILED.\n");
4351 qla82xx_clear_drv_active(ha); 5062 qla82xx_clear_drv_active(ha);
4352 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 5063 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4353 QLA82XX_DEV_FAILED); 5064 QLA8XXX_DEV_FAILED);
4354 } else { 5065 } else {
4355 ql_log(ql_log_info, base_vha, 0x900c, 5066 ql_log(ql_log_info, base_vha, 0x900c,
4356 "HW State: READY.\n"); 5067 "HW State: READY.\n");
4357 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 5068 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4358 QLA82XX_DEV_READY); 5069 QLA8XXX_DEV_READY);
4359 qla82xx_idc_unlock(ha); 5070 qla82xx_idc_unlock(ha);
4360 ha->flags.isp82xx_fw_hung = 0; 5071 ha->flags.isp82xx_fw_hung = 0;
4361 rval = qla82xx_restart_isp(base_vha); 5072 rval = qla82xx_restart_isp(base_vha);
@@ -4370,7 +5081,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
4370 "This devfn is not reset owner = 0x%x.\n", 5081 "This devfn is not reset owner = 0x%x.\n",
4371 ha->pdev->devfn); 5082 ha->pdev->devfn);
4372 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == 5083 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
4373 QLA82XX_DEV_READY)) { 5084 QLA8XXX_DEV_READY)) {
4374 ha->flags.isp82xx_fw_hung = 0; 5085 ha->flags.isp82xx_fw_hung = 0;
4375 rval = qla82xx_restart_isp(base_vha); 5086 rval = qla82xx_restart_isp(base_vha);
4376 qla82xx_idc_lock(ha); 5087 qla82xx_idc_lock(ha);
@@ -4495,6 +5206,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
4495 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) }, 5206 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
4496 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) }, 5207 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
4497 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, 5208 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
5209 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
4498 { 0 }, 5210 { 0 },
4499}; 5211};
4500MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 5212MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);