Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c  159
1 file changed, 56 insertions, 103 deletions
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8ab5babdeebc..01dfdc8696f8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -542,6 +542,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	 */
 	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
 
+
 	if (pring->ringno == LPFC_ELS_RING) {
 		lpfc_debugfs_slow_ring_trc(phba,
 			"IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
@@ -1259,68 +1260,6 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_sli_replace_hbqbuff: Replace the HBQ buffer with a new buffer.
- * @phba: Pointer to HBA context object.
- * @tag: Tag for the HBQ buffer.
- *
- * This function is called from unsolicited event handler code path to get the
- * HBQ buffer associated with an unsolicited iocb. This function is called with
- * no lock held. It returns the buffer associated with the given tag and posts
- * another buffer to the firmware. Note that the new buffer must be allocated
- * before taking the hbalock and that the hba lock must be held until it is
- * finished with the hbq entry swap.
- **/
-static struct lpfc_dmabuf *
-lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
-{
-	struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
-	uint32_t hbqno;
-	void *virt;	/* virtual address ptr */
-	dma_addr_t phys;	/* mapped address */
-	unsigned long flags;
-
-	hbqno = tag >> 16;
-	new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
-	/* Check whether HBQ is still in use */
-	spin_lock_irqsave(&phba->hbalock, flags);
-	if (!phba->hbq_in_use) {
-		if (new_hbq_entry)
-			(phba->hbqs[hbqno].hbq_free_buffer)(phba,
-							    new_hbq_entry);
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-		return NULL;
-	}
-
-	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
-	if (hbq_entry == NULL) {
-		if (new_hbq_entry)
-			(phba->hbqs[hbqno].hbq_free_buffer)(phba,
-							    new_hbq_entry);
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-		return NULL;
-	}
-	list_del(&hbq_entry->dbuf.list);
-
-	if (new_hbq_entry == NULL) {
-		list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-		return &hbq_entry->dbuf;
-	}
-	new_hbq_entry->tag = -1;
-	phys = new_hbq_entry->dbuf.phys;
-	virt = new_hbq_entry->dbuf.virt;
-	new_hbq_entry->dbuf.phys = hbq_entry->dbuf.phys;
-	new_hbq_entry->dbuf.virt = hbq_entry->dbuf.virt;
-	hbq_entry->dbuf.phys = phys;
-	hbq_entry->dbuf.virt = virt;
-	lpfc_sli_free_hbq(phba, hbq_entry);
-	list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
-	spin_unlock_irqrestore(&phba->hbalock, flags);
-
-	return &new_hbq_entry->dbuf;
-}
-
-/**
  * lpfc_sli_get_buff: Get the buffer associated with the buffer tag.
  * @phba: Pointer to HBA context object.
  * @pring: Pointer to driver SLI ring object.
@@ -1334,13 +1273,17 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
  **/
 static struct lpfc_dmabuf *
 lpfc_sli_get_buff(struct lpfc_hba *phba,
 		  struct lpfc_sli_ring *pring,
 		  uint32_t tag)
 {
+	struct hbq_dmabuf *hbq_entry;
+
 	if (tag & QUE_BUFTAG_BIT)
 		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
-	else
-		return lpfc_sli_replace_hbqbuff(phba, tag);
+	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
+	if (!hbq_entry)
+		return NULL;
+	return &hbq_entry->dbuf;
 }
 
 
@@ -1372,8 +1315,6 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	match = 0;
 	irsp = &(saveq->iocb);
 
-	if (irsp->ulpStatus == IOSTAT_NEED_BUFFER)
-		return 1;
 	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
 		if (pring->lpfc_sli_rcv_async_status)
 			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
@@ -1982,7 +1923,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
 		    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
-			lpfc_adjust_queue_depth(phba);
+			lpfc_rampdown_queue_depth(phba);
 			spin_lock_irqsave(&phba->hbalock, iflag);
 		}
 
@@ -2225,7 +2166,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
 			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
 			    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
 				spin_unlock_irqrestore(&phba->hbalock, iflag);
-				lpfc_adjust_queue_depth(phba);
+				lpfc_rampdown_queue_depth(phba);
 				spin_lock_irqsave(&phba->hbalock, iflag);
 			}
 
@@ -2790,7 +2731,6 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
 {
 	MAILBOX_t *mb;
 	struct lpfc_sli *psli;
-	uint16_t skip_post;
 	volatile uint32_t word0;
 	void __iomem *to_slim;
 
@@ -2815,13 +2755,10 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
 	readl(to_slim); /* flush */
 
 	/* Only skip post after fc_ffinit is completed */
-	if (phba->pport->port_state) {
-		skip_post = 1;
+	if (phba->pport->port_state)
 		word0 = 1;	/* This is really setting up word1 */
-	} else {
-		skip_post = 0;
+	else
 		word0 = 0;	/* This is really setting up word1 */
-	}
 	to_slim = phba->MBslimaddr + sizeof (uint32_t);
 	writel(*(uint32_t *) mb, to_slim);
 	readl(to_slim); /* flush */
@@ -2835,10 +2772,8 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
 	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
 	psli->stats_start = get_seconds();
 
-	if (skip_post)
-		mdelay(100);
-	else
-		mdelay(2000);
+	/* Give the INITFF and Post time to settle. */
+	mdelay(100);
 
 	lpfc_hba_down_post(phba);
 
@@ -3084,7 +3019,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
 		spin_unlock_irq(&phba->hbalock);
 		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
 		lpfc_sli_brdrestart(phba);
-		msleep(2500);
 		rc = lpfc_sli_chipset_init(phba);
 		if (rc)
 			break;
@@ -3111,7 +3045,8 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
 		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
 					LPFC_SLI3_HBQ_ENABLED |
 					LPFC_SLI3_CRP_ENABLED |
-					LPFC_SLI3_INB_ENABLED);
+					LPFC_SLI3_INB_ENABLED |
+					LPFC_SLI3_BG_ENABLED);
 	if (rc != MBX_SUCCESS) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"0442 Adapter failed to init, mbxCmd x%x "
@@ -3144,17 +3079,29 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
 			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
 		if (pmb->mb.un.varCfgPort.ginb) {
 			phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
+			phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
 			phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
 			phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
 			phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
 			phba->inb_last_counter =
 					phba->mbox->us.s3_inb_pgp.counter;
 		} else {
+			phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
 			phba->port_gp = phba->mbox->us.s3_pgp.port;
 			phba->inb_ha_copy = NULL;
 			phba->inb_counter = NULL;
 		}
+
+		if (phba->cfg_enable_bg) {
+			if (pmb->mb.un.varCfgPort.gbg)
+				phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
+			else
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"0443 Adapter did not grant "
+						"BlockGuard\n");
+		}
 	} else {
+		phba->hbq_get = NULL;
 		phba->port_gp = phba->mbox->us.s2.port;
 		phba->inb_ha_copy = NULL;
 		phba->inb_counter = NULL;
@@ -3305,10 +3252,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
 	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_sli_ring *pring;
 
-	if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
-		return;
-	}
-
 	/* Mbox cmd <mbxCommand> timeout */
 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
 		"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
@@ -4005,7 +3948,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
 	shost = lpfc_shost_from_vport(phba->pport);
 	fc_host_post_vendor_event(shost, fc_get_event_number(),
 		sizeof(temp_event_data), (char *) &temp_event_data,
-		SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+		LPFC_NL_VENDOR_ID);
 
 }
 
@@ -5184,6 +5127,10 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
 {
 	uint32_t ha_copy;
 
+	/* If PCI channel is offline, don't process it */
+	if (unlikely(pci_channel_offline(phba->pcidev)))
+		return 0;
+
 	/* If somebody is waiting to handle an eratt, don't process it
 	 * here. The brdkill function will do this.
 	 */
@@ -5242,6 +5189,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 	uint32_t ha_copy;
 	uint32_t work_ha_copy;
 	unsigned long status;
+	unsigned long iflag;
 	uint32_t control;
 
 	MAILBOX_t *mbox, *pmbox;
@@ -5274,7 +5222,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 		if (unlikely(phba->link_state < LPFC_LINK_DOWN))
 			return IRQ_NONE;
 		/* Need to read HA REG for slow-path events */
-		spin_lock(&phba->hbalock);
+		spin_lock_irqsave(&phba->hbalock, iflag);
 		ha_copy = readl(phba->HAregaddr);
 		/* If somebody is waiting to handle an eratt don't process it
 		 * here. The brdkill function will do this.
@@ -5294,7 +5242,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
 			phba->HAregaddr);
 		readl(phba->HAregaddr); /* flush */
-		spin_unlock(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
 	} else
 		ha_copy = phba->ha_copy;
 
@@ -5307,13 +5255,13 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 				 * Turn off Link Attention interrupts
 				 * until CLEAR_LA done
 				 */
-				spin_lock(&phba->hbalock);
+				spin_lock_irqsave(&phba->hbalock, iflag);
 				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
 				control = readl(phba->HCregaddr);
 				control &= ~HC_LAINT_ENA;
 				writel(control, phba->HCregaddr);
 				readl(phba->HCregaddr); /* flush */
-				spin_unlock(&phba->hbalock);
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
 			}
 			else
 				work_ha_copy &= ~HA_LATT;
@@ -5328,7 +5276,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 				(HA_RXMASK << (4*LPFC_ELS_RING)));
 			status >>= (4*LPFC_ELS_RING);
 			if (status & HA_RXMASK) {
-				spin_lock(&phba->hbalock);
+				spin_lock_irqsave(&phba->hbalock, iflag);
 				control = readl(phba->HCregaddr);
 
 				lpfc_debugfs_slow_ring_trc(phba,
@@ -5357,10 +5305,10 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 					(uint32_t)((unsigned long)
 					&phba->work_waitq));
 				}
-				spin_unlock(&phba->hbalock);
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
 			}
 		}
-		spin_lock(&phba->hbalock);
+		spin_lock_irqsave(&phba->hbalock, iflag);
 		if (work_ha_copy & HA_ERATT)
 			lpfc_sli_read_hs(phba);
 		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
@@ -5372,7 +5320,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 			/* First check out the status word */
 			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
 			if (pmbox->mbxOwner != OWN_HOST) {
-				spin_unlock(&phba->hbalock);
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
 				/*
 				 * Stray Mailbox Interrupt, mbxCommand <cmd>
 				 * mbxStatus <status>
@@ -5389,7 +5337,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 				work_ha_copy &= ~HA_MBATT;
 			} else {
 				phba->sli.mbox_active = NULL;
-				spin_unlock(&phba->hbalock);
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
 				phba->last_completion_time = jiffies;
 				del_timer(&phba->sli.mbox_tmo);
 				if (pmb->mbox_cmpl) {
@@ -5438,14 +5386,18 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 						goto send_current_mbox;
 					}
 				}
-				spin_lock(&phba->pport->work_port_lock);
+				spin_lock_irqsave(
+						&phba->pport->work_port_lock,
+						iflag);
 				phba->pport->work_port_events &=
 					~WORKER_MBOX_TMO;
-				spin_unlock(&phba->pport->work_port_lock);
+				spin_unlock_irqrestore(
+						&phba->pport->work_port_lock,
+						iflag);
 				lpfc_mbox_cmpl_put(phba, pmb);
 			}
 		} else
-			spin_unlock(&phba->hbalock);
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
 
 		if ((work_ha_copy & HA_MBATT) &&
 		    (phba->sli.mbox_active == NULL)) {
@@ -5461,9 +5413,9 @@ send_current_mbox:
5461 "MBX_SUCCESS"); 5413 "MBX_SUCCESS");
5462 } 5414 }
5463 5415
5464 spin_lock(&phba->hbalock); 5416 spin_lock_irqsave(&phba->hbalock, iflag);
5465 phba->work_ha |= work_ha_copy; 5417 phba->work_ha |= work_ha_copy;
5466 spin_unlock(&phba->hbalock); 5418 spin_unlock_irqrestore(&phba->hbalock, iflag);
5467 lpfc_worker_wake_up(phba); 5419 lpfc_worker_wake_up(phba);
5468 } 5420 }
5469 return IRQ_HANDLED; 5421 return IRQ_HANDLED;
@@ -5495,6 +5447,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
 	struct lpfc_hba *phba;
 	uint32_t ha_copy;
 	unsigned long status;
+	unsigned long iflag;
 
 	/* Get the driver's phba structure from the dev_id and
 	 * assume the HBA is not interrupting.
@@ -5520,11 +5473,11 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
 		/* Need to read HA REG for FCP ring and other ring events */
 		ha_copy = readl(phba->HAregaddr);
 		/* Clear up only attention source related to fast-path */
-		spin_lock(&phba->hbalock);
+		spin_lock_irqsave(&phba->hbalock, iflag);
 		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
 			phba->HAregaddr);
 		readl(phba->HAregaddr); /* flush */
-		spin_unlock(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
 	} else
 		ha_copy = phba->ha_copy;
 