author     Linus Torvalds <torvalds@linux-foundation.org>  2013-09-15 17:41:30 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-09-15 17:41:30 -0400
commit     0375ec5899a37e80af7be8e813548df3292325ff (patch)
tree       6f3e60e0430988255ea6507d2c018b06227f8349 /drivers/scsi/lpfc
parent     bff157b3ad4b9f6be0af6987fcd62deaf0f2b799 (diff)
parent     fc85799ee362e3ef13b8f2f5c687101490aaaa7e (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull misc SCSI driver updates from James Bottomley:
"This patch set is a set of driver updates (megaraid_sas, fnic, lpfc,
ufs, hpsa) we also have a couple of bug fixes (sd out of bounds and
ibmvfc error handling) and the first round of esas2r checker fixes and
finally the much anticipated big endian additions for megaraid_sas"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (47 commits)
[SCSI] fnic: fnic Driver Tuneables Exposed through CLI
[SCSI] fnic: Kernel panic while running sh/nosh with max lun cfg
[SCSI] fnic: Hitting BUG_ON(io_req->abts_done) in fnic_rport_exch_reset
[SCSI] fnic: Remove QUEUE_FULL handling code
[SCSI] fnic: On system with >1.1TB RAM, VIC fails multipath after boot up
[SCSI] fnic: FC stat param seconds_since_last_reset not getting updated
[SCSI] sd: Fix potential out-of-bounds access
[SCSI] lpfc 8.3.42: Update lpfc version to driver version 8.3.42
[SCSI] lpfc 8.3.42: Fixed issue of task management commands having a fixed timeout
[SCSI] lpfc 8.3.42: Fixed inconsistent spin lock usage.
[SCSI] lpfc 8.3.42: Fix driver's abort loop functionality to skip IOs already getting aborted
[SCSI] lpfc 8.3.42: Fixed failure to allocate SCSI buffer on PPC64 platform for SLI4 devices
[SCSI] lpfc 8.3.42: Fix WARN_ON when driver unloads
[SCSI] lpfc 8.3.42: Avoided making pci bar ioremap call during dual-chute WQ/RQ pci bar selection
[SCSI] lpfc 8.3.42: Fixed driver iocbq structure's iocb_flag field running out of space
[SCSI] lpfc 8.3.42: Fix crash on driver load due to cpu affinity logic
[SCSI] lpfc 8.3.42: Fixed logging format of setting driver sysfs attributes hard to interpret
[SCSI] lpfc 8.3.42: Fixed back to back RSCNs discovery failure.
[SCSI] lpfc 8.3.42: Fixed race condition between BSG I/O dispatch and timeout handling
[SCSI] lpfc 8.3.42: Fixed function mode field defined too small for not recognizing dual-chute mode
...
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h          |  1
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c     | 19
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c      | 90
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c     | 92
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c     | 55
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c      | 33
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h      |  4
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h     |  2
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h  |  2
10 files changed, 199 insertions, 110 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index df43bfe6d573..4e1b75ca7451 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -708,6 +708,7 @@ struct lpfc_hba {
 	uint32_t cfg_multi_ring_type;
 	uint32_t cfg_poll;
 	uint32_t cfg_poll_tmo;
+	uint32_t cfg_task_mgmt_tmo;
 	uint32_t cfg_use_msi;
 	uint32_t cfg_fcp_imax;
 	uint32_t cfg_fcp_cpu_map;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 16498e030c70..00656fc92b93 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1865,8 +1865,10 @@ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
 { \
 	if (val >= minval && val <= maxval) {\
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
-			"3053 lpfc_" #attr " changed from %d to %d\n", \
-			vport->cfg_##attr, val); \
+			"3053 lpfc_" #attr \
+			" changed from %d (x%x) to %d (x%x)\n", \
+			vport->cfg_##attr, vport->cfg_##attr, \
+			val, val); \
 		vport->cfg_##attr = val;\
 		return 0;\
 	}\
@@ -4011,8 +4013,11 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
 # For [0], FCP commands are issued to Work Queues ina round robin fashion.
 # For [1], FCP commands are issued to a Work Queue associated with the
 # current CPU.
+# It would be set to 1 by the driver if it's able to set up cpu affinity
+# for FCP I/Os through Work Queue associated with the current CPU. Otherwise,
+# roundrobin scheduling of FCP I/Os through WQs will be used.
 */
-LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for "
+LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
 		"issuing commands [0] - Round Robin, [1] - Current CPU");
 
 /*
@@ -4110,6 +4115,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
 	     "Milliseconds driver will wait between polling FCP ring");
 
 /*
+# lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
+# to complete in seconds. Value range is [5,180], default value is 60.
+*/
+LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
+	     "Maximum time to wait for task management commands to complete");
+/*
 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
 # support this feature
 # 0 = MSI disabled
@@ -4295,6 +4306,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_issue_reset,
 	&dev_attr_lpfc_poll,
 	&dev_attr_lpfc_poll_tmo,
+	&dev_attr_lpfc_task_mgmt_tmo,
 	&dev_attr_lpfc_use_msi,
 	&dev_attr_lpfc_fcp_imax,
 	&dev_attr_lpfc_fcp_cpu_map,
@@ -5274,6 +5286,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_topology_init(phba, lpfc_topology);
 	lpfc_link_speed_init(phba, lpfc_link_speed);
 	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
+	lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
 	lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
 	lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
 	lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
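The lpfc_##attr##_set() macro body shown in the first hunk is the entire sysfs write path for the new lpfc_task_mgmt_tmo attribute: an in-range value is logged and stored, anything else is rejected. A minimal standalone C sketch of that range check follows; the function name, the plain printf() standing in for lpfc_printf_vlog(), and the out-of-range branch (which is not part of the hunk above) are illustrative assumptions, not the driver's code.

/*
 * Simplified, hypothetical model of the range-checked setter that the
 * LPFC_ATTR_RW()/lpfc_##attr##_set() macro family generates for
 * lpfc_task_mgmt_tmo. Names and messages are illustrative only.
 */
#include <stdio.h>

#define TASK_MGMT_TMO_MIN 5	/* matches LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180, ...) */
#define TASK_MGMT_TMO_MAX 180
#define TASK_MGMT_TMO_DEF 60

static int task_mgmt_tmo_set(unsigned int *cfg, unsigned int val)
{
	if (val >= TASK_MGMT_TMO_MIN && val <= TASK_MGMT_TMO_MAX) {
		/* accepted: log old and new value, then store it */
		printf("lpfc_task_mgmt_tmo changed from %u (0x%x) to %u (0x%x)\n",
		       *cfg, *cfg, val, val);
		*cfg = val;
		return 0;
	}
	/* assumed reject path; the driver would return -EINVAL here */
	printf("lpfc_task_mgmt_tmo cannot be set to %u, allowed range is [%d, %d]\n",
	       val, TASK_MGMT_TMO_MIN, TASK_MGMT_TMO_MAX);
	return -1;
}

int main(void)
{
	unsigned int cfg_task_mgmt_tmo = TASK_MGMT_TMO_DEF;

	task_mgmt_tmo_set(&cfg_task_mgmt_tmo, 90);	/* accepted */
	task_mgmt_tmo_set(&cfg_task_mgmt_tmo, 300);	/* rejected, out of range */
	return 0;
}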
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 79c13c3263f1..b92aec989d60 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -317,6 +317,11 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
 	}
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
 	iocb = &dd_data->context_un.iocb;
 	ndlp = iocb->ndlp;
 	rmp = iocb->rmp;
@@ -387,6 +392,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 	int request_nseg;
 	int reply_nseg;
 	struct bsg_job_data *dd_data;
+	unsigned long flags;
 	uint32_t creg_val;
 	int rc = 0;
 	int iocb_stat;
@@ -501,14 +507,24 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 	}
 
 	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
-	if (iocb_stat == IOCB_SUCCESS)
+
+	if (iocb_stat == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O had not been completed yet */
+		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return 0; /* done for now */
-	else if (iocb_stat == IOCB_BUSY)
+	} else if (iocb_stat == IOCB_BUSY) {
 		rc = -EAGAIN;
-	else
+	} else {
 		rc = -EIO;
+	}
 
 	/* iocb failed so cleanup */
+	job->dd_data = NULL;
 
 free_rmp:
 	lpfc_free_bsg_buffers(phba, rmp);
@@ -577,6 +593,11 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
 	}
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
 	rsp = &rspiocbq->iocb;
 	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
 	prsp = (struct lpfc_dmabuf *)pcmd->list.next;
@@ -639,6 +660,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 	struct lpfc_iocbq *cmdiocbq;
 	uint16_t rpi = 0;
 	struct bsg_job_data *dd_data;
+	unsigned long flags;
 	uint32_t creg_val;
 	int rc = 0;
 
@@ -721,15 +743,25 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
 
-	if (rc == IOCB_SUCCESS)
+	if (rc == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O had not been completed/released */
+		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return 0; /* done for now */
-	else if (rc == IOCB_BUSY)
+	} else if (rc == IOCB_BUSY) {
 		rc = -EAGAIN;
-	else
+	} else {
 		rc = -EIO;
+	}
 
-linkdown_err:
+	/* iocb failed so cleanup */
+	job->dd_data = NULL;
 
+linkdown_err:
 	cmdiocbq->context1 = ndlp;
 	lpfc_els_free_iocb(phba, cmdiocbq);
 
@@ -1249,7 +1281,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
 	struct lpfc_hba *phba = vport->phba;
 	struct get_ct_event *event_req;
 	struct get_ct_event_reply *event_reply;
-	struct lpfc_bsg_event *evt;
+	struct lpfc_bsg_event *evt, *evt_next;
 	struct event_data *evt_dat = NULL;
 	unsigned long flags;
 	uint32_t rc = 0;
@@ -1269,7 +1301,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
 	event_reply = (struct get_ct_event_reply *)
 		job->reply->reply_data.vendor_reply.vendor_rsp;
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
-	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
 		if (evt->reg_id == event_req->ev_reg_id) {
 			if (list_empty(&evt->events_to_get))
 				break;
@@ -1370,6 +1402,11 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
 	}
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
 	ndlp = dd_data->context_un.iocb.ndlp;
 	cmp = cmdiocbq->context2;
 	bmp = cmdiocbq->context3;
@@ -1433,6 +1470,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
 	int rc = 0;
 	struct lpfc_nodelist *ndlp = NULL;
 	struct bsg_job_data *dd_data;
+	unsigned long flags;
 	uint32_t creg_val;
 
 	/* allocate our bsg tracking structure */
@@ -1542,8 +1580,19 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
 
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
 
-	if (rc == IOCB_SUCCESS)
+	if (rc == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O had not been completed/released */
+		if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return 0; /* done for now */
+	}
+
+	/* iocb failed so cleanup */
+	job->dd_data = NULL;
 
 issue_ct_rsp_exit:
 	lpfc_sli_release_iocbq(phba, ctiocb);
@@ -5284,9 +5333,15 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 		 * remove it from the txq queue and call cancel iocbs.
 		 * Otherwise, call abort iotag
 		 */
-
 		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
-		spin_lock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O abort window is still open */
+		if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			return -EAGAIN;
+		}
 		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
 					 list) {
 			if (check_iocb == cmdiocb) {
@@ -5296,8 +5351,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 		}
 		if (list_empty(&completions))
 			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
-		spin_unlock_irq(&phba->hbalock);
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		if (!list_empty(&completions)) {
 			lpfc_sli_cancel_iocbs(phba, &completions,
 					      IOSTAT_LOCAL_REJECT,
@@ -5321,9 +5375,10 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 		 * remove it from the txq queue and call cancel iocbs.
 		 * Otherwise, call abort iotag.
 		 */
-
 		cmdiocb = dd_data->context_un.menlo.cmdiocbq;
-		spin_lock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+		spin_lock_irqsave(&phba->hbalock, flags);
 		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
 					 list) {
 			if (check_iocb == cmdiocb) {
@@ -5333,8 +5388,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 		}
 		if (list_empty(&completions))
 			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
-		spin_unlock_irq(&phba->hbalock);
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		if (!list_empty(&completions)) {
 			lpfc_sli_cancel_iocbs(phba, &completions,
 					      IOSTAT_LOCAL_REJECT,
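The lpfc_bsg.c hunks above all implement one handshake around the new LPFC_IO_CMD_OUTSTANDING bit: the submit path opens an "abort window" under phba->hbalock only while the iocb is still owned by the bsg path, the completion handlers close it, and lpfc_bsg_timeout() refuses to abort (returning -EAGAIN) once the window has closed. Below is a minimal, self-contained userspace sketch of that handshake; a pthread mutex stands in for the driver's spinlock, and every name is illustrative rather than driver code.

/* Userspace model of the BSG timeout "abort window"; compile with -pthread. */
#include <pthread.h>
#include <stdio.h>

#define IO_LIBDFC          0x0001  /* request still owned by the bsg path */
#define IO_CMD_OUTSTANDING 0x2000  /* timeout handler abort window open   */

struct fake_iocb {
	unsigned int flag;
};

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;

static void submit_done(struct fake_iocb *iocb)
{
	pthread_mutex_lock(&hbalock);
	/* open the abort window only if completion has not already run */
	if (iocb->flag & IO_LIBDFC)
		iocb->flag |= IO_CMD_OUTSTANDING;
	pthread_mutex_unlock(&hbalock);
}

static void completion(struct fake_iocb *iocb)
{
	pthread_mutex_lock(&hbalock);
	/* close the abort window; a later timeout must not touch this iocb */
	iocb->flag &= ~IO_CMD_OUTSTANDING;
	pthread_mutex_unlock(&hbalock);
}

static int timeout_handler(struct fake_iocb *iocb)
{
	pthread_mutex_lock(&hbalock);
	if (!(iocb->flag & IO_CMD_OUTSTANDING)) {
		pthread_mutex_unlock(&hbalock);
		return -1;	/* nothing to abort; the driver returns -EAGAIN */
	}
	printf("aborting outstanding iocb\n");
	iocb->flag &= ~IO_CMD_OUTSTANDING;
	pthread_mutex_unlock(&hbalock);
	return 0;
}

int main(void)
{
	/* drive the three paths in sequence just to show the state machine */
	struct fake_iocb iocb = { .flag = IO_LIBDFC };

	submit_done(&iocb);		/* window opens  */
	completion(&iocb);		/* window closes */
	return timeout_handler(&iocb) ? 0 : 1;	/* timeout sees nothing to abort */
}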
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 60d6ca2f68c2..7801601aa5d9 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4437,6 +4437,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	if (!ndlp)
 		return;
 	lpfc_issue_els_logo(vport, ndlp, 0);
+	mempool_free(pmb, phba->mbox_mem_pool);
 }
 
 /*
@@ -4456,7 +4457,15 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	int rc;
 	uint16_t rpi;
 
-	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+	if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
+	    ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
+		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+					 "3366 RPI x%x needs to be "
+					 "unregistered nlp_flag x%x "
+					 "did x%x\n",
+					 ndlp->nlp_rpi, ndlp->nlp_flag,
+					 ndlp->nlp_DID);
 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 		if (mbox) {
 			/* SLI4 ports require the physical rpi value. */
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 501147c4a147..647f5bfb3bd3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3031,10 +3031,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
 			phba->sli4_hba.scsi_xri_max);
 
 	spin_lock_irq(&phba->scsi_buf_list_get_lock);
-	spin_lock_irq(&phba->scsi_buf_list_put_lock);
+	spin_lock(&phba->scsi_buf_list_put_lock);
 	list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
 	list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
-	spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+	spin_unlock(&phba->scsi_buf_list_put_lock);
 	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
 	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
@@ -3070,10 +3070,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
 		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
 	}
 	spin_lock_irq(&phba->scsi_buf_list_get_lock);
-	spin_lock_irq(&phba->scsi_buf_list_put_lock);
+	spin_lock(&phba->scsi_buf_list_put_lock);
 	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
 	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-	spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+	spin_unlock(&phba->scsi_buf_list_put_lock);
 	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
 	return 0;
@@ -4859,6 +4859,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	struct lpfc_mqe *mqe;
 	int longs;
 
+	/* Get all the module params for configuring this host */
+	lpfc_get_cfgparam(phba);
+
 	/* Before proceed, wait for POST done and device ready */
 	rc = lpfc_sli4_post_status_check(phba);
 	if (rc)
@@ -4902,15 +4905,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 			sizeof(struct lpfc_mbox_ext_buf_ctx));
 	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
 
-	/*
-	 * We need to do a READ_CONFIG mailbox command here before
-	 * calling lpfc_get_cfgparam. For VFs this will report the
-	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
-	 * All of the resources allocated
-	 * for this Port are tied to these values.
-	 */
-	/* Get all the module params for configuring this host */
-	lpfc_get_cfgparam(phba);
 	phba->max_vpi = LPFC_MAX_VPI;
 
 	/* This will be set to correct value after the read_config mbox */
@@ -7141,19 +7135,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 		phba->sli4_hba.fcp_wq = NULL;
 	}
 
-	if (phba->pci_bar0_memmap_p) {
-		iounmap(phba->pci_bar0_memmap_p);
-		phba->pci_bar0_memmap_p = NULL;
-	}
-	if (phba->pci_bar2_memmap_p) {
-		iounmap(phba->pci_bar2_memmap_p);
-		phba->pci_bar2_memmap_p = NULL;
-	}
-	if (phba->pci_bar4_memmap_p) {
-		iounmap(phba->pci_bar4_memmap_p);
-		phba->pci_bar4_memmap_p = NULL;
-	}
-
 	/* Release FCP CQ mapping array */
 	if (phba->sli4_hba.fcp_cq_map != NULL) {
 		kfree(phba->sli4_hba.fcp_cq_map);
@@ -7942,9 +7923,9 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 	 * particular PCI BARs regions is dependent on the type of
 	 * SLI4 device.
 	 */
-	if (pci_resource_start(pdev, 0)) {
-		phba->pci_bar0_map = pci_resource_start(pdev, 0);
-		bar0map_len = pci_resource_len(pdev, 0);
+	if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
+		phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
+		bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
 
 		/*
 		 * Map SLI4 PCI Config Space Register base to a kernel virtual
@@ -7958,6 +7939,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 				   "registers.\n");
 			goto out;
 		}
+		phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
 		/* Set up BAR0 PCI config space register memory map */
 		lpfc_sli4_bar0_register_memmap(phba, if_type);
 	} else {
@@ -7980,13 +7962,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 	}
 
 	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
-	    (pci_resource_start(pdev, 2))) {
+	    (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
 		/*
 		 * Map SLI4 if type 0 HBA Control Register base to a kernel
 		 * virtual address and setup the registers.
 		 */
-		phba->pci_bar1_map = pci_resource_start(pdev, 2);
-		bar1map_len = pci_resource_len(pdev, 2);
+		phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
+		bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
 		phba->sli4_hba.ctrl_regs_memmap_p =
 			ioremap(phba->pci_bar1_map, bar1map_len);
 		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
@@ -7994,17 +7976,18 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 			   "ioremap failed for SLI4 HBA control registers.\n");
 			goto out_iounmap_conf;
 		}
+		phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
 		lpfc_sli4_bar1_register_memmap(phba);
 	}
 
 	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
-	    (pci_resource_start(pdev, 4))) {
+	    (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
 		/*
 		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
 		 * virtual address and setup the registers.
 		 */
-		phba->pci_bar2_map = pci_resource_start(pdev, 4);
-		bar2map_len = pci_resource_len(pdev, 4);
+		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
+		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
 		phba->sli4_hba.drbl_regs_memmap_p =
 			ioremap(phba->pci_bar2_map, bar2map_len);
 		if (!phba->sli4_hba.drbl_regs_memmap_p) {
@@ -8012,6 +7995,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 			   "ioremap failed for SLI4 HBA doorbell registers.\n");
 			goto out_iounmap_ctrl;
 		}
+		phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
 		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
 		if (error)
 			goto out_iounmap_all;
@@ -8405,7 +8389,8 @@ static int
 lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 {
 	int i, idx, saved_chann, used_chann, cpu, phys_id;
-	int max_phys_id, num_io_channel, first_cpu;
+	int max_phys_id, min_phys_id;
+	int num_io_channel, first_cpu, chan;
 	struct lpfc_vector_map_info *cpup;
 #ifdef CONFIG_X86
 	struct cpuinfo_x86 *cpuinfo;
@@ -8423,6 +8408,7 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 				phba->sli4_hba.num_present_cpu));
 
 	max_phys_id = 0;
+	min_phys_id = 0xff;
 	phys_id = 0;
 	num_io_channel = 0;
 	first_cpu = LPFC_VECTOR_MAP_EMPTY;
@@ -8446,9 +8432,12 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 
 		if (cpup->phys_id > max_phys_id)
 			max_phys_id = cpup->phys_id;
+		if (cpup->phys_id < min_phys_id)
+			min_phys_id = cpup->phys_id;
 		cpup++;
 	}
 
+	phys_id = min_phys_id;
 	/* Now associate the HBA vectors with specific CPUs */
 	for (idx = 0; idx < vectors; idx++) {
 		cpup = phba->sli4_hba.cpu_map;
@@ -8459,13 +8448,25 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 		for (i = 1; i < max_phys_id; i++) {
 			phys_id++;
 			if (phys_id > max_phys_id)
-				phys_id = 0;
+				phys_id = min_phys_id;
 			cpu = lpfc_find_next_cpu(phba, phys_id);
 			if (cpu == LPFC_VECTOR_MAP_EMPTY)
 				continue;
 			goto found;
 		}
 
+		/* Use round robin for scheduling */
+		phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
+		chan = 0;
+		cpup = phba->sli4_hba.cpu_map;
+		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+			cpup->channel_id = chan;
+			cpup++;
+			chan++;
+			if (chan >= phba->cfg_fcp_io_channel)
+				chan = 0;
+		}
+
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3329 Cannot set affinity:"
 				"Error mapping vector %d (%d)\n",
@@ -8503,7 +8504,7 @@ found:
 		/* Spread vector mapping across multple physical CPU nodes */
 		phys_id++;
 		if (phys_id > max_phys_id)
-			phys_id = 0;
+			phys_id = min_phys_id;
 	}
 
 	/*
@@ -8513,7 +8514,7 @@ found:
 	 * Base the remaining IO channel assigned, to IO channels already
 	 * assigned to other CPUs on the same phys_id.
 	 */
-	for (i = 0; i <= max_phys_id; i++) {
+	for (i = min_phys_id; i <= max_phys_id; i++) {
 		/*
 		 * If there are no io channels already mapped to
 		 * this phys_id, just round robin thru the io_channels.
@@ -8595,10 +8596,11 @@ out:
 	if (num_io_channel != phba->sli4_hba.num_present_cpu)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3333 Set affinity mismatch:"
-				"%d chann != %d cpus: %d vactors\n",
+				"%d chann != %d cpus: %d vectors\n",
 				num_io_channel, phba->sli4_hba.num_present_cpu,
 				vectors);
 
+	/* Enable using cpu affinity for scheduling */
 	phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
 	return 1;
 }
@@ -8689,9 +8691,12 @@ enable_msix_vectors:
 
 cfg_fail_out:
 	/* free the irq already requested */
-	for (--index; index >= 0; index--)
+	for (--index; index >= 0; index--) {
+		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
+					  vector, NULL);
 		free_irq(phba->sli4_hba.msix_entries[index].vector,
 			 &phba->sli4_hba.fcp_eq_hdl[index]);
+	}
 
 msi_fail_out:
 	/* Unconfigure MSI-X capability structure */
@@ -8712,9 +8717,12 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
 	int index;
 
 	/* Free up MSI-X multi-message vectors */
-	for (index = 0; index < phba->cfg_fcp_io_channel; index++)
+	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
+		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
+					  vector, NULL);
 		free_irq(phba->sli4_hba.msix_entries[index].vector,
 			 &phba->sli4_hba.fcp_eq_hdl[index]);
+	}
 
 	/* Disable MSI-X */
 	pci_disable_msix(phba->pcidev);
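When lpfc_sli4_set_affinity() cannot map a vector to a CPU, the fallback added above switches the driver back to round-robin scheduling and still gives every present CPU a channel id by cycling through the configured number of IO channels. A small standalone sketch of that assignment loop follows; the plain array and the sample counts stand in for the phba->sli4_hba.cpu_map entries and are illustrative only.

/* Round-robin channel assignment, modeled after the fallback loop above. */
#include <stdio.h>

int main(void)
{
	unsigned int num_present_cpu = 8;	/* stand-in for sli4_hba.num_present_cpu */
	unsigned int fcp_io_channel = 4;	/* stand-in for cfg_fcp_io_channel */
	unsigned int channel_id[8];
	unsigned int i, chan = 0;

	for (i = 0; i < num_present_cpu; i++) {
		channel_id[i] = chan;		/* every CPU gets some channel */
		chan++;
		if (chan >= fcp_io_channel)	/* wrap around the IO channels */
			chan = 0;
	}

	for (i = 0; i < num_present_cpu; i++)
		printf("cpu %u -> channel %u\n", i, channel_id[i]);
	return 0;
}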
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 1242b6c4308b..c913e8cc3b26 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -926,10 +926,10 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
 
 	/* get all SCSI buffers need to repost to a local list */
 	spin_lock_irq(&phba->scsi_buf_list_get_lock);
-	spin_lock_irq(&phba->scsi_buf_list_put_lock);
+	spin_lock(&phba->scsi_buf_list_put_lock);
 	list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
 	list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
-	spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+	spin_unlock(&phba->scsi_buf_list_put_lock);
 	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
 	/* post the list of scsi buffer sgls to port if available */
@@ -1000,9 +1000,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 		}
 		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
 
-		/* Page alignment is CRITICAL, double check to be sure */
-		if (((unsigned long)(psb->data) &
-		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
+		/*
+		 * 4K Page alignment is CRITICAL to BlockGuard, double check
+		 * to be sure.
+		 */
+		if (phba->cfg_enable_bg && (((unsigned long)(psb->data) &
+		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
 			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
 				      psb->data, psb->dma_handle);
 			kfree(psb);
@@ -1134,22 +1137,21 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
 	struct lpfc_scsi_buf * lpfc_cmd = NULL;
 	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
-	unsigned long gflag = 0;
-	unsigned long pflag = 0;
+	unsigned long iflag = 0;
 
-	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
 	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
 			 list);
 	if (!lpfc_cmd) {
-		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+		spin_lock(&phba->scsi_buf_list_put_lock);
 		list_splice(&phba->lpfc_scsi_buf_list_put,
 			    &phba->lpfc_scsi_buf_list_get);
 		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
 		list_remove_head(scsi_buf_list_get, lpfc_cmd,
 				 struct lpfc_scsi_buf, list);
-		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
+		spin_unlock(&phba->scsi_buf_list_put_lock);
 	}
-	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
 	return  lpfc_cmd;
 }
 /**
@@ -1167,11 +1169,10 @@ static struct lpfc_scsi_buf*
 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
 	struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
-	unsigned long gflag = 0;
-	unsigned long pflag = 0;
+	unsigned long iflag = 0;
 	int found = 0;
 
-	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
 	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
 				 &phba->lpfc_scsi_buf_list_get, list) {
 		if (lpfc_test_rrq_active(phba, ndlp,
@@ -1182,11 +1183,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 		break;
 	}
 	if (!found) {
-		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+		spin_lock(&phba->scsi_buf_list_put_lock);
 		list_splice(&phba->lpfc_scsi_buf_list_put,
 			    &phba->lpfc_scsi_buf_list_get);
 		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
+		spin_unlock(&phba->scsi_buf_list_put_lock);
 		list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
 					 &phba->lpfc_scsi_buf_list_get, list) {
 			if (lpfc_test_rrq_active(
@@ -1197,7 +1198,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 				break;
 			}
 		}
-	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
 	if (!found)
 		return NULL;
 	return  lpfc_cmd;
@@ -3966,11 +3967,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 
 	/*
 	 * Check SLI validation that all the transfer was actually done
-	 * (fcpi_parm should be zero).
+	 * (fcpi_parm should be zero). Apply check only to reads.
 	 */
-	} else if (fcpi_parm) {
+	} else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
-				 "9029 FCP Data Transfer Check Error: "
+				 "9029 FCP Read Check Error Data: "
 				 "x%x x%x x%x x%x x%x\n",
 				 be32_to_cpu(fcpcmd->fcpDl),
 				 be32_to_cpu(fcprsp->rspResId),
@@ -4342,6 +4343,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	char tag[2];
 	uint8_t *ptr;
 	bool sli4;
+	uint32_t fcpdl;
 
 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
 		return;
@@ -4389,8 +4391,12 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 			iocb_cmd->ulpPU = PARM_READ_CHECK;
 			if (vport->cfg_first_burst_size &&
 			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
-				piocbq->iocb.un.fcpi.fcpi_XRdy =
-					vport->cfg_first_burst_size;
+				fcpdl = scsi_bufflen(scsi_cmnd);
+				if (fcpdl < vport->cfg_first_burst_size)
+					piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
+				else
+					piocbq->iocb.un.fcpi.fcpi_XRdy =
+						vport->cfg_first_burst_size;
 			}
 			fcp_cmnd->fcpCntl3 = WRITE_DATA;
 			phba->fc4OutputRequests++;
@@ -4878,6 +4884,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 		goto out_unlock;
 	}
 
+	/* Indicate the IO is being aborted by the driver. */
+	iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+
 	/*
 	 * The scsi command can not be in txq and it is in flight because the
 	 * pCmd is still pointig at the SCSI command we have to abort. There
@@ -5006,7 +5015,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
 	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
 	if (lpfc_cmd == NULL)
 		return FAILED;
-	lpfc_cmd->timeout = 60;
+	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
 	lpfc_cmd->rdata = rdata;
 
 	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
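The lpfc_scsi_prep_cmnd() hunk above stops the driver from advertising a first-burst length larger than the actual write: fcpi_XRdy now gets the smaller of the transfer length and vport->cfg_first_burst_size. A tiny standalone sketch of that clamp follows; the helper name and the sample sizes are illustrative, not driver code.

#include <stdio.h>

/* Clamp the advertised FCP first burst (fcpi_XRdy in the driver) to the
 * real transfer length, mirroring the change in lpfc_scsi_prep_cmnd(). */
static unsigned int first_burst_xrdy(unsigned int fcpdl,
				     unsigned int cfg_first_burst_size)
{
	return fcpdl < cfg_first_burst_size ? fcpdl : cfg_first_burst_size;
}

int main(void)
{
	/* 2 KB write with a 4 KB configured first burst: advertise only 2 KB */
	printf("%u\n", first_burst_xrdy(2048, 4096));	/* prints 2048 */
	/* 64 KB write: advertise the configured 4 KB first burst */
	printf("%u\n", first_burst_xrdy(65536, 4096));	/* prints 4096 */
	return 0;
}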
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 0392e114531c..612f48973ff2 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -9831,6 +9831,13 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
 				   abort_cmd) != 0)
 			continue;
 
+		/*
+		 * If the iocbq is already being aborted, don't take a second
+		 * action, but do count it.
+		 */
+		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+			continue;
+
 		/* issue ABTS for this IOCB based on iotag */
 		abtsiocb = lpfc_sli_get_iocbq(phba);
 		if (abtsiocb == NULL) {
@@ -9838,6 +9845,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
 			continue;
 		}
 
+		/* indicate the IO is being aborted by the driver. */
+		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
 		cmd = &iocbq->iocb;
 		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
 		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
@@ -9847,7 +9857,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
 		abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
 		abtsiocb->iocb.ulpLe = 1;
 		abtsiocb->iocb.ulpClass = cmd->ulpClass;
-		abtsiocb->vport = phba->pport;
+		abtsiocb->vport = vport;
 
 		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
 		abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
@@ -12233,7 +12243,6 @@ static void __iomem *
 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
 {
 	struct pci_dev *pdev;
-	unsigned long bar_map, bar_map_len;
 
 	if (!phba->pcidev)
 		return NULL;
@@ -12242,25 +12251,10 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
 
 	switch (pci_barset) {
 	case WQ_PCI_BAR_0_AND_1:
-		if (!phba->pci_bar0_memmap_p) {
-			bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
-			bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
-			phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
-		}
 		return phba->pci_bar0_memmap_p;
 	case WQ_PCI_BAR_2_AND_3:
-		if (!phba->pci_bar2_memmap_p) {
-			bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
-			bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
-			phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
-		}
 		return phba->pci_bar2_memmap_p;
 	case WQ_PCI_BAR_4_AND_5:
-		if (!phba->pci_bar4_memmap_p) {
-			bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
-			bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
-			phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
-		}
 		return phba->pci_bar4_memmap_p;
 	default:
 		break;
@@ -15808,7 +15802,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
 void
 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
 {
-	struct lpfc_fcf_pri *fcf_pri;
+	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
 	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 				"2762 FCF (x%x) reached driver's book "
@@ -15818,7 +15812,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
 	}
 	/* Clear the eligible FCF record index bmask */
 	spin_lock_irq(&phba->hbalock);
-	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
+				 list) {
 		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
 			list_del_init(&fcf_pri->list);
 			break;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 97617996206d..6b0f2478706e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -58,7 +58,7 @@ struct lpfc_iocbq {
 
 	IOCB_t iocb;		/* IOCB cmd */
 	uint8_t retry;		/* retry counter for IOCB cmd - if needed */
-	uint16_t iocb_flag;
+	uint32_t iocb_flag;
 #define LPFC_IO_LIBDFC		1	/* libdfc iocb */
 #define LPFC_IO_WAKE		2	/* Synchronous I/O completed */
 #define LPFC_IO_WAKE_TMO	LPFC_IO_WAKE /* Synchronous I/O timed out */
@@ -73,11 +73,11 @@ struct lpfc_iocbq {
 #define LPFC_IO_DIF_PASS	0x400	/* T10 DIF IO pass-thru prot */
 #define LPFC_IO_DIF_STRIP	0x800	/* T10 DIF IO strip prot */
 #define LPFC_IO_DIF_INSERT	0x1000	/* T10 DIF IO insert prot */
+#define LPFC_IO_CMD_OUTSTANDING	0x2000 /* timeout handler abort window */
 
 #define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */
 #define LPFC_FIP_ELS_ID_SHIFT	14
 
-	uint8_t rsvd2;
 	uint32_t drvrTimeout;	/* driver timeout in seconds */
 	uint32_t fcp_wqidx;	/* index to FCP work queue */
 	struct lpfc_vport *vport;/* virtual port pointer */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 5bcc38223ac9..85120b77aa0e 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -523,7 +523,7 @@ struct lpfc_sli4_hba {
 	struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
 	struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
 
-	uint8_t fw_func_mode;	/* FW function protocol mode */
+	uint32_t fw_func_mode;	/* FW function protocol mode */
 	uint32_t ulp0_mode;	/* ULP0 protocol mode */
 	uint32_t ulp1_mode;	/* ULP1 protocol mode */
 
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 21859d2006ce..f58f18342bc3 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.41"
+#define LPFC_DRIVER_VERSION "8.3.42"
#define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */