Diffstat (limited to 'drivers/scsi/be2iscsi/be_main.c')
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c  1331
1 file changed, 971 insertions(+), 360 deletions(-)
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index a1f5ac7a9806..1f375051483a 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -149,18 +149,25 @@ BEISCSI_RW_ATTR(log_enable, 0x00,
149 "\t\t\t\tMiscellaneous Events : 0x04\n" 149 "\t\t\t\tMiscellaneous Events : 0x04\n"
150 "\t\t\t\tError Handling : 0x08\n" 150 "\t\t\t\tError Handling : 0x08\n"
151 "\t\t\t\tIO Path Events : 0x10\n" 151 "\t\t\t\tIO Path Events : 0x10\n"
152 "\t\t\t\tConfiguration Path : 0x20\n"); 152 "\t\t\t\tConfiguration Path : 0x20\n"
153 "\t\t\t\tiSCSI Protocol : 0x40\n");
153 154
154DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); 155DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
155DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); 156DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
156DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); 157DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
157DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL); 158DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
159DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
160 beiscsi_active_session_disp, NULL);
161DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
162 beiscsi_free_session_disp, NULL);
158struct device_attribute *beiscsi_attrs[] = { 163struct device_attribute *beiscsi_attrs[] = {
159 &dev_attr_beiscsi_log_enable, 164 &dev_attr_beiscsi_log_enable,
160 &dev_attr_beiscsi_drvr_ver, 165 &dev_attr_beiscsi_drvr_ver,
161 &dev_attr_beiscsi_adapter_family, 166 &dev_attr_beiscsi_adapter_family,
162 &dev_attr_beiscsi_fw_ver, 167 &dev_attr_beiscsi_fw_ver,
163 &dev_attr_beiscsi_active_cid_count, 168 &dev_attr_beiscsi_active_session_count,
169 &dev_attr_beiscsi_free_session_count,
170 &dev_attr_beiscsi_phys_port,
164 NULL, 171 NULL,
165}; 172};
166 173
@@ -239,6 +246,11 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
239 return SUCCESS; 246 return SUCCESS;
240 } 247 }
241 spin_unlock_bh(&session->lock); 248 spin_unlock_bh(&session->lock);
249 /* Invalidate WRB Posted for this Task */
250 AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
251 aborted_io_task->pwrb_handle->pwrb,
252 1);
253
242 conn = aborted_task->conn; 254 conn = aborted_task->conn;
243 beiscsi_conn = conn->dd_data; 255 beiscsi_conn = conn->dd_data;
244 phba = beiscsi_conn->phba; 256 phba = beiscsi_conn->phba;
@@ -316,6 +328,11 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
316 if (abrt_task->sc->device->lun != abrt_task->sc->device->lun) 328 if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
317 continue; 329 continue;
318 330
331 /* Invalidate WRB Posted for this Task */
332 AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
333 abrt_io_task->pwrb_handle->pwrb,
334 1);
335
319 inv_tbl->cid = cid; 336 inv_tbl->cid = cid;
320 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index; 337 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
321 num_invalidate++; 338 num_invalidate++;
@@ -699,30 +716,85 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
699 return status; 716 return status;
700} 717}
701 718
719/**
720 * beiscsi_get_params()- Set the config paramters
721 * @phba: ptr device priv structure
722 **/
702static void beiscsi_get_params(struct beiscsi_hba *phba) 723static void beiscsi_get_params(struct beiscsi_hba *phba)
703{ 724{
704 phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count 725 uint32_t total_cid_count = 0;
705 - (phba->fw_config.iscsi_cid_count 726 uint32_t total_icd_count = 0;
706 + BE2_TMFS 727 uint8_t ulp_num = 0;
707 + BE2_NOPOUT_REQ)); 728
708 phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count; 729 total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
709 phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count; 730 BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
710 phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count; 731
732 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
733 uint32_t align_mask = 0;
734 uint32_t icd_post_per_page = 0;
735 uint32_t icd_count_unavailable = 0;
736 uint32_t icd_start = 0, icd_count = 0;
737 uint32_t icd_start_align = 0, icd_count_align = 0;
738
739 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
740 icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
741 icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
742
743 /* Get ICD count that can be posted on each page */
744 icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
745 sizeof(struct iscsi_sge)));
746 align_mask = (icd_post_per_page - 1);
747
748 /* Check if icd_start is aligned ICD per page posting */
749 if (icd_start % icd_post_per_page) {
750 icd_start_align = ((icd_start +
751 icd_post_per_page) &
752 ~(align_mask));
753 phba->fw_config.
754 iscsi_icd_start[ulp_num] =
755 icd_start_align;
756 }
757
758 icd_count_align = (icd_count & ~align_mask);
759
760 /* ICD discarded in the process of alignment */
761 if (icd_start_align)
762 icd_count_unavailable = ((icd_start_align -
763 icd_start) +
764 (icd_count -
765 icd_count_align));
766
767 /* Updated ICD count available */
768 phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
769 icd_count_unavailable);
770
771 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
772 "BM_%d : Aligned ICD values\n"
773 "\t ICD Start : %d\n"
774 "\t ICD Count : %d\n"
775 "\t ICD Discarded : %d\n",
776 phba->fw_config.
777 iscsi_icd_start[ulp_num],
778 phba->fw_config.
779 iscsi_icd_count[ulp_num],
780 icd_count_unavailable);
781 break;
782 }
783 }
784
785 total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
786 phba->params.ios_per_ctrl = (total_icd_count -
787 (total_cid_count +
788 BE2_TMFS + BE2_NOPOUT_REQ));
789 phba->params.cxns_per_ctrl = total_cid_count;
790 phba->params.asyncpdus_per_ctrl = total_cid_count;
791 phba->params.icds_per_ctrl = total_icd_count;
711 phba->params.num_sge_per_io = BE2_SGE; 792 phba->params.num_sge_per_io = BE2_SGE;
712 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; 793 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
713 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; 794 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
714 phba->params.eq_timer = 64; 795 phba->params.eq_timer = 64;
715 phba->params.num_eq_entries = 796 phba->params.num_eq_entries = 1024;
716 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2 797 phba->params.num_cq_entries = 1024;
717 + BE2_TMFS) / 512) + 1) * 512;
718 phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
719 ? 1024 : phba->params.num_eq_entries;
720 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
721 "BM_%d : phba->params.num_eq_entries=%d\n",
722 phba->params.num_eq_entries);
723 phba->params.num_cq_entries =
724 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
725 + BE2_TMFS) / 512) + 1) * 512;
726 phba->params.wrbs_per_cxn = 256; 798 phba->params.wrbs_per_cxn = 256;
727} 799}
728 800
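The rewritten beiscsi_get_params() above switches from one adapter-wide ICD pool to per-ULP ranges and aligns each range to whole pages of SGL postings: icd_start is rounded up to the next multiple of icd_post_per_page, and the descriptors skipped at the front plus those beyond the last full posting boundary are subtracted from icd_count as unavailable. Below is a minimal standalone sketch of that arithmetic; the page size, SGE count, SGE size and firmware values are purely illustrative and not taken from the patch.

#include <stdio.h>

int main(void)
{
        /* Assume 4 KiB pages, 32 SGEs per ICD, 16 bytes per SGE: 8 ICDs per page. */
        unsigned int icd_post_per_page = 4096 / (32 * 16);
        unsigned int align_mask = icd_post_per_page - 1;
        unsigned int icd_start = 5, icd_count = 2003;   /* hypothetical fw_config values */
        unsigned int icd_start_align = 0, icd_count_align, icd_unavailable = 0;

        /* Round the start up to the next posting boundary if it is unaligned. */
        if (icd_start % icd_post_per_page)
                icd_start_align = (icd_start + icd_post_per_page) & ~align_mask;

        /* Round the count down and add up the descriptors lost at both ends. */
        icd_count_align = icd_count & ~align_mask;
        if (icd_start_align)
                icd_unavailable = (icd_start_align - icd_start) +
                                  (icd_count - icd_count_align);

        printf("ICD start %u -> %u, count %u -> %u, discarded %u\n",
               icd_start, icd_start_align, icd_count,
               icd_count - icd_unavailable, icd_unavailable);
        return 0;
}

With these numbers the aligned range starts at ICD 8 with 1997 usable descriptors and 6 are discarded, which is what the "Aligned ICD values" log message in the hunk would report.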
@@ -1613,8 +1685,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
1613 1685
1614 WARN_ON(!pasync_handle); 1686 WARN_ON(!pasync_handle);
1615 1687
1616 pasync_handle->cri = 1688 pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(
1617 BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid); 1689 beiscsi_conn->beiscsi_conn_cid);
1618 pasync_handle->is_header = is_header; 1690 pasync_handle->is_header = is_header;
1619 pasync_handle->buffer_len = dpl; 1691 pasync_handle->buffer_len = dpl;
1620 *pcq_index = index; 1692 *pcq_index = index;
@@ -1674,18 +1746,13 @@ hwi_update_async_writables(struct beiscsi_hba *phba,
1674} 1746}
1675 1747
1676static void hwi_free_async_msg(struct beiscsi_hba *phba, 1748static void hwi_free_async_msg(struct beiscsi_hba *phba,
1677 unsigned int cri) 1749 struct hwi_async_pdu_context *pasync_ctx,
1750 unsigned int cri)
1678{ 1751{
1679 struct hwi_controller *phwi_ctrlr;
1680 struct hwi_async_pdu_context *pasync_ctx;
1681 struct async_pdu_handle *pasync_handle, *tmp_handle; 1752 struct async_pdu_handle *pasync_handle, *tmp_handle;
1682 struct list_head *plist; 1753 struct list_head *plist;
1683 1754
1684 phwi_ctrlr = phba->phwi_ctrlr;
1685 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1686
1687 plist = &pasync_ctx->async_entry[cri].wait_queue.list; 1755 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1688
1689 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) { 1756 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1690 list_del(&pasync_handle->link); 1757 list_del(&pasync_handle->link);
1691 1758
@@ -1720,7 +1787,7 @@ hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1720} 1787}
1721 1788
1722static void hwi_post_async_buffers(struct beiscsi_hba *phba, 1789static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1723 unsigned int is_header) 1790 unsigned int is_header, uint8_t ulp_num)
1724{ 1791{
1725 struct hwi_controller *phwi_ctrlr; 1792 struct hwi_controller *phwi_ctrlr;
1726 struct hwi_async_pdu_context *pasync_ctx; 1793 struct hwi_async_pdu_context *pasync_ctx;
@@ -1728,13 +1795,13 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1728 struct list_head *pfree_link, *pbusy_list; 1795 struct list_head *pfree_link, *pbusy_list;
1729 struct phys_addr *pasync_sge; 1796 struct phys_addr *pasync_sge;
1730 unsigned int ring_id, num_entries; 1797 unsigned int ring_id, num_entries;
1731 unsigned int host_write_num; 1798 unsigned int host_write_num, doorbell_offset;
1732 unsigned int writables; 1799 unsigned int writables;
1733 unsigned int i = 0; 1800 unsigned int i = 0;
1734 u32 doorbell = 0; 1801 u32 doorbell = 0;
1735 1802
1736 phwi_ctrlr = phba->phwi_ctrlr; 1803 phwi_ctrlr = phba->phwi_ctrlr;
1737 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1804 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
1738 num_entries = pasync_ctx->num_entries; 1805 num_entries = pasync_ctx->num_entries;
1739 1806
1740 if (is_header) { 1807 if (is_header) {
@@ -1742,13 +1809,17 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1742 pasync_ctx->async_header.free_entries); 1809 pasync_ctx->async_header.free_entries);
1743 pfree_link = pasync_ctx->async_header.free_list.next; 1810 pfree_link = pasync_ctx->async_header.free_list.next;
1744 host_write_num = pasync_ctx->async_header.host_write_ptr; 1811 host_write_num = pasync_ctx->async_header.host_write_ptr;
1745 ring_id = phwi_ctrlr->default_pdu_hdr.id; 1812 ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
1813 doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
1814 doorbell_offset;
1746 } else { 1815 } else {
1747 writables = min(pasync_ctx->async_data.writables, 1816 writables = min(pasync_ctx->async_data.writables,
1748 pasync_ctx->async_data.free_entries); 1817 pasync_ctx->async_data.free_entries);
1749 pfree_link = pasync_ctx->async_data.free_list.next; 1818 pfree_link = pasync_ctx->async_data.free_list.next;
1750 host_write_num = pasync_ctx->async_data.host_write_ptr; 1819 host_write_num = pasync_ctx->async_data.host_write_ptr;
1751 ring_id = phwi_ctrlr->default_pdu_data.id; 1820 ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
1821 doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
1822 doorbell_offset;
1752 } 1823 }
1753 1824
1754 writables = (writables / 8) * 8; 1825 writables = (writables / 8) * 8;
@@ -1796,7 +1867,7 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1796 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK) 1867 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1797 << DB_DEF_PDU_CQPROC_SHIFT; 1868 << DB_DEF_PDU_CQPROC_SHIFT;
1798 1869
1799 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET); 1870 iowrite32(doorbell, phba->db_va + doorbell_offset);
1800 } 1871 }
1801} 1872}
1802 1873
@@ -1808,9 +1879,13 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1808 struct hwi_async_pdu_context *pasync_ctx; 1879 struct hwi_async_pdu_context *pasync_ctx;
1809 struct async_pdu_handle *pasync_handle = NULL; 1880 struct async_pdu_handle *pasync_handle = NULL;
1810 unsigned int cq_index = -1; 1881 unsigned int cq_index = -1;
1882 uint16_t cri_index = BE_GET_CRI_FROM_CID(
1883 beiscsi_conn->beiscsi_conn_cid);
1811 1884
1812 phwi_ctrlr = phba->phwi_ctrlr; 1885 phwi_ctrlr = phba->phwi_ctrlr;
1813 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1886 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
1887 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1888 cri_index));
1814 1889
1815 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, 1890 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1816 pdpdu_cqe, &cq_index); 1891 pdpdu_cqe, &cq_index);
@@ -1819,8 +1894,10 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1819 hwi_update_async_writables(phba, pasync_ctx, 1894 hwi_update_async_writables(phba, pasync_ctx,
1820 pasync_handle->is_header, cq_index); 1895 pasync_handle->is_header, cq_index);
1821 1896
1822 hwi_free_async_msg(phba, pasync_handle->cri); 1897 hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri);
1823 hwi_post_async_buffers(phba, pasync_handle->is_header); 1898 hwi_post_async_buffers(phba, pasync_handle->is_header,
1899 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1900 cri_index));
1824} 1901}
1825 1902
1826static unsigned int 1903static unsigned int
@@ -1859,7 +1936,7 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1859 phdr, hdr_len, pfirst_buffer, 1936 phdr, hdr_len, pfirst_buffer,
1860 offset); 1937 offset);
1861 1938
1862 hwi_free_async_msg(phba, cri); 1939 hwi_free_async_msg(phba, pasync_ctx, cri);
1863 return 0; 1940 return 0;
1864} 1941}
1865 1942
@@ -1875,13 +1952,16 @@ hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1875 struct pdu_base *ppdu; 1952 struct pdu_base *ppdu;
1876 1953
1877 phwi_ctrlr = phba->phwi_ctrlr; 1954 phwi_ctrlr = phba->phwi_ctrlr;
1878 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1955 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
1956 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1957 BE_GET_CRI_FROM_CID(beiscsi_conn->
1958 beiscsi_conn_cid)));
1879 1959
1880 list_del(&pasync_handle->link); 1960 list_del(&pasync_handle->link);
1881 if (pasync_handle->is_header) { 1961 if (pasync_handle->is_header) {
1882 pasync_ctx->async_header.busy_entries--; 1962 pasync_ctx->async_header.busy_entries--;
1883 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) { 1963 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1884 hwi_free_async_msg(phba, cri); 1964 hwi_free_async_msg(phba, pasync_ctx, cri);
1885 BUG(); 1965 BUG();
1886 } 1966 }
1887 1967
@@ -1936,9 +2016,14 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1936 struct hwi_async_pdu_context *pasync_ctx; 2016 struct hwi_async_pdu_context *pasync_ctx;
1937 struct async_pdu_handle *pasync_handle = NULL; 2017 struct async_pdu_handle *pasync_handle = NULL;
1938 unsigned int cq_index = -1; 2018 unsigned int cq_index = -1;
2019 uint16_t cri_index = BE_GET_CRI_FROM_CID(
2020 beiscsi_conn->beiscsi_conn_cid);
1939 2021
1940 phwi_ctrlr = phba->phwi_ctrlr; 2022 phwi_ctrlr = phba->phwi_ctrlr;
1941 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 2023 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
2024 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
2025 cri_index));
2026
1942 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, 2027 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1943 pdpdu_cqe, &cq_index); 2028 pdpdu_cqe, &cq_index);
1944 2029
@@ -1947,7 +2032,9 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1947 pasync_handle->is_header, cq_index); 2032 pasync_handle->is_header, cq_index);
1948 2033
1949 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle); 2034 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1950 hwi_post_async_buffers(phba, pasync_handle->is_header); 2035 hwi_post_async_buffers(phba, pasync_handle->is_header,
2036 BEISCSI_GET_ULP_FROM_CRI(
2037 phwi_ctrlr, cri_index));
1951} 2038}
1952 2039
1953static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba) 2040static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
@@ -2072,8 +2159,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
2072 "BM_%d : Received %s[%d] on CID : %d\n", 2159 "BM_%d : Received %s[%d] on CID : %d\n",
2073 cqe_desc[code], code, cid); 2160 cqe_desc[code], code, cid);
2074 2161
2162 spin_lock_bh(&phba->async_pdu_lock);
2075 hwi_process_default_pdu_ring(beiscsi_conn, phba, 2163 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2076 (struct i_t_dpdu_cqe *)sol); 2164 (struct i_t_dpdu_cqe *)sol);
2165 spin_unlock_bh(&phba->async_pdu_lock);
2077 break; 2166 break;
2078 case UNSOL_DATA_NOTIFY: 2167 case UNSOL_DATA_NOTIFY:
2079 beiscsi_log(phba, KERN_INFO, 2168 beiscsi_log(phba, KERN_INFO,
@@ -2081,8 +2170,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
2081 "BM_%d : Received %s[%d] on CID : %d\n", 2170 "BM_%d : Received %s[%d] on CID : %d\n",
2082 cqe_desc[code], code, cid); 2171 cqe_desc[code], code, cid);
2083 2172
2173 spin_lock_bh(&phba->async_pdu_lock);
2084 hwi_process_default_pdu_ring(beiscsi_conn, phba, 2174 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2085 (struct i_t_dpdu_cqe *)sol); 2175 (struct i_t_dpdu_cqe *)sol);
2176 spin_unlock_bh(&phba->async_pdu_lock);
2086 break; 2177 break;
2087 case CXN_INVALIDATE_INDEX_NOTIFY: 2178 case CXN_INVALIDATE_INDEX_NOTIFY:
2088 case CMD_INVALIDATED_NOTIFY: 2179 case CMD_INVALIDATED_NOTIFY:
@@ -2110,8 +2201,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
2110 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2201 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2111 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n", 2202 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
2112 cqe_desc[code], code, cid); 2203 cqe_desc[code], code, cid);
2204 spin_lock_bh(&phba->async_pdu_lock);
2113 hwi_flush_default_pdu_buffer(phba, beiscsi_conn, 2205 hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
2114 (struct i_t_dpdu_cqe *) sol); 2206 (struct i_t_dpdu_cqe *) sol);
2207 spin_unlock_bh(&phba->async_pdu_lock);
2115 break; 2208 break;
2116 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: 2209 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
2117 case CXN_KILLED_BURST_LEN_MISMATCH: 2210 case CXN_KILLED_BURST_LEN_MISMATCH:
@@ -2476,26 +2569,19 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2476 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2569 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2477} 2570}
2478 2571
2572/**
2573 * beiscsi_find_mem_req()- Find mem needed
2574 * @phba: ptr to HBA struct
2575 **/
2479static void beiscsi_find_mem_req(struct beiscsi_hba *phba) 2576static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2480{ 2577{
2578 uint8_t mem_descr_index, ulp_num;
2481 unsigned int num_cq_pages, num_async_pdu_buf_pages; 2579 unsigned int num_cq_pages, num_async_pdu_buf_pages;
2482 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; 2580 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2483 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; 2581 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2484 2582
2485 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 2583 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2486 sizeof(struct sol_cqe)); 2584 sizeof(struct sol_cqe));
2487 num_async_pdu_buf_pages =
2488 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2489 phba->params.defpdu_hdr_sz);
2490 num_async_pdu_buf_sgl_pages =
2491 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2492 sizeof(struct phys_addr));
2493 num_async_pdu_data_pages =
2494 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2495 phba->params.defpdu_data_sz);
2496 num_async_pdu_data_sgl_pages =
2497 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2498 sizeof(struct phys_addr));
2499 2585
2500 phba->params.hwi_ws_sz = sizeof(struct hwi_controller); 2586 phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2501 2587
@@ -2517,24 +2603,79 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2517 phba->params.icds_per_ctrl; 2603 phba->params.icds_per_ctrl;
2518 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * 2604 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2519 phba->params.num_sge_per_io * phba->params.icds_per_ctrl; 2605 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
2520 2606 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2521 phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] = 2607 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
2522 num_async_pdu_buf_pages * PAGE_SIZE; 2608
2523 phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] = 2609 num_async_pdu_buf_sgl_pages =
2524 num_async_pdu_data_pages * PAGE_SIZE; 2610 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2525 phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] = 2611 phba, ulp_num) *
2526 num_async_pdu_buf_sgl_pages * PAGE_SIZE; 2612 sizeof(struct phys_addr));
2527 phba->mem_req[HWI_MEM_ASYNC_DATA_RING] = 2613
2528 num_async_pdu_data_sgl_pages * PAGE_SIZE; 2614 num_async_pdu_buf_pages =
2529 phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] = 2615 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2530 phba->params.asyncpdus_per_ctrl * 2616 phba, ulp_num) *
2531 sizeof(struct async_pdu_handle); 2617 phba->params.defpdu_hdr_sz);
2532 phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] = 2618
2533 phba->params.asyncpdus_per_ctrl * 2619 num_async_pdu_data_pages =
2534 sizeof(struct async_pdu_handle); 2620 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2535 phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] = 2621 phba, ulp_num) *
2536 sizeof(struct hwi_async_pdu_context) + 2622 phba->params.defpdu_data_sz);
2537 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry)); 2623
2624 num_async_pdu_data_sgl_pages =
2625 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2626 phba, ulp_num) *
2627 sizeof(struct phys_addr));
2628
2629 mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 +
2630 (ulp_num * MEM_DESCR_OFFSET));
2631 phba->mem_req[mem_descr_index] =
2632 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2633 BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;
2634
2635 mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2636 (ulp_num * MEM_DESCR_OFFSET));
2637 phba->mem_req[mem_descr_index] =
2638 num_async_pdu_buf_pages *
2639 PAGE_SIZE;
2640
2641 mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
2642 (ulp_num * MEM_DESCR_OFFSET));
2643 phba->mem_req[mem_descr_index] =
2644 num_async_pdu_data_pages *
2645 PAGE_SIZE;
2646
2647 mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
2648 (ulp_num * MEM_DESCR_OFFSET));
2649 phba->mem_req[mem_descr_index] =
2650 num_async_pdu_buf_sgl_pages *
2651 PAGE_SIZE;
2652
2653 mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
2654 (ulp_num * MEM_DESCR_OFFSET));
2655 phba->mem_req[mem_descr_index] =
2656 num_async_pdu_data_sgl_pages *
2657 PAGE_SIZE;
2658
2659 mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
2660 (ulp_num * MEM_DESCR_OFFSET));
2661 phba->mem_req[mem_descr_index] =
2662 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2663 sizeof(struct async_pdu_handle);
2664
2665 mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
2666 (ulp_num * MEM_DESCR_OFFSET));
2667 phba->mem_req[mem_descr_index] =
2668 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2669 sizeof(struct async_pdu_handle);
2670
2671 mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2672 (ulp_num * MEM_DESCR_OFFSET));
2673 phba->mem_req[mem_descr_index] =
2674 sizeof(struct hwi_async_pdu_context) +
2675 (BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2676 sizeof(struct hwi_async_entry));
2677 }
2678 }
2538} 2679}
2539 2680
2540static int beiscsi_alloc_mem(struct beiscsi_hba *phba) 2681static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
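In the reworked beiscsi_find_mem_req() above, every async PDU resource (template header, header and data buffers, rings, handles, and the PDU context itself) now gets one mem_req[] slot per ULP, located at a fixed stride from its ULP0 descriptor index. A compilable toy sketch of that indexing convention; the two constants are placeholders, not the driver's real values:

#include <stdio.h>

/* Placeholders standing in for the driver's descriptor layout, where each
 * resource has a *_ULP0 base index and per-ULP copies MEM_DESCR_OFFSET apart. */
#define MEM_DESCR_OFFSET                8
#define HWI_MEM_ASYNC_HEADER_BUF_ULP0   3

int main(void)
{
        unsigned int ulp_num;

        for (ulp_num = 0; ulp_num < 2; ulp_num++)
                printf("ULP%u header-buf descriptor index = %u\n", ulp_num,
                       HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
                       ulp_num * MEM_DESCR_OFFSET);
        return 0;
}

The same base-plus-stride pattern is repeated in the hunk for every HWI_MEM_ASYNC_*_ULP0 and HWI_MEM_TEMPLATE_HDR_ULP0 descriptor.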
@@ -2576,6 +2717,12 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2576 2717
2577 mem_descr = phba->init_mem; 2718 mem_descr = phba->init_mem;
2578 for (i = 0; i < SE_MEM_MAX; i++) { 2719 for (i = 0; i < SE_MEM_MAX; i++) {
2720 if (!phba->mem_req[i]) {
2721 mem_descr->mem_array = NULL;
2722 mem_descr++;
2723 continue;
2724 }
2725
2579 j = 0; 2726 j = 0;
2580 mem_arr = mem_arr_orig; 2727 mem_arr = mem_arr_orig;
2581 alloc_size = phba->mem_req[i]; 2728 alloc_size = phba->mem_req[i];
@@ -2697,7 +2844,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2697 /* Allocate memory for WRBQ */ 2844 /* Allocate memory for WRBQ */
2698 phwi_ctxt = phwi_ctrlr->phwi_ctxt; 2845 phwi_ctxt = phwi_ctrlr->phwi_ctxt;
2699 phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) * 2846 phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
2700 phba->fw_config.iscsi_cid_count, 2847 phba->params.cxns_per_ctrl,
2701 GFP_KERNEL); 2848 GFP_KERNEL);
2702 if (!phwi_ctxt->be_wrbq) { 2849 if (!phwi_ctxt->be_wrbq) {
2703 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2850 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -2779,6 +2926,7 @@ init_wrb_hndl_failed:
2779 2926
2780static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) 2927static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2781{ 2928{
2929 uint8_t ulp_num;
2782 struct hwi_controller *phwi_ctrlr; 2930 struct hwi_controller *phwi_ctrlr;
2783 struct hba_parameters *p = &phba->params; 2931 struct hba_parameters *p = &phba->params;
2784 struct hwi_async_pdu_context *pasync_ctx; 2932 struct hwi_async_pdu_context *pasync_ctx;
@@ -2786,155 +2934,150 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2786 unsigned int index, idx, num_per_mem, num_async_data; 2934 unsigned int index, idx, num_per_mem, num_async_data;
2787 struct be_mem_descriptor *mem_descr; 2935 struct be_mem_descriptor *mem_descr;
2788 2936
2789 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2937 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2790 mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT; 2938 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
2791 2939
2792 phwi_ctrlr = phba->phwi_ctrlr; 2940 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2793 phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *) 2941 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2942 (ulp_num * MEM_DESCR_OFFSET));
2943
2944 phwi_ctrlr = phba->phwi_ctrlr;
2945 phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
2946 (struct hwi_async_pdu_context *)
2947 mem_descr->mem_array[0].virtual_address;
2948
2949 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
2950 memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2951
2952 pasync_ctx->async_entry =
2953 (struct hwi_async_entry *)
2954 ((long unsigned int)pasync_ctx +
2955 sizeof(struct hwi_async_pdu_context));
2956
2957 pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
2958 ulp_num);
2959 pasync_ctx->buffer_size = p->defpdu_hdr_sz;
2960
2961 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2962 mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2963 (ulp_num * MEM_DESCR_OFFSET);
2964 if (mem_descr->mem_array[0].virtual_address) {
2965 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2966 "BM_%d : hwi_init_async_pdu_ctx"
2967 " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
2968 ulp_num,
2969 mem_descr->mem_array[0].
2970 virtual_address);
2971 } else
2972 beiscsi_log(phba, KERN_WARNING,
2973 BEISCSI_LOG_INIT,
2974 "BM_%d : No Virtual address for ULP : %d\n",
2975 ulp_num);
2976
2977 pasync_ctx->async_header.va_base =
2794 mem_descr->mem_array[0].virtual_address; 2978 mem_descr->mem_array[0].virtual_address;
2795 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2796 memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2797 2979
2798 pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) * 2980 pasync_ctx->async_header.pa_base.u.a64.address =
2799 phba->fw_config.iscsi_cid_count, 2981 mem_descr->mem_array[0].
2800 GFP_KERNEL); 2982 bus_address.u.a64.address;
2801 if (!pasync_ctx->async_entry) {
2802 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2803 "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n");
2804 return -ENOMEM;
2805 }
2806
2807 pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
2808 pasync_ctx->buffer_size = p->defpdu_hdr_sz;
2809
2810 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2811 mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2812 if (mem_descr->mem_array[0].virtual_address) {
2813 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2814 "BM_%d : hwi_init_async_pdu_ctx"
2815 " HWI_MEM_ASYNC_HEADER_BUF va=%p\n",
2816 mem_descr->mem_array[0].virtual_address);
2817 } else
2818 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2819 "BM_%d : No Virtual address\n");
2820
2821 pasync_ctx->async_header.va_base =
2822 mem_descr->mem_array[0].virtual_address;
2823
2824 pasync_ctx->async_header.pa_base.u.a64.address =
2825 mem_descr->mem_array[0].bus_address.u.a64.address;
2826
2827 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2828 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2829 if (mem_descr->mem_array[0].virtual_address) {
2830 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2831 "BM_%d : hwi_init_async_pdu_ctx"
2832 " HWI_MEM_ASYNC_HEADER_RING va=%p\n",
2833 mem_descr->mem_array[0].virtual_address);
2834 } else
2835 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2836 "BM_%d : No Virtual address\n");
2837
2838 pasync_ctx->async_header.ring_base =
2839 mem_descr->mem_array[0].virtual_address;
2840
2841 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2842 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2843 if (mem_descr->mem_array[0].virtual_address) {
2844 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2845 "BM_%d : hwi_init_async_pdu_ctx"
2846 " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n",
2847 mem_descr->mem_array[0].virtual_address);
2848 } else
2849 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2850 "BM_%d : No Virtual address\n");
2851
2852 pasync_ctx->async_header.handle_base =
2853 mem_descr->mem_array[0].virtual_address;
2854 pasync_ctx->async_header.writables = 0;
2855 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2856
2857
2858 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2859 mem_descr += HWI_MEM_ASYNC_DATA_RING;
2860 if (mem_descr->mem_array[0].virtual_address) {
2861 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2862 "BM_%d : hwi_init_async_pdu_ctx"
2863 " HWI_MEM_ASYNC_DATA_RING va=%p\n",
2864 mem_descr->mem_array[0].virtual_address);
2865 } else
2866 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2867 "BM_%d : No Virtual address\n");
2868
2869 pasync_ctx->async_data.ring_base =
2870 mem_descr->mem_array[0].virtual_address;
2871
2872 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2873 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2874 if (!mem_descr->mem_array[0].virtual_address)
2875 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2876 "BM_%d : No Virtual address\n");
2877 2983
2878 pasync_ctx->async_data.handle_base = 2984 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2879 mem_descr->mem_array[0].virtual_address; 2985 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
2880 pasync_ctx->async_data.writables = 0; 2986 (ulp_num * MEM_DESCR_OFFSET);
2881 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list); 2987 if (mem_descr->mem_array[0].virtual_address) {
2988 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2989 "BM_%d : hwi_init_async_pdu_ctx"
2990 " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
2991 ulp_num,
2992 mem_descr->mem_array[0].
2993 virtual_address);
2994 } else
2995 beiscsi_log(phba, KERN_WARNING,
2996 BEISCSI_LOG_INIT,
2997 "BM_%d : No Virtual address for ULP : %d\n",
2998 ulp_num);
2999
3000 pasync_ctx->async_header.ring_base =
3001 mem_descr->mem_array[0].virtual_address;
2882 3002
2883 pasync_header_h = 3003 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2884 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base; 3004 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
2885 pasync_data_h = 3005 (ulp_num * MEM_DESCR_OFFSET);
2886 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base; 3006 if (mem_descr->mem_array[0].virtual_address) {
3007 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3008 "BM_%d : hwi_init_async_pdu_ctx"
3009 " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
3010 ulp_num,
3011 mem_descr->mem_array[0].
3012 virtual_address);
3013 } else
3014 beiscsi_log(phba, KERN_WARNING,
3015 BEISCSI_LOG_INIT,
3016 "BM_%d : No Virtual address for ULP : %d\n",
3017 ulp_num);
3018
3019 pasync_ctx->async_header.handle_base =
3020 mem_descr->mem_array[0].virtual_address;
3021 pasync_ctx->async_header.writables = 0;
3022 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
3023
3024 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3025 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
3026 (ulp_num * MEM_DESCR_OFFSET);
3027 if (mem_descr->mem_array[0].virtual_address) {
3028 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3029 "BM_%d : hwi_init_async_pdu_ctx"
3030 " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
3031 ulp_num,
3032 mem_descr->mem_array[0].
3033 virtual_address);
3034 } else
3035 beiscsi_log(phba, KERN_WARNING,
3036 BEISCSI_LOG_INIT,
3037 "BM_%d : No Virtual address for ULP : %d\n",
3038 ulp_num);
3039
3040 pasync_ctx->async_data.ring_base =
3041 mem_descr->mem_array[0].virtual_address;
2887 3042
2888 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3043 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2889 mem_descr += HWI_MEM_ASYNC_DATA_BUF; 3044 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
2890 if (mem_descr->mem_array[0].virtual_address) { 3045 (ulp_num * MEM_DESCR_OFFSET);
2891 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3046 if (!mem_descr->mem_array[0].virtual_address)
2892 "BM_%d : hwi_init_async_pdu_ctx" 3047 beiscsi_log(phba, KERN_WARNING,
2893 " HWI_MEM_ASYNC_DATA_BUF va=%p\n", 3048 BEISCSI_LOG_INIT,
2894 mem_descr->mem_array[0].virtual_address); 3049 "BM_%d : No Virtual address for ULP : %d\n",
2895 } else 3050 ulp_num);
2896 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2897 "BM_%d : No Virtual address\n");
2898 3051
2899 idx = 0; 3052 pasync_ctx->async_data.handle_base =
2900 pasync_ctx->async_data.va_base = 3053 mem_descr->mem_array[0].virtual_address;
2901 mem_descr->mem_array[idx].virtual_address; 3054 pasync_ctx->async_data.writables = 0;
2902 pasync_ctx->async_data.pa_base.u.a64.address = 3055 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2903 mem_descr->mem_array[idx].bus_address.u.a64.address; 3056
2904 3057 pasync_header_h =
2905 num_async_data = ((mem_descr->mem_array[idx].size) / 3058 (struct async_pdu_handle *)
2906 phba->params.defpdu_data_sz); 3059 pasync_ctx->async_header.handle_base;
2907 num_per_mem = 0; 3060 pasync_data_h =
2908 3061 (struct async_pdu_handle *)
2909 for (index = 0; index < p->asyncpdus_per_ctrl; index++) { 3062 pasync_ctx->async_data.handle_base;
2910 pasync_header_h->cri = -1; 3063
2911 pasync_header_h->index = (char)index; 3064 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2912 INIT_LIST_HEAD(&pasync_header_h->link); 3065 mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
2913 pasync_header_h->pbuffer = 3066 (ulp_num * MEM_DESCR_OFFSET);
2914 (void *)((unsigned long) 3067 if (mem_descr->mem_array[0].virtual_address) {
2915 (pasync_ctx->async_header.va_base) + 3068 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2916 (p->defpdu_hdr_sz * index)); 3069 "BM_%d : hwi_init_async_pdu_ctx"
2917 3070 " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
2918 pasync_header_h->pa.u.a64.address = 3071 ulp_num,
2919 pasync_ctx->async_header.pa_base.u.a64.address + 3072 mem_descr->mem_array[0].
2920 (p->defpdu_hdr_sz * index); 3073 virtual_address);
2921 3074 } else
2922 list_add_tail(&pasync_header_h->link, 3075 beiscsi_log(phba, KERN_WARNING,
2923 &pasync_ctx->async_header.free_list); 3076 BEISCSI_LOG_INIT,
2924 pasync_header_h++; 3077 "BM_%d : No Virtual address for ULP : %d\n",
2925 pasync_ctx->async_header.free_entries++; 3078 ulp_num);
2926 pasync_ctx->async_header.writables++; 3079
2927 3080 idx = 0;
2928 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2929 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2930 header_busy_list);
2931 pasync_data_h->cri = -1;
2932 pasync_data_h->index = (char)index;
2933 INIT_LIST_HEAD(&pasync_data_h->link);
2934
2935 if (!num_async_data) {
2936 num_per_mem = 0;
2937 idx++;
2938 pasync_ctx->async_data.va_base = 3081 pasync_ctx->async_data.va_base =
2939 mem_descr->mem_array[idx].virtual_address; 3082 mem_descr->mem_array[idx].virtual_address;
2940 pasync_ctx->async_data.pa_base.u.a64.address = 3083 pasync_ctx->async_data.pa_base.u.a64.address =
@@ -2943,32 +3086,83 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2943 3086
2944 num_async_data = ((mem_descr->mem_array[idx].size) / 3087 num_async_data = ((mem_descr->mem_array[idx].size) /
2945 phba->params.defpdu_data_sz); 3088 phba->params.defpdu_data_sz);
2946 } 3089 num_per_mem = 0;
2947 pasync_data_h->pbuffer =
2948 (void *)((unsigned long)
2949 (pasync_ctx->async_data.va_base) +
2950 (p->defpdu_data_sz * num_per_mem));
2951
2952 pasync_data_h->pa.u.a64.address =
2953 pasync_ctx->async_data.pa_base.u.a64.address +
2954 (p->defpdu_data_sz * num_per_mem);
2955 num_per_mem++;
2956 num_async_data--;
2957 3090
2958 list_add_tail(&pasync_data_h->link, 3091 for (index = 0; index < BEISCSI_GET_CID_COUNT
2959 &pasync_ctx->async_data.free_list); 3092 (phba, ulp_num); index++) {
2960 pasync_data_h++; 3093 pasync_header_h->cri = -1;
2961 pasync_ctx->async_data.free_entries++; 3094 pasync_header_h->index = (char)index;
2962 pasync_ctx->async_data.writables++; 3095 INIT_LIST_HEAD(&pasync_header_h->link);
3096 pasync_header_h->pbuffer =
3097 (void *)((unsigned long)
3098 (pasync_ctx->
3099 async_header.va_base) +
3100 (p->defpdu_hdr_sz * index));
3101
3102 pasync_header_h->pa.u.a64.address =
3103 pasync_ctx->async_header.pa_base.u.a64.
3104 address + (p->defpdu_hdr_sz * index);
3105
3106 list_add_tail(&pasync_header_h->link,
3107 &pasync_ctx->async_header.
3108 free_list);
3109 pasync_header_h++;
3110 pasync_ctx->async_header.free_entries++;
3111 pasync_ctx->async_header.writables++;
3112
3113 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
3114 wait_queue.list);
3115 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
3116 header_busy_list);
3117 pasync_data_h->cri = -1;
3118 pasync_data_h->index = (char)index;
3119 INIT_LIST_HEAD(&pasync_data_h->link);
3120
3121 if (!num_async_data) {
3122 num_per_mem = 0;
3123 idx++;
3124 pasync_ctx->async_data.va_base =
3125 mem_descr->mem_array[idx].
3126 virtual_address;
3127 pasync_ctx->async_data.pa_base.u.
3128 a64.address =
3129 mem_descr->mem_array[idx].
3130 bus_address.u.a64.address;
3131 num_async_data =
3132 ((mem_descr->mem_array[idx].
3133 size) /
3134 phba->params.defpdu_data_sz);
3135 }
3136 pasync_data_h->pbuffer =
3137 (void *)((unsigned long)
3138 (pasync_ctx->async_data.va_base) +
3139 (p->defpdu_data_sz * num_per_mem));
3140
3141 pasync_data_h->pa.u.a64.address =
3142 pasync_ctx->async_data.pa_base.u.a64.
3143 address + (p->defpdu_data_sz *
3144 num_per_mem);
3145 num_per_mem++;
3146 num_async_data--;
3147
3148 list_add_tail(&pasync_data_h->link,
3149 &pasync_ctx->async_data.
3150 free_list);
3151 pasync_data_h++;
3152 pasync_ctx->async_data.free_entries++;
3153 pasync_ctx->async_data.writables++;
3154
3155 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
3156 data_busy_list);
3157 }
2963 3158
2964 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list); 3159 pasync_ctx->async_header.host_write_ptr = 0;
3160 pasync_ctx->async_header.ep_read_ptr = -1;
3161 pasync_ctx->async_data.host_write_ptr = 0;
3162 pasync_ctx->async_data.ep_read_ptr = -1;
3163 }
2965 } 3164 }
2966 3165
2967 pasync_ctx->async_header.host_write_ptr = 0;
2968 pasync_ctx->async_header.ep_read_ptr = -1;
2969 pasync_ctx->async_data.host_write_ptr = 0;
2970 pasync_ctx->async_data.ep_read_ptr = -1;
2971
2972 return 0; 3166 return 0;
2973} 3167}
2974 3168
@@ -3164,7 +3358,7 @@ static int
3164beiscsi_create_def_hdr(struct beiscsi_hba *phba, 3358beiscsi_create_def_hdr(struct beiscsi_hba *phba,
3165 struct hwi_context_memory *phwi_context, 3359 struct hwi_context_memory *phwi_context,
3166 struct hwi_controller *phwi_ctrlr, 3360 struct hwi_controller *phwi_ctrlr,
3167 unsigned int def_pdu_ring_sz) 3361 unsigned int def_pdu_ring_sz, uint8_t ulp_num)
3168{ 3362{
3169 unsigned int idx; 3363 unsigned int idx;
3170 int ret; 3364 int ret;
@@ -3174,36 +3368,42 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
3174 void *dq_vaddress; 3368 void *dq_vaddress;
3175 3369
3176 idx = 0; 3370 idx = 0;
3177 dq = &phwi_context->be_def_hdrq; 3371 dq = &phwi_context->be_def_hdrq[ulp_num];
3178 cq = &phwi_context->be_cq[0]; 3372 cq = &phwi_context->be_cq[0];
3179 mem = &dq->dma_mem; 3373 mem = &dq->dma_mem;
3180 mem_descr = phba->init_mem; 3374 mem_descr = phba->init_mem;
3181 mem_descr += HWI_MEM_ASYNC_HEADER_RING; 3375 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
3376 (ulp_num * MEM_DESCR_OFFSET);
3182 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3377 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3183 ret = be_fill_queue(dq, mem_descr->mem_array[0].size / 3378 ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
3184 sizeof(struct phys_addr), 3379 sizeof(struct phys_addr),
3185 sizeof(struct phys_addr), dq_vaddress); 3380 sizeof(struct phys_addr), dq_vaddress);
3186 if (ret) { 3381 if (ret) {
3187 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3382 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3188 "BM_%d : be_fill_queue Failed for DEF PDU HDR\n"); 3383 "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
3384 ulp_num);
3385
3189 return ret; 3386 return ret;
3190 } 3387 }
3191 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3388 mem->dma = (unsigned long)mem_descr->mem_array[idx].
3192 bus_address.u.a64.address; 3389 bus_address.u.a64.address;
3193 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, 3390 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
3194 def_pdu_ring_sz, 3391 def_pdu_ring_sz,
3195 phba->params.defpdu_hdr_sz); 3392 phba->params.defpdu_hdr_sz,
3393 BEISCSI_DEFQ_HDR, ulp_num);
3196 if (ret) { 3394 if (ret) {
3197 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3395 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3198 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n"); 3396 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
3397 ulp_num);
3398
3199 return ret; 3399 return ret;
3200 } 3400 }
3201 phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
3202 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3203 "BM_%d : iscsi def pdu id is %d\n",
3204 phwi_context->be_def_hdrq.id);
3205 3401
3206 hwi_post_async_buffers(phba, 1); 3402 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3403 "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
3404 ulp_num,
3405 phwi_context->be_def_hdrq[ulp_num].id);
3406 hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num);
3207 return 0; 3407 return 0;
3208} 3408}
3209 3409
@@ -3211,7 +3411,7 @@ static int
3211beiscsi_create_def_data(struct beiscsi_hba *phba, 3411beiscsi_create_def_data(struct beiscsi_hba *phba,
3212 struct hwi_context_memory *phwi_context, 3412 struct hwi_context_memory *phwi_context,
3213 struct hwi_controller *phwi_ctrlr, 3413 struct hwi_controller *phwi_ctrlr,
3214 unsigned int def_pdu_ring_sz) 3414 unsigned int def_pdu_ring_sz, uint8_t ulp_num)
3215{ 3415{
3216 unsigned int idx; 3416 unsigned int idx;
3217 int ret; 3417 int ret;
@@ -3221,43 +3421,86 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
3221 void *dq_vaddress; 3421 void *dq_vaddress;
3222 3422
3223 idx = 0; 3423 idx = 0;
3224 dataq = &phwi_context->be_def_dataq; 3424 dataq = &phwi_context->be_def_dataq[ulp_num];
3225 cq = &phwi_context->be_cq[0]; 3425 cq = &phwi_context->be_cq[0];
3226 mem = &dataq->dma_mem; 3426 mem = &dataq->dma_mem;
3227 mem_descr = phba->init_mem; 3427 mem_descr = phba->init_mem;
3228 mem_descr += HWI_MEM_ASYNC_DATA_RING; 3428 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
3429 (ulp_num * MEM_DESCR_OFFSET);
3229 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3430 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3230 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size / 3431 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
3231 sizeof(struct phys_addr), 3432 sizeof(struct phys_addr),
3232 sizeof(struct phys_addr), dq_vaddress); 3433 sizeof(struct phys_addr), dq_vaddress);
3233 if (ret) { 3434 if (ret) {
3234 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3435 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3235 "BM_%d : be_fill_queue Failed for DEF PDU DATA\n"); 3436 "BM_%d : be_fill_queue Failed for DEF PDU "
3437 "DATA on ULP : %d\n",
3438 ulp_num);
3439
3236 return ret; 3440 return ret;
3237 } 3441 }
3238 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3442 mem->dma = (unsigned long)mem_descr->mem_array[idx].
3239 bus_address.u.a64.address; 3443 bus_address.u.a64.address;
3240 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, 3444 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
3241 def_pdu_ring_sz, 3445 def_pdu_ring_sz,
3242 phba->params.defpdu_data_sz); 3446 phba->params.defpdu_data_sz,
3447 BEISCSI_DEFQ_DATA, ulp_num);
3243 if (ret) { 3448 if (ret) {
3244 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3449 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3245 "BM_%d be_cmd_create_default_pdu_queue" 3450 "BM_%d be_cmd_create_default_pdu_queue"
3246 " Failed for DEF PDU DATA\n"); 3451 " Failed for DEF PDU DATA on ULP : %d\n",
3452 ulp_num);
3247 return ret; 3453 return ret;
3248 } 3454 }
3249 phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id; 3455
3250 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3456 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3251 "BM_%d : iscsi def data id is %d\n", 3457 "BM_%d : iscsi def data id on ULP : %d is %d\n",
3252 phwi_context->be_def_dataq.id); 3458 ulp_num,
3459 phwi_context->be_def_dataq[ulp_num].id);
3253 3460
3254 hwi_post_async_buffers(phba, 0); 3461 hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num);
3255 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3462 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3256 "BM_%d : DEFAULT PDU DATA RING CREATED\n"); 3463 "BM_%d : DEFAULT PDU DATA RING CREATED"
3464 "on ULP : %d\n", ulp_num);
3257 3465
3258 return 0; 3466 return 0;
3259} 3467}
3260 3468
3469
3470static int
3471beiscsi_post_template_hdr(struct beiscsi_hba *phba)
3472{
3473 struct be_mem_descriptor *mem_descr;
3474 struct mem_array *pm_arr;
3475 struct be_dma_mem sgl;
3476 int status, ulp_num;
3477
3478 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3479 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3480 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3481 mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 +
3482 (ulp_num * MEM_DESCR_OFFSET);
3483 pm_arr = mem_descr->mem_array;
3484
3485 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3486 status = be_cmd_iscsi_post_template_hdr(
3487 &phba->ctrl, &sgl);
3488
3489 if (status != 0) {
3490 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3491 "BM_%d : Post Template HDR Failed for"
3492 "ULP_%d\n", ulp_num);
3493 return status;
3494 }
3495
3496 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3497 "BM_%d : Template HDR Pages Posted for"
3498 "ULP_%d\n", ulp_num);
3499 }
3500 }
3501 return 0;
3502}
3503
3261static int 3504static int
3262beiscsi_post_pages(struct beiscsi_hba *phba) 3505beiscsi_post_pages(struct beiscsi_hba *phba)
3263{ 3506{
@@ -3265,14 +3508,18 @@ beiscsi_post_pages(struct beiscsi_hba *phba)
3265 struct mem_array *pm_arr; 3508 struct mem_array *pm_arr;
3266 unsigned int page_offset, i; 3509 unsigned int page_offset, i;
3267 struct be_dma_mem sgl; 3510 struct be_dma_mem sgl;
3268 int status; 3511 int status, ulp_num = 0;
3269 3512
3270 mem_descr = phba->init_mem; 3513 mem_descr = phba->init_mem;
3271 mem_descr += HWI_MEM_SGE; 3514 mem_descr += HWI_MEM_SGE;
3272 pm_arr = mem_descr->mem_array; 3515 pm_arr = mem_descr->mem_array;
3273 3516
3517 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3518 if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
3519 break;
3520
3274 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * 3521 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
3275 phba->fw_config.iscsi_icd_start) / PAGE_SIZE; 3522 phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
3276 for (i = 0; i < mem_descr->num_elements; i++) { 3523 for (i = 0; i < mem_descr->num_elements; i++) {
3277 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3524 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3278 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, 3525 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
@@ -3324,13 +3571,15 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3324{ 3571{
3325 unsigned int wrb_mem_index, offset, size, num_wrb_rings; 3572 unsigned int wrb_mem_index, offset, size, num_wrb_rings;
3326 u64 pa_addr_lo; 3573 u64 pa_addr_lo;
3327 unsigned int idx, num, i; 3574 unsigned int idx, num, i, ulp_num;
3328 struct mem_array *pwrb_arr; 3575 struct mem_array *pwrb_arr;
3329 void *wrb_vaddr; 3576 void *wrb_vaddr;
3330 struct be_dma_mem sgl; 3577 struct be_dma_mem sgl;
3331 struct be_mem_descriptor *mem_descr; 3578 struct be_mem_descriptor *mem_descr;
3332 struct hwi_wrb_context *pwrb_context; 3579 struct hwi_wrb_context *pwrb_context;
3333 int status; 3580 int status;
3581 uint8_t ulp_count = 0, ulp_base_num = 0;
3582 uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 };
3334 3583
3335 idx = 0; 3584 idx = 0;
3336 mem_descr = phba->init_mem; 3585 mem_descr = phba->init_mem;
@@ -3374,14 +3623,37 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3374 num_wrb_rings--; 3623 num_wrb_rings--;
3375 } 3624 }
3376 } 3625 }
3626
3627 /* Get the ULP Count */
3628 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3629 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3630 ulp_count++;
3631 ulp_base_num = ulp_num;
3632 cid_count_ulp[ulp_num] =
3633 BEISCSI_GET_CID_COUNT(phba, ulp_num);
3634 }
3635
3377 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3636 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3378 wrb_mem_index = 0; 3637 wrb_mem_index = 0;
3379 offset = 0; 3638 offset = 0;
3380 size = 0; 3639 size = 0;
3381 3640
3641 if (ulp_count > 1) {
3642 ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;
3643
3644 if (!cid_count_ulp[ulp_base_num])
3645 ulp_base_num = (ulp_base_num + 1) %
3646 BEISCSI_ULP_COUNT;
3647
3648 cid_count_ulp[ulp_base_num]--;
3649 }
3650
3651
3382 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); 3652 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
3383 status = be_cmd_wrbq_create(&phba->ctrl, &sgl, 3653 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
3384 &phwi_context->be_wrbq[i]); 3654 &phwi_context->be_wrbq[i],
3655 &phwi_ctrlr->wrb_context[i],
3656 ulp_base_num);
3385 if (status != 0) { 3657 if (status != 0) {
3386 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3658 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3387 "BM_%d : wrbq create failed."); 3659 "BM_%d : wrbq create failed.");
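When more than one ULP is loaded, the loop above distributes the WRB queues round-robin across the ULPs, skipping a ULP once its CID quota is exhausted. A self-contained sketch of that distribution with made-up quotas (two ULPs, as implied by the ULP0/ULP1 descriptors used elsewhere in this patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical quotas: 3 CIDs on ULP0, 5 on ULP1, 8 connections total. */
        uint16_t cid_count_ulp[2] = { 3, 5 };
        uint8_t ulp_base_num = 0;
        unsigned int i;

        for (i = 0; i < 8; i++) {
                ulp_base_num = (ulp_base_num + 1) % 2;
                if (!cid_count_ulp[ulp_base_num])
                        ulp_base_num = (ulp_base_num + 1) % 2;
                cid_count_ulp[ulp_base_num]--;
                printf("cxn %u -> WRBQ created on ULP%u\n", i, ulp_base_num);
        }
        return 0;
}

With these quotas the eight connections end up split 3/5 between ULP0 and ULP1, matching the per-ULP CID counts.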
@@ -3389,7 +3661,6 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3389 return status; 3661 return status;
3390 } 3662 }
3391 pwrb_context = &phwi_ctrlr->wrb_context[i]; 3663 pwrb_context = &phwi_ctrlr->wrb_context[i];
3392 pwrb_context->cid = phwi_context->be_wrbq[i].id;
3393 BE_SET_CID_TO_CRI(i, pwrb_context->cid); 3664 BE_SET_CID_TO_CRI(i, pwrb_context->cid);
3394 } 3665 }
3395 kfree(pwrb_arr); 3666 kfree(pwrb_arr);
@@ -3433,10 +3704,13 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
3433 struct hwi_controller *phwi_ctrlr; 3704 struct hwi_controller *phwi_ctrlr;
3434 struct hwi_context_memory *phwi_context; 3705 struct hwi_context_memory *phwi_context;
3435 struct hwi_async_pdu_context *pasync_ctx; 3706 struct hwi_async_pdu_context *pasync_ctx;
3436 int i, eq_num; 3707 int i, eq_num, ulp_num;
3437 3708
3438 phwi_ctrlr = phba->phwi_ctrlr; 3709 phwi_ctrlr = phba->phwi_ctrlr;
3439 phwi_context = phwi_ctrlr->phwi_ctxt; 3710 phwi_context = phwi_ctrlr->phwi_ctxt;
3711
3712 be_cmd_iscsi_remove_template_hdr(ctrl);
3713
3440 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3714 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3441 q = &phwi_context->be_wrbq[i]; 3715 q = &phwi_context->be_wrbq[i];
3442 if (q->created) 3716 if (q->created)
@@ -3445,13 +3719,20 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
3445 kfree(phwi_context->be_wrbq); 3719 kfree(phwi_context->be_wrbq);
3446 free_wrb_handles(phba); 3720 free_wrb_handles(phba);
3447 3721
3448 q = &phwi_context->be_def_hdrq; 3722 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3449 if (q->created) 3723 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3450 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3451 3724
3452 q = &phwi_context->be_def_dataq; 3725 q = &phwi_context->be_def_hdrq[ulp_num];
3453 if (q->created) 3726 if (q->created)
3454 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3727 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3728
3729 q = &phwi_context->be_def_dataq[ulp_num];
3730 if (q->created)
3731 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3732
3733 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
3734 }
3735 }
3455 3736
3456 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); 3737 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3457 3738
@@ -3470,9 +3751,6 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
3470 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 3751 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3471 } 3752 }
3472 be_mcc_queues_destroy(phba); 3753 be_mcc_queues_destroy(phba);
3473
3474 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
3475 kfree(pasync_ctx->async_entry);
3476 be_cmd_fw_uninit(ctrl); 3754 be_cmd_fw_uninit(ctrl);
3477} 3755}
3478 3756
@@ -3538,8 +3816,19 @@ static void find_num_cpus(struct beiscsi_hba *phba)
3538 BEISCSI_MAX_NUM_CPUS : num_cpus; 3816 BEISCSI_MAX_NUM_CPUS : num_cpus;
3539 break; 3817 break;
3540 case BE_GEN4: 3818 case BE_GEN4:
3541 phba->num_cpus = (num_cpus > OC_SKH_MAX_NUM_CPUS) ? 3819 /*
3542 OC_SKH_MAX_NUM_CPUS : num_cpus; 3820 * If eqid_count == 1 fall back to
3821 * INTX mechanism
3822 **/
3823 if (phba->fw_config.eqid_count == 1) {
3824 enable_msix = 0;
3825 phba->num_cpus = 1;
3826 return;
3827 }
3828
3829 phba->num_cpus =
3830 (num_cpus > (phba->fw_config.eqid_count - 1)) ?
3831 (phba->fw_config.eqid_count - 1) : num_cpus;
3543 break; 3832 break;
3544 default: 3833 default:
3545 phba->num_cpus = 1; 3834 phba->num_cpus = 1;
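In the BE_GEN4 branch above, the MSI-X vector count is now derived from the firmware-reported EQ count instead of the fixed OC_SKH_MAX_NUM_CPUS cap: if the firmware exposes only one EQ the driver falls back to INTx with a single CPU, otherwise it uses min(online CPUs, eqid_count - 1). For example, 8 online CPUs with eqid_count = 5 gives num_cpus = 4.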
@@ -3552,10 +3841,8 @@ static int hwi_init_port(struct beiscsi_hba *phba)
3552 struct hwi_context_memory *phwi_context; 3841 struct hwi_context_memory *phwi_context;
3553 unsigned int def_pdu_ring_sz; 3842 unsigned int def_pdu_ring_sz;
3554 struct be_ctrl_info *ctrl = &phba->ctrl; 3843 struct be_ctrl_info *ctrl = &phba->ctrl;
3555 int status; 3844 int status, ulp_num;
3556 3845
3557 def_pdu_ring_sz =
3558 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
3559 phwi_ctrlr = phba->phwi_ctrlr; 3846 phwi_ctrlr = phba->phwi_ctrlr;
3560 phwi_context = phwi_ctrlr->phwi_ctxt; 3847 phwi_context = phwi_ctrlr->phwi_ctxt;
3561 phwi_context->max_eqd = 0; 3848 phwi_context->max_eqd = 0;
@@ -3588,27 +3875,48 @@ static int hwi_init_port(struct beiscsi_hba *phba)
3588 goto error; 3875 goto error;
3589 } 3876 }
3590 3877
3591 status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr, 3878 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3592 def_pdu_ring_sz); 3879 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3593 if (status != 0) { 3880
3594 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3881 def_pdu_ring_sz =
3595 "BM_%d : Default Header not created\n"); 3882 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
3596 goto error; 3883 sizeof(struct phys_addr);
3884
3885 status = beiscsi_create_def_hdr(phba, phwi_context,
3886 phwi_ctrlr,
3887 def_pdu_ring_sz,
3888 ulp_num);
3889 if (status != 0) {
3890 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3891 "BM_%d : Default Header not created for ULP : %d\n",
3892 ulp_num);
3893 goto error;
3894 }
3895
3896 status = beiscsi_create_def_data(phba, phwi_context,
3897 phwi_ctrlr,
3898 def_pdu_ring_sz,
3899 ulp_num);
3900 if (status != 0) {
3901 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3902 "BM_%d : Default Data not created for ULP : %d\n",
3903 ulp_num);
3904 goto error;
3905 }
3906 }
3597 } 3907 }
3598 3908
3599 status = beiscsi_create_def_data(phba, phwi_context, 3909 status = beiscsi_post_pages(phba);
3600 phwi_ctrlr, def_pdu_ring_sz);
3601 if (status != 0) { 3910 if (status != 0) {
3602 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3911 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3603 "BM_%d : Default Data not created\n"); 3912 "BM_%d : Post SGL Pages Failed\n");
3604 goto error; 3913 goto error;
3605 } 3914 }
3606 3915
3607 status = beiscsi_post_pages(phba); 3916 status = beiscsi_post_template_hdr(phba);
3608 if (status != 0) { 3917 if (status != 0) {
3609 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3918 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3610 "BM_%d : Post SGL Pages Failed\n"); 3919 "BM_%d : Template HDR Posting for CXN Failed\n");
3611 goto error;
3612 } 3920 }
3613 3921
3614 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); 3922 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
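The per-ULP loop in the hunk above now sizes each default PDU header/data ring from that ULP's own CID count (BEISCSI_GET_CID_COUNT) rather than the single controller-wide asyncpdus_per_ctrl value the old code used. A hedged, self-contained sketch of the sizing arithmetic; struct phys_addr and the per-ULP counts below are stand-ins for the firmware-derived values:

#include <stdio.h>
#include <stdint.h>

#define ULP_COUNT 2

/* stand-in for the ring entry the driver sizes against */
struct phys_addr {
	uint32_t lo;
	uint32_t hi;
};

int main(void)
{
	/* illustrative values; the driver reads these from fw_config */
	unsigned long ulp_supported = 0x3;		/* one bit per supported ULP */
	unsigned int  cid_count[ULP_COUNT] = { 128, 64 };

	for (int ulp = 0; ulp < ULP_COUNT; ulp++) {
		if (!(ulp_supported & (1UL << ulp)))
			continue;
		size_t ring_sz = cid_count[ulp] * sizeof(struct phys_addr);
		printf("ULP %d: default hdr/data ring = %zu bytes\n",
		       ulp, ring_sz);
	}
	return 0;
}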
@@ -3618,6 +3926,26 @@ static int hwi_init_port(struct beiscsi_hba *phba)
3618 goto error; 3926 goto error;
3619 } 3927 }
3620 3928
3929 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3930 uint16_t async_arr_idx = 0;
3931
3932 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3933 uint16_t cri = 0;
3934 struct hwi_async_pdu_context *pasync_ctx;
3935
3936 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
3937 phwi_ctrlr, ulp_num);
3938 for (cri = 0; cri <
3939 phba->params.cxns_per_ctrl; cri++) {
3940 if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
3941 (phwi_ctrlr, cri))
3942 pasync_ctx->cid_to_async_cri_map[
3943 phwi_ctrlr->wrb_context[cri].cid] =
3944 async_arr_idx++;
3945 }
3946 }
3947 }
3948
3621 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3949 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3622 "BM_%d : hwi_init_port success\n"); 3950 "BM_%d : hwi_init_port success\n");
3623 return 0; 3951 return 0;
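The new loop above builds, per ULP, a map from a connection's CID to a compact async-CRI index: every WRB context is walked and consecutive indices are handed out only to the connections owned by that ULP. A stand-alone sketch of the same mapping; the array sizes and field layout are illustrative, not the driver's:

#include <stdio.h>

#define NUM_CXNS 8
#define MAX_CID  64

int main(void)
{
	/* illustrative per-connection data, as kept in the driver's wrb_context */
	int cxn_ulp[NUM_CXNS] = { 0, 1, 0, 0, 1, 1, 0, 1 };
	int cxn_cid[NUM_CXNS] = { 10, 40, 11, 12, 41, 42, 13, 43 };

	int cid_to_async_cri_map[2][MAX_CID] = { { 0 } };

	for (int ulp = 0; ulp < 2; ulp++) {
		int async_idx = 0;
		for (int cri = 0; cri < NUM_CXNS; cri++) {
			if (cxn_ulp[cri] != ulp)
				continue;
			/* consecutive per-ULP indices, keyed by CID */
			cid_to_async_cri_map[ulp][cxn_cid[cri]] = async_idx++;
		}
	}

	printf("cid 42 (ULP 1) -> async cri %d\n",
	       cid_to_async_cri_map[1][42]);	/* prints 2 */
	return 0;
}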
@@ -3682,6 +4010,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
3682 (unsigned long)mem_descr->mem_array[j - 1]. 4010 (unsigned long)mem_descr->mem_array[j - 1].
3683 bus_address.u.a64.address); 4011 bus_address.u.a64.address);
3684 } 4012 }
4013
3685 kfree(mem_descr->mem_array); 4014 kfree(mem_descr->mem_array);
3686 mem_descr++; 4015 mem_descr++;
3687 } 4016 }
@@ -3721,6 +4050,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3721 struct sgl_handle *psgl_handle; 4050 struct sgl_handle *psgl_handle;
3722 struct iscsi_sge *pfrag; 4051 struct iscsi_sge *pfrag;
3723 unsigned int arr_index, i, idx; 4052 unsigned int arr_index, i, idx;
4053 unsigned int ulp_icd_start, ulp_num = 0;
3724 4054
3725 phba->io_sgl_hndl_avbl = 0; 4055 phba->io_sgl_hndl_avbl = 0;
3726 phba->eh_sgl_hndl_avbl = 0; 4056 phba->eh_sgl_hndl_avbl = 0;
@@ -3787,6 +4117,12 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3787 "\n BM_%d : mem_descr_sg->num_elements=%d\n", 4117 "\n BM_%d : mem_descr_sg->num_elements=%d\n",
3788 mem_descr_sg->num_elements); 4118 mem_descr_sg->num_elements);
3789 4119
4120 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
4121 if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
4122 break;
4123
4124 ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
4125
3790 arr_index = 0; 4126 arr_index = 0;
3791 idx = 0; 4127 idx = 0;
3792 while (idx < mem_descr_sg->num_elements) { 4128 while (idx < mem_descr_sg->num_elements) {
@@ -3805,8 +4141,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3805 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0); 4141 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3806 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); 4142 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3807 pfrag += phba->params.num_sge_per_io; 4143 pfrag += phba->params.num_sge_per_io;
3808 psgl_handle->sgl_index = 4144 psgl_handle->sgl_index = ulp_icd_start + arr_index++;
3809 phba->fw_config.iscsi_icd_start + arr_index++;
3810 } 4145 }
3811 idx++; 4146 idx++;
3812 } 4147 }
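SGL indexing is now per ULP: the iscsi_icd_start of the first supported ULP becomes the base, and each SGL handle gets ulp_icd_start + arr_index, replacing the old controller-wide iscsi_icd_start. A small sketch of that indexing, with invented values standing in for fw_config:

#include <stdio.h>

#define ULP_COUNT 2

int main(void)
{
	unsigned long ulp_supported = 0x2;		 /* only ULP 1 supported here */
	unsigned int  icd_start[ULP_COUNT] = { 0, 512 }; /* illustrative ICD bases */

	int ulp;
	for (ulp = 0; ulp < ULP_COUNT; ulp++)
		if (ulp_supported & (1UL << ulp))
			break;				 /* first supported ULP */

	unsigned int ulp_icd_start = icd_start[ulp];

	for (unsigned int arr_index = 0; arr_index < 4; arr_index++)
		printf("sgl_index[%u] = %u\n",
		       arr_index, ulp_icd_start + arr_index);
	return 0;
}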
@@ -3819,15 +4154,46 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3819 4154
3820static int hba_setup_cid_tbls(struct beiscsi_hba *phba) 4155static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3821{ 4156{
3822 int i; 4157 int ret;
4158 uint16_t i, ulp_num;
4159 struct ulp_cid_info *ptr_cid_info = NULL;
3823 4160
3824 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl, 4161 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3825 GFP_KERNEL); 4162 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
3826 if (!phba->cid_array) { 4163 ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
3827 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4164 GFP_KERNEL);
3828 "BM_%d : Failed to allocate memory in " 4165
3829 "hba_setup_cid_tbls\n"); 4166 if (!ptr_cid_info) {
3830 return -ENOMEM; 4167 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4168 "BM_%d : Failed to allocate memory"
4169 "for ULP_CID_INFO for ULP : %d\n",
4170 ulp_num);
4171 ret = -ENOMEM;
4172 goto free_memory;
4173
4174 }
4175
4176 /* Allocate memory for CID array */
4177 ptr_cid_info->cid_array = kzalloc(sizeof(void *) *
4178 BEISCSI_GET_CID_COUNT(phba,
4179 ulp_num), GFP_KERNEL);
4180 if (!ptr_cid_info->cid_array) {
4181 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4182 "BM_%d : Failed to allocate memory"
4183 "for CID_ARRAY for ULP : %d\n",
4184 ulp_num);
4185 kfree(ptr_cid_info);
4186 ptr_cid_info = NULL;
4187 ret = -ENOMEM;
4188
4189 goto free_memory;
4190 }
4191 ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
4192 phba, ulp_num);
4193
4194 /* Save the cid_info_array ptr */
4195 phba->cid_array_info[ulp_num] = ptr_cid_info;
4196 }
3831 } 4197 }
3832 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * 4198 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3833 phba->params.cxns_per_ctrl, GFP_KERNEL); 4199 phba->params.cxns_per_ctrl, GFP_KERNEL);
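hba_setup_cid_tbls() now allocates one ulp_cid_info (plus its CID array) per supported ULP and, on any failure, unwinds through a single free_memory label instead of freeing ad hoc. A hedged sketch of the same allocate-or-unwind pattern; the types and sizes are placeholders rather than the driver's:

#include <stdio.h>
#include <stdlib.h>

#define ULP_COUNT 2

struct cid_info {
	unsigned short *cid_array;
	unsigned int avlbl_cids;
};

static struct cid_info *cid_info_tbl[ULP_COUNT];

static int setup_cid_tbls(unsigned long ulp_supported,
			  const unsigned int *cid_count)
{
	int ulp, ret = 0;

	for (ulp = 0; ulp < ULP_COUNT; ulp++) {
		if (!(ulp_supported & (1UL << ulp)))
			continue;
		cid_info_tbl[ulp] = calloc(1, sizeof(*cid_info_tbl[ulp]));
		if (!cid_info_tbl[ulp]) {
			ret = -1;
			goto free_memory;
		}
		cid_info_tbl[ulp]->cid_array =
			calloc(cid_count[ulp], sizeof(unsigned short));
		if (!cid_info_tbl[ulp]->cid_array) {
			ret = -1;
			goto free_memory;
		}
		cid_info_tbl[ulp]->avlbl_cids = cid_count[ulp];
	}
	return 0;

free_memory:
	/* unwind every per-ULP allocation made so far */
	for (ulp = 0; ulp < ULP_COUNT; ulp++) {
		if (cid_info_tbl[ulp]) {
			free(cid_info_tbl[ulp]->cid_array);
			free(cid_info_tbl[ulp]);
			cid_info_tbl[ulp] = NULL;
		}
	}
	return ret;
}

int main(void)
{
	unsigned int counts[ULP_COUNT] = { 128, 64 };
	printf("setup: %d\n", setup_cid_tbls(0x3, counts));
	return 0;
}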
@@ -3835,9 +4201,9 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3835 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4201 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3836 "BM_%d : Failed to allocate memory in " 4202 "BM_%d : Failed to allocate memory in "
3837 "hba_setup_cid_tbls\n"); 4203 "hba_setup_cid_tbls\n");
3838 kfree(phba->cid_array); 4204 ret = -ENOMEM;
3839 phba->cid_array = NULL; 4205
3840 return -ENOMEM; 4206 goto free_memory;
3841 } 4207 }
3842 4208
3843 phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) * 4209 phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
@@ -3847,18 +4213,44 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3847 "BM_%d : Failed to allocate memory in" 4213 "BM_%d : Failed to allocate memory in"
3848 "hba_setup_cid_tbls\n"); 4214 "hba_setup_cid_tbls\n");
3849 4215
3850 kfree(phba->cid_array);
3851 kfree(phba->ep_array); 4216 kfree(phba->ep_array);
3852 phba->cid_array = NULL;
3853 phba->ep_array = NULL; 4217 phba->ep_array = NULL;
3854 return -ENOMEM; 4218 ret = -ENOMEM;
4219 }
4220
4221 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
4222 ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num;
4223
4224 ptr_cid_info = phba->cid_array_info[ulp_num];
4225 ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] =
4226 phba->phwi_ctrlr->wrb_context[i].cid;
4227
3855 } 4228 }
3856 4229
3857 for (i = 0; i < phba->params.cxns_per_ctrl; i++) 4230 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3858 phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid; 4231 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4232 ptr_cid_info = phba->cid_array_info[ulp_num];
3859 4233
3860 phba->avlbl_cids = phba->params.cxns_per_ctrl; 4234 ptr_cid_info->cid_alloc = 0;
4235 ptr_cid_info->cid_free = 0;
4236 }
4237 }
3861 return 0; 4238 return 0;
4239
4240free_memory:
4241 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4242 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4243 ptr_cid_info = phba->cid_array_info[ulp_num];
4244
4245 if (ptr_cid_info) {
4246 kfree(ptr_cid_info->cid_array);
4247 kfree(ptr_cid_info);
4248 phba->cid_array_info[ulp_num] = NULL;
4249 }
4250 }
4251 }
4252
4253 return ret;
3862} 4254}
3863 4255
3864static void hwi_enable_intr(struct beiscsi_hba *phba) 4256static void hwi_enable_intr(struct beiscsi_hba *phba)
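Once the WRB contexts have been walked, every connection's CID sits in its owning ULP's array and the cid_alloc/cid_free cursors are reset (the fill loop above advanced cid_alloc). Presumably these two cursors then drive a simple ring-style CID allocator over cid_array; the sketch below is an assumption about that usage, not code taken from the driver:

#include <stdio.h>

#define CID_COUNT 4

struct cid_ring {
	unsigned short cid_array[CID_COUNT];
	unsigned int cid_alloc;	/* next slot to hand out */
	unsigned int cid_free;	/* next slot to refill */
	unsigned int avlbl_cids;
};

/* assumed ring-allocator use of the cid_alloc/cid_free cursors */
static int cid_get(struct cid_ring *r)
{
	if (!r->avlbl_cids)
		return -1;
	r->avlbl_cids--;
	return r->cid_array[r->cid_alloc++ % CID_COUNT];
}

static void cid_put(struct cid_ring *r, unsigned short cid)
{
	r->cid_array[r->cid_free++ % CID_COUNT] = cid;
	r->avlbl_cids++;
}

int main(void)
{
	struct cid_ring r = { .cid_array = { 10, 11, 12, 13 },
			      .avlbl_cids = CID_COUNT };
	int cid = cid_get(&r);
	printf("got cid %d\n", cid);
	cid_put(&r, (unsigned short)cid);
	return 0;
}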
@@ -4113,20 +4505,39 @@ static void hwi_purge_eq(struct beiscsi_hba *phba)
4113 4505
4114static void beiscsi_clean_port(struct beiscsi_hba *phba) 4506static void beiscsi_clean_port(struct beiscsi_hba *phba)
4115{ 4507{
4116 int mgmt_status; 4508 int mgmt_status, ulp_num;
4117 4509 struct ulp_cid_info *ptr_cid_info = NULL;
4118 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0); 4510
4119 if (mgmt_status) 4511 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4120 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 4512 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4121 "BM_%d : mgmt_epfw_cleanup FAILED\n"); 4513 mgmt_status = mgmt_epfw_cleanup(phba, ulp_num);
4514 if (mgmt_status)
4515 beiscsi_log(phba, KERN_WARNING,
4516 BEISCSI_LOG_INIT,
4517 "BM_%d : mgmt_epfw_cleanup FAILED"
4518 " for ULP_%d\n", ulp_num);
4519 }
4520 }
4122 4521
4123 hwi_purge_eq(phba); 4522 hwi_purge_eq(phba);
4124 hwi_cleanup(phba); 4523 hwi_cleanup(phba);
4125 kfree(phba->io_sgl_hndl_base); 4524 kfree(phba->io_sgl_hndl_base);
4126 kfree(phba->eh_sgl_hndl_base); 4525 kfree(phba->eh_sgl_hndl_base);
4127 kfree(phba->cid_array);
4128 kfree(phba->ep_array); 4526 kfree(phba->ep_array);
4129 kfree(phba->conn_table); 4527 kfree(phba->conn_table);
4528
4529 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4530 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4531 ptr_cid_info = phba->cid_array_info[ulp_num];
4532
4533 if (ptr_cid_info) {
4534 kfree(ptr_cid_info->cid_array);
4535 kfree(ptr_cid_info);
4536 phba->cid_array_info[ulp_num] = NULL;
4537 }
4538 }
4539 }
4540
4130} 4541}
4131 4542
4132/** 4543/**
@@ -4255,8 +4666,8 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
4255 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) 4666 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
4256 << DB_DEF_PDU_WRB_INDEX_SHIFT; 4667 << DB_DEF_PDU_WRB_INDEX_SHIFT;
4257 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4668 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4258 4669 iowrite32(doorbell, phba->db_va +
4259 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 4670 beiscsi_conn->doorbell_offset);
4260} 4671}
4261 4672
4262static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, 4673static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
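This hunk, and the three transmit hunks that follow, switch the doorbell write from the fixed DB_TXULP0_OFFSET to a per-connection beiscsi_conn->doorbell_offset, so each ULP rings its own TX doorbell region. The 32-bit value itself still packs the WRB index and a posted count; the sketch below uses made-up mask/shift constants purely for illustration (the real layout comes from the driver's DB_DEF_PDU_* macros):

#include <stdio.h>
#include <stdint.h>

/* illustrative field layout, not the driver's DB_DEF_PDU_* values */
#define WRB_INDEX_MASK	 0xffu
#define WRB_INDEX_SHIFT	 16
#define NUM_POSTED_SHIFT 24

static uint32_t make_tx_doorbell(uint32_t base, uint16_t wrb_index)
{
	uint32_t db = base;		/* connection-specific bits set earlier */
	db |= (uint32_t)(wrb_index & WRB_INDEX_MASK) << WRB_INDEX_SHIFT;
	db |= 1u << NUM_POSTED_SHIFT;	/* one WRB posted */
	return db;
}

int main(void)
{
	/* the driver would iowrite32() this at db_va + doorbell_offset */
	printf("doorbell = 0x%08x\n", make_tx_doorbell(0x7, 3));
	return 0;
}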
@@ -4481,7 +4892,8 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
4481 DB_DEF_PDU_WRB_INDEX_MASK) << 4892 DB_DEF_PDU_WRB_INDEX_MASK) <<
4482 DB_DEF_PDU_WRB_INDEX_SHIFT; 4893 DB_DEF_PDU_WRB_INDEX_SHIFT;
4483 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4894 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4484 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 4895 iowrite32(doorbell, phba->db_va +
4896 beiscsi_conn->doorbell_offset);
4485 return 0; 4897 return 0;
4486} 4898}
4487 4899
@@ -4536,7 +4948,8 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
4536 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4948 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4537 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4949 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4538 4950
4539 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 4951 iowrite32(doorbell, phba->db_va +
4952 beiscsi_conn->doorbell_offset);
4540 return 0; 4953 return 0;
4541} 4954}
4542 4955
@@ -4638,7 +5051,8 @@ static int beiscsi_mtask(struct iscsi_task *task)
4638 doorbell |= (io_task->pwrb_handle->wrb_index & 5051 doorbell |= (io_task->pwrb_handle->wrb_index &
4639 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 5052 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4640 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 5053 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4641 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 5054 iowrite32(doorbell, phba->db_va +
5055 beiscsi_conn->doorbell_offset);
4642 return 0; 5056 return 0;
4643} 5057}
4644 5058
@@ -4663,8 +5077,12 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
4663 struct beiscsi_hba *phba = NULL; 5077 struct beiscsi_hba *phba = NULL;
4664 5078
4665 phba = ((struct beiscsi_conn *)conn->dd_data)->phba; 5079 phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
4666 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO, 5080 beiscsi_log(phba, KERN_ERR,
4667 "BM_%d : scsi_dma_map Failed\n"); 5081 BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
5082 "BM_%d : scsi_dma_map Failed "
5083 "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n",
5084 be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt),
5085 io_task->libiscsi_itt, scsi_bufflen(sc));
4668 5086
4669 return num_sg; 5087 return num_sg;
4670 } 5088 }
@@ -4769,10 +5187,12 @@ void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
4769/* 5187/*
4770 * beiscsi_quiesce()- Cleanup Driver resources 5188 * beiscsi_quiesce()- Cleanup Driver resources
4771 * @phba: Instance Priv structure 5189 * @phba: Instance Priv structure
 5190 * @unload_state: Clean or EEH unload state
4772 * 5191 *
4773 * Free the OS and HW resources held by the driver 5192 * Free the OS and HW resources held by the driver
4774 **/ 5193 **/
4775static void beiscsi_quiesce(struct beiscsi_hba *phba) 5194static void beiscsi_quiesce(struct beiscsi_hba *phba,
5195 uint32_t unload_state)
4776{ 5196{
4777 struct hwi_controller *phwi_ctrlr; 5197 struct hwi_controller *phwi_ctrlr;
4778 struct hwi_context_memory *phwi_context; 5198 struct hwi_context_memory *phwi_context;
@@ -4785,28 +5205,37 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba)
4785 if (phba->msix_enabled) { 5205 if (phba->msix_enabled) {
4786 for (i = 0; i <= phba->num_cpus; i++) { 5206 for (i = 0; i <= phba->num_cpus; i++) {
4787 msix_vec = phba->msix_entries[i].vector; 5207 msix_vec = phba->msix_entries[i].vector;
5208 synchronize_irq(msix_vec);
4788 free_irq(msix_vec, &phwi_context->be_eq[i]); 5209 free_irq(msix_vec, &phwi_context->be_eq[i]);
4789 kfree(phba->msi_name[i]); 5210 kfree(phba->msi_name[i]);
4790 } 5211 }
4791 } else 5212 } else
4792 if (phba->pcidev->irq) 5213 if (phba->pcidev->irq) {
5214 synchronize_irq(phba->pcidev->irq);
4793 free_irq(phba->pcidev->irq, phba); 5215 free_irq(phba->pcidev->irq, phba);
5216 }
4794 pci_disable_msix(phba->pcidev); 5217 pci_disable_msix(phba->pcidev);
4795 destroy_workqueue(phba->wq); 5218
4796 if (blk_iopoll_enabled) 5219 if (blk_iopoll_enabled)
4797 for (i = 0; i < phba->num_cpus; i++) { 5220 for (i = 0; i < phba->num_cpus; i++) {
4798 pbe_eq = &phwi_context->be_eq[i]; 5221 pbe_eq = &phwi_context->be_eq[i];
4799 blk_iopoll_disable(&pbe_eq->iopoll); 5222 blk_iopoll_disable(&pbe_eq->iopoll);
4800 } 5223 }
4801 5224
4802 beiscsi_clean_port(phba); 5225 if (unload_state == BEISCSI_CLEAN_UNLOAD) {
4803 beiscsi_free_mem(phba); 5226 destroy_workqueue(phba->wq);
5227 beiscsi_clean_port(phba);
5228 beiscsi_free_mem(phba);
4804 5229
4805 beiscsi_unmap_pci_function(phba); 5230 beiscsi_unmap_pci_function(phba);
4806 pci_free_consistent(phba->pcidev, 5231 pci_free_consistent(phba->pcidev,
4807 phba->ctrl.mbox_mem_alloced.size, 5232 phba->ctrl.mbox_mem_alloced.size,
4808 phba->ctrl.mbox_mem_alloced.va, 5233 phba->ctrl.mbox_mem_alloced.va,
4809 phba->ctrl.mbox_mem_alloced.dma); 5234 phba->ctrl.mbox_mem_alloced.dma);
5235 } else {
5236 hwi_purge_eq(phba);
5237 hwi_cleanup(phba);
5238 }
4810 5239
4811 cancel_delayed_work_sync(&phba->beiscsi_hw_check_task); 5240 cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
4812} 5241}
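beiscsi_quiesce() now branches on unload_state: a clean unload tears down the workqueue, port, driver memory and PCI mappings, while an EEH unload only drains and cleans the hardware queues so beiscsi_eeh_resume() can rebuild them. A compact sketch of that two-path teardown; the helpers are stubs named here for illustration:

#include <stdio.h>

enum unload_state { CLEAN_UNLOAD, EEH_UNLOAD };

/* stub helpers standing in for the driver's teardown routines */
static void free_irqs(void)		{ puts("sync + free irqs"); }
static void destroy_wq(void)		{ puts("destroy workqueue"); }
static void clean_port(void)		{ puts("clean port, free memory"); }
static void unmap_pci(void)		{ puts("unmap PCI, free mbox"); }
static void purge_and_clean_hw(void)	{ puts("purge EQs, hwi_cleanup"); }

static void quiesce(enum unload_state state)
{
	free_irqs();			/* common to both paths */
	if (state == CLEAN_UNLOAD) {
		destroy_wq();
		clean_port();
		unmap_pci();
	} else {
		/* EEH: keep host memory intact for the resume path */
		purge_and_clean_hw();
	}
}

int main(void)
{
	quiesce(EEH_UNLOAD);
	return 0;
}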
@@ -4823,11 +5252,13 @@ static void beiscsi_remove(struct pci_dev *pcidev)
4823 } 5252 }
4824 5253
4825 beiscsi_destroy_def_ifaces(phba); 5254 beiscsi_destroy_def_ifaces(phba);
4826 beiscsi_quiesce(phba); 5255 beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
4827 iscsi_boot_destroy_kset(phba->boot_kset); 5256 iscsi_boot_destroy_kset(phba->boot_kset);
4828 iscsi_host_remove(phba->shost); 5257 iscsi_host_remove(phba->shost);
4829 pci_dev_put(phba->pcidev); 5258 pci_dev_put(phba->pcidev);
4830 iscsi_host_free(phba->shost); 5259 iscsi_host_free(phba->shost);
5260 pci_disable_pcie_error_reporting(pcidev);
5261 pci_set_drvdata(pcidev, NULL);
4831 pci_disable_device(pcidev); 5262 pci_disable_device(pcidev);
4832} 5263}
4833 5264
@@ -4842,7 +5273,7 @@ static void beiscsi_shutdown(struct pci_dev *pcidev)
4842 return; 5273 return;
4843 } 5274 }
4844 5275
4845 beiscsi_quiesce(phba); 5276 beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
4846 pci_disable_device(pcidev); 5277 pci_disable_device(pcidev);
4847} 5278}
4848 5279
@@ -4880,6 +5311,167 @@ beiscsi_hw_health_check(struct work_struct *work)
4880 msecs_to_jiffies(1000)); 5311 msecs_to_jiffies(1000));
4881} 5312}
4882 5313
5314
5315static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
5316 pci_channel_state_t state)
5317{
5318 struct beiscsi_hba *phba = NULL;
5319
5320 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5321 phba->state |= BE_ADAPTER_PCI_ERR;
5322
5323 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5324 "BM_%d : EEH error detected\n");
5325
5326 beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD);
5327
5328 if (state == pci_channel_io_perm_failure) {
5329 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5330 "BM_%d : EEH : State PERM Failure");
5331 return PCI_ERS_RESULT_DISCONNECT;
5332 }
5333
5334 pci_disable_device(pdev);
5335
5336 /* The error could cause the FW to trigger a flash debug dump.
5337 * Resetting the card while flash dump is in progress
5338 * can cause it not to recover; wait for it to finish.
5339 * Wait only for first function as it is needed only once per
5340 * adapter.
5341 **/
5342 if (pdev->devfn == 0)
5343 ssleep(30);
5344
5345 return PCI_ERS_RESULT_NEED_RESET;
5346}
5347
5348static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
5349{
5350 struct beiscsi_hba *phba = NULL;
5351 int status = 0;
5352
5353 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5354
5355 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5356 "BM_%d : EEH Reset\n");
5357
5358 status = pci_enable_device(pdev);
5359 if (status)
5360 return PCI_ERS_RESULT_DISCONNECT;
5361
5362 pci_set_master(pdev);
5363 pci_set_power_state(pdev, PCI_D0);
5364 pci_restore_state(pdev);
5365
5366 /* Wait for the CHIP Reset to complete */
5367 status = be_chk_reset_complete(phba);
5368 if (!status) {
5369 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5370 "BM_%d : EEH Reset Completed\n");
5371 } else {
5372 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5373 "BM_%d : EEH Reset Completion Failure\n");
5374 return PCI_ERS_RESULT_DISCONNECT;
5375 }
5376
5377 pci_cleanup_aer_uncorrect_error_status(pdev);
5378 return PCI_ERS_RESULT_RECOVERED;
5379}
5380
5381static void beiscsi_eeh_resume(struct pci_dev *pdev)
5382{
5383 int ret = 0, i;
5384 struct be_eq_obj *pbe_eq;
5385 struct beiscsi_hba *phba = NULL;
5386 struct hwi_controller *phwi_ctrlr;
5387 struct hwi_context_memory *phwi_context;
5388
5389 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5390 pci_save_state(pdev);
5391
5392 if (enable_msix)
5393 find_num_cpus(phba);
5394 else
5395 phba->num_cpus = 1;
5396
5397 if (enable_msix) {
5398 beiscsi_msix_enable(phba);
5399 if (!phba->msix_enabled)
5400 phba->num_cpus = 1;
5401 }
5402
5403 ret = beiscsi_cmd_reset_function(phba);
5404 if (ret) {
5405 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5406 "BM_%d : Reset Failed\n");
5407 goto ret_err;
5408 }
5409
5410 ret = be_chk_reset_complete(phba);
5411 if (ret) {
5412 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5413 "BM_%d : Failed to get out of reset.\n");
5414 goto ret_err;
5415 }
5416
5417 beiscsi_get_params(phba);
5418 phba->shost->max_id = phba->params.cxns_per_ctrl;
5419 phba->shost->can_queue = phba->params.ios_per_ctrl;
5420 ret = hwi_init_controller(phba);
5421
5422 for (i = 0; i < MAX_MCC_CMD; i++) {
5423 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
5424 phba->ctrl.mcc_tag[i] = i + 1;
5425 phba->ctrl.mcc_numtag[i + 1] = 0;
5426 phba->ctrl.mcc_tag_available++;
5427 }
5428
5429 phwi_ctrlr = phba->phwi_ctrlr;
5430 phwi_context = phwi_ctrlr->phwi_ctxt;
5431
5432 if (blk_iopoll_enabled) {
5433 for (i = 0; i < phba->num_cpus; i++) {
5434 pbe_eq = &phwi_context->be_eq[i];
5435 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
5436 be_iopoll);
5437 blk_iopoll_enable(&pbe_eq->iopoll);
5438 }
5439
5440 i = (phba->msix_enabled) ? i : 0;
5441 /* Work item for MCC handling */
5442 pbe_eq = &phwi_context->be_eq[i];
5443 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
5444 } else {
5445 if (phba->msix_enabled) {
5446 for (i = 0; i <= phba->num_cpus; i++) {
5447 pbe_eq = &phwi_context->be_eq[i];
5448 INIT_WORK(&pbe_eq->work_cqs,
5449 beiscsi_process_all_cqs);
5450 }
5451 } else {
5452 pbe_eq = &phwi_context->be_eq[0];
5453 INIT_WORK(&pbe_eq->work_cqs,
5454 beiscsi_process_all_cqs);
5455 }
5456 }
5457
5458 ret = beiscsi_init_irqs(phba);
5459 if (ret < 0) {
5460 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5461 "BM_%d : beiscsi_eeh_resume - "
5462 "Failed to beiscsi_init_irqs\n");
5463 goto ret_err;
5464 }
5465
5466 hwi_enable_intr(phba);
5467 phba->state &= ~BE_ADAPTER_PCI_ERR;
5468
5469 return;
5470ret_err:
5471 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5472 "BM_%d : AER EEH Resume Failed\n");
5473}
5474
4883static int beiscsi_dev_probe(struct pci_dev *pcidev, 5475static int beiscsi_dev_probe(struct pci_dev *pcidev,
4884 const struct pci_device_id *id) 5476 const struct pci_device_id *id)
4885{ 5477{
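The three callbacks added above implement the standard PCI AER/EEH recovery handshake: error_detected quiesces the adapter (EEH unload) and asks for a slot reset, or disconnects on a permanent channel failure; slot_reset re-enables the device and waits for the chip reset to complete; resume re-creates EQs, IRQs and MCC state before clearing BE_ADAPTER_PCI_ERR. A stand-alone sketch of the decision logic; the enums below merely mirror the kernel's channel-state and pci_ers_result_t values for illustration:

#include <stdio.h>

/* illustration-only mirrors of the kernel's channel state / result enums */
enum chan_state { IO_NORMAL, IO_FROZEN, IO_PERM_FAILURE };
enum ers_result { ERS_NEED_RESET, ERS_DISCONNECT, ERS_RECOVERED };

static enum ers_result error_detected(enum chan_state state)
{
	/* HW queues are quiesced first (EEH unload) */
	if (state == IO_PERM_FAILURE)
		return ERS_DISCONNECT;
	/* device is disabled; the driver also sleeps on function 0 in
	 * case a firmware flash dump is still running */
	return ERS_NEED_RESET;
}

static enum ers_result slot_reset(int chip_reset_ok)
{
	/* after pci_enable_device()/pci_restore_state() */
	return chip_reset_ok ? ERS_RECOVERED : ERS_DISCONNECT;
}

int main(void)
{
	printf("detected -> %d\n", error_detected(IO_FROZEN));
	printf("reset    -> %d\n", slot_reset(1));
	/* on ERS_RECOVERED the resume callback rebuilds EQs and IRQs */
	return 0;
}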
@@ -4887,7 +5479,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
4887 struct hwi_controller *phwi_ctrlr; 5479 struct hwi_controller *phwi_ctrlr;
4888 struct hwi_context_memory *phwi_context; 5480 struct hwi_context_memory *phwi_context;
4889 struct be_eq_obj *pbe_eq; 5481 struct be_eq_obj *pbe_eq;
4890 int ret, i; 5482 int ret = 0, i;
4891 5483
4892 ret = beiscsi_enable_pci(pcidev); 5484 ret = beiscsi_enable_pci(pcidev);
4893 if (ret < 0) { 5485 if (ret < 0) {
@@ -4903,10 +5495,20 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
4903 goto disable_pci; 5495 goto disable_pci;
4904 } 5496 }
4905 5497
5498 /* Enable EEH reporting */
5499 ret = pci_enable_pcie_error_reporting(pcidev);
5500 if (ret)
5501 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5502 "BM_%d : PCIe Error Reporting "
5503 "Enabling Failed\n");
5504
5505 pci_save_state(pcidev);
5506
4906 /* Initialize Driver configuration Parameters */ 5507 /* Initialize Driver configuration Parameters */
4907 beiscsi_hba_attrs_init(phba); 5508 beiscsi_hba_attrs_init(phba);
4908 5509
4909 phba->fw_timeout = false; 5510 phba->fw_timeout = false;
5511 phba->mac_addr_set = false;
4910 5512
4911 5513
4912 switch (pcidev->device) { 5514 switch (pcidev->device) {
@@ -4929,20 +5531,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
4929 phba->generation = 0; 5531 phba->generation = 0;
4930 } 5532 }
4931 5533
4932 if (enable_msix)
4933 find_num_cpus(phba);
4934 else
4935 phba->num_cpus = 1;
4936
4937 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4938 "BM_%d : num_cpus = %d\n",
4939 phba->num_cpus);
4940
4941 if (enable_msix) {
4942 beiscsi_msix_enable(phba);
4943 if (!phba->msix_enabled)
4944 phba->num_cpus = 1;
4945 }
4946 ret = be_ctrl_init(phba, pcidev); 5534 ret = be_ctrl_init(phba, pcidev);
4947 if (ret) { 5535 if (ret) {
4948 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5536 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -4954,27 +5542,43 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
4954 ret = beiscsi_cmd_reset_function(phba); 5542 ret = beiscsi_cmd_reset_function(phba);
4955 if (ret) { 5543 if (ret) {
4956 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5544 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4957 "BM_%d : Reset Failed. Aborting Crashdump\n"); 5545 "BM_%d : Reset Failed\n");
4958 goto hba_free; 5546 goto hba_free;
4959 } 5547 }
4960 ret = be_chk_reset_complete(phba); 5548 ret = be_chk_reset_complete(phba);
4961 if (ret) { 5549 if (ret) {
4962 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5550 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4963 "BM_%d : Failed to get out of reset." 5551 "BM_%d : Failed to get out of reset.\n");
4964 "Aborting Crashdump\n");
4965 goto hba_free; 5552 goto hba_free;
4966 } 5553 }
4967 5554
4968 spin_lock_init(&phba->io_sgl_lock); 5555 spin_lock_init(&phba->io_sgl_lock);
4969 spin_lock_init(&phba->mgmt_sgl_lock); 5556 spin_lock_init(&phba->mgmt_sgl_lock);
4970 spin_lock_init(&phba->isr_lock); 5557 spin_lock_init(&phba->isr_lock);
5558 spin_lock_init(&phba->async_pdu_lock);
4971 ret = mgmt_get_fw_config(&phba->ctrl, phba); 5559 ret = mgmt_get_fw_config(&phba->ctrl, phba);
4972 if (ret != 0) { 5560 if (ret != 0) {
4973 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5561 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4974 "BM_%d : Error getting fw config\n"); 5562 "BM_%d : Error getting fw config\n");
4975 goto free_port; 5563 goto free_port;
4976 } 5564 }
4977 phba->shost->max_id = phba->fw_config.iscsi_cid_count; 5565
5566 if (enable_msix)
5567 find_num_cpus(phba);
5568 else
5569 phba->num_cpus = 1;
5570
5571 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
5572 "BM_%d : num_cpus = %d\n",
5573 phba->num_cpus);
5574
5575 if (enable_msix) {
5576 beiscsi_msix_enable(phba);
5577 if (!phba->msix_enabled)
5578 phba->num_cpus = 1;
5579 }
5580
5581 phba->shost->max_id = phba->params.cxns_per_ctrl;
4978 beiscsi_get_params(phba); 5582 beiscsi_get_params(phba);
4979 phba->shost->can_queue = phba->params.ios_per_ctrl; 5583 phba->shost->can_queue = phba->params.ios_per_ctrl;
4980 ret = beiscsi_init_port(phba); 5584 ret = beiscsi_init_port(phba);
@@ -4985,7 +5589,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
4985 goto free_port; 5589 goto free_port;
4986 } 5590 }
4987 5591
4988 for (i = 0; i < MAX_MCC_CMD ; i++) { 5592 for (i = 0; i < MAX_MCC_CMD; i++) {
4989 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 5593 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
4990 phba->ctrl.mcc_tag[i] = i + 1; 5594 phba->ctrl.mcc_tag[i] = i + 1;
4991 phba->ctrl.mcc_numtag[i + 1] = 0; 5595 phba->ctrl.mcc_numtag[i + 1] = 0;
@@ -5089,6 +5693,12 @@ disable_pci:
5089 return ret; 5693 return ret;
5090} 5694}
5091 5695
5696static struct pci_error_handlers beiscsi_eeh_handlers = {
5697 .error_detected = beiscsi_eeh_err_detected,
5698 .slot_reset = beiscsi_eeh_reset,
5699 .resume = beiscsi_eeh_resume,
5700};
5701
5092struct iscsi_transport beiscsi_iscsi_transport = { 5702struct iscsi_transport beiscsi_iscsi_transport = {
5093 .owner = THIS_MODULE, 5703 .owner = THIS_MODULE,
5094 .name = DRV_NAME, 5704 .name = DRV_NAME,
@@ -5127,7 +5737,8 @@ static struct pci_driver beiscsi_pci_driver = {
5127 .probe = beiscsi_dev_probe, 5737 .probe = beiscsi_dev_probe,
5128 .remove = beiscsi_remove, 5738 .remove = beiscsi_remove,
5129 .shutdown = beiscsi_shutdown, 5739 .shutdown = beiscsi_shutdown,
5130 .id_table = beiscsi_pci_id_table 5740 .id_table = beiscsi_pci_id_table,
5741 .err_handler = &beiscsi_eeh_handlers
5131}; 5742};
5132 5743
5133 5744