author		James Smart <james.smart@emulex.com>	2012-05-09 21:16:12 -0400
committer	James Bottomley <JBottomley@Parallels.com>	2012-05-17 05:29:22 -0400
commit		8a9d2e8003040d2e1cd24ac5e83bb30b68f7f488 (patch)
tree		a0fb34a954b174681a7af4706dee3823db4260ce /drivers/scsi/lpfc/lpfc_sli.c
parent		587a37f6e007e97e4f88f10a51f5d0bc62eb6e0a (diff)
[SCSI] lpfc 8.3.31: Correct handling of SLI4-port XRI resource-provisioning profile change
Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli.c | 607
1 file changed, 196 insertions(+), 411 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index dbaf5b963bff..b887c9c5372a 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -67,6 +67,8 @@ static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
 				       struct hbq_dmabuf *);
 static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
 				    struct lpfc_cqe *);
+static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
+				       int);
 
 static IOCB_t *
 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -4967,7 +4969,12 @@ lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
 				 &rsrc_info->u.rsp);
 	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
 			     &rsrc_info->u.rsp);
- err_exit:
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"3162 Retrieved extents type-%d from port: count:%d, "
+			"size:%d\n", type, *extnt_count, *extnt_size);
+
+err_exit:
 	mempool_free(mbox, phba->mbox_mem_pool);
 	return rc;
 }
@@ -5051,7 +5058,7 @@ lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
  * 0: if successful
  **/
 static int
-lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
+lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
 			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
 {
 	int rc = 0;
@@ -5060,7 +5067,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
 	uint32_t alloc_len, mbox_tmo;
 
 	/* Calculate the total requested length of the dma memory */
-	req_len = *extnt_cnt * sizeof(uint16_t);
+	req_len = extnt_cnt * sizeof(uint16_t);
 
 	/*
 	 * Calculate the size of an embedded mailbox. The uint32_t
@@ -5075,7 +5082,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
 	 */
 	*emb = LPFC_SLI4_MBX_EMBED;
 	if (req_len > emb_len) {
-		req_len = *extnt_cnt * sizeof(uint16_t) +
+		req_len = extnt_cnt * sizeof(uint16_t) +
 			sizeof(union lpfc_sli4_cfg_shdr) +
 			sizeof(uint32_t);
 		*emb = LPFC_SLI4_MBX_NEMBED;
@@ -5091,7 +5098,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
5091 "size (x%x)\n", alloc_len, req_len); 5098 "size (x%x)\n", alloc_len, req_len);
5092 return -ENOMEM; 5099 return -ENOMEM;
5093 } 5100 }
5094 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb); 5101 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5095 if (unlikely(rc)) 5102 if (unlikely(rc))
5096 return -EIO; 5103 return -EIO;
5097 5104
@@ -5149,17 +5156,15 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
 		return -ENOMEM;
 	}
 
-	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT,
-			"2903 Available Resource Extents "
-			"for resource type 0x%x: Count: 0x%x, "
-			"Size 0x%x\n", type, rsrc_cnt,
-			rsrc_size);
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
+			"2903 Post resource extents type-0x%x: "
+			"count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
 
 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)
 		return -ENOMEM;
 
-	rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox);
+	rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
 	if (unlikely(rc)) {
 		rc = -EIO;
 		goto err_exit;
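
Note on the refactor above: lpfc_sli4_cfg_post_extnts() previously took the extent count through a uint16_t pointer even though it only reads the value, so the caller passed &rsrc_cnt for no benefit. The hunks above change the parameter to a plain uint16_t and the call site to pass rsrc_cnt by value. A minimal user-space sketch of the same idea follows; post_extents() and the type constant are hypothetical stand-ins, not driver code.

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch: the callee only reads the count, so take it by value. */
    static int post_extents(uint16_t extnt_cnt, uint16_t type)
    {
            /* mirrors: req_len = extnt_cnt * sizeof(uint16_t); */
            uint32_t req_len = extnt_cnt * (uint32_t)sizeof(uint16_t);

            printf("posting %u extents of type %u, payload %u bytes\n",
                   (unsigned)extnt_cnt, (unsigned)type, (unsigned)req_len);
            return 0;
    }

    int main(void)
    {
            uint16_t rsrc_cnt = 8;

            /* the call site now passes the value, not &rsrc_cnt */
            return post_extents(rsrc_cnt, 0x2);
    }

Passing by value also makes it obvious to the reader that the callee cannot change the caller's count.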
@@ -5250,6 +5255,7 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
 		rc = -ENOMEM;
 		goto err_exit;
 	}
+	phba->sli4_hba.max_cfg_param.xri_used = 0;
 	phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
 					 sizeof(uint16_t),
 					 GFP_KERNEL);
@@ -5420,7 +5426,6 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
 	case LPFC_RSC_TYPE_FCOE_XRI:
 		kfree(phba->sli4_hba.xri_bmask);
 		kfree(phba->sli4_hba.xri_ids);
-		bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
 				&phba->sli4_hba.lpfc_xri_blk_list, list) {
 			list_del_init(&rsrc_blk->list);
@@ -5612,7 +5617,6 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
 		goto free_vpi_ids;
 	}
 	phba->sli4_hba.max_cfg_param.xri_used = 0;
-	phba->sli4_hba.xri_count = 0;
 	phba->sli4_hba.xri_ids = kzalloc(count *
 					 sizeof(uint16_t),
 					 GFP_KERNEL);
@@ -5694,7 +5698,6 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
 	bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
 	kfree(phba->sli4_hba.xri_bmask);
 	kfree(phba->sli4_hba.xri_ids);
-	bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
 	kfree(phba->sli4_hba.vfi_bmask);
 	kfree(phba->sli4_hba.vfi_ids);
 	bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
@@ -5853,6 +5856,149 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
 }
 
 /**
+ * lpfc_sli4_repost_els_sgl_list - Repost the els buffer sgl pages as a block
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine walks the list of els buffers that have been allocated and
+ * reposts them to the port by using SGL block post. This is needed after a
+ * pci_function_reset/warm_start or start. It attempts to construct blocks
+ * of els buffer sgls which contain contiguous xris and uses the non-embedded
+ * SGL block post mailbox commands to post them to the port. For a single els
+ * buffer sgl with a non-contiguous xri, it shall use the embedded SGL post
+ * mailbox command for posting.
+ *
+ * Returns: 0 = success, non-zero failure.
+ **/
+static int
+lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
+{
+	struct lpfc_sglq *sglq_entry = NULL;
+	struct lpfc_sglq *sglq_entry_next = NULL;
+	struct lpfc_sglq *sglq_entry_first = NULL;
+	int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
+	int last_xritag = NO_XRI;
+	LIST_HEAD(prep_sgl_list);
+	LIST_HEAD(blck_sgl_list);
+	LIST_HEAD(allc_sgl_list);
+	LIST_HEAD(post_sgl_list);
+	LIST_HEAD(free_sgl_list);
+
+	spin_lock(&phba->hbalock);
+	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
+	spin_unlock(&phba->hbalock);
+
+	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
+				 &allc_sgl_list, list) {
+		list_del_init(&sglq_entry->list);
+		block_cnt++;
+		if ((last_xritag != NO_XRI) &&
+		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
+			/* a hole in xri block, form a sgl posting block */
+			list_splice_init(&prep_sgl_list, &blck_sgl_list);
+			post_cnt = block_cnt - 1;
+			/* prepare list for next posting block */
+			list_add_tail(&sglq_entry->list, &prep_sgl_list);
+			block_cnt = 1;
+		} else {
+			/* prepare list for next posting block */
+			list_add_tail(&sglq_entry->list, &prep_sgl_list);
+			/* enough sgls for non-embed sgl mbox command */
+			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+				list_splice_init(&prep_sgl_list,
+						 &blck_sgl_list);
+				post_cnt = block_cnt;
+				block_cnt = 0;
+			}
+		}
+		num_posted++;
+
+		/* keep track of last sgl's xritag */
+		last_xritag = sglq_entry->sli4_xritag;
+
+		/* end of repost sgl list condition for els buffers */
+		if (num_posted == phba->sli4_hba.els_xri_cnt) {
+			if (post_cnt == 0) {
+				list_splice_init(&prep_sgl_list,
+						 &blck_sgl_list);
+				post_cnt = block_cnt;
+			} else if (block_cnt == 1) {
+				status = lpfc_sli4_post_sgl(phba,
+						sglq_entry->phys, 0,
+						sglq_entry->sli4_xritag);
+				if (!status) {
+					/* successful, put sgl to posted list */
+					list_add_tail(&sglq_entry->list,
+						      &post_sgl_list);
+				} else {
+					/* Failure, put sgl to free list */
+					lpfc_printf_log(phba, KERN_WARNING,
+						LOG_SLI,
+						"3159 Failed to post els "
+						"sgl, xritag:x%x\n",
+						sglq_entry->sli4_xritag);
+					list_add_tail(&sglq_entry->list,
+						      &free_sgl_list);
+					spin_lock_irq(&phba->hbalock);
+					phba->sli4_hba.els_xri_cnt--;
+					spin_unlock_irq(&phba->hbalock);
+				}
+			}
+		}
+
+		/* continue until a nembed page worth of sgls */
+		if (post_cnt == 0)
+			continue;
+
+		/* post the els buffer list sgls as a block */
+		status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
+						     post_cnt);
+
+		if (!status) {
+			/* success, put sgl list to posted sgl list */
+			list_splice_init(&blck_sgl_list, &post_sgl_list);
+		} else {
+			/* Failure, put sgl list to free sgl list */
+			sglq_entry_first = list_first_entry(&blck_sgl_list,
+							    struct lpfc_sglq,
+							    list);
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"3160 Failed to post els sgl-list, "
+					"xritag:x%x-x%x\n",
+					sglq_entry_first->sli4_xritag,
+					(sglq_entry_first->sli4_xritag +
+					 post_cnt - 1));
+			list_splice_init(&blck_sgl_list, &free_sgl_list);
+			spin_lock_irq(&phba->hbalock);
+			phba->sli4_hba.els_xri_cnt -= post_cnt;
+			spin_unlock_irq(&phba->hbalock);
+		}
+
+		/* don't reset xritag due to hole in xri block */
+		if (block_cnt == 0)
+			last_xritag = NO_XRI;
+
+		/* reset els sgl post count for next round of posting */
+		post_cnt = 0;
+	}
+
+	/* free the els sgls that failed to post */
+	lpfc_free_sgl_list(phba, &free_sgl_list);
+
+	/* push els sgls posted to the available list */
+	if (!list_empty(&post_sgl_list)) {
+		spin_lock(&phba->hbalock);
+		list_splice_init(&post_sgl_list,
+				 &phba->sli4_hba.lpfc_sgl_list);
+		spin_unlock(&phba->hbalock);
+	} else {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3161 Failure to post els sgl to port.\n");
+		return -EIO;
+	}
+	return 0;
+}
+
+/**
  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
  * @phba: Pointer to HBA context object.
  *
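
The routine added above forms posting blocks exactly as its comment describes: XRIs accumulate while they stay consecutive, and a hole in the numbering (or reaching LPFC_NEMBED_MBOX_SGL_CNT entries) flushes the accumulated block. The following standalone sketch isolates just that grouping logic with plain arrays in place of the driver's spliced lists; BLOCK_MAX is an assumed small stand-in for LPFC_NEMBED_MBOX_SGL_CNT and post_block() is a hypothetical stub.

    #include <stdio.h>

    #define BLOCK_MAX 2     /* stand-in for LPFC_NEMBED_MBOX_SGL_CNT */
    #define NO_XRI   (-1)

    static void post_block(const int *tags, int start, int count)
    {
            printf("block post: xri %d..%d (%d sgls)\n",
                   tags[start], tags[start + count - 1], count);
    }

    int main(void)
    {
            int tags[] = { 10, 11, 12, 14, 15, 20 };
            int n = (int)(sizeof(tags) / sizeof(tags[0]));
            int last = NO_XRI, start = 0, block_cnt = 0;

            for (int i = 0; i < n; i++) {
                    /* a hole in the xri numbering flushes the block */
                    if (last != NO_XRI && tags[i] != last + 1) {
                            post_block(tags, start, block_cnt);
                            start = i;
                            block_cnt = 0;
                    }
                    block_cnt++;
                    last = tags[i];
                    /* a full non-embedded page worth also flushes */
                    if (block_cnt == BLOCK_MAX) {
                            post_block(tags, start, block_cnt);
                            start = i + 1;
                            block_cnt = 0;
                            last = NO_XRI;
                    }
            }
            if (block_cnt)
                    post_block(tags, start, block_cnt);
            return 0;
    }

On the sample input this emits blocks {10,11}, {12}, {14,15} and {20}: contiguous runs are posted together and stragglers go alone, which is what lets each non-embedded mailbox command name one starting XRI and a count.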
@@ -6063,8 +6209,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6063 "rc = x%x\n", rc); 6209 "rc = x%x\n", rc);
6064 goto out_free_mbox; 6210 goto out_free_mbox;
6065 } 6211 }
6066 /* update physical xri mappings in the scsi buffers */
6067 lpfc_scsi_buf_update(phba);
6068 6212
6069 /* Read the port's service parameters. */ 6213 /* Read the port's service parameters. */
6070 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 6214 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
@@ -6105,28 +6249,26 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
 
-	/* Register SGL pool to the device using non-embedded mailbox command */
-	if (!phba->sli4_hba.extents_in_use) {
-		rc = lpfc_sli4_post_els_sgl_list(phba);
-		if (unlikely(rc)) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
-					"0582 Error %d during els sgl post "
-					"operation\n", rc);
-			rc = -ENODEV;
-			goto out_free_mbox;
-		}
-	} else {
-		rc = lpfc_sli4_post_els_sgl_list_ext(phba);
-		if (unlikely(rc)) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
-					"2560 Error %d during els sgl post "
-					"operation\n", rc);
-			rc = -ENODEV;
-			goto out_free_mbox;
-		}
+	/* update host els and scsi xri-sgl sizes and mappings */
+	rc = lpfc_sli4_xri_sgl_update(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"1400 Failed to update xri-sgl size and "
+				"mapping: %d\n", rc);
+		goto out_free_mbox;
 	}
 
-	/* Register SCSI SGL pool to the device */
+	/* register the els sgl pool to the port */
+	rc = lpfc_sli4_repost_els_sgl_list(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0582 Error %d during els sgl post "
+				"operation\n", rc);
+		rc = -ENODEV;
+		goto out_free_mbox;
+	}
+
+	/* register the allocated scsi sgl pool to the port */
 	rc = lpfc_sli4_repost_scsi_sgl_list(phba);
 	if (unlikely(rc)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
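
With the repost routine handling extents internally, the setup path above loses its extents_in_use branch: lpfc_sli4_xri_sgl_update() refreshes the els/scsi xri-sgl mappings, and the two repost calls then register the pools. A sketch of the consolidated control flow, with hypothetical stubs standing in for the lpfc_sli4_* helpers (the driver itself fails out through a goto out_free_mbox cleanup label rather than returning directly):

    #include <errno.h>
    #include <stdio.h>

    /* hypothetical stubs, not the driver's functions */
    static int xri_sgl_update(void)   { return 0; }
    static int repost_els_sgls(void)  { return 0; }
    static int repost_scsi_sgls(void) { return 0; }

    static int hba_setup_sgls(void)
    {
            int rc;

            rc = xri_sgl_update();          /* resize/remap xri-sgls */
            if (rc)
                    return rc;
            rc = repost_els_sgls();         /* one poster, extents or not */
            if (rc)
                    return -ENODEV;
            return repost_scsi_sgls();
    }

    int main(void)
    {
            printf("setup rc = %d\n", hba_setup_sgls());
            return 0;
    }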
@@ -13080,9 +13222,7 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
 	} else {
 		set_bit(xri, phba->sli4_hba.xri_bmask);
 		phba->sli4_hba.max_cfg_param.xri_used++;
-		phba->sli4_hba.xri_count++;
 	}
-
 	spin_unlock_irq(&phba->hbalock);
 	return xri;
 }
@@ -13098,7 +13238,6 @@ void
 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
 {
 	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
-		phba->sli4_hba.xri_count--;
 		phba->sli4_hba.max_cfg_param.xri_used--;
 	}
 }
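
These two hunks drop xri_count, which merely duplicated max_cfg_param.xri_used; the bitmap plus a single counter now fully describe XRI usage. A minimal single-threaded sketch of that bitmap-allocator pattern follows. The driver does this under hbalock with the kernel bitmap API (set_bit/test_and_clear_bit over an arbitrarily sized xri_bmask); the fixed 64-bit mask here is an assumption for brevity.

    #include <stdint.h>
    #include <stdio.h>

    #define XRI_MAX 64
    #define NO_XRI  0xffff

    static uint64_t xri_bmask;      /* one bit per XRI */
    static unsigned int xri_used;   /* the single in-use counter */

    static uint16_t alloc_xri(void)
    {
            for (uint16_t xri = 0; xri < XRI_MAX; xri++) {
                    if (!(xri_bmask & (1ULL << xri))) {
                            xri_bmask |= 1ULL << xri;   /* set_bit() */
                            xri_used++;
                            return xri;
                    }
            }
            return NO_XRI;                              /* exhausted */
    }

    static void free_xri(uint16_t xri)
    {
            /* mirrors test_and_clear_bit(): count only a real transition */
            if (xri < XRI_MAX && (xri_bmask & (1ULL << xri))) {
                    xri_bmask &= ~(1ULL << xri);
                    xri_used--;
            }
    }

    int main(void)
    {
            uint16_t a = alloc_xri(), b = alloc_xri();

            printf("allocated %u and %u, in use: %u\n",
                   (unsigned)a, (unsigned)b, xri_used);
            free_xri(a);
            printf("after free, in use: %u\n", xri_used);
            return 0;
    }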
@@ -13149,31 +13288,32 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
 /**
  * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
  * @phba: pointer to lpfc hba data structure.
+ * @post_sgl_list: pointer to els sgl entry list.
+ * @count: number of els sgl entries on the list.
  *
  * This routine is invoked to post a block of driver's sgl pages to the
  * HBA using non-embedded mailbox command. No Lock is held. This routine
  * is only called when the driver is loading and after all IO has been
  * stopped.
  **/
-int
-lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
+static int
+lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
+			    struct list_head *post_sgl_list,
+			    int post_cnt)
 {
-	struct lpfc_sglq *sglq_entry;
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
 	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
 	struct sgl_page_pairs *sgl_pg_pairs;
 	void *viraddr;
 	LPFC_MBOXQ_t *mbox;
 	uint32_t reqlen, alloclen, pg_pairs;
 	uint32_t mbox_tmo;
-	uint16_t xritag_start = 0, lxri = 0;
-	int els_xri_cnt, rc = 0;
+	uint16_t xritag_start = 0;
+	int rc = 0;
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
 
-	/* The number of sgls to be posted */
-	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-
-	reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
+	reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
 		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
 	if (reqlen > SLI4_PAGE_SIZE) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -13203,25 +13343,8 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
 	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
 	sgl_pg_pairs = &sgl->sgl_pg_pairs;
 
-	for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
-		sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
-
-		/*
-		 * Assign the sglq a physical xri only if the driver has not
-		 * initialized those resources. A port reset only needs
-		 * the sglq's posted.
-		 */
-		if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
-		    LPFC_XRI_RSRC_RDY) {
-			lxri = lpfc_sli4_next_xritag(phba);
-			if (lxri == NO_XRI) {
-				lpfc_sli4_mbox_cmd_free(phba, mbox);
-				return -ENOMEM;
-			}
-			sglq_entry->sli4_lxritag = lxri;
-			sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
-		}
-
+	pg_pairs = 0;
+	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
 		/* Set up the sge entry */
 		sgl_pg_pairs->sgl_pg0_addr_lo =
 			cpu_to_le32(putPaddrLow(sglq_entry->phys));
@@ -13236,11 +13359,12 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
 		if (pg_pairs == 0)
 			xritag_start = sglq_entry->sli4_xritag;
 		sgl_pg_pairs++;
+		pg_pairs++;
 	}
 
 	/* Complete initialization and perform endian conversion. */
 	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
-	bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
+	bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
 	sgl->word0 = cpu_to_le32(sgl->word0);
 	if (!phba->sli4_hba.intr_enable)
 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
@@ -13260,183 +13384,6 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
 				shdr_status, shdr_add_status, rc);
 		rc = -ENXIO;
 	}
-
-	if (rc == 0)
-		bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
-		       LPFC_XRI_RSRC_RDY);
-	return rc;
-}
-
-/**
- * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to post a block of driver's sgl pages to the
- * HBA using non-embedded mailbox command. No Lock is held. This routine
- * is only called when the driver is loading and after all IO has been
- * stopped.
- **/
-int
-lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
-{
-	struct lpfc_sglq *sglq_entry;
-	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
-	struct sgl_page_pairs *sgl_pg_pairs;
-	void *viraddr;
-	LPFC_MBOXQ_t *mbox;
-	uint32_t reqlen, alloclen, index;
-	uint32_t mbox_tmo;
-	uint16_t rsrc_start, rsrc_size, els_xri_cnt, post_els_xri_cnt;
-	uint16_t xritag_start = 0, lxri = 0;
-	struct lpfc_rsrc_blks *rsrc_blk;
-	int cnt, ttl_cnt, rc = 0;
-	int loop_cnt;
-	uint32_t shdr_status, shdr_add_status;
-	union lpfc_sli4_cfg_shdr *shdr;
-
-	/* The number of sgls to be posted */
-	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-
-	reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
-		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
-	if (reqlen > SLI4_PAGE_SIZE) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"2989 Block sgl registration required DMA "
-				"size (%d) great than a page\n", reqlen);
-		return -ENOMEM;
-	}
-
-	cnt = 0;
-	ttl_cnt = 0;
-	post_els_xri_cnt = els_xri_cnt;
-	list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
-			    list) {
-		rsrc_start = rsrc_blk->rsrc_start;
-		rsrc_size = rsrc_blk->rsrc_size;
-
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"3014 Working ELS Extent start %d, cnt %d\n",
-				rsrc_start, rsrc_size);
-
-		loop_cnt = min(post_els_xri_cnt, rsrc_size);
-		if (loop_cnt < post_els_xri_cnt) {
-			post_els_xri_cnt -= loop_cnt;
-			ttl_cnt += loop_cnt;
-		} else
-			ttl_cnt += post_els_xri_cnt;
-
-		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-		if (!mbox)
-			return -ENOMEM;
-		/*
-		 * Allocate DMA memory and set up the non-embedded mailbox
-		 * command.
-		 */
-		alloclen = lpfc_sli4_config(phba, mbox,
-					    LPFC_MBOX_SUBSYSTEM_FCOE,
-					    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
-					    reqlen, LPFC_SLI4_MBX_NEMBED);
-		if (alloclen < reqlen) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2987 Allocated DMA memory size (%d) "
-					"is less than the requested DMA memory "
-					"size (%d)\n", alloclen, reqlen);
-			lpfc_sli4_mbox_cmd_free(phba, mbox);
-			return -ENOMEM;
-		}
-
-		/* Set up the SGL pages in the non-embedded DMA pages */
-		viraddr = mbox->sge_array->addr[0];
-		sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
-		sgl_pg_pairs = &sgl->sgl_pg_pairs;
-
-		/*
-		 * The starting resource may not begin at zero. Control
-		 * the loop variants via the block resource parameters,
-		 * but handle the sge pointers with a zero-based index
-		 * that doesn't get reset per loop pass.
-		 */
-		for (index = rsrc_start;
-		     index < rsrc_start + loop_cnt;
-		     index++) {
-			sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
-
-			/*
-			 * Assign the sglq a physical xri only if the driver
-			 * has not initialized those resources. A port reset
-			 * only needs the sglq's posted.
-			 */
-			if (bf_get(lpfc_xri_rsrc_rdy,
-				   &phba->sli4_hba.sli4_flags) !=
-			    LPFC_XRI_RSRC_RDY) {
-				lxri = lpfc_sli4_next_xritag(phba);
-				if (lxri == NO_XRI) {
-					lpfc_sli4_mbox_cmd_free(phba, mbox);
-					rc = -ENOMEM;
-					goto err_exit;
-				}
-				sglq_entry->sli4_lxritag = lxri;
-				sglq_entry->sli4_xritag =
-						phba->sli4_hba.xri_ids[lxri];
-			}
-
-			/* Set up the sge entry */
-			sgl_pg_pairs->sgl_pg0_addr_lo =
-				cpu_to_le32(putPaddrLow(sglq_entry->phys));
-			sgl_pg_pairs->sgl_pg0_addr_hi =
-				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
-			sgl_pg_pairs->sgl_pg1_addr_lo =
-				cpu_to_le32(putPaddrLow(0));
-			sgl_pg_pairs->sgl_pg1_addr_hi =
-				cpu_to_le32(putPaddrHigh(0));
-
-			/* Track the starting physical XRI for the mailbox. */
-			if (index == rsrc_start)
-				xritag_start = sglq_entry->sli4_xritag;
-			sgl_pg_pairs++;
-			cnt++;
-		}
-
-		/* Complete initialization and perform endian conversion. */
-		rsrc_blk->rsrc_used += loop_cnt;
-		bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
-		bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
-		sgl->word0 = cpu_to_le32(sgl->word0);
-
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"3015 Post ELS Extent SGL, start %d, "
-				"cnt %d, used %d\n",
-				xritag_start, loop_cnt, rsrc_blk->rsrc_used);
-		if (!phba->sli4_hba.intr_enable)
-			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
-		else {
-			mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
-			rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
-		}
-		shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
-		shdr_status = bf_get(lpfc_mbox_hdr_status,
-				     &shdr->response);
-		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
-					 &shdr->response);
-		if (rc != MBX_TIMEOUT)
-			lpfc_sli4_mbox_cmd_free(phba, mbox);
-		if (shdr_status || shdr_add_status || rc) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-					"2988 POST_SGL_BLOCK mailbox "
-					"command failed status x%x "
-					"add_status x%x mbx status x%x\n",
-					shdr_status, shdr_add_status, rc);
-			rc = -ENXIO;
-			goto err_exit;
-		}
-		if (ttl_cnt >= els_xri_cnt)
-			break;
-	}
-
- err_exit:
-	if (rc == 0)
-		bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
-		       LPFC_XRI_RSRC_RDY);
 	return rc;
 }
 
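
After this hunk only one ELS poster remains: it walks the caller's list and counts page pairs as it goes, instead of indexing the old lpfc_els_sgl_array and assigning XRIs inline, so the same routine serves extent and non-extent ports. A simplified sketch of that loop shape follows; struct sglq, struct pg_pair and fill_pg_pairs() are hypothetical stand-ins, not the driver's types.

    #include <stdint.h>
    #include <stdio.h>

    struct sglq {                   /* stand-in for struct lpfc_sglq */
            uint16_t xritag;
            uint64_t phys;
            struct sglq *next;
    };

    struct pg_pair {                /* stand-in for struct sgl_page_pairs */
            uint32_t pg0_lo, pg0_hi;
    };

    /* Walk the caller's list, fill one pair per entry, and remember the
     * first xritag so the mailbox can report a start tag plus a count. */
    static int fill_pg_pairs(const struct sglq *list, struct pg_pair *pairs,
                             uint16_t *xritag_start)
    {
            int pg_pairs = 0;

            for (const struct sglq *s = list; s; s = s->next) {
                    pairs[pg_pairs].pg0_lo = (uint32_t)(s->phys & 0xffffffffu);
                    pairs[pg_pairs].pg0_hi = (uint32_t)(s->phys >> 32);
                    if (pg_pairs == 0)
                            *xritag_start = s->xritag;
                    pg_pairs++;
            }
            return pg_pairs;
    }

    int main(void)
    {
            struct sglq c = { 12, 0x3000, NULL };
            struct sglq b = { 11, 0x2000, &c };
            struct sglq a = { 10, 0x1000, &b };
            struct pg_pair pairs[3];
            uint16_t start = 0;
            int n = fill_pg_pairs(&a, pairs, &start);

            printf("posting %d pairs starting at xri %u\n", n,
                   (unsigned)start);
            return 0;
    }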
@@ -13452,8 +13399,9 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
  *
  **/
 int
-lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
-			      int cnt)
+lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
+			      struct list_head *sblist,
+			      int count)
 {
 	struct lpfc_scsi_buf *psb;
 	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
@@ -13469,7 +13417,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
 	union lpfc_sli4_cfg_shdr *shdr;
 
 	/* Calculate the requested length of the dma memory */
-	reqlen = cnt * sizeof(struct sgl_page_pairs) +
+	reqlen = count * sizeof(struct sgl_page_pairs) +
 		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
 	if (reqlen > SLI4_PAGE_SIZE) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -13553,169 +13501,6 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
 }
 
 /**
- * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
- * @phba: pointer to lpfc hba data structure.
- * @sblist: pointer to scsi buffer list.
- * @count: number of scsi buffers on the list.
- *
- * This routine is invoked to post a block of @count scsi sgl pages from a
- * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
- * No Lock is held.
- *
- **/
-int
-lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
-				int cnt)
-{
-	struct lpfc_scsi_buf *psb = NULL;
-	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
-	struct sgl_page_pairs *sgl_pg_pairs;
-	void *viraddr;
-	LPFC_MBOXQ_t *mbox;
-	uint32_t reqlen, alloclen, pg_pairs;
-	uint32_t mbox_tmo;
-	uint16_t xri_start = 0, scsi_xri_start;
-	uint16_t rsrc_range;
-	int rc = 0, avail_cnt;
-	uint32_t shdr_status, shdr_add_status;
-	dma_addr_t pdma_phys_bpl1;
-	union lpfc_sli4_cfg_shdr *shdr;
-	struct lpfc_rsrc_blks *rsrc_blk;
-	uint32_t xri_cnt = 0;
-
-	/* Calculate the total requested length of the dma memory */
-	reqlen = cnt * sizeof(struct sgl_page_pairs) +
-		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
-	if (reqlen > SLI4_PAGE_SIZE) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"2932 Block sgl registration required DMA "
-				"size (%d) great than a page\n", reqlen);
-		return -ENOMEM;
-	}
-
-	/*
-	 * The use of extents requires the driver to post the sgl headers
-	 * in multiple postings to meet the contiguous resource assignment.
-	 */
-	psb = list_prepare_entry(psb, sblist, list);
-	scsi_xri_start = phba->sli4_hba.scsi_xri_start;
-	list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
-			    list) {
-		rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
-		if (rsrc_range < scsi_xri_start)
-			continue;
-		else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
-			continue;
-		else
-			avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
-
-		reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
-			sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
-		/*
-		 * Allocate DMA memory and set up the non-embedded mailbox
-		 * command. The mbox is used to post an SGL page per loop
-		 * but the DMA memory has a use-once semantic so the mailbox
-		 * is used and freed per loop pass.
-		 */
-		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-		if (!mbox) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2933 Failed to allocate mbox cmd "
-					"memory\n");
-			return -ENOMEM;
-		}
-		alloclen = lpfc_sli4_config(phba, mbox,
-					    LPFC_MBOX_SUBSYSTEM_FCOE,
-					    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
-					    reqlen,
-					    LPFC_SLI4_MBX_NEMBED);
-		if (alloclen < reqlen) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2934 Allocated DMA memory size (%d) "
-					"is less than the requested DMA memory "
-					"size (%d)\n", alloclen, reqlen);
-			lpfc_sli4_mbox_cmd_free(phba, mbox);
-			return -ENOMEM;
-		}
-
-		/* Get the first SGE entry from the non-embedded DMA memory */
-		viraddr = mbox->sge_array->addr[0];
-
-		/* Set up the SGL pages in the non-embedded DMA pages */
-		sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
-		sgl_pg_pairs = &sgl->sgl_pg_pairs;
-
-		/* pg_pairs tracks posted SGEs per loop iteration. */
-		pg_pairs = 0;
-		list_for_each_entry_continue(psb, sblist, list) {
-			/* Set up the sge entry */
-			sgl_pg_pairs->sgl_pg0_addr_lo =
-				cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
-			sgl_pg_pairs->sgl_pg0_addr_hi =
-				cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
-			if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
-				pdma_phys_bpl1 = psb->dma_phys_bpl +
-					SGL_PAGE_SIZE;
-			else
-				pdma_phys_bpl1 = 0;
-			sgl_pg_pairs->sgl_pg1_addr_lo =
-				cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
-			sgl_pg_pairs->sgl_pg1_addr_hi =
-				cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
-			/* Keep the first xri for this extent. */
-			if (pg_pairs == 0)
-				xri_start = psb->cur_iocbq.sli4_xritag;
-			sgl_pg_pairs++;
-			pg_pairs++;
-			xri_cnt++;
-
-			/*
-			 * Track two exit conditions - the loop has constructed
-			 * all of the caller's SGE pairs or all available
-			 * resource IDs in this extent are consumed.
-			 */
-			if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
-				break;
-		}
-		rsrc_blk->rsrc_used += pg_pairs;
-		bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
-		bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
-
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"3016 Post SCSI Extent SGL, start %d, cnt %d "
-				"blk use %d\n",
-				xri_start, pg_pairs, rsrc_blk->rsrc_used);
-		/* Perform endian conversion if necessary */
-		sgl->word0 = cpu_to_le32(sgl->word0);
-		if (!phba->sli4_hba.intr_enable)
-			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
-		else {
-			mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
-			rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
-		}
-		shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
-		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
-		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
-					 &shdr->response);
-		if (rc != MBX_TIMEOUT)
-			lpfc_sli4_mbox_cmd_free(phba, mbox);
-		if (shdr_status || shdr_add_status || rc) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-					"2935 POST_SGL_BLOCK mailbox command "
-					"failed status x%x add_status x%x "
-					"mbx status x%x\n",
-					shdr_status, shdr_add_status, rc);
-			return -ENXIO;
-		}
-
-		/* Post only what is requested. */
-		if (xri_cnt >= cnt)
-			break;
-	}
-	return rc;
-}
-
-/**
  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
  * @phba: pointer to lpfc_hba struct that the frame was received on
  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)