aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/lpfc
diff options
context:
space:
mode:
authorJames Smart <james.smart@emulex.com>2012-05-09 21:16:12 -0400
committerJames Bottomley <JBottomley@Parallels.com>2012-05-17 05:29:22 -0400
commit8a9d2e8003040d2e1cd24ac5e83bb30b68f7f488 (patch)
treea0fb34a954b174681a7af4706dee3823db4260ce /drivers/scsi/lpfc
parent587a37f6e007e97e4f88f10a51f5d0bc62eb6e0a (diff)
[SCSI] lpfc 8.3.31: Correct handling of SLI4-port XRI resource-provisioning profile change
Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com> Signed-off-by: James Smart <james.smart@emulex.com> Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c367
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c296
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c607
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h9
6 files changed, 594 insertions, 702 deletions
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 330dd7192a7f..620fa45866dc 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -460,6 +460,7 @@ int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t);
460int lpfc_issue_reg_vfi(struct lpfc_vport *); 460int lpfc_issue_reg_vfi(struct lpfc_vport *);
461int lpfc_issue_unreg_vfi(struct lpfc_vport *); 461int lpfc_issue_unreg_vfi(struct lpfc_vport *);
462int lpfc_selective_reset(struct lpfc_hba *); 462int lpfc_selective_reset(struct lpfc_hba *);
463int lpfc_sli4_read_config(struct lpfc_hba *phba); 463int lpfc_sli4_read_config(struct lpfc_hba *);
464int lpfc_scsi_buf_update(struct lpfc_hba *phba); 464void lpfc_sli4_node_prep(struct lpfc_hba *);
465void lpfc_sli4_node_prep(struct lpfc_hba *phba); 465int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
466void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 91f09761bd32..24344c1fab5a 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -228,19 +228,15 @@ struct lpfc_sli4_flags {
228#define lpfc_idx_rsrc_rdy_MASK 0x00000001 228#define lpfc_idx_rsrc_rdy_MASK 0x00000001
229#define lpfc_idx_rsrc_rdy_WORD word0 229#define lpfc_idx_rsrc_rdy_WORD word0
230#define LPFC_IDX_RSRC_RDY 1 230#define LPFC_IDX_RSRC_RDY 1
231#define lpfc_xri_rsrc_rdy_SHIFT 1 231#define lpfc_rpi_rsrc_rdy_SHIFT 1
232#define lpfc_xri_rsrc_rdy_MASK 0x00000001
233#define lpfc_xri_rsrc_rdy_WORD word0
234#define LPFC_XRI_RSRC_RDY 1
235#define lpfc_rpi_rsrc_rdy_SHIFT 2
236#define lpfc_rpi_rsrc_rdy_MASK 0x00000001 232#define lpfc_rpi_rsrc_rdy_MASK 0x00000001
237#define lpfc_rpi_rsrc_rdy_WORD word0 233#define lpfc_rpi_rsrc_rdy_WORD word0
238#define LPFC_RPI_RSRC_RDY 1 234#define LPFC_RPI_RSRC_RDY 1
239#define lpfc_vpi_rsrc_rdy_SHIFT 3 235#define lpfc_vpi_rsrc_rdy_SHIFT 2
240#define lpfc_vpi_rsrc_rdy_MASK 0x00000001 236#define lpfc_vpi_rsrc_rdy_MASK 0x00000001
241#define lpfc_vpi_rsrc_rdy_WORD word0 237#define lpfc_vpi_rsrc_rdy_WORD word0
242#define LPFC_VPI_RSRC_RDY 1 238#define LPFC_VPI_RSRC_RDY 1
243#define lpfc_vfi_rsrc_rdy_SHIFT 4 239#define lpfc_vfi_rsrc_rdy_SHIFT 3
244#define lpfc_vfi_rsrc_rdy_MASK 0x00000001 240#define lpfc_vfi_rsrc_rdy_MASK 0x00000001
245#define lpfc_vfi_rsrc_rdy_WORD word0 241#define lpfc_vfi_rsrc_rdy_WORD word0
246#define LPFC_VFI_RSRC_RDY 1 242#define LPFC_VFI_RSRC_RDY 1
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 5538cd068611..411ed48d79da 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -64,8 +64,8 @@ static int lpfc_sli4_queue_verify(struct lpfc_hba *);
64static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 64static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
65static int lpfc_setup_endian_order(struct lpfc_hba *); 65static int lpfc_setup_endian_order(struct lpfc_hba *);
66static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 66static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
67static void lpfc_free_sgl_list(struct lpfc_hba *); 67static void lpfc_free_els_sgl_list(struct lpfc_hba *);
68static int lpfc_init_sgl_list(struct lpfc_hba *); 68static void lpfc_init_sgl_list(struct lpfc_hba *);
69static int lpfc_init_active_sgl_array(struct lpfc_hba *); 69static int lpfc_init_active_sgl_array(struct lpfc_hba *);
70static void lpfc_free_active_sgl(struct lpfc_hba *); 70static void lpfc_free_active_sgl(struct lpfc_hba *);
71static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 71static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
@@ -2767,47 +2767,14 @@ lpfc_offline(struct lpfc_hba *phba)
2767} 2767}
2768 2768
2769/** 2769/**
2770 * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated.
2771 * @phba: pointer to lpfc hba data structure.
2772 *
2773 * This routine goes through all the scsi buffers in the system and updates the
2774 * Physical XRIs assigned to the SCSI buffer because these may change after any
2775 * firmware reset
2776 *
2777 * Return codes
2778 * 0 - successful (for now, it always returns 0)
2779 **/
2780int
2781lpfc_scsi_buf_update(struct lpfc_hba *phba)
2782{
2783 struct lpfc_scsi_buf *sb, *sb_next;
2784
2785 spin_lock_irq(&phba->hbalock);
2786 spin_lock(&phba->scsi_buf_list_lock);
2787 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2788 sb->cur_iocbq.sli4_xritag =
2789 phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
2790 set_bit(sb->cur_iocbq.sli4_lxritag, phba->sli4_hba.xri_bmask);
2791 phba->sli4_hba.max_cfg_param.xri_used++;
2792 phba->sli4_hba.xri_count++;
2793 }
2794 spin_unlock(&phba->scsi_buf_list_lock);
2795 spin_unlock_irq(&phba->hbalock);
2796 return 0;
2797}
2798
2799/**
2800 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2770 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2801 * @phba: pointer to lpfc hba data structure. 2771 * @phba: pointer to lpfc hba data structure.
2802 * 2772 *
2803 * This routine is to free all the SCSI buffers and IOCBs from the driver 2773 * This routine is to free all the SCSI buffers and IOCBs from the driver
2804 * list back to kernel. It is called from lpfc_pci_remove_one to free 2774 * list back to kernel. It is called from lpfc_pci_remove_one to free
2805 * the internal resources before the device is removed from the system. 2775 * the internal resources before the device is removed from the system.
2806 *
2807 * Return codes
2808 * 0 - successful (for now, it always returns 0)
2809 **/ 2776 **/
2810static int 2777static void
2811lpfc_scsi_free(struct lpfc_hba *phba) 2778lpfc_scsi_free(struct lpfc_hba *phba)
2812{ 2779{
2813 struct lpfc_scsi_buf *sb, *sb_next; 2780 struct lpfc_scsi_buf *sb, *sb_next;
@@ -2833,7 +2800,178 @@ lpfc_scsi_free(struct lpfc_hba *phba)
2833 } 2800 }
2834 2801
2835 spin_unlock_irq(&phba->hbalock); 2802 spin_unlock_irq(&phba->hbalock);
2803}
2804
2805/**
2806 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
2807 * @phba: pointer to lpfc hba data structure.
2808 *
2809 * This routine first calculates the sizes of the current els and allocated
2810 * scsi sgl lists, and then goes through all sgls to updates the physical
2811 * XRIs assigned due to port function reset. During port initialization, the
2812 * current els and allocated scsi sgl lists are 0s.
2813 *
2814 * Return codes
2815 * 0 - successful (for now, it always returns 0)
2816 **/
2817int
2818lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
2819{
2820 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
2821 struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
2822 uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
2823 LIST_HEAD(els_sgl_list);
2824 LIST_HEAD(scsi_sgl_list);
2825 int rc;
2826
2827 /*
2828 * update on pci function's els xri-sgl list
2829 */
2830 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
2831 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
2832 /* els xri-sgl expanded */
2833 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
2834 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2835 "3157 ELS xri-sgl count increased from "
2836 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
2837 els_xri_cnt);
2838 /* allocate the additional els sgls */
2839 for (i = 0; i < xri_cnt; i++) {
2840 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
2841 GFP_KERNEL);
2842 if (sglq_entry == NULL) {
2843 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2844 "2562 Failure to allocate an "
2845 "ELS sgl entry:%d\n", i);
2846 rc = -ENOMEM;
2847 goto out_free_mem;
2848 }
2849 sglq_entry->buff_type = GEN_BUFF_TYPE;
2850 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
2851 &sglq_entry->phys);
2852 if (sglq_entry->virt == NULL) {
2853 kfree(sglq_entry);
2854 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2855 "2563 Failure to allocate an "
2856 "ELS mbuf:%d\n", i);
2857 rc = -ENOMEM;
2858 goto out_free_mem;
2859 }
2860 sglq_entry->sgl = sglq_entry->virt;
2861 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
2862 sglq_entry->state = SGL_FREED;
2863 list_add_tail(&sglq_entry->list, &els_sgl_list);
2864 }
2865 spin_lock(&phba->hbalock);
2866 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
2867 spin_unlock(&phba->hbalock);
2868 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
2869 /* els xri-sgl shrunk */
2870 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
2871 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2872 "3158 ELS xri-sgl count decreased from "
2873 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
2874 els_xri_cnt);
2875 spin_lock_irq(&phba->hbalock);
2876 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
2877 spin_unlock_irq(&phba->hbalock);
2878 /* release extra els sgls from list */
2879 for (i = 0; i < xri_cnt; i++) {
2880 list_remove_head(&els_sgl_list,
2881 sglq_entry, struct lpfc_sglq, list);
2882 if (sglq_entry) {
2883 lpfc_mbuf_free(phba, sglq_entry->virt,
2884 sglq_entry->phys);
2885 kfree(sglq_entry);
2886 }
2887 }
2888 spin_lock_irq(&phba->hbalock);
2889 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
2890 spin_unlock_irq(&phba->hbalock);
2891 } else
2892 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2893 "3163 ELS xri-sgl count unchanged: %d\n",
2894 els_xri_cnt);
2895 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
2896
2897 /* update xris to els sgls on the list */
2898 sglq_entry = NULL;
2899 sglq_entry_next = NULL;
2900 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
2901 &phba->sli4_hba.lpfc_sgl_list, list) {
2902 lxri = lpfc_sli4_next_xritag(phba);
2903 if (lxri == NO_XRI) {
2904 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2905 "2400 Failed to allocate xri for "
2906 "ELS sgl\n");
2907 rc = -ENOMEM;
2908 goto out_free_mem;
2909 }
2910 sglq_entry->sli4_lxritag = lxri;
2911 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
2912 }
2913
2914 /*
2915 * update on pci function's allocated scsi xri-sgl list
2916 */
2917 phba->total_scsi_bufs = 0;
2918
2919 /* maximum number of xris available for scsi buffers */
2920 phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
2921 els_xri_cnt;
2922
2923 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2924 "2401 Current allocated SCSI xri-sgl count:%d, "
2925 "maximum SCSI xri count:%d\n",
2926 phba->sli4_hba.scsi_xri_cnt,
2927 phba->sli4_hba.scsi_xri_max);
2928
2929 spin_lock_irq(&phba->scsi_buf_list_lock);
2930 list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list);
2931 spin_unlock_irq(&phba->scsi_buf_list_lock);
2932
2933 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
2934 /* max scsi xri shrunk below the allocated scsi buffers */
2935 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
2936 phba->sli4_hba.scsi_xri_max;
2937 /* release the extra allocated scsi buffers */
2938 for (i = 0; i < scsi_xri_cnt; i++) {
2939 list_remove_head(&scsi_sgl_list, psb,
2940 struct lpfc_scsi_buf, list);
2941 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data,
2942 psb->dma_handle);
2943 kfree(psb);
2944 }
2945 spin_lock_irq(&phba->scsi_buf_list_lock);
2946 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
2947 spin_unlock_irq(&phba->scsi_buf_list_lock);
2948 }
2949
2950 /* update xris associated to remaining allocated scsi buffers */
2951 psb = NULL;
2952 psb_next = NULL;
2953 list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
2954 lxri = lpfc_sli4_next_xritag(phba);
2955 if (lxri == NO_XRI) {
2956 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2957 "2560 Failed to allocate xri for "
2958 "scsi buffer\n");
2959 rc = -ENOMEM;
2960 goto out_free_mem;
2961 }
2962 psb->cur_iocbq.sli4_lxritag = lxri;
2963 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
2964 }
2965 spin_lock(&phba->scsi_buf_list_lock);
2966 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
2967 spin_unlock(&phba->scsi_buf_list_lock);
2968
2836 return 0; 2969 return 0;
2970
2971out_free_mem:
2972 lpfc_free_els_sgl_list(phba);
2973 lpfc_scsi_free(phba);
2974 return rc;
2837} 2975}
2838 2976
2839/** 2977/**
@@ -4636,18 +4774,15 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4636 if (rc) 4774 if (rc)
4637 goto out_free_bsmbx; 4775 goto out_free_bsmbx;
4638 4776
4639 /* Initialize and populate the iocb list per host */ 4777 /* Initialize sgl lists per host */
4640 rc = lpfc_init_sgl_list(phba); 4778 lpfc_init_sgl_list(phba);
4641 if (rc) { 4779
4642 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4780 /* Allocate and initialize active sgl array */
4643 "1400 Failed to initialize sgl list.\n");
4644 goto out_destroy_cq_event_pool;
4645 }
4646 rc = lpfc_init_active_sgl_array(phba); 4781 rc = lpfc_init_active_sgl_array(phba);
4647 if (rc) { 4782 if (rc) {
4648 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4783 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4649 "1430 Failed to initialize sgl list.\n"); 4784 "1430 Failed to initialize sgl list.\n");
4650 goto out_free_sgl_list; 4785 goto out_destroy_cq_event_pool;
4651 } 4786 }
4652 rc = lpfc_sli4_init_rpi_hdrs(phba); 4787 rc = lpfc_sli4_init_rpi_hdrs(phba);
4653 if (rc) { 4788 if (rc) {
@@ -4722,8 +4857,6 @@ out_remove_rpi_hdrs:
4722 lpfc_sli4_remove_rpi_hdrs(phba); 4857 lpfc_sli4_remove_rpi_hdrs(phba);
4723out_free_active_sgl: 4858out_free_active_sgl:
4724 lpfc_free_active_sgl(phba); 4859 lpfc_free_active_sgl(phba);
4725out_free_sgl_list:
4726 lpfc_free_sgl_list(phba);
4727out_destroy_cq_event_pool: 4860out_destroy_cq_event_pool:
4728 lpfc_sli4_cq_event_pool_destroy(phba); 4861 lpfc_sli4_cq_event_pool_destroy(phba);
4729out_free_bsmbx: 4862out_free_bsmbx:
@@ -4760,10 +4893,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4760 4893
4761 /* Free the ELS sgl list */ 4894 /* Free the ELS sgl list */
4762 lpfc_free_active_sgl(phba); 4895 lpfc_free_active_sgl(phba);
4763 lpfc_free_sgl_list(phba); 4896 lpfc_free_els_sgl_list(phba);
4764
4765 /* Free the SCSI sgl management array */
4766 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4767 4897
4768 /* Free the completion queue EQ event pool */ 4898 /* Free the completion queue EQ event pool */
4769 lpfc_sli4_cq_event_release_all(phba); 4899 lpfc_sli4_cq_event_release_all(phba);
@@ -4990,29 +5120,42 @@ out_free_iocbq:
4990} 5120}
4991 5121
4992/** 5122/**
4993 * lpfc_free_sgl_list - Free sgl list. 5123 * lpfc_free_sgl_list - Free a given sgl list.
4994 * @phba: pointer to lpfc hba data structure. 5124 * @phba: pointer to lpfc hba data structure.
5125 * @sglq_list: pointer to the head of sgl list.
4995 * 5126 *
4996 * This routine is invoked to free the driver's sgl list and memory. 5127 * This routine is invoked to free a given sgl list and memory.
4997 **/ 5128 **/
4998static void 5129void
4999lpfc_free_sgl_list(struct lpfc_hba *phba) 5130lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
5000{ 5131{
5001 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 5132 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
5133
5134 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
5135 list_del(&sglq_entry->list);
5136 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5137 kfree(sglq_entry);
5138 }
5139}
5140
5141/**
5142 * lpfc_free_els_sgl_list - Free els sgl list.
5143 * @phba: pointer to lpfc hba data structure.
5144 *
5145 * This routine is invoked to free the driver's els sgl list and memory.
5146 **/
5147static void
5148lpfc_free_els_sgl_list(struct lpfc_hba *phba)
5149{
5002 LIST_HEAD(sglq_list); 5150 LIST_HEAD(sglq_list);
5003 5151
5152 /* Retrieve all els sgls from driver list */
5004 spin_lock_irq(&phba->hbalock); 5153 spin_lock_irq(&phba->hbalock);
5005 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); 5154 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
5006 spin_unlock_irq(&phba->hbalock); 5155 spin_unlock_irq(&phba->hbalock);
5007 5156
5008 list_for_each_entry_safe(sglq_entry, sglq_next, 5157 /* Now free the sgl list */
5009 &sglq_list, list) { 5158 lpfc_free_sgl_list(phba, &sglq_list);
5010 list_del(&sglq_entry->list);
5011 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5012 kfree(sglq_entry);
5013 phba->sli4_hba.total_sglq_bufs--;
5014 }
5015 kfree(phba->sli4_hba.lpfc_els_sgl_array);
5016} 5159}
5017 5160
5018/** 5161/**
@@ -5057,99 +5200,19 @@ lpfc_free_active_sgl(struct lpfc_hba *phba)
5057 * This routine is invoked to allocate and initialize the driver's sgl 5200 * This routine is invoked to allocate and initialize the driver's sgl
5058 * list and set up the sgl xritag tag array accordingly. 5201 * list and set up the sgl xritag tag array accordingly.
5059 * 5202 *
5060 * Return codes
5061 * 0 - successful
5062 * other values - error
5063 **/ 5203 **/
5064static int 5204static void
5065lpfc_init_sgl_list(struct lpfc_hba *phba) 5205lpfc_init_sgl_list(struct lpfc_hba *phba)
5066{ 5206{
5067 struct lpfc_sglq *sglq_entry = NULL;
5068 int i;
5069 int els_xri_cnt;
5070
5071 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
5072 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5073 "2400 ELS XRI count %d.\n",
5074 els_xri_cnt);
5075 /* Initialize and populate the sglq list per host/VF. */ 5207 /* Initialize and populate the sglq list per host/VF. */
5076 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); 5208 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
5077 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 5209 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
5078 5210
5079 /* Sanity check on XRI management */ 5211 /* els xri-sgl book keeping */
5080 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) { 5212 phba->sli4_hba.els_xri_cnt = 0;
5081 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5082 "2562 No room left for SCSI XRI allocation: "
5083 "max_xri=%d, els_xri=%d\n",
5084 phba->sli4_hba.max_cfg_param.max_xri,
5085 els_xri_cnt);
5086 return -ENOMEM;
5087 }
5088
5089 /* Allocate memory for the ELS XRI management array */
5090 phba->sli4_hba.lpfc_els_sgl_array =
5091 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
5092 GFP_KERNEL);
5093 5213
5094 if (!phba->sli4_hba.lpfc_els_sgl_array) { 5214 /* scsi xri-buffer book keeping */
5095 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5096 "2401 Failed to allocate memory for ELS "
5097 "XRI management array of size %d.\n",
5098 els_xri_cnt);
5099 return -ENOMEM;
5100 }
5101
5102 /* Keep the SCSI XRI into the XRI management array */
5103 phba->sli4_hba.scsi_xri_max =
5104 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
5105 phba->sli4_hba.scsi_xri_cnt = 0; 5215 phba->sli4_hba.scsi_xri_cnt = 0;
5106 phba->sli4_hba.lpfc_scsi_psb_array =
5107 kzalloc((sizeof(struct lpfc_scsi_buf *) *
5108 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
5109
5110 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
5111 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5112 "2563 Failed to allocate memory for SCSI "
5113 "XRI management array of size %d.\n",
5114 phba->sli4_hba.scsi_xri_max);
5115 kfree(phba->sli4_hba.lpfc_els_sgl_array);
5116 return -ENOMEM;
5117 }
5118
5119 for (i = 0; i < els_xri_cnt; i++) {
5120 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
5121 if (sglq_entry == NULL) {
5122 printk(KERN_ERR "%s: only allocated %d sgls of "
5123 "expected %d count. Unloading driver.\n",
5124 __func__, i, els_xri_cnt);
5125 goto out_free_mem;
5126 }
5127
5128 sglq_entry->buff_type = GEN_BUFF_TYPE;
5129 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
5130 if (sglq_entry->virt == NULL) {
5131 kfree(sglq_entry);
5132 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
5133 "Unloading driver.\n", __func__);
5134 goto out_free_mem;
5135 }
5136 sglq_entry->sgl = sglq_entry->virt;
5137 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
5138
5139 /* The list order is used by later block SGL registration */
5140 spin_lock_irq(&phba->hbalock);
5141 sglq_entry->state = SGL_FREED;
5142 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
5143 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
5144 phba->sli4_hba.total_sglq_bufs++;
5145 spin_unlock_irq(&phba->hbalock);
5146 }
5147 return 0;
5148
5149out_free_mem:
5150 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
5151 lpfc_free_sgl_list(phba);
5152 return -ENOMEM;
5153} 5216}
5154 5217
5155/** 5218/**
@@ -7320,9 +7383,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
7320 phba->sli4_hba.u.if_type2.ERR2regaddr); 7383 phba->sli4_hba.u.if_type2.ERR2regaddr);
7321 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7384 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7322 "2890 Port error detected during port " 7385 "2890 Port error detected during port "
7323 "reset(%d): port status reg 0x%x, " 7386 "reset(%d): wait_tmo:%d ms, "
7387 "port status reg 0x%x, "
7324 "error 1=0x%x, error 2=0x%x\n", 7388 "error 1=0x%x, error 2=0x%x\n",
7325 num_resets, reg_data.word0, 7389 num_resets, rdy_chk*10,
7390 reg_data.word0,
7326 phba->work_status[0], 7391 phba->work_status[0],
7327 phba->work_status[1]); 7392 phba->work_status[1]);
7328 rc = -ENODEV; 7393 rc = -ENODEV;
@@ -9118,8 +9183,12 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
9118 return 50; 9183 return 50;
9119 else if (max_xri <= 1024) 9184 else if (max_xri <= 1024)
9120 return 100; 9185 return 100;
9121 else 9186 else if (max_xri <= 1536)
9122 return 150; 9187 return 150;
9188 else if (max_xri <= 2048)
9189 return 200;
9190 else
9191 return 250;
9123 } else 9192 } else
9124 return 0; 9193 return 0;
9125} 9194}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 88f3a83dbd2e..bf0048a7a302 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -718,72 +718,162 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
718} 718}
719 719
720/** 720/**
721 * lpfc_sli4_repost_scsi_sgl_list - Repost the Scsi buffers sgl pages as block 721 * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
722 * @phba: pointer to lpfc hba data structure. 722 * @phba: pointer to lpfc hba data structure.
723 * @post_sblist: pointer to the scsi buffer list.
723 * 724 *
724 * This routine walks the list of scsi buffers that have been allocated and 725 * This routine walks a list of scsi buffers that was passed in. It attempts
725 * repost them to the HBA by using SGL block post. This is needed after a 726 * to construct blocks of scsi buffer sgls which contains contiguous xris and
726 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine 727 * uses the non-embedded SGL block post mailbox commands to post to the port.
727 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list 728 * For single SCSI buffer sgl with non-contiguous xri, if any, it shall use
728 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers. 729 * embedded SGL post mailbox command for posting. The @post_sblist passed in
730 * must be local list, thus no lock is needed when manipulate the list.
729 * 731 *
730 * Returns: 0 = success, non-zero failure. 732 * Returns: 0 = failure, non-zero number of successfully posted buffers.
731 **/ 733 **/
732int 734int
733lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) 735lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
736 struct list_head *post_sblist, int sb_count)
734{ 737{
735 struct lpfc_scsi_buf *psb; 738 struct lpfc_scsi_buf *psb, *psb_next;
736 int index, status, bcnt = 0, rcnt = 0, rc = 0; 739 int status;
737 LIST_HEAD(sblist); 740 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
738 741 dma_addr_t pdma_phys_bpl1;
739 for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) { 742 int last_xritag = NO_XRI;
740 psb = phba->sli4_hba.lpfc_scsi_psb_array[index]; 743 LIST_HEAD(prep_sblist);
741 if (psb) { 744 LIST_HEAD(blck_sblist);
742 /* Remove from SCSI buffer list */ 745 LIST_HEAD(scsi_sblist);
743 list_del(&psb->list); 746
744 /* Add it to a local SCSI buffer list */ 747 /* sanity check */
745 list_add_tail(&psb->list, &sblist); 748 if (sb_count <= 0)
746 if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) { 749 return -EINVAL;
747 bcnt = rcnt; 750
748 rcnt = 0; 751 list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
752 list_del_init(&psb->list);
753 block_cnt++;
754 if ((last_xritag != NO_XRI) &&
755 (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
756 /* a hole in xri block, form a sgl posting block */
757 list_splice_init(&prep_sblist, &blck_sblist);
758 post_cnt = block_cnt - 1;
759 /* prepare list for next posting block */
760 list_add_tail(&psb->list, &prep_sblist);
761 block_cnt = 1;
762 } else {
763 /* prepare list for next posting block */
764 list_add_tail(&psb->list, &prep_sblist);
765 /* enough sgls for non-embed sgl mbox command */
766 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
767 list_splice_init(&prep_sblist, &blck_sblist);
768 post_cnt = block_cnt;
769 block_cnt = 0;
749 } 770 }
750 } else 771 }
751 /* A hole present in the XRI array, need to skip */ 772 num_posting++;
752 bcnt = rcnt; 773 last_xritag = psb->cur_iocbq.sli4_xritag;
753 774
754 if (index == phba->sli4_hba.scsi_xri_cnt - 1) 775 /* end of repost sgl list condition for SCSI buffers */
755 /* End of XRI array for SCSI buffer, complete */ 776 if (num_posting == sb_count) {
756 bcnt = rcnt; 777 if (post_cnt == 0) {
778 /* last sgl posting block */
779 list_splice_init(&prep_sblist, &blck_sblist);
780 post_cnt = block_cnt;
781 } else if (block_cnt == 1) {
782 /* last single sgl with non-contiguous xri */
783 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
784 pdma_phys_bpl1 = psb->dma_phys_bpl +
785 SGL_PAGE_SIZE;
786 else
787 pdma_phys_bpl1 = 0;
788 status = lpfc_sli4_post_sgl(phba,
789 psb->dma_phys_bpl,
790 pdma_phys_bpl1,
791 psb->cur_iocbq.sli4_xritag);
792 if (status) {
793 /* failure, put on abort scsi list */
794 psb->exch_busy = 1;
795 } else {
796 /* success, put on SCSI buffer list */
797 psb->exch_busy = 0;
798 psb->status = IOSTAT_SUCCESS;
799 num_posted++;
800 }
801 /* success, put on SCSI buffer sgl list */
802 list_add_tail(&psb->list, &scsi_sblist);
803 }
804 }
757 805
758 /* Continue until collect up to a nembed page worth of sgls */ 806 /* continue until a nembed page worth of sgls */
759 if (bcnt == 0) 807 if (post_cnt == 0)
760 continue; 808 continue;
761 /* Now, post the SCSI buffer list sgls as a block */ 809
762 if (!phba->sli4_hba.extents_in_use) 810 /* post block of SCSI buffer list sgls */
763 status = lpfc_sli4_post_scsi_sgl_block(phba, 811 status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
764 &sblist, 812 post_cnt);
765 bcnt); 813
766 else 814 /* don't reset xirtag due to hole in xri block */
767 status = lpfc_sli4_post_scsi_sgl_blk_ext(phba, 815 if (block_cnt == 0)
768 &sblist, 816 last_xritag = NO_XRI;
769 bcnt); 817
770 /* Reset SCSI buffer count for next round of posting */ 818 /* reset SCSI buffer post count for next round of posting */
771 bcnt = 0; 819 post_cnt = 0;
772 while (!list_empty(&sblist)) { 820
773 list_remove_head(&sblist, psb, struct lpfc_scsi_buf, 821 /* put posted SCSI buffer-sgl posted on SCSI buffer sgl list */
774 list); 822 while (!list_empty(&blck_sblist)) {
823 list_remove_head(&blck_sblist, psb,
824 struct lpfc_scsi_buf, list);
775 if (status) { 825 if (status) {
776 /* Put this back on the abort scsi list */ 826 /* failure, put on abort scsi list */
777 psb->exch_busy = 1; 827 psb->exch_busy = 1;
778 rc++;
779 } else { 828 } else {
829 /* success, put on SCSI buffer list */
780 psb->exch_busy = 0; 830 psb->exch_busy = 0;
781 psb->status = IOSTAT_SUCCESS; 831 psb->status = IOSTAT_SUCCESS;
832 num_posted++;
782 } 833 }
783 /* Put it back into the SCSI buffer list */ 834 list_add_tail(&psb->list, &scsi_sblist);
784 lpfc_release_scsi_buf_s4(phba, psb);
785 } 835 }
786 } 836 }
837 /* Push SCSI buffers with sgl posted to the available list */
838 while (!list_empty(&scsi_sblist)) {
839 list_remove_head(&scsi_sblist, psb,
840 struct lpfc_scsi_buf, list);
841 lpfc_release_scsi_buf_s4(phba, psb);
842 }
843 return num_posted;
844}
845
846/**
847 * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
848 * @phba: pointer to lpfc hba data structure.
849 *
850 * This routine walks the list of scsi buffers that have been allocated and
851 * repost them to the port by using SGL block post. This is needed after a
852 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
853 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
854 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
855 *
856 * Returns: 0 = success, non-zero failure.
857 **/
858int
859lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
860{
861 LIST_HEAD(post_sblist);
862 int num_posted, rc = 0;
863
864 /* get all SCSI buffers need to repost to a local list */
865 spin_lock(&phba->scsi_buf_list_lock);
866 list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
867 spin_unlock(&phba->scsi_buf_list_lock);
868
869 /* post the list of scsi buffer sgls to port if available */
870 if (!list_empty(&post_sblist)) {
871 num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
872 phba->sli4_hba.scsi_xri_cnt);
873 /* failed to post any scsi buffer, return error */
874 if (num_posted == 0)
875 rc = -EIO;
876 }
787 return rc; 877 return rc;
788} 878}
789 879
@@ -792,12 +882,13 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
792 * @vport: The virtual port for which this call being executed. 882 * @vport: The virtual port for which this call being executed.
793 * @num_to_allocate: The requested number of buffers to allocate. 883 * @num_to_allocate: The requested number of buffers to allocate.
794 * 884 *
795 * This routine allocates a scsi buffer for device with SLI-4 interface spec, 885 * This routine allocates scsi buffers for device with SLI-4 interface spec,
796 * the scsi buffer contains all the necessary information needed to initiate 886 * the scsi buffer contains all the necessary information needed to initiate
797 * a SCSI I/O. 887 * a SCSI I/O. After allocating up to @num_to_allocate SCSI buffers and put
888 * them on a list, it post them to the port by using SGL block post.
798 * 889 *
799 * Return codes: 890 * Return codes:
800 * int - number of scsi buffers that were allocated. 891 * int - number of scsi buffers that were allocated and posted.
801 * 0 = failure, less than num_to_alloc is a partial failure. 892 * 0 = failure, less than num_to_alloc is a partial failure.
802 **/ 893 **/
803static int 894static int
@@ -810,22 +901,21 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
810 dma_addr_t pdma_phys_fcp_cmd; 901 dma_addr_t pdma_phys_fcp_cmd;
811 dma_addr_t pdma_phys_fcp_rsp; 902 dma_addr_t pdma_phys_fcp_rsp;
812 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; 903 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
813 uint16_t iotag, last_xritag = NO_XRI, lxri = 0; 904 uint16_t iotag, lxri = 0;
814 int status = 0, index; 905 int bcnt, num_posted;
815 int bcnt; 906 LIST_HEAD(prep_sblist);
816 int non_sequential_xri = 0; 907 LIST_HEAD(post_sblist);
817 LIST_HEAD(sblist); 908 LIST_HEAD(scsi_sblist);
818 909
819 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 910 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
820 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 911 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
821 if (!psb) 912 if (!psb)
822 break; 913 break;
823
824 /* 914 /*
825 * Get memory from the pci pool to map the virt space to pci bus 915 * Get memory from the pci pool to map the virt space to
826 * space for an I/O. The DMA buffer includes space for the 916 * pci bus space for an I/O. The DMA buffer includes space
827 * struct fcp_cmnd, struct fcp_rsp and the number of bde's 917 * for the struct fcp_cmnd, struct fcp_rsp and the number
828 * necessary to support the sg_tablesize. 918 * of bde's necessary to support the sg_tablesize.
829 */ 919 */
830 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, 920 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
831 GFP_KERNEL, &psb->dma_handle); 921 GFP_KERNEL, &psb->dma_handle);
@@ -833,8 +923,6 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
833 kfree(psb); 923 kfree(psb);
834 break; 924 break;
835 } 925 }
836
837 /* Initialize virtual ptrs to dma_buf region. */
838 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 926 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
839 927
840 /* Allocate iotag for psb->cur_iocbq. */ 928 /* Allocate iotag for psb->cur_iocbq. */
@@ -855,16 +943,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
855 } 943 }
856 psb->cur_iocbq.sli4_lxritag = lxri; 944 psb->cur_iocbq.sli4_lxritag = lxri;
857 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 945 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
858 if (last_xritag != NO_XRI
859 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
860 non_sequential_xri = 1;
861 } else
862 list_add_tail(&psb->list, &sblist);
863 last_xritag = psb->cur_iocbq.sli4_xritag;
864
865 index = phba->sli4_hba.scsi_xri_cnt++;
866 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 946 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
867
868 psb->fcp_bpl = psb->data; 947 psb->fcp_bpl = psb->data;
869 psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) 948 psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
870 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); 949 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
@@ -880,9 +959,9 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
880 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); 959 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
881 960
882 /* 961 /*
883 * The first two bdes are the FCP_CMD and FCP_RSP. The balance 962 * The first two bdes are the FCP_CMD and FCP_RSP.
884 * are sg list bdes. Initialize the first two and leave the 963 * The balance are sg list bdes. Initialize the
885 * rest for queuecommand. 964 * first two and leave the rest for queuecommand.
886 */ 965 */
887 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); 966 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
888 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); 967 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
@@ -917,62 +996,31 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
917 iocb->ulpBdeCount = 1; 996 iocb->ulpBdeCount = 1;
918 iocb->ulpLe = 1; 997 iocb->ulpLe = 1;
919 iocb->ulpClass = CLASS3; 998 iocb->ulpClass = CLASS3;
920 psb->cur_iocbq.context1 = psb; 999 psb->cur_iocbq.context1 = psb;
921 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 1000 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
922 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE; 1001 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
923 else 1002 else
924 pdma_phys_bpl1 = 0; 1003 pdma_phys_bpl1 = 0;
925 psb->dma_phys_bpl = pdma_phys_bpl; 1004 psb->dma_phys_bpl = pdma_phys_bpl;
926 phba->sli4_hba.lpfc_scsi_psb_array[index] = psb; 1005
927 if (non_sequential_xri) { 1006 /* add the scsi buffer to a post list */
928 status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl, 1007 list_add_tail(&psb->list, &post_sblist);
929 pdma_phys_bpl1, 1008 spin_lock_irq(&phba->scsi_buf_list_lock);
930 psb->cur_iocbq.sli4_xritag); 1009 phba->sli4_hba.scsi_xri_cnt++;
931 if (status) { 1010 spin_unlock_irq(&phba->scsi_buf_list_lock);
932 /* Put this back on the abort scsi list */
933 psb->exch_busy = 1;
934 } else {
935 psb->exch_busy = 0;
936 psb->status = IOSTAT_SUCCESS;
937 }
938 /* Put it back into the SCSI buffer list */
939 lpfc_release_scsi_buf_s4(phba, psb);
940 break;
941 }
942 }
943 if (bcnt) {
944 if (!phba->sli4_hba.extents_in_use)
945 status = lpfc_sli4_post_scsi_sgl_block(phba,
946 &sblist,
947 bcnt);
948 else
949 status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
950 &sblist,
951 bcnt);
952
953 if (status) {
954 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
955 "3021 SCSI SGL post error %d\n",
956 status);
957 bcnt = 0;
958 }
959 /* Reset SCSI buffer count for next round of posting */
960 while (!list_empty(&sblist)) {
961 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
962 list);
963 if (status) {
964 /* Put this back on the abort scsi list */
965 psb->exch_busy = 1;
966 } else {
967 psb->exch_busy = 0;
968 psb->status = IOSTAT_SUCCESS;
969 }
970 /* Put it back into the SCSI buffer list */
971 lpfc_release_scsi_buf_s4(phba, psb);
972 }
973 } 1011 }
1012 lpfc_printf_log(phba, KERN_INFO, LOG_BG,
1013 "3021 Allocate %d out of %d requested new SCSI "
1014 "buffers\n", bcnt, num_to_alloc);
1015
1016 /* post the list of scsi buffer sgls to port if available */
1017 if (!list_empty(&post_sblist))
1018 num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
1019 &post_sblist, bcnt);
1020 else
1021 num_posted = 0;
974 1022
975 return bcnt + non_sequential_xri; 1023 return num_posted;
976} 1024}
977 1025
978/** 1026/**
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index dbaf5b963bff..b887c9c5372a 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -67,6 +67,8 @@ static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
67 struct hbq_dmabuf *); 67 struct hbq_dmabuf *);
68static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *, 68static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
69 struct lpfc_cqe *); 69 struct lpfc_cqe *);
70static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
71 int);
70 72
71static IOCB_t * 73static IOCB_t *
72lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) 74lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -4967,7 +4969,12 @@ lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4967 &rsrc_info->u.rsp); 4969 &rsrc_info->u.rsp);
4968 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 4970 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
4969 &rsrc_info->u.rsp); 4971 &rsrc_info->u.rsp);
4970 err_exit: 4972
4973 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4974 "3162 Retrieved extents type-%d from port: count:%d, "
4975 "size:%d\n", type, *extnt_count, *extnt_size);
4976
4977err_exit:
4971 mempool_free(mbox, phba->mbox_mem_pool); 4978 mempool_free(mbox, phba->mbox_mem_pool);
4972 return rc; 4979 return rc;
4973} 4980}
@@ -5051,7 +5058,7 @@ lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5051 * 0: if successful 5058 * 0: if successful
5052 **/ 5059 **/
5053static int 5060static int
5054lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt, 5061lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5055 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 5062 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5056{ 5063{
5057 int rc = 0; 5064 int rc = 0;
@@ -5060,7 +5067,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
5060 uint32_t alloc_len, mbox_tmo; 5067 uint32_t alloc_len, mbox_tmo;
5061 5068
5062 /* Calculate the total requested length of the dma memory */ 5069 /* Calculate the total requested length of the dma memory */
5063 req_len = *extnt_cnt * sizeof(uint16_t); 5070 req_len = extnt_cnt * sizeof(uint16_t);
5064 5071
5065 /* 5072 /*
5066 * Calculate the size of an embedded mailbox. The uint32_t 5073 * Calculate the size of an embedded mailbox. The uint32_t
@@ -5075,7 +5082,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
5075 */ 5082 */
5076 *emb = LPFC_SLI4_MBX_EMBED; 5083 *emb = LPFC_SLI4_MBX_EMBED;
5077 if (req_len > emb_len) { 5084 if (req_len > emb_len) {
5078 req_len = *extnt_cnt * sizeof(uint16_t) + 5085 req_len = extnt_cnt * sizeof(uint16_t) +
5079 sizeof(union lpfc_sli4_cfg_shdr) + 5086 sizeof(union lpfc_sli4_cfg_shdr) +
5080 sizeof(uint32_t); 5087 sizeof(uint32_t);
5081 *emb = LPFC_SLI4_MBX_NEMBED; 5088 *emb = LPFC_SLI4_MBX_NEMBED;
@@ -5091,7 +5098,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
5091 "size (x%x)\n", alloc_len, req_len); 5098 "size (x%x)\n", alloc_len, req_len);
5092 return -ENOMEM; 5099 return -ENOMEM;
5093 } 5100 }
5094 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb); 5101 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5095 if (unlikely(rc)) 5102 if (unlikely(rc))
5096 return -EIO; 5103 return -EIO;
5097 5104
@@ -5149,17 +5156,15 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5149 return -ENOMEM; 5156 return -ENOMEM;
5150 } 5157 }
5151 5158
5152 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT, 5159 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5153 "2903 Available Resource Extents " 5160 "2903 Post resource extents type-0x%x: "
5154 "for resource type 0x%x: Count: 0x%x, " 5161 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5155 "Size 0x%x\n", type, rsrc_cnt,
5156 rsrc_size);
5157 5162
5158 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5163 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5159 if (!mbox) 5164 if (!mbox)
5160 return -ENOMEM; 5165 return -ENOMEM;
5161 5166
5162 rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox); 5167 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5163 if (unlikely(rc)) { 5168 if (unlikely(rc)) {
5164 rc = -EIO; 5169 rc = -EIO;
5165 goto err_exit; 5170 goto err_exit;
@@ -5250,6 +5255,7 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5250 rc = -ENOMEM; 5255 rc = -ENOMEM;
5251 goto err_exit; 5256 goto err_exit;
5252 } 5257 }
5258 phba->sli4_hba.max_cfg_param.xri_used = 0;
5253 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt * 5259 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5254 sizeof(uint16_t), 5260 sizeof(uint16_t),
5255 GFP_KERNEL); 5261 GFP_KERNEL);
@@ -5420,7 +5426,6 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5420 case LPFC_RSC_TYPE_FCOE_XRI: 5426 case LPFC_RSC_TYPE_FCOE_XRI:
5421 kfree(phba->sli4_hba.xri_bmask); 5427 kfree(phba->sli4_hba.xri_bmask);
5422 kfree(phba->sli4_hba.xri_ids); 5428 kfree(phba->sli4_hba.xri_ids);
5423 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5424 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5429 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5425 &phba->sli4_hba.lpfc_xri_blk_list, list) { 5430 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5426 list_del_init(&rsrc_blk->list); 5431 list_del_init(&rsrc_blk->list);
@@ -5612,7 +5617,6 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5612 goto free_vpi_ids; 5617 goto free_vpi_ids;
5613 } 5618 }
5614 phba->sli4_hba.max_cfg_param.xri_used = 0; 5619 phba->sli4_hba.max_cfg_param.xri_used = 0;
5615 phba->sli4_hba.xri_count = 0;
5616 phba->sli4_hba.xri_ids = kzalloc(count * 5620 phba->sli4_hba.xri_ids = kzalloc(count *
5617 sizeof(uint16_t), 5621 sizeof(uint16_t),
5618 GFP_KERNEL); 5622 GFP_KERNEL);
@@ -5694,7 +5698,6 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5694 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5698 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5695 kfree(phba->sli4_hba.xri_bmask); 5699 kfree(phba->sli4_hba.xri_bmask);
5696 kfree(phba->sli4_hba.xri_ids); 5700 kfree(phba->sli4_hba.xri_ids);
5697 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5698 kfree(phba->sli4_hba.vfi_bmask); 5701 kfree(phba->sli4_hba.vfi_bmask);
5699 kfree(phba->sli4_hba.vfi_ids); 5702 kfree(phba->sli4_hba.vfi_ids);
5700 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5703 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
@@ -5853,6 +5856,149 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5853} 5856}
5854 5857
5855/** 5858/**
5859 * lpfc_sli4_repost_els_sgl_list - Repsot the els buffers sgl pages as block
5860 * @phba: pointer to lpfc hba data structure.
5861 *
5862 * This routine walks the list of els buffers that have been allocated and
5863 * repost them to the port by using SGL block post. This is needed after a
5864 * pci_function_reset/warm_start or start. It attempts to construct blocks
5865 * of els buffer sgls which contains contiguous xris and uses the non-embedded
5866 * SGL block post mailbox commands to post them to the port. For single els
5867 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
5868 * mailbox command for posting.
5869 *
5870 * Returns: 0 = success, non-zero failure.
5871 **/
5872static int
5873lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
5874{
5875 struct lpfc_sglq *sglq_entry = NULL;
5876 struct lpfc_sglq *sglq_entry_next = NULL;
5877 struct lpfc_sglq *sglq_entry_first = NULL;
5878 int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
5879 int last_xritag = NO_XRI;
5880 LIST_HEAD(prep_sgl_list);
5881 LIST_HEAD(blck_sgl_list);
5882 LIST_HEAD(allc_sgl_list);
5883 LIST_HEAD(post_sgl_list);
5884 LIST_HEAD(free_sgl_list);
5885
5886 spin_lock(&phba->hbalock);
5887 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
5888 spin_unlock(&phba->hbalock);
5889
5890 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
5891 &allc_sgl_list, list) {
5892 list_del_init(&sglq_entry->list);
5893 block_cnt++;
5894 if ((last_xritag != NO_XRI) &&
5895 (sglq_entry->sli4_xritag != last_xritag + 1)) {
5896 /* a hole in xri block, form a sgl posting block */
5897 list_splice_init(&prep_sgl_list, &blck_sgl_list);
5898 post_cnt = block_cnt - 1;
5899 /* prepare list for next posting block */
5900 list_add_tail(&sglq_entry->list, &prep_sgl_list);
5901 block_cnt = 1;
5902 } else {
5903 /* prepare list for next posting block */
5904 list_add_tail(&sglq_entry->list, &prep_sgl_list);
5905 /* enough sgls for non-embed sgl mbox command */
5906 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
5907 list_splice_init(&prep_sgl_list,
5908 &blck_sgl_list);
5909 post_cnt = block_cnt;
5910 block_cnt = 0;
5911 }
5912 }
5913 num_posted++;
5914
5915 /* keep track of last sgl's xritag */
5916 last_xritag = sglq_entry->sli4_xritag;
5917
5918 /* end of repost sgl list condition for els buffers */
5919 if (num_posted == phba->sli4_hba.els_xri_cnt) {
5920 if (post_cnt == 0) {
5921 list_splice_init(&prep_sgl_list,
5922 &blck_sgl_list);
5923 post_cnt = block_cnt;
5924 } else if (block_cnt == 1) {
5925 status = lpfc_sli4_post_sgl(phba,
5926 sglq_entry->phys, 0,
5927 sglq_entry->sli4_xritag);
5928 if (!status) {
5929 /* successful, put sgl to posted list */
5930 list_add_tail(&sglq_entry->list,
5931 &post_sgl_list);
5932 } else {
5933 /* Failure, put sgl to free list */
5934 lpfc_printf_log(phba, KERN_WARNING,
5935 LOG_SLI,
5936 "3159 Failed to post els "
5937 "sgl, xritag:x%x\n",
5938 sglq_entry->sli4_xritag);
5939 list_add_tail(&sglq_entry->list,
5940 &free_sgl_list);
5941 spin_lock_irq(&phba->hbalock);
5942 phba->sli4_hba.els_xri_cnt--;
5943 spin_unlock_irq(&phba->hbalock);
5944 }
5945 }
5946 }
5947
5948 /* continue until a nembed page worth of sgls */
5949 if (post_cnt == 0)
5950 continue;
5951
5952 /* post the els buffer list sgls as a block */
5953 status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
5954 post_cnt);
5955
5956 if (!status) {
5957 /* success, put sgl list to posted sgl list */
5958 list_splice_init(&blck_sgl_list, &post_sgl_list);
5959 } else {
5960 /* Failure, put sgl list to free sgl list */
5961 sglq_entry_first = list_first_entry(&blck_sgl_list,
5962 struct lpfc_sglq,
5963 list);
5964 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5965 "3160 Failed to post els sgl-list, "
5966 "xritag:x%x-x%x\n",
5967 sglq_entry_first->sli4_xritag,
5968 (sglq_entry_first->sli4_xritag +
5969 post_cnt - 1));
5970 list_splice_init(&blck_sgl_list, &free_sgl_list);
5971 spin_lock_irq(&phba->hbalock);
5972 phba->sli4_hba.els_xri_cnt -= post_cnt;
5973 spin_unlock_irq(&phba->hbalock);
5974 }
5975
5976 /* don't reset xirtag due to hole in xri block */
5977 if (block_cnt == 0)
5978 last_xritag = NO_XRI;
5979
5980 /* reset els sgl post count for next round of posting */
5981 post_cnt = 0;
5982 }
5983
5984 /* free the els sgls failed to post */
5985 lpfc_free_sgl_list(phba, &free_sgl_list);
5986
5987 /* push els sgls posted to the availble list */
5988 if (!list_empty(&post_sgl_list)) {
5989 spin_lock(&phba->hbalock);
5990 list_splice_init(&post_sgl_list,
5991 &phba->sli4_hba.lpfc_sgl_list);
5992 spin_unlock(&phba->hbalock);
5993 } else {
5994 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5995 "3161 Failure to post els sgl to port.\n");
5996 return -EIO;
5997 }
5998 return 0;
5999}
6000
6001/**
5856 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function 6002 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function
5857 * @phba: Pointer to HBA context object. 6003 * @phba: Pointer to HBA context object.
5858 * 6004 *
@@ -6063,8 +6209,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6063 "rc = x%x\n", rc); 6209 "rc = x%x\n", rc);
6064 goto out_free_mbox; 6210 goto out_free_mbox;
6065 } 6211 }
6066 /* update physical xri mappings in the scsi buffers */
6067 lpfc_scsi_buf_update(phba);
6068 6212
6069 /* Read the port's service parameters. */ 6213 /* Read the port's service parameters. */
6070 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 6214 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
@@ -6105,28 +6249,26 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6105 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 6249 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6106 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 6250 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6107 6251
6108 /* Register SGL pool to the device using non-embedded mailbox command */ 6252 /* update host els and scsi xri-sgl sizes and mappings */
6109 if (!phba->sli4_hba.extents_in_use) { 6253 rc = lpfc_sli4_xri_sgl_update(phba);
6110 rc = lpfc_sli4_post_els_sgl_list(phba); 6254 if (unlikely(rc)) {
6111 if (unlikely(rc)) { 6255 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6112 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6256 "1400 Failed to update xri-sgl size and "
6113 "0582 Error %d during els sgl post " 6257 "mapping: %d\n", rc);
6114 "operation\n", rc); 6258 goto out_free_mbox;
6115 rc = -ENODEV;
6116 goto out_free_mbox;
6117 }
6118 } else {
6119 rc = lpfc_sli4_post_els_sgl_list_ext(phba);
6120 if (unlikely(rc)) {
6121 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6122 "2560 Error %d during els sgl post "
6123 "operation\n", rc);
6124 rc = -ENODEV;
6125 goto out_free_mbox;
6126 }
6127 } 6259 }
6128 6260
6129 /* Register SCSI SGL pool to the device */ 6261 /* register the els sgl pool to the port */
6262 rc = lpfc_sli4_repost_els_sgl_list(phba);
6263 if (unlikely(rc)) {
6264 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6265 "0582 Error %d during els sgl post "
6266 "operation\n", rc);
6267 rc = -ENODEV;
6268 goto out_free_mbox;
6269 }
6270
6271 /* register the allocated scsi sgl pool to the port */
6130 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 6272 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6131 if (unlikely(rc)) { 6273 if (unlikely(rc)) {
6132 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6274 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -13080,9 +13222,7 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
13080 } else { 13222 } else {
13081 set_bit(xri, phba->sli4_hba.xri_bmask); 13223 set_bit(xri, phba->sli4_hba.xri_bmask);
13082 phba->sli4_hba.max_cfg_param.xri_used++; 13224 phba->sli4_hba.max_cfg_param.xri_used++;
13083 phba->sli4_hba.xri_count++;
13084 } 13225 }
13085
13086 spin_unlock_irq(&phba->hbalock); 13226 spin_unlock_irq(&phba->hbalock);
13087 return xri; 13227 return xri;
13088} 13228}
@@ -13098,7 +13238,6 @@ void
13098__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 13238__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
13099{ 13239{
13100 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { 13240 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
13101 phba->sli4_hba.xri_count--;
13102 phba->sli4_hba.max_cfg_param.xri_used--; 13241 phba->sli4_hba.max_cfg_param.xri_used--;
13103 } 13242 }
13104} 13243}
@@ -13149,31 +13288,32 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
13149/** 13288/**
13150 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port. 13289 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
13151 * @phba: pointer to lpfc hba data structure. 13290 * @phba: pointer to lpfc hba data structure.
13291 * @post_sgl_list: pointer to els sgl entry list.
13292 * @count: number of els sgl entries on the list.
13152 * 13293 *
13153 * This routine is invoked to post a block of driver's sgl pages to the 13294 * This routine is invoked to post a block of driver's sgl pages to the
13154 * HBA using non-embedded mailbox command. No Lock is held. This routine 13295 * HBA using non-embedded mailbox command. No Lock is held. This routine
13155 * is only called when the driver is loading and after all IO has been 13296 * is only called when the driver is loading and after all IO has been
13156 * stopped. 13297 * stopped.
13157 **/ 13298 **/
13158int 13299static int
13159lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba) 13300lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
13301 struct list_head *post_sgl_list,
13302 int post_cnt)
13160{ 13303{
13161 struct lpfc_sglq *sglq_entry; 13304 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
13162 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 13305 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13163 struct sgl_page_pairs *sgl_pg_pairs; 13306 struct sgl_page_pairs *sgl_pg_pairs;
13164 void *viraddr; 13307 void *viraddr;
13165 LPFC_MBOXQ_t *mbox; 13308 LPFC_MBOXQ_t *mbox;
13166 uint32_t reqlen, alloclen, pg_pairs; 13309 uint32_t reqlen, alloclen, pg_pairs;
13167 uint32_t mbox_tmo; 13310 uint32_t mbox_tmo;
13168 uint16_t xritag_start = 0, lxri = 0; 13311 uint16_t xritag_start = 0;
13169 int els_xri_cnt, rc = 0; 13312 int rc = 0;
13170 uint32_t shdr_status, shdr_add_status; 13313 uint32_t shdr_status, shdr_add_status;
13171 union lpfc_sli4_cfg_shdr *shdr; 13314 union lpfc_sli4_cfg_shdr *shdr;
13172 13315
13173 /* The number of sgls to be posted */ 13316 reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
13174 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
13175
13176 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
13177 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13317 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13178 if (reqlen > SLI4_PAGE_SIZE) { 13318 if (reqlen > SLI4_PAGE_SIZE) {
13179 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13319 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -13203,25 +13343,8 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
13203 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 13343 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13204 sgl_pg_pairs = &sgl->sgl_pg_pairs; 13344 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13205 13345
13206 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) { 13346 pg_pairs = 0;
13207 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs]; 13347 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
13208
13209 /*
13210 * Assign the sglq a physical xri only if the driver has not
13211 * initialized those resources. A port reset only needs
13212 * the sglq's posted.
13213 */
13214 if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
13215 LPFC_XRI_RSRC_RDY) {
13216 lxri = lpfc_sli4_next_xritag(phba);
13217 if (lxri == NO_XRI) {
13218 lpfc_sli4_mbox_cmd_free(phba, mbox);
13219 return -ENOMEM;
13220 }
13221 sglq_entry->sli4_lxritag = lxri;
13222 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
13223 }
13224
13225 /* Set up the sge entry */ 13348 /* Set up the sge entry */
13226 sgl_pg_pairs->sgl_pg0_addr_lo = 13349 sgl_pg_pairs->sgl_pg0_addr_lo =
13227 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 13350 cpu_to_le32(putPaddrLow(sglq_entry->phys));
@@ -13236,11 +13359,12 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
13236 if (pg_pairs == 0) 13359 if (pg_pairs == 0)
13237 xritag_start = sglq_entry->sli4_xritag; 13360 xritag_start = sglq_entry->sli4_xritag;
13238 sgl_pg_pairs++; 13361 sgl_pg_pairs++;
13362 pg_pairs++;
13239 } 13363 }
13240 13364
13241 /* Complete initialization and perform endian conversion. */ 13365 /* Complete initialization and perform endian conversion. */
13242 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 13366 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
13243 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt); 13367 bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
13244 sgl->word0 = cpu_to_le32(sgl->word0); 13368 sgl->word0 = cpu_to_le32(sgl->word0);
13245 if (!phba->sli4_hba.intr_enable) 13369 if (!phba->sli4_hba.intr_enable)
13246 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13370 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
@@ -13260,183 +13384,6 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
13260 shdr_status, shdr_add_status, rc); 13384 shdr_status, shdr_add_status, rc);
13261 rc = -ENXIO; 13385 rc = -ENXIO;
13262 } 13386 }
13263
13264 if (rc == 0)
13265 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
13266 LPFC_XRI_RSRC_RDY);
13267 return rc;
13268}
13269
13270/**
13271 * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
13272 * @phba: pointer to lpfc hba data structure.
13273 *
13274 * This routine is invoked to post a block of driver's sgl pages to the
13275 * HBA using non-embedded mailbox command. No Lock is held. This routine
13276 * is only called when the driver is loading and after all IO has been
13277 * stopped.
13278 **/
13279int
13280lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
13281{
13282 struct lpfc_sglq *sglq_entry;
13283 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13284 struct sgl_page_pairs *sgl_pg_pairs;
13285 void *viraddr;
13286 LPFC_MBOXQ_t *mbox;
13287 uint32_t reqlen, alloclen, index;
13288 uint32_t mbox_tmo;
13289 uint16_t rsrc_start, rsrc_size, els_xri_cnt, post_els_xri_cnt;
13290 uint16_t xritag_start = 0, lxri = 0;
13291 struct lpfc_rsrc_blks *rsrc_blk;
13292 int cnt, ttl_cnt, rc = 0;
13293 int loop_cnt;
13294 uint32_t shdr_status, shdr_add_status;
13295 union lpfc_sli4_cfg_shdr *shdr;
13296
13297 /* The number of sgls to be posted */
13298 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
13299
13300 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
13301 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13302 if (reqlen > SLI4_PAGE_SIZE) {
13303 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13304 "2989 Block sgl registration required DMA "
13305 "size (%d) great than a page\n", reqlen);
13306 return -ENOMEM;
13307 }
13308
13309 cnt = 0;
13310 ttl_cnt = 0;
13311 post_els_xri_cnt = els_xri_cnt;
13312 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
13313 list) {
13314 rsrc_start = rsrc_blk->rsrc_start;
13315 rsrc_size = rsrc_blk->rsrc_size;
13316
13317 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13318 "3014 Working ELS Extent start %d, cnt %d\n",
13319 rsrc_start, rsrc_size);
13320
13321 loop_cnt = min(post_els_xri_cnt, rsrc_size);
13322 if (loop_cnt < post_els_xri_cnt) {
13323 post_els_xri_cnt -= loop_cnt;
13324 ttl_cnt += loop_cnt;
13325 } else
13326 ttl_cnt += post_els_xri_cnt;
13327
13328 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13329 if (!mbox)
13330 return -ENOMEM;
13331 /*
13332 * Allocate DMA memory and set up the non-embedded mailbox
13333 * command.
13334 */
13335 alloclen = lpfc_sli4_config(phba, mbox,
13336 LPFC_MBOX_SUBSYSTEM_FCOE,
13337 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
13338 reqlen, LPFC_SLI4_MBX_NEMBED);
13339 if (alloclen < reqlen) {
13340 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13341 "2987 Allocated DMA memory size (%d) "
13342 "is less than the requested DMA memory "
13343 "size (%d)\n", alloclen, reqlen);
13344 lpfc_sli4_mbox_cmd_free(phba, mbox);
13345 return -ENOMEM;
13346 }
13347
13348 /* Set up the SGL pages in the non-embedded DMA pages */
13349 viraddr = mbox->sge_array->addr[0];
13350 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13351 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13352
13353 /*
13354 * The starting resource may not begin at zero. Control
13355 * the loop variants via the block resource parameters,
13356 * but handle the sge pointers with a zero-based index
13357 * that doesn't get reset per loop pass.
13358 */
13359 for (index = rsrc_start;
13360 index < rsrc_start + loop_cnt;
13361 index++) {
13362 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
13363
13364 /*
13365 * Assign the sglq a physical xri only if the driver
13366 * has not initialized those resources. A port reset
13367 * only needs the sglq's posted.
13368 */
13369 if (bf_get(lpfc_xri_rsrc_rdy,
13370 &phba->sli4_hba.sli4_flags) !=
13371 LPFC_XRI_RSRC_RDY) {
13372 lxri = lpfc_sli4_next_xritag(phba);
13373 if (lxri == NO_XRI) {
13374 lpfc_sli4_mbox_cmd_free(phba, mbox);
13375 rc = -ENOMEM;
13376 goto err_exit;
13377 }
13378 sglq_entry->sli4_lxritag = lxri;
13379 sglq_entry->sli4_xritag =
13380 phba->sli4_hba.xri_ids[lxri];
13381 }
13382
13383 /* Set up the sge entry */
13384 sgl_pg_pairs->sgl_pg0_addr_lo =
13385 cpu_to_le32(putPaddrLow(sglq_entry->phys));
13386 sgl_pg_pairs->sgl_pg0_addr_hi =
13387 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
13388 sgl_pg_pairs->sgl_pg1_addr_lo =
13389 cpu_to_le32(putPaddrLow(0));
13390 sgl_pg_pairs->sgl_pg1_addr_hi =
13391 cpu_to_le32(putPaddrHigh(0));
13392
13393 /* Track the starting physical XRI for the mailbox. */
13394 if (index == rsrc_start)
13395 xritag_start = sglq_entry->sli4_xritag;
13396 sgl_pg_pairs++;
13397 cnt++;
13398 }
13399
13400 /* Complete initialization and perform endian conversion. */
13401 rsrc_blk->rsrc_used += loop_cnt;
13402 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
13403 bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
13404 sgl->word0 = cpu_to_le32(sgl->word0);
13405
13406 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13407 "3015 Post ELS Extent SGL, start %d, "
13408 "cnt %d, used %d\n",
13409 xritag_start, loop_cnt, rsrc_blk->rsrc_used);
13410 if (!phba->sli4_hba.intr_enable)
13411 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13412 else {
13413 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13414 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13415 }
13416 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13417 shdr_status = bf_get(lpfc_mbox_hdr_status,
13418 &shdr->response);
13419 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13420 &shdr->response);
13421 if (rc != MBX_TIMEOUT)
13422 lpfc_sli4_mbox_cmd_free(phba, mbox);
13423 if (shdr_status || shdr_add_status || rc) {
13424 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13425 "2988 POST_SGL_BLOCK mailbox "
13426 "command failed status x%x "
13427 "add_status x%x mbx status x%x\n",
13428 shdr_status, shdr_add_status, rc);
13429 rc = -ENXIO;
13430 goto err_exit;
13431 }
13432 if (ttl_cnt >= els_xri_cnt)
13433 break;
13434 }
13435
13436 err_exit:
13437 if (rc == 0)
13438 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
13439 LPFC_XRI_RSRC_RDY);
13440 return rc; 13387 return rc;
13441} 13388}
13442 13389
@@ -13452,8 +13399,9 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
13452 * 13399 *
13453 **/ 13400 **/
13454int 13401int
13455lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, 13402lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
13456 int cnt) 13403 struct list_head *sblist,
13404 int count)
13457{ 13405{
13458 struct lpfc_scsi_buf *psb; 13406 struct lpfc_scsi_buf *psb;
13459 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 13407 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
@@ -13469,7 +13417,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
13469 union lpfc_sli4_cfg_shdr *shdr; 13417 union lpfc_sli4_cfg_shdr *shdr;
13470 13418
13471 /* Calculate the requested length of the dma memory */ 13419 /* Calculate the requested length of the dma memory */
13472 reqlen = cnt * sizeof(struct sgl_page_pairs) + 13420 reqlen = count * sizeof(struct sgl_page_pairs) +
13473 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13421 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13474 if (reqlen > SLI4_PAGE_SIZE) { 13422 if (reqlen > SLI4_PAGE_SIZE) {
13475 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13423 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -13553,169 +13501,6 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
13553} 13501}
13554 13502
13555/** 13503/**
13556 * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
13557 * @phba: pointer to lpfc hba data structure.
13558 * @sblist: pointer to scsi buffer list.
13559 * @count: number of scsi buffers on the list.
13560 *
13561 * This routine is invoked to post a block of @count scsi sgl pages from a
13562 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
13563 * No Lock is held.
13564 *
13565 **/
13566int
13567lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
13568 int cnt)
13569{
13570 struct lpfc_scsi_buf *psb = NULL;
13571 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13572 struct sgl_page_pairs *sgl_pg_pairs;
13573 void *viraddr;
13574 LPFC_MBOXQ_t *mbox;
13575 uint32_t reqlen, alloclen, pg_pairs;
13576 uint32_t mbox_tmo;
13577 uint16_t xri_start = 0, scsi_xri_start;
13578 uint16_t rsrc_range;
13579 int rc = 0, avail_cnt;
13580 uint32_t shdr_status, shdr_add_status;
13581 dma_addr_t pdma_phys_bpl1;
13582 union lpfc_sli4_cfg_shdr *shdr;
13583 struct lpfc_rsrc_blks *rsrc_blk;
13584 uint32_t xri_cnt = 0;
13585
13586 /* Calculate the total requested length of the dma memory */
13587 reqlen = cnt * sizeof(struct sgl_page_pairs) +
13588 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13589 if (reqlen > SLI4_PAGE_SIZE) {
13590 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13591 "2932 Block sgl registration required DMA "
13592 "size (%d) great than a page\n", reqlen);
13593 return -ENOMEM;
13594 }
13595
13596 /*
13597 * The use of extents requires the driver to post the sgl headers
13598 * in multiple postings to meet the contiguous resource assignment.
13599 */
13600 psb = list_prepare_entry(psb, sblist, list);
13601 scsi_xri_start = phba->sli4_hba.scsi_xri_start;
13602 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
13603 list) {
13604 rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
13605 if (rsrc_range < scsi_xri_start)
13606 continue;
13607 else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
13608 continue;
13609 else
13610 avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
13611
13612 reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
13613 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13614 /*
13615 * Allocate DMA memory and set up the non-embedded mailbox
13616 * command. The mbox is used to post an SGL page per loop
13617 * but the DMA memory has a use-once semantic so the mailbox
13618 * is used and freed per loop pass.
13619 */
13620 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13621 if (!mbox) {
13622 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13623 "2933 Failed to allocate mbox cmd "
13624 "memory\n");
13625 return -ENOMEM;
13626 }
13627 alloclen = lpfc_sli4_config(phba, mbox,
13628 LPFC_MBOX_SUBSYSTEM_FCOE,
13629 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
13630 reqlen,
13631 LPFC_SLI4_MBX_NEMBED);
13632 if (alloclen < reqlen) {
13633 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13634 "2934 Allocated DMA memory size (%d) "
13635 "is less than the requested DMA memory "
13636 "size (%d)\n", alloclen, reqlen);
13637 lpfc_sli4_mbox_cmd_free(phba, mbox);
13638 return -ENOMEM;
13639 }
13640
13641 /* Get the first SGE entry from the non-embedded DMA memory */
13642 viraddr = mbox->sge_array->addr[0];
13643
13644 /* Set up the SGL pages in the non-embedded DMA pages */
13645 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13646 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13647
13648 /* pg_pairs tracks posted SGEs per loop iteration. */
13649 pg_pairs = 0;
13650 list_for_each_entry_continue(psb, sblist, list) {
13651 /* Set up the sge entry */
13652 sgl_pg_pairs->sgl_pg0_addr_lo =
13653 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
13654 sgl_pg_pairs->sgl_pg0_addr_hi =
13655 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
13656 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
13657 pdma_phys_bpl1 = psb->dma_phys_bpl +
13658 SGL_PAGE_SIZE;
13659 else
13660 pdma_phys_bpl1 = 0;
13661 sgl_pg_pairs->sgl_pg1_addr_lo =
13662 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
13663 sgl_pg_pairs->sgl_pg1_addr_hi =
13664 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
13665 /* Keep the first xri for this extent. */
13666 if (pg_pairs == 0)
13667 xri_start = psb->cur_iocbq.sli4_xritag;
13668 sgl_pg_pairs++;
13669 pg_pairs++;
13670 xri_cnt++;
13671
13672 /*
13673 * Track two exit conditions - the loop has constructed
13674 * all of the caller's SGE pairs or all available
13675 * resource IDs in this extent are consumed.
13676 */
13677 if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
13678 break;
13679 }
13680 rsrc_blk->rsrc_used += pg_pairs;
13681 bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
13682 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
13683
13684 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13685 "3016 Post SCSI Extent SGL, start %d, cnt %d "
13686 "blk use %d\n",
13687 xri_start, pg_pairs, rsrc_blk->rsrc_used);
13688 /* Perform endian conversion if necessary */
13689 sgl->word0 = cpu_to_le32(sgl->word0);
13690 if (!phba->sli4_hba.intr_enable)
13691 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13692 else {
13693 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13694 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13695 }
13696 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13697 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13698 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13699 &shdr->response);
13700 if (rc != MBX_TIMEOUT)
13701 lpfc_sli4_mbox_cmd_free(phba, mbox);
13702 if (shdr_status || shdr_add_status || rc) {
13703 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13704 "2935 POST_SGL_BLOCK mailbox command "
13705 "failed status x%x add_status x%x "
13706 "mbx status x%x\n",
13707 shdr_status, shdr_add_status, rc);
13708 return -ENXIO;
13709 }
13710
13711 /* Post only what is requested. */
13712 if (xri_cnt >= cnt)
13713 break;
13714 }
13715 return rc;
13716}
13717
13718/**
13719 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 13504 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
13720 * @phba: pointer to lpfc_hba struct that the frame was received on 13505 * @phba: pointer to lpfc_hba struct that the frame was received on
13721 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13506 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index c19d139618b7..f097382d7b91 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -493,14 +493,12 @@ struct lpfc_sli4_hba {
493 uint16_t next_rpi; 493 uint16_t next_rpi;
494 uint16_t scsi_xri_max; 494 uint16_t scsi_xri_max;
495 uint16_t scsi_xri_cnt; 495 uint16_t scsi_xri_cnt;
496 uint16_t els_xri_cnt;
496 uint16_t scsi_xri_start; 497 uint16_t scsi_xri_start;
497 struct list_head lpfc_free_sgl_list; 498 struct list_head lpfc_free_sgl_list;
498 struct list_head lpfc_sgl_list; 499 struct list_head lpfc_sgl_list;
499 struct lpfc_sglq **lpfc_els_sgl_array;
500 struct list_head lpfc_abts_els_sgl_list; 500 struct list_head lpfc_abts_els_sgl_list;
501 struct lpfc_scsi_buf **lpfc_scsi_psb_array;
502 struct list_head lpfc_abts_scsi_buf_list; 501 struct list_head lpfc_abts_scsi_buf_list;
503 uint32_t total_sglq_bufs;
504 struct lpfc_sglq **lpfc_sglq_active_list; 502 struct lpfc_sglq **lpfc_sglq_active_list;
505 struct list_head lpfc_rpi_hdr_list; 503 struct list_head lpfc_rpi_hdr_list;
506 unsigned long *rpi_bmask; 504 unsigned long *rpi_bmask;
@@ -509,7 +507,6 @@ struct lpfc_sli4_hba {
509 struct list_head lpfc_rpi_blk_list; 507 struct list_head lpfc_rpi_blk_list;
510 unsigned long *xri_bmask; 508 unsigned long *xri_bmask;
511 uint16_t *xri_ids; 509 uint16_t *xri_ids;
512 uint16_t xri_count;
513 struct list_head lpfc_xri_blk_list; 510 struct list_head lpfc_xri_blk_list;
514 unsigned long *vfi_bmask; 511 unsigned long *vfi_bmask;
515 uint16_t *vfi_ids; 512 uint16_t *vfi_ids;
@@ -614,11 +611,7 @@ int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
614int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); 611int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
615uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); 612uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
616int lpfc_sli4_post_async_mbox(struct lpfc_hba *); 613int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
617int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba);
618int lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba);
619int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int); 614int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
620int lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *, struct list_head *,
621 int);
622struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); 615struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
623struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *); 616struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
624void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); 617void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);