author    David S. Miller <davem@davemloft.net>  2012-07-20 14:11:59 -0400
committer David S. Miller <davem@davemloft.net>  2012-07-20 14:11:59 -0400
commit    e4bce0f288bafd8505ba5ce9c5284a4478f1b725 (patch)
tree      361c9647757df8a6f9dc40b738325f717d6aeabd
parent    aac3942cedc339b1e7b6bad28f3abe4ceb15bcc3 (diff)
parent    a58915c7ecba89bef0914664ecf87c2156c68630 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
This series contains updates to ixgbe.
...

Alexander Duyck (9):
  ixgbe: Use VMDq offset to indicate the default pool
  ixgbe: Fix memory leak when SR-IOV VFs are direct assigned
  ixgbe: Drop references to deprecated pci_ DMA api and instead use dma_ API
  ixgbe: Cleanup configuration of FCoE registers
  ixgbe: Merge all FCoE percpu values into a single structure
  ixgbe: Make FCoE allocation and configuration closer to how rings work
  ixgbe: Correctly set SAN MAC RAR pool to default pool of PF
  ixgbe: Only enable anti-spoof on VF pools
  ixgbe: Enable FCoE FSO and CRC offloads based on CAPABLE instead of ENABLED flag
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
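The "Merge all FCoE percpu values into a single structure" patch below folds three separate per-CPU allocations (a pci_pool pointer plus two DDP-failure counters) into one per-CPU struct, ixgbe_fcoe_ddp_pool, allocated with alloc_percpu() and reached through per_cpu_ptr(). The following is a minimal, self-contained sketch of that pattern for a kernel module context; it is an illustration only, not the driver code, and the example_* names are hypothetical.

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/dmapool.h>
#include <linux/smp.h>

/* Hypothetical per-CPU bundle, mirroring struct ixgbe_fcoe_ddp_pool in the
 * hunks below: one DMA pool plus the counters that previously lived in
 * separate per-CPU allocations.
 */
struct example_ddp_pool {
        struct dma_pool *pool;
        u64 noddp;
        u64 noddp_ext_buff;
};

static struct example_ddp_pool __percpu *example_alloc_pools(void)
{
        /* A single allocation now covers the pool pointer and both counters. */
        return alloc_percpu(struct example_ddp_pool);
}

static void example_count_failure(struct example_ddp_pool __percpu *pools)
{
        /* get_cpu() disables preemption so this CPU's slot stays valid. */
        struct example_ddp_pool *p = per_cpu_ptr(pools, get_cpu());

        p->noddp++;
        put_cpu();
}

static void example_free_pools(struct example_ddp_pool __percpu *pools)
{
        free_percpu(pools);
}

Consolidating the fields this way means one alloc_percpu()/free_percpu() pair instead of three, and the pool pointer and its statistics stay on the same cache line for the CPU that uses them.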
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h        |   5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c  |   4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c |  45
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h |   1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c   | 378
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h   |  15
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c   | 117
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c  |  20
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h   |   3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c   |   4
10 files changed, 344 insertions, 248 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index f7f6fe2255da..5a286adc65c0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -113,7 +113,7 @@
 #define IXGBE_MAX_VFTA_ENTRIES 128
 #define MAX_EMULATION_MAC_ADDRS 16
 #define IXGBE_MAX_PF_MACVLANS 15
-#define VMDQ_P(p) ((p) + adapter->num_vfs)
+#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
 #define IXGBE_82599_VF_DEVICE_ID 0x10ED
 #define IXGBE_X540_VF_DEVICE_ID 0x1515
 
@@ -691,7 +691,6 @@ extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
                      struct ixgbe_tx_buffer *first,
                      u8 *hdr_len);
-extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                           union ixgbe_adv_rx_desc *rx_desc,
                           struct sk_buff *skb);
@@ -700,6 +699,8 @@ extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
                                  struct scatterlist *sgl, unsigned int sgc);
 extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
 extern int ixgbe_fcoe_enable(struct net_device *netdev);
 extern int ixgbe_fcoe_disable(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_DCB
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index e7dddfd97cb9..50fc137501da 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1025,6 +1025,9 @@ mac_reset_top:
                 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
                                     hw->mac.san_addr, 0, IXGBE_RAH_AV);
 
+                /* Save the SAN MAC RAR index */
+                hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
                 /* Reserve the last RAR for the SAN MAC address */
                 hw->mac.num_rar_entries--;
         }
@@ -2106,6 +2109,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
         .set_rar = &ixgbe_set_rar_generic,
         .clear_rar = &ixgbe_clear_rar_generic,
         .set_vmdq = &ixgbe_set_vmdq_generic,
+        .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic,
         .clear_vmdq = &ixgbe_clear_vmdq_generic,
         .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
         .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index bb7fde45c057..90e41db3cb69 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -2848,6 +2848,31 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 }
 
 /**
+ * This function should only be involved in the IOV mode.
+ * In IOV mode, Default pool is next pool after the number of
+ * VFs advertized and not 0.
+ * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
+ *
+ * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @vmdq: VMDq pool index
+ **/
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
+{
+        u32 rar = hw->mac.san_mac_rar_index;
+
+        if (vmdq < 32) {
+                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+        } else {
+                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+        }
+
+        return 0;
+}
+
+/**
  * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
  * @hw: pointer to hardware structure
  **/
@@ -3200,20 +3225,22 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
          * PFVFSPOOF register array is size 8 with 8 bits assigned to
          * MAC anti-spoof enables in each register array element.
          */
-        for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+        for (j = 0; j < pf_target_reg; j++)
                 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
 
-        /* If not enabling anti-spoofing then done */
-        if (!enable)
-                return;
-
         /*
          * The PF should be allowed to spoof so that it can support
-         * emulation mode NICs. Reset the bit assigned to the PF
+         * emulation mode NICs. Do not set the bits assigned to the PF
+         */
+        pfvfspoof &= (1 << pf_target_shift) - 1;
+        IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
+
+        /*
+         * Remaining pools belong to the PF so they do not need to have
+         * anti-spoofing enabled.
          */
-        pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
-        pfvfspoof ^= (1 << pf_target_shift);
-        IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
+        for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+                IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 6222fdb3d3f1..d813d1188c36 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -85,6 +85,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index cc28c44a048c..ae73ef14fdf3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -104,10 +104,10 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
                 udelay(100);
         }
         if (ddp->sgl)
-                pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
+                dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
                              DMA_FROM_DEVICE);
         if (ddp->pool) {
-                pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
+                dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
                 ddp->pool = NULL;
         }
 
@@ -134,6 +134,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
         struct ixgbe_hw *hw;
         struct ixgbe_fcoe *fcoe;
         struct ixgbe_fcoe_ddp *ddp;
+        struct ixgbe_fcoe_ddp_pool *ddp_pool;
         struct scatterlist *sg;
         unsigned int i, j, dmacount;
         unsigned int len;
@@ -144,8 +145,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
         unsigned int thislen = 0;
         u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
         dma_addr_t addr = 0;
-        struct pci_pool *pool;
-        unsigned int cpu;
 
         if (!netdev || !sgl)
                 return 0;
@@ -162,11 +161,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
                 return 0;
 
         fcoe = &adapter->fcoe;
-        if (!fcoe->pool) {
-                e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
-                return 0;
-        }
-
         ddp = &fcoe->ddp[xid];
         if (ddp->sgl) {
                 e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
@@ -175,22 +169,32 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
         }
         ixgbe_fcoe_clear_ddp(ddp);
 
+
+        if (!fcoe->ddp_pool) {
+                e_warn(drv, "No ddp_pool resources allocated\n");
+                return 0;
+        }
+
+        ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
+        if (!ddp_pool->pool) {
+                e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
+                goto out_noddp;
+        }
+
         /* setup dma from scsi command sgl */
-        dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+        dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
         if (dmacount == 0) {
                 e_err(drv, "xid 0x%x DMA map error\n", xid);
-                return 0;
+                goto out_noddp;
         }
 
         /* alloc the udl from per cpu ddp pool */
-        cpu = get_cpu();
-        pool = *per_cpu_ptr(fcoe->pool, cpu);
-        ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
+        ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
         if (!ddp->udl) {
                 e_err(drv, "failed allocated ddp context\n");
                 goto out_noddp_unmap;
         }
-        ddp->pool = pool;
+        ddp->pool = ddp_pool->pool;
         ddp->sgl = sgl;
         ddp->sgc = sgc;
 
@@ -201,7 +205,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
         while (len) {
                 /* max number of buffers allowed in one DDP context */
                 if (j >= IXGBE_BUFFCNT_MAX) {
-                        *per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1;
+                        ddp_pool->noddp++;
                         goto out_noddp_free;
                 }
 
@@ -241,7 +245,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
          */
         if (lastsize == bufflen) {
                 if (j >= IXGBE_BUFFCNT_MAX) {
-                        *per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1;
+                        ddp_pool->noddp_ext_buff++;
                         goto out_noddp_free;
                 }
 
@@ -293,11 +297,12 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
         return 1;
 
 out_noddp_free:
-        pci_pool_free(pool, ddp->udl, ddp->udp);
+        dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
         ixgbe_fcoe_clear_ddp(ddp);
 
 out_noddp_unmap:
-        pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+        dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
+out_noddp:
         put_cpu();
         return 0;
 }
@@ -409,7 +414,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                 break;
         /* unmap the sg list when FCPRSP is received */
         case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
-                pci_unmap_sg(adapter->pdev, ddp->sgl,
+                dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
                              ddp->sgc, DMA_FROM_DEVICE);
                 ddp->err = ddp_err;
                 ddp->sgl = NULL;
@@ -563,44 +568,37 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
         return 0;
 }
 
-static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
+static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
 {
-        unsigned int cpu;
-        struct pci_pool **pool;
+        struct ixgbe_fcoe_ddp_pool *ddp_pool;
 
-        for_each_possible_cpu(cpu) {
-                pool = per_cpu_ptr(fcoe->pool, cpu);
-                if (*pool)
-                        pci_pool_destroy(*pool);
-        }
-        free_percpu(fcoe->pool);
-        fcoe->pool = NULL;
+        ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+        if (ddp_pool->pool)
+                dma_pool_destroy(ddp_pool->pool);
+        ddp_pool->pool = NULL;
 }
 
-static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
+static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
+                                     struct device *dev,
+                                     unsigned int cpu)
 {
-        struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-        unsigned int cpu;
-        struct pci_pool **pool;
+        struct ixgbe_fcoe_ddp_pool *ddp_pool;
+        struct dma_pool *pool;
         char pool_name[32];
 
-        fcoe->pool = alloc_percpu(struct pci_pool *);
-        if (!fcoe->pool)
-                return;
+        snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
 
-        /* allocate pci pool for each cpu */
-        for_each_possible_cpu(cpu) {
-                snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
-                pool = per_cpu_ptr(fcoe->pool, cpu);
-                *pool = pci_pool_create(pool_name,
-                                        adapter->pdev, IXGBE_FCPTR_MAX,
-                                        IXGBE_FCPTR_ALIGN, PAGE_SIZE);
-                if (!*pool) {
-                        e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
-                        ixgbe_fcoe_ddp_pools_free(fcoe);
-                        return;
-                }
-        }
+        pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
+                               IXGBE_FCPTR_ALIGN, PAGE_SIZE);
+        if (!pool)
+                return -ENOMEM;
+
+        ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+        ddp_pool->pool = pool;
+        ddp_pool->noddp = 0;
+        ddp_pool->noddp_ext_buff = 0;
+
+        return 0;
 }
 
 /**
@@ -613,132 +611,171 @@ static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
  */
 void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 {
-        int i, fcoe_q, fcoe_i;
+        struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
         struct ixgbe_hw *hw = &adapter->hw;
-        struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-        struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
-        unsigned int cpu;
-
-        if (!fcoe->pool) {
-                spin_lock_init(&fcoe->lock);
-
-                ixgbe_fcoe_ddp_pools_alloc(adapter);
-                if (!fcoe->pool) {
-                        e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
-                        return;
-                }
-
-                /* Extra buffer to be shared by all DDPs for HW work around */
-                fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
-                if (fcoe->extra_ddp_buffer == NULL) {
-                        e_err(drv, "failed to allocated extra DDP buffer\n");
-                        goto out_ddp_pools;
-                }
+        int i, fcoe_q, fcoe_i;
+        u32 etqf;
 
-                fcoe->extra_ddp_buffer_dma =
-                        dma_map_single(&adapter->pdev->dev,
-                                       fcoe->extra_ddp_buffer,
-                                       IXGBE_FCBUFF_MIN,
-                                       DMA_FROM_DEVICE);
-                if (dma_mapping_error(&adapter->pdev->dev,
-                                      fcoe->extra_ddp_buffer_dma)) {
-                        e_err(drv, "failed to map extra DDP buffer\n");
-                        goto out_extra_ddp_buffer;
-                }
+        /* Minimal functionality for FCoE requires at least CRC offloads */
+        if (!(adapter->netdev->features & NETIF_F_FCOE_CRC))
+                return;
 
-                /* Alloc per cpu mem to count the ddp alloc failure number */
-                fcoe->pcpu_noddp = alloc_percpu(u64);
-                if (!fcoe->pcpu_noddp) {
-                        e_err(drv, "failed to alloc noddp counter\n");
-                        goto out_pcpu_noddp_alloc_fail;
+        /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */
+        etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN;
+        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+                etqf |= IXGBE_ETQF_POOL_ENABLE;
+                etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
         }
+        IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf);
+        IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
 
-        fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64);
-        if (!fcoe->pcpu_noddp_ext_buff) {
-                e_err(drv, "failed to alloc noddp extra buff cnt\n");
-                goto out_pcpu_noddp_extra_buff_alloc_fail;
-        }
+        /* leave registers un-configured if FCoE is disabled */
+        if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+                return;
 
-        for_each_possible_cpu(cpu) {
-                *per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0;
-                *per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0;
+        /* Use one or more Rx queues for FCoE by redirection table */
+        for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
+                fcoe_i = fcoe->offset + (i % fcoe->indices);
+                fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
+                fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+                IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
         }
+        IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
 
-        /* Enable L2 eth type filter for FCoE */
-        IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
-                        (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
-        /* Enable L2 eth type filter for FIP */
-        IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
-                        (ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
-        if (adapter->ring_feature[RING_F_FCOE].indices) {
-                /* Use multiple rx queues for FCoE by redirection table */
-                for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
-                        fcoe_i = f->offset + i % f->indices;
-                        fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
-                        fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
-                        IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
-                }
-                IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
-                IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
-        } else {
-                /* Use single rx queue for FCoE */
-                fcoe_i = f->offset;
-                fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
-                IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
-                IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
-                                IXGBE_ETQS_QUEUE_EN |
-                                (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
+        /* Enable L2 EtherType filter for FIP */
+        etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN;
+        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+                etqf |= IXGBE_ETQF_POOL_ENABLE;
+                etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
         }
-        /* send FIP frames to the first FCoE queue */
-        fcoe_i = f->offset;
-        fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+        IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);
+
+        /* Send FIP frames to the first FCoE queue */
+        fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
         IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
                         IXGBE_ETQS_QUEUE_EN |
                         (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
 
-        IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCCRCBO |
+        /* Configure FCoE Rx control */
+        IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
+                        IXGBE_FCRXCTRL_FCCRCBO |
                         (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
-        return;
-out_pcpu_noddp_extra_buff_alloc_fail:
-        free_percpu(fcoe->pcpu_noddp);
-out_pcpu_noddp_alloc_fail:
-        dma_unmap_single(&adapter->pdev->dev,
-                         fcoe->extra_ddp_buffer_dma,
-                         IXGBE_FCBUFF_MIN,
-                         DMA_FROM_DEVICE);
-out_extra_ddp_buffer:
-        kfree(fcoe->extra_ddp_buffer);
-out_ddp_pools:
-        ixgbe_fcoe_ddp_pools_free(fcoe);
 }
 
 /**
- * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
+ * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
  * @adapter : ixgbe adapter
  *
  * Cleans up outstanding ddp context resources
  *
  * Returns : none
  */
-void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
+void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
 {
-        int i;
         struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+        int cpu, i;
 
-        if (!fcoe->pool)
+        /* do nothing if no DDP pools were allocated */
+        if (!fcoe->ddp_pool)
                 return;
 
         for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
                 ixgbe_fcoe_ddp_put(adapter->netdev, i);
+
+        for_each_possible_cpu(cpu)
+                ixgbe_fcoe_dma_pool_free(fcoe, cpu);
+
         dma_unmap_single(&adapter->pdev->dev,
                          fcoe->extra_ddp_buffer_dma,
                          IXGBE_FCBUFF_MIN,
                          DMA_FROM_DEVICE);
-        free_percpu(fcoe->pcpu_noddp);
-        free_percpu(fcoe->pcpu_noddp_ext_buff);
         kfree(fcoe->extra_ddp_buffer);
-        ixgbe_fcoe_ddp_pools_free(fcoe);
+
+        fcoe->extra_ddp_buffer = NULL;
+        fcoe->extra_ddp_buffer_dma = 0;
+}
+
+/**
+ * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
+ * @adapter: ixgbe adapter
+ *
+ * Sets up ddp context resouces
+ *
+ * Returns : 0 indicates success or -EINVAL on failure
+ */
+int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
+{
+        struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+        struct device *dev = &adapter->pdev->dev;
+        void *buffer;
+        dma_addr_t dma;
+        unsigned int cpu;
+
+        /* do nothing if no DDP pools were allocated */
+        if (!fcoe->ddp_pool)
+                return 0;
+
+        /* Extra buffer to be shared by all DDPs for HW work around */
+        buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+        if (!buffer) {
+                e_err(drv, "failed to allocate extra DDP buffer\n");
+                return -ENOMEM;
+        }
+
+        dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
+        if (dma_mapping_error(dev, dma)) {
+                e_err(drv, "failed to map extra DDP buffer\n");
+                kfree(buffer);
+                return -ENOMEM;
+        }
+
+        fcoe->extra_ddp_buffer = buffer;
+        fcoe->extra_ddp_buffer_dma = dma;
+
+        /* allocate pci pool for each cpu */
+        for_each_possible_cpu(cpu) {
+                int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
+                if (!err)
+                        continue;
+
+                e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
+                ixgbe_free_fcoe_ddp_resources(adapter);
+                return -ENOMEM;
+        }
+
+        return 0;
+}
+
+static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
+{
+        struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+        if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
+                return -EINVAL;
+
+        fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
+
+        if (!fcoe->ddp_pool) {
+                e_err(drv, "failed to allocate percpu DDP resources\n");
+                return -ENOMEM;
+        }
+
+        adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+
+        return 0;
+}
+
+static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
+{
+        struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+        adapter->netdev->fcoe_ddp_xid = 0;
+
+        if (!fcoe->ddp_pool)
+                return;
+
+        free_percpu(fcoe->ddp_pool);
+        fcoe->ddp_pool = NULL;
 }
 
 /**
@@ -751,40 +788,37 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
  */
 int ixgbe_fcoe_enable(struct net_device *netdev)
 {
-        int rc = -EINVAL;
         struct ixgbe_adapter *adapter = netdev_priv(netdev);
         struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 
+        atomic_inc(&fcoe->refcnt);
 
         if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
-                goto out_enable;
+                return -EINVAL;
 
-        atomic_inc(&fcoe->refcnt);
         if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-                goto out_enable;
+                return -EINVAL;
 
         e_info(drv, "Enabling FCoE offload features.\n");
         if (netif_running(netdev))
                 netdev->netdev_ops->ndo_stop(netdev);
 
-        ixgbe_clear_interrupt_scheme(adapter);
+        /* Allocate per CPU memory to track DDP pools */
+        ixgbe_fcoe_ddp_enable(adapter);
 
+        /* enable FCoE and notify stack */
         adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
-        adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
-        netdev->features |= NETIF_F_FCOE_CRC;
-        netdev->features |= NETIF_F_FSO;
         netdev->features |= NETIF_F_FCOE_MTU;
-        netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+        netdev_features_change(netdev);
 
+        /* release existing queues and reallocate them */
+        ixgbe_clear_interrupt_scheme(adapter);
         ixgbe_init_interrupt_scheme(adapter);
-        netdev_features_change(netdev);
 
         if (netif_running(netdev))
                 netdev->netdev_ops->ndo_open(netdev);
-        rc = 0;
 
-out_enable:
-        return rc;
+        return 0;
 }
 
 /**
@@ -797,41 +831,35 @@ out_enable:
  */
 int ixgbe_fcoe_disable(struct net_device *netdev)
 {
-        int rc = -EINVAL;
         struct ixgbe_adapter *adapter = netdev_priv(netdev);
-        struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 
-        if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
-                goto out_disable;
+        if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
+                return -EINVAL;
 
         if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
-                goto out_disable;
-
-        if (!atomic_dec_and_test(&fcoe->refcnt))
-                goto out_disable;
+                return -EINVAL;
 
         e_info(drv, "Disabling FCoE offload features.\n");
-        netdev->features &= ~NETIF_F_FCOE_CRC;
-        netdev->features &= ~NETIF_F_FSO;
-        netdev->features &= ~NETIF_F_FCOE_MTU;
-        netdev->fcoe_ddp_xid = 0;
-        netdev_features_change(netdev);
-
         if (netif_running(netdev))
                 netdev->netdev_ops->ndo_stop(netdev);
 
-        ixgbe_clear_interrupt_scheme(adapter);
+        /* Free per CPU memory to track DDP pools */
+        ixgbe_fcoe_ddp_disable(adapter);
+
+        /* disable FCoE and notify stack */
         adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
-        adapter->ring_feature[RING_F_FCOE].indices = 0;
-        ixgbe_cleanup_fcoe(adapter);
+        netdev->features &= ~NETIF_F_FCOE_MTU;
+
+        netdev_features_change(netdev);
+
+        /* release existing queues and reallocate them */
+        ixgbe_clear_interrupt_scheme(adapter);
         ixgbe_init_interrupt_scheme(adapter);
 
         if (netif_running(netdev))
                 netdev->netdev_ops->ndo_open(netdev);
-        rc = 0;
 
-out_disable:
-        return rc;
+        return 0;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index 1dbed17c8107..bf724da99375 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -62,19 +62,24 @@ struct ixgbe_fcoe_ddp {
         struct scatterlist *sgl;
         dma_addr_t udp;
         u64 *udl;
-        struct pci_pool *pool;
+        struct dma_pool *pool;
+};
+
+/* per cpu variables */
+struct ixgbe_fcoe_ddp_pool {
+        struct dma_pool *pool;
+        u64 noddp;
+        u64 noddp_ext_buff;
 };
 
 struct ixgbe_fcoe {
-        struct pci_pool **pool;
+        struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool;
         atomic_t refcnt;
         spinlock_t lock;
         struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
-        unsigned char *extra_ddp_buffer;
+        void *extra_ddp_buffer;
         dma_addr_t extra_ddp_buffer_dma;
         unsigned long mode;
-        u64 __percpu *pcpu_noddp;
-        u64 __percpu *pcpu_noddp_ext_buff;
 #ifdef CONFIG_IXGBE_DCB
         u8 up;
 #endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 2e4523c7ab9e..f4e53c1a7338 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3118,7 +3118,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
         psrtype |= 1 << 29;
 
         for (p = 0; p < adapter->num_rx_pools; p++)
-                IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
+                IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)),
                                 psrtype);
 }
 
@@ -3135,12 +3135,12 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
         vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
         vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
         vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
-        vmdctl |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
+        vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
         vmdctl |= IXGBE_VT_CTL_REPLEN;
         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
 
-        vf_shift = adapter->num_vfs % 32;
-        reg_offset = (adapter->num_vfs >= 32) ? 1 : 0;
+        vf_shift = VMDQ_P(0) % 32;
+        reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
 
         /* Enable only the PF's pool for Tx/Rx */
         IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
@@ -3150,7 +3150,7 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
         IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
 
         /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
-        hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+        hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
 
         /*
          * Set up VF register offsets for selected VT Mode,
@@ -3310,10 +3310,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
         struct ixgbe_adapter *adapter = netdev_priv(netdev);
         struct ixgbe_hw *hw = &adapter->hw;
-        int pool_ndx = adapter->num_vfs;
 
         /* add VID to filter table */
-        hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
+        hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
         set_bit(vid, adapter->active_vlans);
 
         return 0;
@@ -3323,10 +3322,9 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
         struct ixgbe_adapter *adapter = netdev_priv(netdev);
         struct ixgbe_hw *hw = &adapter->hw;
-        int pool_ndx = adapter->num_vfs;
 
         /* remove VID from filter table */
-        hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
+        hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
         clear_bit(vid, adapter->active_vlans);
 
         return 0;
@@ -3444,7 +3442,6 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)
 {
         struct ixgbe_adapter *adapter = netdev_priv(netdev);
         struct ixgbe_hw *hw = &adapter->hw;
-        unsigned int vfn = adapter->num_vfs;
         unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS;
         int count = 0;
 
@@ -3462,7 +3459,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)
                         if (!rar_entries)
                                 break;
                         hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
-                                            vfn, IXGBE_RAH_AV);
+                                            VMDQ_P(0), IXGBE_RAH_AV);
                         count++;
                 }
         }
@@ -3536,12 +3533,14 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
                 vmolr |= IXGBE_VMOLR_ROPE;
         }
 
-        if (adapter->num_vfs) {
+        if (adapter->num_vfs)
                 ixgbe_restore_vf_multicasts(adapter);
-                vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
+
+        if (hw->mac.type != ixgbe_mac_82598EB) {
+                vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
                          ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
                            IXGBE_VMOLR_ROPE);
-                IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
+                IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
         }
 
         /* This is useful for sniffing bad packets. */
@@ -3808,12 +3807,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
         ixgbe_set_rx_mode(adapter->netdev);
         ixgbe_restore_vlan(adapter);
 
-#ifdef IXGBE_FCOE
-        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-                ixgbe_configure_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
-
         switch (hw->mac.type) {
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
@@ -3843,6 +3836,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 
         ixgbe_configure_virtualization(adapter);
 
+#ifdef IXGBE_FCOE
+        /* configure FCoE L2 filters, redirection table, and Rx control */
+        ixgbe_configure_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
         ixgbe_configure_tx(adapter);
         ixgbe_configure_rx(adapter);
 }
@@ -4120,8 +4118,11 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
         clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
 
         /* reprogram the RAR[0] in case user changed it. */
-        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
-                            IXGBE_RAH_AV);
+        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
+
+        /* update SAN MAC vmdq pool selection */
+        if (hw->mac.san_mac_rar_index)
+                hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
 }
 
 /**
@@ -4436,6 +4437,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
                 break;
         }
 
+#ifdef IXGBE_FCOE
+        /* FCoE support exists, always init the FCoE lock */
+        spin_lock_init(&adapter->fcoe.lock);
+
+#endif
         /* n-tuple support exists, always init our spinlock */
         spin_lock_init(&adapter->fdir_perfect_lock);
 
@@ -4664,7 +4670,11 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
                         goto err_setup_rx;
         }
 
-        return 0;
+#ifdef IXGBE_FCOE
+        err = ixgbe_setup_fcoe_ddp_resources(adapter);
+        if (!err)
+#endif
+                return 0;
 err_setup_rx:
         /* rewind the index freeing the rings as we go */
         while (i--)
@@ -4743,6 +4753,10 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 {
         int i;
 
+#ifdef IXGBE_FCOE
+        ixgbe_free_fcoe_ddp_resources(adapter);
+
+#endif
         for (i = 0; i < adapter->num_rx_queues; i++)
                 if (adapter->rx_ring[i]->desc)
                         ixgbe_free_rx_resources(adapter->rx_ring[i]);
@@ -5054,11 +5068,6 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
         u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
         u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
         u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
-#ifdef IXGBE_FCOE
-        struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-        unsigned int cpu;
-        u64 fcoe_noddp_counts_sum = 0, fcoe_noddp_ext_buff_counts_sum = 0;
-#endif /* IXGBE_FCOE */
 
         if (test_bit(__IXGBE_DOWN, &adapter->state) ||
             test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5189,17 +5198,19 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
                 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
                 /* Add up per cpu counters for total ddp aloc fail */
-                if (fcoe->pcpu_noddp && fcoe->pcpu_noddp_ext_buff) {
+                if (adapter->fcoe.ddp_pool) {
+                        struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+                        struct ixgbe_fcoe_ddp_pool *ddp_pool;
+                        unsigned int cpu;
+                        u64 noddp = 0, noddp_ext_buff = 0;
                         for_each_possible_cpu(cpu) {
-                                fcoe_noddp_counts_sum +=
-                                        *per_cpu_ptr(fcoe->pcpu_noddp, cpu);
-                                fcoe_noddp_ext_buff_counts_sum +=
-                                        *per_cpu_ptr(fcoe->
-                                                     pcpu_noddp_ext_buff, cpu);
+                                ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+                                noddp += ddp_pool->noddp;
+                                noddp_ext_buff += ddp_pool->noddp_ext_buff;
                         }
+                        hwstats->fcoe_noddp = noddp;
+                        hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
                 }
-                hwstats->fcoe_noddp = fcoe_noddp_counts_sum;
-                hwstats->fcoe_noddp_ext_buff = fcoe_noddp_ext_buff_counts_sum;
 #endif /* IXGBE_FCOE */
                 break;
         default:
@@ -6371,7 +6382,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 #ifdef IXGBE_FCOE
         /* setup tx offload for FCoE */
         if ((protocol == __constant_htons(ETH_P_FCOE)) &&
-            (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+            (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
                 tso = ixgbe_fso(tx_ring, first, &hdr_len);
                 if (tso < 0)
                         goto out_drop;
@@ -6445,8 +6456,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
         memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
-                            IXGBE_RAH_AV);
+        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
 
         return 0;
 }
@@ -6503,12 +6513,15 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
 {
         int err = 0;
         struct ixgbe_adapter *adapter = netdev_priv(dev);
-        struct ixgbe_mac_info *mac = &adapter->hw.mac;
+        struct ixgbe_hw *hw = &adapter->hw;
 
-        if (is_valid_ether_addr(mac->san_addr)) {
+        if (is_valid_ether_addr(hw->mac.san_addr)) {
                 rtnl_lock();
-                err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+                err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
                 rtnl_unlock();
+
+                /* update SAN MAC vmdq pool selection */
+                hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
         }
         return err;
 }
@@ -7241,11 +7254,15 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                         if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
                                 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
                 }
-        }
-        if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
-                netdev->vlan_features |= NETIF_F_FCOE_CRC;
-                netdev->vlan_features |= NETIF_F_FSO;
-                netdev->vlan_features |= NETIF_F_FCOE_MTU;
+
+                adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
+
+                netdev->features |= NETIF_F_FSO |
+                                    NETIF_F_FCOE_CRC;
+
+                netdev->vlan_features |= NETIF_F_FSO |
+                                         NETIF_F_FCOE_CRC |
+                                         NETIF_F_FCOE_MTU;
         }
 #endif /* IXGBE_FCOE */
         if (pci_using_dac) {
@@ -7442,12 +7459,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
         ixgbe_sysfs_exit(adapter);
 #endif /* CONFIG_IXGBE_HWMON */
 
-#ifdef IXGBE_FCOE
-        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-                ixgbe_cleanup_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
-
         /* remove the added san mac */
         ixgbe_del_sanmac_netdev(netdev);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 089468224e7e..a825d4808cd2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -208,6 +208,17 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
         u32 vmdctl;
         int i;
 
+        /* set num VFs to 0 to prevent access to vfinfo */
+        adapter->num_vfs = 0;
+
+        /* free VF control structures */
+        kfree(adapter->vfinfo);
+        adapter->vfinfo = NULL;
+
+        /* free macvlan list */
+        kfree(adapter->mv_list);
+        adapter->mv_list = NULL;
+
 #ifdef CONFIG_PCI_IOV
         /* disable iov and allow time for transactions to clear */
         pci_disable_sriov(adapter->pdev);
@@ -225,6 +236,11 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
         IXGBE_WRITE_FLUSH(hw);
 
+        /* Disable VMDq flag so device will be set in VM mode */
+        if (adapter->ring_feature[RING_F_VMDQ].limit == 1)
+                adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+        adapter->ring_feature[RING_F_VMDQ].offset = 0;
+
         /* take a breather then clean up driver data */
         msleep(100);
 
@@ -233,11 +249,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
                 if (adapter->vfinfo[i].vfdev)
                         pci_dev_put(adapter->vfinfo[i].vfdev);
         }
-        kfree(adapter->vfinfo);
-        kfree(adapter->mv_list);
-        adapter->vfinfo = NULL;
 
-        adapter->num_vfs = 0;
         adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index a5ceea4d329a..fe0a19d91d4a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1449,6 +1449,7 @@ enum {
 #define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
 #define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
 #define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
+#define IXGBE_ETQF_POOL_SHIFT 20
 
 #define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
 #define IXGBE_ETQS_RX_QUEUE_SHIFT 16
@@ -2843,6 +2844,7 @@ struct ixgbe_mac_operations {
         s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
         s32 (*clear_rar)(struct ixgbe_hw *, u32);
         s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+        s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
         s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
         s32 (*init_rx_addrs)(struct ixgbe_hw *);
         s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
@@ -2918,6 +2920,7 @@ struct ixgbe_mac_info {
         bool orig_link_settings_stored;
         bool autotry_restart;
         u8 flags;
+        u8 san_mac_rar_index;
         struct ixgbe_thermal_sensor_data thermal_sensor_data;
 };
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index f90ec078ece2..de4da5219b71 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -156,6 +156,9 @@ mac_reset_top:
                 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
                                     hw->mac.san_addr, 0, IXGBE_RAH_AV);
 
+                /* Save the SAN MAC RAR index */
+                hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
                 /* Reserve the last RAR for the SAN MAC address */
                 hw->mac.num_rar_entries--;
         }
@@ -832,6 +835,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
         .set_rar = &ixgbe_set_rar_generic,
         .clear_rar = &ixgbe_clear_rar_generic,
         .set_vmdq = &ixgbe_set_vmdq_generic,
+        .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic,
         .clear_vmdq = &ixgbe_clear_vmdq_generic,
         .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
         .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,