author     David S. Miller <davem@davemloft.net>    2013-11-07 19:11:58 -0500
committer  David S. Miller <davem@davemloft.net>    2013-11-07 19:11:58 -0500
commit     3cdcf1334cd76bbcabd0f273ee9a13e4cc7816bc (patch)
tree       03664d96b9f9020411e9772ed21d3825a5ff69ef /drivers
parent     1ec4864b10171b0691ee196d7006ae56d2c153f2 (diff)
parent     2a47fa45d4dfbc54659d28de311a1f764b296a3c (diff)
Merge branch 'macvlan_hwaccel'
John Fastabend says:

====================
l2 hardware accelerated macvlans

This patch series adds support for offloading macvlan net_devices to the
hardware. With these patches, packets are pushed to the macvlan net_device
directly and do not pass through the lower dev.

The patches have been through multiple iterations, each with a slightly
different focus. First I tried to push these as a new link type called
"VMDQ"; those patches are here:

    http://comments.gmane.org/gmane.linux.network/237617

Following that implementation I renamed the link type "VSI" and addressed
various comments. Finally, Neil Horman picked up the patches and integrated
the offload into the macvlan code, here:

    http://permalink.gmane.org/gmane.linux.network/285658

The attached series is a clean-up of his patches, with a few fixes.

If folks find this series acceptable, there are a few items we can work on
next. First, broadcast and multicast traffic will use the hardware even for
local delivery with this series. It would be best (I think) to use the
software path for macvlan-to-macvlan traffic and save the PCIe bus; how
worthwhile that is depends on how much you value CPU time versus PCIe
bandwidth, and it will need another patch series to flesh out. Also, this
series only allows layer 2 MAC forwarding, whereas some hardware supports
more interesting forwarding capabilities; integrating with OVS may be
useful here.

As always, any comments/feedback are welcome. My basic I/O test is here,
but I've also done some link testing, SR-IOV/DCB with macvlans, and others.

Changelog:

v2: Two fixes to ixgbe for when DCB, FCoE and SR-IOV are all enabled along
    with macvlans. A VMDQ_P() reference should have been accel->pool, and
    the ring index offset must not be set from the dfwd add call: that
    offset is used by SR-IOV, so clearing it can cause the SR-IOV queue
    indices to go sideways. With these fixes, testing macvlans with SR-IOV
    enabled was successful.

v3: Addressed Neil's comments in ixgbe; fixed the error path of
    dfwd_add_station() in ixgbe; fixed ixgbe to allow SR-IOV and
    accelerated macvlans to coexist.

v4: Dave caught some strange indentation; fixed it here.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
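
For readers skimming the diff below, the driver-side contract this series
introduces is small: the lower device advertises NETIF_F_HW_L2FW_DOFFLOAD
and provides three new ndo hooks (ndo_dfwd_add_station, ndo_dfwd_del_station
and ndo_dfwd_start_xmit); macvlan_open() then asks the lower driver for an
opaque per-station handle and passes it back on every transmit. What follows
is a minimal, hypothetical sketch of that contract: the example_* names are
placeholders, not part of this series; the real implementations are
ixgbe_fwd_add(), ixgbe_fwd_del() and ixgbe_fwd_xmit() in the ixgbe_main.c
hunk further down.

/* Minimal, hypothetical sketch of the L2 forwarding offload hooks added by
 * this series.  The example_* symbols are illustrative placeholders only.
 */
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/err.h>

struct example_fwd_station {
	struct net_device *macvlan;	/* upper (macvlan) netdev */
	int pool;			/* hardware queue pool claimed for it */
};

/* Bind a macvlan upper device to a dedicated hardware pool and return an
 * opaque handle; macvlan stores it in vlan->fwd_priv.
 */
static void *example_fwd_add_station(struct net_device *pdev,
				     struct net_device *vdev)
{
	struct example_fwd_station *station;

	station = kzalloc(sizeof(*station), GFP_KERNEL);
	if (!station)
		return ERR_PTR(-ENOMEM);

	station->macvlan = vdev;
	station->pool = 1;	/* a real driver allocates a free pool here
				 * and programs MAC filters and rx/tx rings
				 */
	return station;
}

/* Undo example_fwd_add_station(): tear down the rings, release the pool. */
static void example_fwd_del_station(struct net_device *pdev, void *priv)
{
	kfree(priv);
}

/* Transmit on behalf of an offloaded macvlan: the opaque handle selects a
 * per-station tx ring instead of using skb->queue_mapping on the lower dev.
 */
static netdev_tx_t example_fwd_start_xmit(struct sk_buff *skb,
					  struct net_device *dev, void *priv)
{
	/* stub: a real driver maps skb to the station's ring and sends it */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops example_netdev_ops = {
	/* ... existing ndo_open/ndo_stop/ndo_start_xmit ... */
	.ndo_dfwd_add_station	= example_fwd_add_station,
	.ndo_dfwd_del_station	= example_fwd_del_station,
	.ndo_dfwd_start_xmit	= example_fwd_start_xmit,
};

/* At probe time the lower driver advertises the capability, which is what
 * makes macvlan_open() attempt the offload:
 *
 *	netdev->features |= NETIF_F_HW_L2FW_DOFFLOAD;
 */

On the macvlan side (the last file in the diff), macvlan_open() calls
ndo_dfwd_add_station() on the lower device when NETIF_F_HW_L2FW_DOFFLOAD is
set, stores the returned handle in vlan->fwd_priv, and falls back to the
normal software path if the call returns an error or NULL; macvlan_start_xmit()
then hands offloaded skbs straight to dev_hard_start_xmit() with that handle.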
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h        |  20
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c    |  15
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c   | 480
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c  |  17
-rw-r--r--  drivers/net/macvlan.c                           |  36
5 files changed, 478 insertions(+), 90 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 09149143ee0f..f38fc0a343a2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -223,6 +223,15 @@ enum ixgbe_ring_state_t {
223 __IXGBE_RX_FCOE, 223 __IXGBE_RX_FCOE,
224}; 224};
225 225
226struct ixgbe_fwd_adapter {
227 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
228 struct net_device *netdev;
229 struct ixgbe_adapter *real_adapter;
230 unsigned int tx_base_queue;
231 unsigned int rx_base_queue;
232 int pool;
233};
234
226#define check_for_tx_hang(ring) \ 235#define check_for_tx_hang(ring) \
227 test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) 236 test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
228#define set_check_for_tx_hang(ring) \ 237#define set_check_for_tx_hang(ring) \
@@ -240,6 +249,7 @@ struct ixgbe_ring {
240 struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */ 249 struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
241 struct net_device *netdev; /* netdev ring belongs to */ 250 struct net_device *netdev; /* netdev ring belongs to */
242 struct device *dev; /* device for DMA mapping */ 251 struct device *dev; /* device for DMA mapping */
252 struct ixgbe_fwd_adapter *l2_accel_priv;
243 void *desc; /* descriptor ring memory */ 253 void *desc; /* descriptor ring memory */
244 union { 254 union {
245 struct ixgbe_tx_buffer *tx_buffer_info; 255 struct ixgbe_tx_buffer *tx_buffer_info;
@@ -297,6 +307,12 @@ enum ixgbe_ring_f_enum {
297#define IXGBE_MAX_FCOE_INDICES 8 307#define IXGBE_MAX_FCOE_INDICES 8
298#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) 308#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
299#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) 309#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
310#define IXGBE_MAX_L2A_QUEUES 4
311#define IXGBE_MAX_L2A_QUEUES 4
312#define IXGBE_BAD_L2A_QUEUE 3
313#define IXGBE_MAX_MACVLANS 31
314#define IXGBE_MAX_DCBMACVLANS 8
315
300struct ixgbe_ring_feature { 316struct ixgbe_ring_feature {
301 u16 limit; /* upper limit on feature indices */ 317 u16 limit; /* upper limit on feature indices */
302 u16 indices; /* current value of indices */ 318 u16 indices; /* current value of indices */
@@ -766,6 +782,7 @@ struct ixgbe_adapter {
766#endif /*CONFIG_DEBUG_FS*/ 782#endif /*CONFIG_DEBUG_FS*/
767 783
768 u8 default_up; 784 u8 default_up;
785 unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
769}; 786};
770 787
771struct ixgbe_fdir_filter { 788struct ixgbe_fdir_filter {
@@ -939,4 +956,7 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
939void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); 956void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
940#endif 957#endif
941 958
959netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
960 struct ixgbe_adapter *adapter,
961 struct ixgbe_ring *tx_ring);
942#endif /* _IXGBE_H_ */ 962#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 90b4e1089ecc..32e3eaaa160a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -498,6 +498,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
498#ifdef IXGBE_FCOE 498#ifdef IXGBE_FCOE
499 u16 fcoe_i = 0; 499 u16 fcoe_i = 0;
500#endif 500#endif
501 bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
501 502
502 /* only proceed if SR-IOV is enabled */ 503 /* only proceed if SR-IOV is enabled */
503 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 504 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
@@ -510,7 +511,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
510 vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); 511 vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
511 512
512 /* 64 pool mode with 2 queues per pool */ 513 /* 64 pool mode with 2 queues per pool */
513 if ((vmdq_i > 32) || (rss_i < 4)) { 514 if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
514 vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; 515 vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
515 rss_m = IXGBE_RSS_2Q_MASK; 516 rss_m = IXGBE_RSS_2Q_MASK;
516 rss_i = min_t(u16, rss_i, 2); 517 rss_i = min_t(u16, rss_i, 2);
@@ -852,7 +853,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
852 853
853 /* apply Tx specific ring traits */ 854 /* apply Tx specific ring traits */
854 ring->count = adapter->tx_ring_count; 855 ring->count = adapter->tx_ring_count;
855 ring->queue_index = txr_idx; 856 if (adapter->num_rx_pools > 1)
857 ring->queue_index =
858 txr_idx % adapter->num_rx_queues_per_pool;
859 else
860 ring->queue_index = txr_idx;
856 861
857 /* assign ring to adapter */ 862 /* assign ring to adapter */
858 adapter->tx_ring[txr_idx] = ring; 863 adapter->tx_ring[txr_idx] = ring;
@@ -895,7 +900,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
895#endif /* IXGBE_FCOE */ 900#endif /* IXGBE_FCOE */
896 /* apply Rx specific ring traits */ 901 /* apply Rx specific ring traits */
897 ring->count = adapter->rx_ring_count; 902 ring->count = adapter->rx_ring_count;
898 ring->queue_index = rxr_idx; 903 if (adapter->num_rx_pools > 1)
904 ring->queue_index =
905 rxr_idx % adapter->num_rx_queues_per_pool;
906 else
907 ring->queue_index = rxr_idx;
899 908
900 /* assign ring to adapter */ 909 /* assign ring to adapter */
901 adapter->rx_ring[rxr_idx] = ring; 910 adapter->rx_ring[rxr_idx] = ring;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5191b3ca9a26..607275de2f1e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -44,6 +44,7 @@
44#include <linux/ethtool.h> 44#include <linux/ethtool.h>
45#include <linux/if.h> 45#include <linux/if.h>
46#include <linux/if_vlan.h> 46#include <linux/if_vlan.h>
47#include <linux/if_macvlan.h>
47#include <linux/if_bridge.h> 48#include <linux/if_bridge.h>
48#include <linux/prefetch.h> 49#include <linux/prefetch.h>
49#include <scsi/fc/fc_fcoe.h> 50#include <scsi/fc/fc_fcoe.h>
@@ -870,11 +871,18 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
870 871
871static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) 872static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
872{ 873{
873 struct ixgbe_adapter *adapter = netdev_priv(ring->netdev); 874 struct ixgbe_adapter *adapter;
874 struct ixgbe_hw *hw = &adapter->hw; 875 struct ixgbe_hw *hw;
876 u32 head, tail;
877
878 if (ring->l2_accel_priv)
879 adapter = ring->l2_accel_priv->real_adapter;
880 else
881 adapter = netdev_priv(ring->netdev);
875 882
876 u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); 883 hw = &adapter->hw;
877 u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); 884 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
885 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
878 886
879 if (head != tail) 887 if (head != tail)
880 return (head < tail) ? 888 return (head < tail) ?
@@ -3003,7 +3011,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
3003 struct ixgbe_q_vector *q_vector = ring->q_vector; 3011 struct ixgbe_q_vector *q_vector = ring->q_vector;
3004 3012
3005 if (q_vector) 3013 if (q_vector)
3006 netif_set_xps_queue(adapter->netdev, 3014 netif_set_xps_queue(ring->netdev,
3007 &q_vector->affinity_mask, 3015 &q_vector->affinity_mask,
3008 ring->queue_index); 3016 ring->queue_index);
3009 } 3017 }
@@ -3393,7 +3401,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3393{ 3401{
3394 struct ixgbe_hw *hw = &adapter->hw; 3402 struct ixgbe_hw *hw = &adapter->hw;
3395 int rss_i = adapter->ring_feature[RING_F_RSS].indices; 3403 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
3396 int p; 3404 u16 pool;
3397 3405
3398 /* PSRTYPE must be initialized in non 82598 adapters */ 3406 /* PSRTYPE must be initialized in non 82598 adapters */
3399 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | 3407 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
@@ -3410,9 +3418,8 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3410 else if (rss_i > 1) 3418 else if (rss_i > 1)
3411 psrtype |= 1 << 29; 3419 psrtype |= 1 << 29;
3412 3420
3413 for (p = 0; p < adapter->num_rx_pools; p++) 3421 for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
3414 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)), 3422 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
3415 psrtype);
3416} 3423}
3417 3424
3418static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) 3425static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
@@ -3681,7 +3688,11 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3681 case ixgbe_mac_82599EB: 3688 case ixgbe_mac_82599EB:
3682 case ixgbe_mac_X540: 3689 case ixgbe_mac_X540:
3683 for (i = 0; i < adapter->num_rx_queues; i++) { 3690 for (i = 0; i < adapter->num_rx_queues; i++) {
3684 j = adapter->rx_ring[i]->reg_idx; 3691 struct ixgbe_ring *ring = adapter->rx_ring[i];
3692
3693 if (ring->l2_accel_priv)
3694 continue;
3695 j = ring->reg_idx;
3685 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 3696 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3686 vlnctrl &= ~IXGBE_RXDCTL_VME; 3697 vlnctrl &= ~IXGBE_RXDCTL_VME;
3687 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); 3698 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -3711,7 +3722,11 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
3711 case ixgbe_mac_82599EB: 3722 case ixgbe_mac_82599EB:
3712 case ixgbe_mac_X540: 3723 case ixgbe_mac_X540:
3713 for (i = 0; i < adapter->num_rx_queues; i++) { 3724 for (i = 0; i < adapter->num_rx_queues; i++) {
3714 j = adapter->rx_ring[i]->reg_idx; 3725 struct ixgbe_ring *ring = adapter->rx_ring[i];
3726
3727 if (ring->l2_accel_priv)
3728 continue;
3729 j = ring->reg_idx;
3715 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 3730 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3716 vlnctrl |= IXGBE_RXDCTL_VME; 3731 vlnctrl |= IXGBE_RXDCTL_VME;
3717 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); 3732 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -3748,7 +3763,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)
3748 unsigned int rar_entries = hw->mac.num_rar_entries - 1; 3763 unsigned int rar_entries = hw->mac.num_rar_entries - 1;
3749 int count = 0; 3764 int count = 0;
3750 3765
3751 /* In SR-IOV mode significantly less RAR entries are available */ 3766 /* In SR-IOV/VMDQ modes significantly less RAR entries are available */
3752 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 3767 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3753 rar_entries = IXGBE_MAX_PF_MACVLANS - 1; 3768 rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
3754 3769
@@ -4113,6 +4128,230 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
4113 spin_unlock(&adapter->fdir_perfect_lock); 4128 spin_unlock(&adapter->fdir_perfect_lock);
4114} 4129}
4115 4130
4131static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
4132 struct ixgbe_adapter *adapter)
4133{
4134 struct ixgbe_hw *hw = &adapter->hw;
4135 u32 vmolr;
4136
4137 /* No unicast promiscuous support for VMDQ devices. */
4138 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
4139 vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
4140
4141 /* clear the affected bit */
4142 vmolr &= ~IXGBE_VMOLR_MPE;
4143
4144 if (dev->flags & IFF_ALLMULTI) {
4145 vmolr |= IXGBE_VMOLR_MPE;
4146 } else {
4147 vmolr |= IXGBE_VMOLR_ROMPE;
4148 hw->mac.ops.update_mc_addr_list(hw, dev);
4149 }
4150 ixgbe_write_uc_addr_list(adapter->netdev);
4151 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
4152}
4153
4154static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4155 u8 *addr, u16 pool)
4156{
4157 struct ixgbe_hw *hw = &adapter->hw;
4158 unsigned int entry;
4159
4160 entry = hw->mac.num_rar_entries - pool;
4161 hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV);
4162}
4163
4164static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
4165{
4166 struct ixgbe_adapter *adapter = vadapter->real_adapter;
4167 int rss_i = vadapter->netdev->real_num_rx_queues;
4168 struct ixgbe_hw *hw = &adapter->hw;
4169 u16 pool = vadapter->pool;
4170 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4171 IXGBE_PSRTYPE_UDPHDR |
4172 IXGBE_PSRTYPE_IPV4HDR |
4173 IXGBE_PSRTYPE_L2HDR |
4174 IXGBE_PSRTYPE_IPV6HDR;
4175
4176 if (hw->mac.type == ixgbe_mac_82598EB)
4177 return;
4178
4179 if (rss_i > 3)
4180 psrtype |= 2 << 29;
4181 else if (rss_i > 1)
4182 psrtype |= 1 << 29;
4183
4184 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
4185}
4186
4187/**
4188 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
4189 * @rx_ring: ring to free buffers from
4190 **/
4191static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
4192{
4193 struct device *dev = rx_ring->dev;
4194 unsigned long size;
4195 u16 i;
4196
4197 /* ring already cleared, nothing to do */
4198 if (!rx_ring->rx_buffer_info)
4199 return;
4200
4201 /* Free all the Rx ring sk_buffs */
4202 for (i = 0; i < rx_ring->count; i++) {
4203 struct ixgbe_rx_buffer *rx_buffer;
4204
4205 rx_buffer = &rx_ring->rx_buffer_info[i];
4206 if (rx_buffer->skb) {
4207 struct sk_buff *skb = rx_buffer->skb;
4208 if (IXGBE_CB(skb)->page_released) {
4209 dma_unmap_page(dev,
4210 IXGBE_CB(skb)->dma,
4211 ixgbe_rx_bufsz(rx_ring),
4212 DMA_FROM_DEVICE);
4213 IXGBE_CB(skb)->page_released = false;
4214 }
4215 dev_kfree_skb(skb);
4216 }
4217 rx_buffer->skb = NULL;
4218 if (rx_buffer->dma)
4219 dma_unmap_page(dev, rx_buffer->dma,
4220 ixgbe_rx_pg_size(rx_ring),
4221 DMA_FROM_DEVICE);
4222 rx_buffer->dma = 0;
4223 if (rx_buffer->page)
4224 __free_pages(rx_buffer->page,
4225 ixgbe_rx_pg_order(rx_ring));
4226 rx_buffer->page = NULL;
4227 }
4228
4229 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4230 memset(rx_ring->rx_buffer_info, 0, size);
4231
4232 /* Zero out the descriptor ring */
4233 memset(rx_ring->desc, 0, rx_ring->size);
4234
4235 rx_ring->next_to_alloc = 0;
4236 rx_ring->next_to_clean = 0;
4237 rx_ring->next_to_use = 0;
4238}
4239
4240static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
4241 struct ixgbe_ring *rx_ring)
4242{
4243 struct ixgbe_adapter *adapter = vadapter->real_adapter;
4244 int index = rx_ring->queue_index + vadapter->rx_base_queue;
4245
4246 /* shutdown specific queue receive and wait for dma to settle */
4247 ixgbe_disable_rx_queue(adapter, rx_ring);
4248 usleep_range(10000, 20000);
4249 ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
4250 ixgbe_clean_rx_ring(rx_ring);
4251 rx_ring->l2_accel_priv = NULL;
4252}
4253
4254int ixgbe_fwd_ring_down(struct net_device *vdev,
4255 struct ixgbe_fwd_adapter *accel)
4256{
4257 struct ixgbe_adapter *adapter = accel->real_adapter;
4258 unsigned int rxbase = accel->rx_base_queue;
4259 unsigned int txbase = accel->tx_base_queue;
4260 int i;
4261
4262 netif_tx_stop_all_queues(vdev);
4263
4264 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4265 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
4266 adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
4267 }
4268
4269 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4270 adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
4271 adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
4272 }
4273
4274
4275 return 0;
4276}
4277
4278static int ixgbe_fwd_ring_up(struct net_device *vdev,
4279 struct ixgbe_fwd_adapter *accel)
4280{
4281 struct ixgbe_adapter *adapter = accel->real_adapter;
4282 unsigned int rxbase, txbase, queues;
4283 int i, baseq, err = 0;
4284
4285 if (!test_bit(accel->pool, &adapter->fwd_bitmask))
4286 return 0;
4287
4288 baseq = accel->pool * adapter->num_rx_queues_per_pool;
4289 netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
4290 accel->pool, adapter->num_rx_pools,
4291 baseq, baseq + adapter->num_rx_queues_per_pool,
4292 adapter->fwd_bitmask);
4293
4294 accel->netdev = vdev;
4295 accel->rx_base_queue = rxbase = baseq;
4296 accel->tx_base_queue = txbase = baseq;
4297
4298 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
4299 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
4300
4301 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4302 adapter->rx_ring[rxbase + i]->netdev = vdev;
4303 adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
4304 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
4305 }
4306
4307 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4308 adapter->tx_ring[txbase + i]->netdev = vdev;
4309 adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
4310 }
4311
4312 queues = min_t(unsigned int,
4313 adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
4314 err = netif_set_real_num_tx_queues(vdev, queues);
4315 if (err)
4316 goto fwd_queue_err;
4317
4318 queues = min_t(unsigned int,
4319 adapter->num_rx_queues_per_pool, vdev->num_rx_queues);
4320 err = netif_set_real_num_rx_queues(vdev, queues);
4321 if (err)
4322 goto fwd_queue_err;
4323
4324 if (is_valid_ether_addr(vdev->dev_addr))
4325 ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
4326
4327 ixgbe_fwd_psrtype(accel);
4328 ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
4329 return err;
4330fwd_queue_err:
4331 ixgbe_fwd_ring_down(vdev, accel);
4332 return err;
4333}
4334
4335static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
4336{
4337 struct net_device *upper;
4338 struct list_head *iter;
4339 int err;
4340
4341 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
4342 if (netif_is_macvlan(upper)) {
4343 struct macvlan_dev *dfwd = netdev_priv(upper);
4344 struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
4345
4346 if (dfwd->fwd_priv) {
4347 err = ixgbe_fwd_ring_up(upper, vadapter);
4348 if (err)
4349 continue;
4350 }
4351 }
4352 }
4353}
4354
4116static void ixgbe_configure(struct ixgbe_adapter *adapter) 4355static void ixgbe_configure(struct ixgbe_adapter *adapter)
4117{ 4356{
4118 struct ixgbe_hw *hw = &adapter->hw; 4357 struct ixgbe_hw *hw = &adapter->hw;
@@ -4164,6 +4403,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
4164#endif /* IXGBE_FCOE */ 4403#endif /* IXGBE_FCOE */
4165 ixgbe_configure_tx(adapter); 4404 ixgbe_configure_tx(adapter);
4166 ixgbe_configure_rx(adapter); 4405 ixgbe_configure_rx(adapter);
4406 ixgbe_configure_dfwd(adapter);
4167} 4407}
4168 4408
4169static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) 4409static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -4317,6 +4557,8 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
4317static void ixgbe_up_complete(struct ixgbe_adapter *adapter) 4557static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
4318{ 4558{
4319 struct ixgbe_hw *hw = &adapter->hw; 4559 struct ixgbe_hw *hw = &adapter->hw;
4560 struct net_device *upper;
4561 struct list_head *iter;
4320 int err; 4562 int err;
4321 u32 ctrl_ext; 4563 u32 ctrl_ext;
4322 4564
@@ -4360,6 +4602,16 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
4360 /* enable transmits */ 4602 /* enable transmits */
4361 netif_tx_start_all_queues(adapter->netdev); 4603 netif_tx_start_all_queues(adapter->netdev);
4362 4604
4605 /* enable any upper devices */
4606 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
4607 if (netif_is_macvlan(upper)) {
4608 struct macvlan_dev *vlan = netdev_priv(upper);
4609
4610 if (vlan->fwd_priv)
4611 netif_tx_start_all_queues(upper);
4612 }
4613 }
4614
4363 /* bring the link up in the watchdog, this could race with our first 4615 /* bring the link up in the watchdog, this could race with our first
4364 * link up interrupt but shouldn't be a problem */ 4616 * link up interrupt but shouldn't be a problem */
4365 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 4617 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -4451,59 +4703,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
4451} 4703}
4452 4704
4453/** 4705/**
4454 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
4455 * @rx_ring: ring to free buffers from
4456 **/
4457static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
4458{
4459 struct device *dev = rx_ring->dev;
4460 unsigned long size;
4461 u16 i;
4462
4463 /* ring already cleared, nothing to do */
4464 if (!rx_ring->rx_buffer_info)
4465 return;
4466
4467 /* Free all the Rx ring sk_buffs */
4468 for (i = 0; i < rx_ring->count; i++) {
4469 struct ixgbe_rx_buffer *rx_buffer;
4470
4471 rx_buffer = &rx_ring->rx_buffer_info[i];
4472 if (rx_buffer->skb) {
4473 struct sk_buff *skb = rx_buffer->skb;
4474 if (IXGBE_CB(skb)->page_released) {
4475 dma_unmap_page(dev,
4476 IXGBE_CB(skb)->dma,
4477 ixgbe_rx_bufsz(rx_ring),
4478 DMA_FROM_DEVICE);
4479 IXGBE_CB(skb)->page_released = false;
4480 }
4481 dev_kfree_skb(skb);
4482 }
4483 rx_buffer->skb = NULL;
4484 if (rx_buffer->dma)
4485 dma_unmap_page(dev, rx_buffer->dma,
4486 ixgbe_rx_pg_size(rx_ring),
4487 DMA_FROM_DEVICE);
4488 rx_buffer->dma = 0;
4489 if (rx_buffer->page)
4490 __free_pages(rx_buffer->page,
4491 ixgbe_rx_pg_order(rx_ring));
4492 rx_buffer->page = NULL;
4493 }
4494
4495 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4496 memset(rx_ring->rx_buffer_info, 0, size);
4497
4498 /* Zero out the descriptor ring */
4499 memset(rx_ring->desc, 0, rx_ring->size);
4500
4501 rx_ring->next_to_alloc = 0;
4502 rx_ring->next_to_clean = 0;
4503 rx_ring->next_to_use = 0;
4504}
4505
4506/**
4507 * ixgbe_clean_tx_ring - Free Tx Buffers 4706 * ixgbe_clean_tx_ring - Free Tx Buffers
4508 * @tx_ring: ring to be cleaned 4707 * @tx_ring: ring to be cleaned
4509 **/ 4708 **/
@@ -4580,6 +4779,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
4580{ 4779{
4581 struct net_device *netdev = adapter->netdev; 4780 struct net_device *netdev = adapter->netdev;
4582 struct ixgbe_hw *hw = &adapter->hw; 4781 struct ixgbe_hw *hw = &adapter->hw;
4782 struct net_device *upper;
4783 struct list_head *iter;
4583 u32 rxctrl; 4784 u32 rxctrl;
4584 int i; 4785 int i;
4585 4786
@@ -4603,6 +4804,19 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
4603 netif_carrier_off(netdev); 4804 netif_carrier_off(netdev);
4604 netif_tx_disable(netdev); 4805 netif_tx_disable(netdev);
4605 4806
4807 /* disable any upper devices */
4808 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
4809 if (netif_is_macvlan(upper)) {
4810 struct macvlan_dev *vlan = netdev_priv(upper);
4811
4812 if (vlan->fwd_priv) {
4813 netif_tx_stop_all_queues(upper);
4814 netif_carrier_off(upper);
4815 netif_tx_disable(upper);
4816 }
4817 }
4818 }
4819
4606 ixgbe_irq_disable(adapter); 4820 ixgbe_irq_disable(adapter);
4607 4821
4608 ixgbe_napi_disable_all(adapter); 4822 ixgbe_napi_disable_all(adapter);
@@ -4833,6 +5047,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
4833 return -EIO; 5047 return -EIO;
4834 } 5048 }
4835 5049
5050 /* PF holds first pool slot */
5051 set_bit(0, &adapter->fwd_bitmask);
4836 set_bit(__IXGBE_DOWN, &adapter->state); 5052 set_bit(__IXGBE_DOWN, &adapter->state);
4837 5053
4838 return 0; 5054 return 0;
@@ -5138,7 +5354,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5138static int ixgbe_open(struct net_device *netdev) 5354static int ixgbe_open(struct net_device *netdev)
5139{ 5355{
5140 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5356 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5141 int err; 5357 int err, queues;
5142 5358
5143 /* disallow open during test */ 5359 /* disallow open during test */
5144 if (test_bit(__IXGBE_TESTING, &adapter->state)) 5360 if (test_bit(__IXGBE_TESTING, &adapter->state))
@@ -5163,16 +5379,21 @@ static int ixgbe_open(struct net_device *netdev)
5163 goto err_req_irq; 5379 goto err_req_irq;
5164 5380
5165 /* Notify the stack of the actual queue counts. */ 5381 /* Notify the stack of the actual queue counts. */
5166 err = netif_set_real_num_tx_queues(netdev, 5382 if (adapter->num_rx_pools > 1)
5167 adapter->num_rx_pools > 1 ? 1 : 5383 queues = adapter->num_rx_queues_per_pool;
5168 adapter->num_tx_queues); 5384 else
5385 queues = adapter->num_tx_queues;
5386
5387 err = netif_set_real_num_tx_queues(netdev, queues);
5169 if (err) 5388 if (err)
5170 goto err_set_queues; 5389 goto err_set_queues;
5171 5390
5172 5391 if (adapter->num_rx_pools > 1 &&
5173 err = netif_set_real_num_rx_queues(netdev, 5392 adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
5174 adapter->num_rx_pools > 1 ? 1 : 5393 queues = IXGBE_MAX_L2A_QUEUES;
5175 adapter->num_rx_queues); 5394 else
5395 queues = adapter->num_rx_queues;
5396 err = netif_set_real_num_rx_queues(netdev, queues);
5176 if (err) 5397 if (err)
5177 goto err_set_queues; 5398 goto err_set_queues;
5178 5399
@@ -6762,8 +6983,9 @@ out_drop:
6762 return NETDEV_TX_OK; 6983 return NETDEV_TX_OK;
6763} 6984}
6764 6985
6765static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, 6986static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
6766 struct net_device *netdev) 6987 struct net_device *netdev,
6988 struct ixgbe_ring *ring)
6767{ 6989{
6768 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6990 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6769 struct ixgbe_ring *tx_ring; 6991 struct ixgbe_ring *tx_ring;
@@ -6779,10 +7001,17 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6779 skb_set_tail_pointer(skb, 17); 7001 skb_set_tail_pointer(skb, 17);
6780 } 7002 }
6781 7003
6782 tx_ring = adapter->tx_ring[skb->queue_mapping]; 7004 tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
7005
6783 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); 7006 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
6784} 7007}
6785 7008
7009static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
7010 struct net_device *netdev)
7011{
7012 return __ixgbe_xmit_frame(skb, netdev, NULL);
7013}
7014
6786/** 7015/**
6787 * ixgbe_set_mac - Change the Ethernet Address of the NIC 7016 * ixgbe_set_mac - Change the Ethernet Address of the NIC
6788 * @netdev: network interface device structure 7017 * @netdev: network interface device structure
@@ -7039,6 +7268,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
7039{ 7268{
7040 struct ixgbe_adapter *adapter = netdev_priv(dev); 7269 struct ixgbe_adapter *adapter = netdev_priv(dev);
7041 struct ixgbe_hw *hw = &adapter->hw; 7270 struct ixgbe_hw *hw = &adapter->hw;
7271 bool pools;
7042 7272
7043 /* Hardware supports up to 8 traffic classes */ 7273 /* Hardware supports up to 8 traffic classes */
7044 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || 7274 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
@@ -7046,6 +7276,10 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
7046 tc < MAX_TRAFFIC_CLASS)) 7276 tc < MAX_TRAFFIC_CLASS))
7047 return -EINVAL; 7277 return -EINVAL;
7048 7278
7279 pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
7280 if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
7281 return -EBUSY;
7282
7049 /* Hardware has to reinitialize queues and interrupts to 7283 /* Hardware has to reinitialize queues and interrupts to
7050 * match packet buffer alignment. Unfortunately, the 7284 * match packet buffer alignment. Unfortunately, the
7051 * hardware is not flexible enough to do this dynamically. 7285 * hardware is not flexible enough to do this dynamically.
@@ -7300,6 +7534,94 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7300 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); 7534 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
7301} 7535}
7302 7536
7537static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
7538{
7539 struct ixgbe_fwd_adapter *fwd_adapter = NULL;
7540 struct ixgbe_adapter *adapter = netdev_priv(pdev);
7541 int pool, err;
7542
7543 /* Check for hardware restriction on number of rx/tx queues */
7544 if (vdev->num_rx_queues != vdev->num_tx_queues ||
7545 vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
7546 vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
7547 netdev_info(pdev,
7548 "%s: Supports RX/TX Queue counts 1,2, and 4\n",
7549 pdev->name);
7550 return ERR_PTR(-EINVAL);
7551 }
7552
7553 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
7554 adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
7555 (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
7556 return ERR_PTR(-EBUSY);
7557
7558 fwd_adapter = kcalloc(1, sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL);
7559 if (!fwd_adapter)
7560 return ERR_PTR(-ENOMEM);
7561
7562 pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
7563 adapter->num_rx_pools++;
7564 set_bit(pool, &adapter->fwd_bitmask);
7565
7566 /* Enable VMDq flag so device will be set in VM mode */
7567 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
7568 adapter->ring_feature[RING_F_VMDQ].limit = adapter->num_rx_pools;
7569 adapter->ring_feature[RING_F_RSS].limit = vdev->num_rx_queues;
7570
7571 /* Force reinit of ring allocation with VMDQ enabled */
7572 err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
7573 if (err)
7574 goto fwd_add_err;
7575 fwd_adapter->pool = pool;
7576 fwd_adapter->real_adapter = adapter;
7577 err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
7578 if (err)
7579 goto fwd_add_err;
7580 netif_tx_start_all_queues(vdev);
7581 return fwd_adapter;
7582fwd_add_err:
7583 /* unwind counter and free adapter struct */
7584 netdev_info(pdev,
7585 "%s: dfwd hardware acceleration failed\n", vdev->name);
7586 clear_bit(pool, &adapter->fwd_bitmask);
7587 adapter->num_rx_pools--;
7588 kfree(fwd_adapter);
7589 return ERR_PTR(err);
7590}
7591
7592static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
7593{
7594 struct ixgbe_fwd_adapter *fwd_adapter = priv;
7595 struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
7596
7597 clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
7598 adapter->num_rx_pools--;
7599
7600 adapter->ring_feature[RING_F_VMDQ].limit = adapter->num_rx_pools;
7601 ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
7602 ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
7603 netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
7604 fwd_adapter->pool, adapter->num_rx_pools,
7605 fwd_adapter->rx_base_queue,
7606 fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
7607 adapter->fwd_bitmask);
7608 kfree(fwd_adapter);
7609}
7610
7611static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
7612 struct net_device *dev,
7613 void *priv)
7614{
7615 struct ixgbe_fwd_adapter *fwd_adapter = priv;
7616 unsigned int queue;
7617 struct ixgbe_ring *tx_ring;
7618
7619 queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
7620 tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
7621
7622 return __ixgbe_xmit_frame(skb, dev, tx_ring);
7623}
7624
7303static const struct net_device_ops ixgbe_netdev_ops = { 7625static const struct net_device_ops ixgbe_netdev_ops = {
7304 .ndo_open = ixgbe_open, 7626 .ndo_open = ixgbe_open,
7305 .ndo_stop = ixgbe_close, 7627 .ndo_stop = ixgbe_close,
@@ -7344,6 +7666,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7344 .ndo_fdb_add = ixgbe_ndo_fdb_add, 7666 .ndo_fdb_add = ixgbe_ndo_fdb_add,
7345 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, 7667 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
7346 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, 7668 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
7669 .ndo_dfwd_add_station = ixgbe_fwd_add,
7670 .ndo_dfwd_del_station = ixgbe_fwd_del,
7671 .ndo_dfwd_start_xmit = ixgbe_fwd_xmit,
7347}; 7672};
7348 7673
7349/** 7674/**
@@ -7645,7 +7970,8 @@ skip_sriov:
7645 NETIF_F_TSO | 7970 NETIF_F_TSO |
7646 NETIF_F_TSO6 | 7971 NETIF_F_TSO6 |
7647 NETIF_F_RXHASH | 7972 NETIF_F_RXHASH |
7648 NETIF_F_RXCSUM; 7973 NETIF_F_RXCSUM |
7974 NETIF_F_HW_L2FW_DOFFLOAD;
7649 7975
7650 netdev->hw_features = netdev->features; 7976 netdev->hw_features = netdev->features;
7651 7977
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 1fe7cb0142e1..a8571e488ea4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -223,17 +223,19 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
223 IXGBE_WRITE_FLUSH(hw); 223 IXGBE_WRITE_FLUSH(hw);
224 224
225 /* Disable VMDq flag so device will be set in VM mode */ 225 /* Disable VMDq flag so device will be set in VM mode */
226 if (adapter->ring_feature[RING_F_VMDQ].limit == 1) 226 if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
227 adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; 227 adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
228 adapter->ring_feature[RING_F_VMDQ].offset = 0; 228 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
229 rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
230 } else {
231 rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
232 }
229 233
230 rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); 234 adapter->ring_feature[RING_F_VMDQ].offset = 0;
231 adapter->ring_feature[RING_F_RSS].limit = rss; 235 adapter->ring_feature[RING_F_RSS].limit = rss;
232 236
233 /* take a breather then clean up driver data */ 237 /* take a breather then clean up driver data */
234 msleep(100); 238 msleep(100);
235
236 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
237 return 0; 239 return 0;
238} 240}
239 241
@@ -298,13 +300,10 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
298 err = ixgbe_disable_sriov(adapter); 300 err = ixgbe_disable_sriov(adapter);
299 301
300 /* Only reinit if no error and state changed */ 302 /* Only reinit if no error and state changed */
301 if (!err && current_flags != adapter->flags) {
302 /* ixgbe_disable_sriov() doesn't clear VMDQ flag */
303 adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
304#ifdef CONFIG_PCI_IOV 303#ifdef CONFIG_PCI_IOV
304 if (!err && current_flags != adapter->flags)
305 ixgbe_sriov_reinit(adapter); 305 ixgbe_sriov_reinit(adapter);
306#endif 306#endif
307 }
308 307
309 return err; 308 return err;
310} 309}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index cc9845ec91c1..af4aaa5893ff 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -297,7 +297,13 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
297 int ret; 297 int ret;
298 const struct macvlan_dev *vlan = netdev_priv(dev); 298 const struct macvlan_dev *vlan = netdev_priv(dev);
299 299
300 ret = macvlan_queue_xmit(skb, dev); 300 if (vlan->fwd_priv) {
301 skb->dev = vlan->lowerdev;
302 ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv);
303 } else {
304 ret = macvlan_queue_xmit(skb, dev);
305 }
306
301 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { 307 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
302 struct macvlan_pcpu_stats *pcpu_stats; 308 struct macvlan_pcpu_stats *pcpu_stats;
303 309
@@ -347,6 +353,21 @@ static int macvlan_open(struct net_device *dev)
347 goto hash_add; 353 goto hash_add;
348 } 354 }
349 355
356 if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
357 vlan->fwd_priv =
358 lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
359
360 /* If we get a NULL pointer back, or if we get an error
361 * then we should just fall through to the non accelerated path
362 */
363 if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
364 vlan->fwd_priv = NULL;
365 } else {
366 dev->features &= ~NETIF_F_LLTX;
367 return 0;
368 }
369 }
370
350 err = -EBUSY; 371 err = -EBUSY;
351 if (macvlan_addr_busy(vlan->port, dev->dev_addr)) 372 if (macvlan_addr_busy(vlan->port, dev->dev_addr))
352 goto out; 373 goto out;
@@ -367,6 +388,11 @@ hash_add:
367del_unicast: 388del_unicast:
368 dev_uc_del(lowerdev, dev->dev_addr); 389 dev_uc_del(lowerdev, dev->dev_addr);
369out: 390out:
391 if (vlan->fwd_priv) {
392 lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
393 vlan->fwd_priv);
394 vlan->fwd_priv = NULL;
395 }
370 return err; 396 return err;
371} 397}
372 398
@@ -375,6 +401,13 @@ static int macvlan_stop(struct net_device *dev)
375 struct macvlan_dev *vlan = netdev_priv(dev); 401 struct macvlan_dev *vlan = netdev_priv(dev);
376 struct net_device *lowerdev = vlan->lowerdev; 402 struct net_device *lowerdev = vlan->lowerdev;
377 403
404 if (vlan->fwd_priv) {
405 lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
406 vlan->fwd_priv);
407 vlan->fwd_priv = NULL;
408 return 0;
409 }
410
378 dev_uc_unsync(lowerdev, dev); 411 dev_uc_unsync(lowerdev, dev);
379 dev_mc_unsync(lowerdev, dev); 412 dev_mc_unsync(lowerdev, dev);
380 413
@@ -833,6 +866,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
833 if (err < 0) 866 if (err < 0)
834 goto destroy_port; 867 goto destroy_port;
835 868
869 dev->priv_flags |= IFF_MACVLAN;
836 err = netdev_upper_dev_link(lowerdev, dev); 870 err = netdev_upper_dev_link(lowerdev, dev);
837 if (err) 871 if (err)
838 goto destroy_port; 872 goto destroy_port;