about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
diff options
context:
space:
mode:
authorJohn Fastabend <john.r.fastabend@intel.com>2013-11-06 12:54:52 -0500
committerDavid S. Miller <davem@davemloft.net>2013-11-07 19:11:41 -0500
commit2a47fa45d4dfbc54659d28de311a1f764b296a3c (patch)
tree03664d96b9f9020411e9772ed21d3825a5ff69ef /drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
parenta6cc0cfa72e0b6d9f2c8fd858aacc32313c4f272 (diff)
ixgbe: enable l2 forwarding acceleration for macvlans
Now that l2 acceleration ops are in place from the prior patch, enable ixgbe to take advantage of these operations. Allow it to allocate queues for a macvlan so that when we transmit a frame, we can do the switching in hardware inside the ixgbe card, rather than in software. Signed-off-by: John Fastabend <john.r.fastabend@intel.com> Signed-off-by: Neil Horman <nhorman@tuxdriver.com> CC: Andy Gospodarek <andy@greyhouse.net> CC: "David S. Miller" <davem@davemloft.net> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c')
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 15
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 90b4e1089ecc..32e3eaaa160a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -498,6 +498,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
498#ifdef IXGBE_FCOE 498#ifdef IXGBE_FCOE
499 u16 fcoe_i = 0; 499 u16 fcoe_i = 0;
500#endif 500#endif
501 bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
501 502
502 /* only proceed if SR-IOV is enabled */ 503 /* only proceed if SR-IOV is enabled */
503 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 504 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
@@ -510,7 +511,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
510 vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); 511 vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
511 512
512 /* 64 pool mode with 2 queues per pool */ 513 /* 64 pool mode with 2 queues per pool */
513 if ((vmdq_i > 32) || (rss_i < 4)) { 514 if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
514 vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; 515 vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
515 rss_m = IXGBE_RSS_2Q_MASK; 516 rss_m = IXGBE_RSS_2Q_MASK;
516 rss_i = min_t(u16, rss_i, 2); 517 rss_i = min_t(u16, rss_i, 2);
@@ -852,7 +853,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
852 853
853 /* apply Tx specific ring traits */ 854 /* apply Tx specific ring traits */
854 ring->count = adapter->tx_ring_count; 855 ring->count = adapter->tx_ring_count;
855 ring->queue_index = txr_idx; 856 if (adapter->num_rx_pools > 1)
857 ring->queue_index =
858 txr_idx % adapter->num_rx_queues_per_pool;
859 else
860 ring->queue_index = txr_idx;
856 861
857 /* assign ring to adapter */ 862 /* assign ring to adapter */
858 adapter->tx_ring[txr_idx] = ring; 863 adapter->tx_ring[txr_idx] = ring;
@@ -895,7 +900,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
895#endif /* IXGBE_FCOE */ 900#endif /* IXGBE_FCOE */
896 /* apply Rx specific ring traits */ 901 /* apply Rx specific ring traits */
897 ring->count = adapter->rx_ring_count; 902 ring->count = adapter->rx_ring_count;
898 ring->queue_index = rxr_idx; 903 if (adapter->num_rx_pools > 1)
904 ring->queue_index =
905 rxr_idx % adapter->num_rx_queues_per_pool;
906 else
907 ring->queue_index = rxr_idx;
899 908
900 /* assign ring to adapter */ 909 /* assign ring to adapter */
901 adapter->rx_ring[rxr_idx] = ring; 910 adapter->rx_ring[rxr_idx] = ring;