Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_main.c')
-rw-r--r--    drivers/net/ethernet/intel/ixgbe/ixgbe_main.c    121
1 file changed, 23 insertions(+), 98 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 048c7bc76b91..89f7b16c47b7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5341,12 +5341,11 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
                                     struct ixgbe_ring *rx_ring)
 {
         struct ixgbe_adapter *adapter = vadapter->real_adapter;
-        int index = rx_ring->queue_index + vadapter->rx_base_queue;
 
         /* shutdown specific queue receive and wait for dma to settle */
         ixgbe_disable_rx_queue(adapter, rx_ring);
         usleep_range(10000, 20000);
-        ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
+        ixgbe_irq_disable_queues(adapter, BIT_ULL(rx_ring->queue_index));
         ixgbe_clean_rx_ring(rx_ring);
 }
 
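The fix in this hunk stops adding rx_base_queue on top of the ring's queue_index when building the IRQ mask: after this change the ring's queue_index appears to already be the absolute queue number, so the old sum aimed the disable mask at the wrong bit. BIT_ULL() itself just builds a 64-bit mask with a single bit set; a minimal userspace sketch of the arithmetic (the macro mirrors its kernel definition, the sample index is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the kernel's BIT_ULL(): a 64-bit mask with bit nr set. */
    #define BIT_ULL(nr) (1ULL << (nr))

    int main(void)
    {
            uint32_t queue_index = 3;  /* hypothetical absolute ring index */

            /* This mask is what ixgbe_irq_disable_queues() receives; adding
             * a base offset to an already-absolute index would set a bit
             * past the intended queue, leaving its IRQ enabled.
             */
            uint64_t qmask = BIT_ULL(queue_index);

            printf("qmask = 0x%016llx\n", (unsigned long long)qmask);
            return 0;
    }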
@@ -5355,20 +5354,13 @@ static int ixgbe_fwd_ring_down(struct net_device *vdev,
 {
         struct ixgbe_adapter *adapter = accel->real_adapter;
         unsigned int rxbase = accel->rx_base_queue;
-        unsigned int txbase = accel->tx_base_queue;
         int i;
 
-        netif_tx_stop_all_queues(vdev);
-
         for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
                 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
                 adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
         }
 
-        for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
-                adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
-
-
         return 0;
 }
 
@@ -5376,8 +5368,7 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev,
                              struct ixgbe_fwd_adapter *accel)
 {
         struct ixgbe_adapter *adapter = accel->real_adapter;
-        unsigned int rxbase, txbase, queues;
-        int i, baseq, err = 0;
+        int i, baseq, err;
 
         if (!test_bit(accel->pool, adapter->fwd_bitmask))
                 return 0;
@@ -5388,30 +5379,17 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev,
                    baseq, baseq + adapter->num_rx_queues_per_pool);
 
         accel->netdev = vdev;
-        accel->rx_base_queue = rxbase = baseq;
-        accel->tx_base_queue = txbase = baseq;
+        accel->rx_base_queue = baseq;
+        accel->tx_base_queue = baseq;
 
         for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
-                ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
+                ixgbe_disable_fwd_ring(accel, adapter->rx_ring[baseq + i]);
 
         for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
-                adapter->rx_ring[rxbase + i]->netdev = vdev;
-                ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
+                adapter->rx_ring[baseq + i]->netdev = vdev;
+                ixgbe_configure_rx_ring(adapter, adapter->rx_ring[baseq + i]);
         }
 
-        for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
-                adapter->tx_ring[txbase + i]->netdev = vdev;
-
-        queues = min_t(unsigned int,
-                       adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
-        err = netif_set_real_num_tx_queues(vdev, queues);
-        if (err)
-                goto fwd_queue_err;
-
-        err = netif_set_real_num_rx_queues(vdev, queues);
-        if (err)
-                goto fwd_queue_err;
-
         /* ixgbe_add_mac_filter will return an index if it succeeds, so we
          * need to only treat it as an error value if it is negative.
          */
@@ -5899,21 +5877,6 @@ static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
         spin_unlock(&adapter->fdir_perfect_lock);
 }
 
-static int ixgbe_disable_macvlan(struct net_device *upper, void *data)
-{
-        if (netif_is_macvlan(upper)) {
-                struct macvlan_dev *vlan = netdev_priv(upper);
-
-                if (vlan->fwd_priv) {
-                        netif_tx_stop_all_queues(upper);
-                        netif_carrier_off(upper);
-                        netif_tx_disable(upper);
-                }
-        }
-
-        return 0;
-}
-
 void ixgbe_down(struct ixgbe_adapter *adapter)
 {
         struct net_device *netdev = adapter->netdev;
@@ -5943,10 +5906,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
         netif_carrier_off(netdev);
         netif_tx_disable(netdev);
 
-        /* disable any upper devices */
-        netdev_walk_all_upper_dev_rcu(adapter->netdev,
-                                      ixgbe_disable_macvlan, NULL);
-
         ixgbe_irq_disable(adapter);
 
         ixgbe_napi_disable_all(adapter);
@@ -7262,18 +7221,6 @@ static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
 #endif
 }
 
-static int ixgbe_enable_macvlan(struct net_device *upper, void *data)
-{
-        if (netif_is_macvlan(upper)) {
-                struct macvlan_dev *vlan = netdev_priv(upper);
-
-                if (vlan->fwd_priv)
-                        netif_tx_wake_all_queues(upper);
-        }
-
-        return 0;
-}
-
 /**
  * ixgbe_watchdog_link_is_up - update netif_carrier status and
  *                             print link up message
@@ -7354,12 +7301,6 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
         /* enable transmits */
         netif_tx_wake_all_queues(adapter->netdev);
 
-        /* enable any upper devices */
-        rtnl_lock();
-        netdev_walk_all_upper_dev_rcu(adapter->netdev,
-                                      ixgbe_enable_macvlan, NULL);
-        rtnl_unlock();
-
         /* update the default user priority for VFs */
         ixgbe_update_default_up(adapter);
 
@@ -8320,14 +8261,19 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
                               void *accel_priv, select_queue_fallback_t fallback)
 {
         struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
-#ifdef IXGBE_FCOE
         struct ixgbe_adapter *adapter;
-        struct ixgbe_ring_feature *f;
         int txq;
+#ifdef IXGBE_FCOE
+        struct ixgbe_ring_feature *f;
 #endif
 
-        if (fwd_adapter)
-                return skb->queue_mapping + fwd_adapter->tx_base_queue;
+        if (fwd_adapter) {
+                adapter = netdev_priv(dev);
+                txq = reciprocal_scale(skb_get_hash(skb),
+                                       adapter->num_rx_queues_per_pool);
+
+                return txq + fwd_adapter->tx_base_queue;
+        }
 
 #ifdef IXGBE_FCOE
 
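The rewritten ixgbe_select_queue() no longer trusts skb->queue_mapping for forwarding offload, since a macvlan's queue count is no longer required to match the pool's. Instead it rehashes: reciprocal_scale() from include/linux/kernel.h maps a u32 into [0, ep_ro) with a 64-bit multiply and shift rather than a modulo. A standalone sketch of that scaling (sample hash and pool size are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as the kernel's reciprocal_scale(): scale a u32
     * uniformly into [0, ep_ro) without a per-packet division.
     */
    static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
    {
            return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

    int main(void)
    {
            uint32_t hash = 0x9e3779b9;    /* hypothetical skb_get_hash() value */
            uint32_t queues_per_pool = 4;  /* hypothetical pool size */

            /* Offset within the pool; tx_base_queue is added on top. */
            printf("txq offset = %u\n", reciprocal_scale(hash, queues_per_pool));
            return 0;
    }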
@@ -9816,22 +9762,6 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
         if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
                 return ERR_PTR(-EINVAL);
 
-#ifdef CONFIG_RPS
-        if (vdev->num_rx_queues != vdev->num_tx_queues) {
-                netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
-                            vdev->name);
-                return ERR_PTR(-EINVAL);
-        }
-#endif
-        /* Check for hardware restriction on number of rx/tx queues */
-        if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
-            vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
-                netdev_info(pdev,
-                            "%s: Supports RX/TX Queue counts 1,2, and 4\n",
-                            pdev->name);
-                return ERR_PTR(-EINVAL);
-        }
-
         if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
              adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
             (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
@@ -9848,24 +9778,19 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
         /* Enable VMDq flag so device will be set in VM mode */
         adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
         adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
-        adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
 
-        /* Force reinit of ring allocation with VMDQ enabled */
-        err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
-        if (err)
-                goto fwd_add_err;
         fwd_adapter->pool = pool;
         fwd_adapter->real_adapter = adapter;
 
-        if (netif_running(pdev)) {
+        /* Force reinit of ring allocation with VMDQ enabled */
+        err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
+
+        if (!err && netif_running(pdev))
                 err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
-                if (err)
-                        goto fwd_add_err;
-                netif_tx_start_all_queues(vdev);
-        }
 
-        return fwd_adapter;
-fwd_add_err:
+        if (!err)
+                return fwd_adapter;
+
         /* unwind counter and free adapter struct */
         netdev_info(pdev,
                     "%s: dfwd hardware acceleration failed\n", vdev->name);