 drivers/net/igb/igb.h      |  2 --
 drivers/net/igb/igb_main.c | 51 ---------------------------------------------
 2 files changed, 0 insertions(+), 53 deletions(-)
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 56de7ec15b46..4ff6f0567f3f 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -287,9 +287,7 @@ struct igb_adapter {
 	int bars;
 	int need_ioport;
 
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES];
-#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
 #ifdef CONFIG_IGB_LRO
 	unsigned int lro_max_aggr;
 	unsigned int lro_aggregated;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 74dc43e29261..64a150a16f39 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -532,10 +532,8 @@ msi_only:
 	if (!pci_enable_msi(adapter->pdev))
 		adapter->flags |= IGB_FLAG_HAS_MSI;
 
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	/* Notify the stack of the (possibly) reduced Tx Queue count. */
 	adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
-#endif
 	return;
 }
 
@@ -824,10 +822,8 @@ void igb_down(struct igb_adapter *adapter)
 	/* flush and sleep below */
 
 	netif_stop_queue(netdev);
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	for (i = 0; i < adapter->num_tx_queues; i++)
 		netif_stop_subqueue(netdev, i);
-#endif
 
 	/* disable transmits in the hardware */
 	tctl = rd32(E1000_TCTL);
@@ -1042,11 +1038,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	pci_save_state(pdev);
 
 	err = -ENOMEM;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), IGB_MAX_TX_QUEUES);
-#else
-	netdev = alloc_etherdev(sizeof(struct igb_adapter));
-#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
 	if (!netdev)
 		goto err_alloc_etherdev;
 
@@ -1163,9 +1155,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	netdev->features |= NETIF_F_MULTI_QUEUE;
-#endif
 
 	netdev->features |= NETIF_F_LLTX;
 	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
@@ -1279,10 +1269,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	/* tell the stack to leave us alone until igb_open() is called */
 	netif_carrier_off(netdev);
 	netif_stop_queue(netdev);
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	for (i = 0; i < adapter->num_tx_queues; i++)
 		netif_stop_subqueue(netdev, i);
-#endif
 
 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
@@ -1432,11 +1420,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 	/* Number of supported queues. */
 	/* Having more queues than CPUs doesn't make sense. */
 	adapter->num_rx_queues = min((u32)IGB_MAX_RX_QUEUES, (u32)num_online_cpus());
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	adapter->num_tx_queues = min(IGB_MAX_TX_QUEUES, num_online_cpus());
-#else
-	adapter->num_tx_queues = 1;
-#endif /* CONFIG_NET_MULTI_QUEUE_DEVICE */
 
 	/* This call may decrease the number of queues depending on
 	 * interrupt mode. */
@@ -1619,9 +1603,7 @@ err:
 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
 {
 	int i, err = 0;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	int r_idx;
-#endif
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
@@ -1634,12 +1616,10 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
 		}
 	}
 
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
 		r_idx = i % adapter->num_tx_queues;
 		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
 	}
-#endif
 	return err;
 }
 
@@ -2337,9 +2317,7 @@ static void igb_watchdog_task(struct work_struct *work)
 	struct e1000_mac_info *mac = &adapter->hw.mac;
 	u32 link;
 	s32 ret_val;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	int i;
-#endif
 
 	if ((netif_carrier_ok(netdev)) &&
 	    (rd32(E1000_STATUS) & E1000_STATUS_LU))
@@ -2396,10 +2374,8 @@ static void igb_watchdog_task(struct work_struct *work)
 
 			netif_carrier_on(netdev);
 			netif_wake_queue(netdev);
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 			for (i = 0; i < adapter->num_tx_queues; i++)
 				netif_wake_subqueue(netdev, i);
-#endif
 
 			if (!test_bit(__IGB_DOWN, &adapter->state))
 				mod_timer(&adapter->phy_info_timer,
@@ -2412,10 +2388,8 @@ static void igb_watchdog_task(struct work_struct *work)
 			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
 			netif_carrier_off(netdev);
 			netif_stop_queue(netdev);
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 			for (i = 0; i < adapter->num_tx_queues; i++)
 				netif_stop_subqueue(netdev, i);
-#endif
 			if (!test_bit(__IGB_DOWN, &adapter->state))
 				mod_timer(&adapter->phy_info_timer,
 					  round_jiffies(jiffies + 2 * HZ));
@@ -2943,11 +2917,7 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	netif_stop_subqueue(netdev, tx_ring->queue_index);
-#else
-	netif_stop_queue(netdev);
-#endif
 
 	/* Herbert's original patch had:
 	 * smp_mb__after_netif_stop_queue();
@@ -2960,11 +2930,7 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
 		return -EBUSY;
 
 	/* A reprieve! */
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	netif_wake_subqueue(netdev, tx_ring->queue_index);
-#else
-	netif_wake_queue(netdev);
-#endif
 	++adapter->restart_queue;
 	return 0;
 }
@@ -3051,14 +3017,9 @@ static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct igb_ring *tx_ring;
 
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	int r_idx = 0;
 	r_idx = skb->queue_mapping & (IGB_MAX_TX_QUEUES - 1);
 	tx_ring = adapter->multi_tx_table[r_idx];
-#else
-	tx_ring = &adapter->tx_ring[0];
-#endif
-
 
 	/* This goes back to the question of how to logically map a tx queue
 	 * to a flow.  Right now, performance is impacted slightly negatively
@@ -3745,19 +3706,11 @@ done_cleaning:
 		 * sees the new next_to_clean.
 		 */
 		smp_mb();
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 		    !(test_bit(__IGB_DOWN, &adapter->state))) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
 			++adapter->restart_queue;
 		}
-#else
-		if (netif_queue_stopped(netdev) &&
-		    !(test_bit(__IGB_DOWN, &adapter->state))) {
-			netif_wake_queue(netdev);
-			++adapter->restart_queue;
-		}
-#endif
 	}
 
 	if (tx_ring->detect_tx_hung) {
@@ -3793,11 +3746,7 @@ done_cleaning:
 				tx_ring->buffer_info[i].time_stamp,
 				jiffies,
 				tx_desc->upper.fields.status);
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 			netif_stop_subqueue(netdev, tx_ring->queue_index);
-#else
-			netif_stop_queue(netdev);
-#endif
 		}
 	}
 	tx_ring->total_bytes += total_bytes;