author    David S. Miller <davem@davemloft.net>    2008-07-09 02:14:24 -0400
committer David S. Miller <davem@davemloft.net>    2008-07-09 02:14:24 -0400
commit    b19fa1fa91845234961c64dbd564671aa7c0fd27 (patch)
tree      efb09da87299ef503b59396b69a7667f1650e378 /drivers/net/ixgbe/ixgbe_main.c
parent    c773e847ea8f6812804e40f52399c6921a00eab1 (diff)

net: Delete NETDEVICES_MULTIQUEUE kconfig option.

Multiple TX queue support is a core networking feature.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
 -rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 40
 1 file changed, 0 insertions(+), 40 deletions(-)
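What this means for drivers: with the kconfig option gone, the per-queue TX helpers in <linux/netdevice.h> (netif_stop_subqueue(), netif_wake_subqueue(), __netif_subqueue_stopped()) are always available, so the #ifdef/#else fallbacks to the single-queue API in the hunks below can simply be deleted. The fragment here is an illustrative sketch only, not code from this patch; example_wake_all_tx_queues is a made-up helper name.

#include <linux/netdevice.h>

/* Illustrative sketch (not part of this patch): waking every TX queue
 * without any CONFIG_NETDEVICES_MULTIQUEUE guard, as the watchdog hunk
 * further down now does unconditionally.
 */
static void example_wake_all_tx_queues(struct net_device *netdev,
				       int num_tx_queues)
{
	int i;

	for (i = 0; i < num_tx_queues; i++)
		netif_wake_subqueue(netdev, i);
}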
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index b37d618d8e2a..10a1c8c5cda1 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -266,28 +266,16 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 		 * sees the new next_to_clean.
 		 */
 		smp_mb();
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
 			adapter->restart_queue++;
 		}
-#else
-		if (netif_queue_stopped(netdev) &&
-		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
-			netif_wake_queue(netdev);
-			adapter->restart_queue++;
-		}
-#endif
 	}
 
 	if (adapter->detect_tx_hung)
 		if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 			netif_stop_subqueue(netdev, tx_ring->queue_index);
-#else
-			netif_stop_queue(netdev);
-#endif
 
 	if (total_tx_packets >= tx_ring->work_limit)
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
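For context, the wake path kept by the hunk above follows the usual TX flow-control handshake: issue a memory barrier before re-testing the stopped bit so the transmit side cannot stop the queue without this side noticing. A hedged sketch with hypothetical names (example_tx_completion_wake, free_descs, wake_threshold), not the driver's exact code:

#include <linux/netdevice.h>

/* Sketch of the completion-side wake logic; the real code is in
 * ixgbe_clean_tx_irq() above.
 */
static void example_tx_completion_wake(struct net_device *netdev,
				       u16 queue_index, int free_descs,
				       int wake_threshold)
{
	if (free_descs < wake_threshold)
		return;

	/* Pairs with the barrier in the transmit path: make the updated
	 * next_to_clean visible before testing whether the transmit
	 * path stopped the queue.
	 */
	smp_mb();

	if (__netif_subqueue_stopped(netdev, queue_index))
		netif_wake_subqueue(netdev, queue_index);
}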
@@ -2192,11 +2180,7 @@ static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 	case (IXGBE_FLAG_RSS_ENABLED):
 		rss_m = 0xF;
 		nrq = rss_i;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 		ntq = rss_i;
-#else
-		ntq = 1;
-#endif
 		break;
 	case 0:
 	default:
@@ -2370,10 +2354,8 @@ try_msi:
 	}
 
 out:
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	/* Notify the stack of the (possibly) reduced Tx Queue count. */
 	adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
-#endif
 
 	return err;
 }
@@ -2910,9 +2892,7 @@ static void ixgbe_watchdog(unsigned long data)
 	struct net_device *netdev = adapter->netdev;
 	bool link_up;
 	u32 link_speed = 0;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	int i;
-#endif
 
 	adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up);
 
@@ -2934,10 +2914,8 @@ static void ixgbe_watchdog(unsigned long data)
 
 			netif_carrier_on(netdev);
 			netif_wake_queue(netdev);
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 			for (i = 0; i < adapter->num_tx_queues; i++)
 				netif_wake_subqueue(netdev, i);
-#endif
 		} else {
 			/* Force detection of hung controller */
 			adapter->detect_tx_hung = true;
@@ -3264,11 +3242,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	netif_stop_subqueue(netdev, tx_ring->queue_index);
-#else
-	netif_stop_queue(netdev);
-#endif
 	/* Herbert's original patch had:
 	 *  smp_mb__after_netif_stop_queue();
 	 * but since that doesn't exist yet, just open code it. */
@@ -3280,11 +3254,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
 		return -EBUSY;
 
 	/* A reprieve! - use start_queue because it doesn't call schedule */
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	netif_wake_subqueue(netdev, tx_ring->queue_index);
-#else
-	netif_wake_queue(netdev);
-#endif
 	++adapter->restart_queue;
 	return 0;
 }
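The two hunks above are the transmit-side half of that handshake: stop the queue first, issue the barrier, then re-check the ring in case the cleanup path freed descriptors in the meantime. A rough sketch under assumed names (example_tx_ring, example_unused_descs, example_maybe_stop_tx), not the driver's implementation:

#include <linux/netdevice.h>

/* Hypothetical ring bookkeeping, standing in for struct ixgbe_ring. */
struct example_tx_ring {
	struct net_device *netdev;
	u16 queue_index;
	u16 next_to_use;
	u16 next_to_clean;
	u16 count;
};

/* Free descriptors left in the ring (mirrors the spirit of
 * IXGBE_DESC_UNUSED()). */
static u16 example_unused_descs(const struct example_tx_ring *ring)
{
	return ((ring->next_to_clean > ring->next_to_use) ? 0 : ring->count) +
	       ring->next_to_clean - ring->next_to_use - 1;
}

static int example_maybe_stop_tx(struct example_tx_ring *ring, u16 needed)
{
	if (likely(example_unused_descs(ring) >= needed))
		return 0;

	netif_stop_subqueue(ring->netdev, ring->queue_index);

	/* Pairs with the barrier on the completion side so neither CPU
	 * can miss the other's update.
	 */
	smp_mb();

	/* The cleanup path may have freed descriptors between the first
	 * check and stopping the queue; re-check before giving up.
	 */
	if (likely(example_unused_descs(ring) < needed))
		return -EBUSY;

	/* A reprieve: restart the queue and carry on transmitting. */
	netif_wake_subqueue(ring->netdev, ring->queue_index);
	return 0;
}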
@@ -3312,9 +3282,7 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int f;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	len -= skb->data_len;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
-#endif
 	tx_ring = &adapter->tx_ring[r_idx];
 
 
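The ring selection kept by the hunk above works because ixgbe sizes its TX queue count to a power of two, so masking skb->queue_mapping with (num_tx_queues - 1) is a cheap modulo. A hedged sketch with a hypothetical helper name:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Hypothetical helper: map the stack's chosen queue onto a TX ring.
 * Assumes num_tx_queues is a power of two, as ixgbe configures for RSS,
 * so (n - 1) & mapping == mapping % n.
 */
static unsigned int example_select_tx_ring(const struct sk_buff *skb,
					   unsigned int num_tx_queues)
{
	return (num_tx_queues - 1) & skb->queue_mapping;
}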
@@ -3502,11 +3470,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	pci_set_master(pdev);
 	pci_save_state(pdev);
 
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
-#else
-	netdev = alloc_etherdev(sizeof(struct ixgbe_adapter));
-#endif
 	if (!netdev) {
 		err = -ENOMEM;
 		goto err_alloc_etherdev;
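alloc_etherdev_mq(), kept unconditionally by the hunk above, is the multiqueue variant of alloc_etherdev(): it reserves TX queue state for the given count at allocation time. A minimal hedged sketch (example_priv, example_alloc_mq_netdev and EXAMPLE_MAX_TX_QUEUES are made-up names):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical private area and queue limit for illustration only. */
struct example_priv {
	int num_tx_queues;
};

#define EXAMPLE_MAX_TX_QUEUES 8

static struct net_device *example_alloc_mq_netdev(void)
{
	struct net_device *netdev;

	/* Ask for the maximum TX queue count up front; the driver can
	 * later report a smaller usable count to the stack, as the
	 * egress_subqueue_count assignment in an earlier hunk does.
	 */
	netdev = alloc_etherdev_mq(sizeof(struct example_priv),
				   EXAMPLE_MAX_TX_QUEUES);
	if (!netdev)
		return NULL;

	return netdev;
}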
@@ -3598,9 +3562,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	netdev->features |= NETIF_F_MULTI_QUEUE;
-#endif
 
 	/* make sure the EEPROM is good */
 	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
@@ -3668,10 +3630,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
 	netif_carrier_off(netdev);
 	netif_stop_queue(netdev);
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	for (i = 0; i < adapter->num_tx_queues; i++)
 		netif_stop_subqueue(netdev, i);
-#endif
 
 	ixgbe_napi_add_all(adapter);
 