| author | David S. Miller <davem@davemloft.net> | 2008-07-09 02:14:24 -0400 |
| --- | --- | --- |
| committer | David S. Miller <davem@davemloft.net> | 2008-07-09 02:14:24 -0400 |
| commit | b19fa1fa91845234961c64dbd564671aa7c0fd27 (patch) | |
| tree | efb09da87299ef503b59396b69a7667f1650e378 /drivers/net | |
| parent | c773e847ea8f6812804e40f52399c6921a00eab1 (diff) | |
net: Delete NETDEVICES_MULTIQUEUE kconfig option.
Multiple TX queue support is a core networking feature.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r-- | drivers/net/Kconfig               |  8 |
-rw-r--r-- | drivers/net/cpmac.c               | 14 |
-rw-r--r-- | drivers/net/ixgbe/ixgbe_ethtool.c |  6 |
-rw-r--r-- | drivers/net/ixgbe/ixgbe_main.c    | 40 |
-rw-r--r-- | drivers/net/s2io.c                | 47 |
5 files changed, 8 insertions, 107 deletions
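With the Kconfig symbol removed, the change in each driver is mechanical: the multiqueue branch of every `#ifdef CONFIG_NETDEVICES_MULTIQUEUE` block is kept and the single-queue fallback is deleted. Roughly, in before/after form (an illustrative composite, not an exact excerpt from any one of the files below):

```c
/* Before: compile-time choice between per-queue and whole-device stop. */
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	netif_stop_subqueue(dev, skb_get_queue_mapping(skb));
#else
	netif_stop_queue(dev);
#endif

/* After: the subqueue API is always present, so the call is unconditional. */
	netif_stop_subqueue(dev, skb_get_queue_mapping(skb));
```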
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ef733abc857d..4675c1bd6fb9 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -26,14 +26,6 @@ menuconfig NETDEVICES
 # that for each of the symbols.
 if NETDEVICES
 
-config NETDEVICES_MULTIQUEUE
-	bool "Netdevice multiple hardware queue support"
-	---help---
-	  Say Y here if you want to allow the network stack to use multiple
-	  hardware TX queues on an ethernet device.
-
-	  Most people will say N here.
-
 config IFB
 	tristate "Intermediate Functional Block support"
 	depends on NET_CLS_ACT
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 7f3f62e1b113..d630e2a72f42 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -569,11 +569,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	len = max(skb->len, ETH_ZLEN);
 	queue = skb_get_queue_mapping(skb);
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	netif_stop_subqueue(dev, queue);
-#else
-	netif_stop_queue(dev);
-#endif
 
 	desc = &priv->desc_ring[queue];
 	if (unlikely(desc->dataflags & CPMAC_OWN)) {
@@ -626,24 +622,14 @@ static void cpmac_end_xmit(struct net_device *dev, int queue)
 
 		dev_kfree_skb_irq(desc->skb);
 		desc->skb = NULL;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 		if (netif_subqueue_stopped(dev, queue))
 			netif_wake_subqueue(dev, queue);
-#else
-		if (netif_queue_stopped(dev))
-			netif_wake_queue(dev);
-#endif
 	} else {
 		if (netif_msg_tx_err(priv) && net_ratelimit())
 			printk(KERN_WARNING
 			       "%s: end_xmit: spurious interrupt\n", dev->name);
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 		if (netif_subqueue_stopped(dev, queue))
 			netif_wake_subqueue(dev, queue);
-#else
-		if (netif_queue_stopped(dev))
-			netif_wake_queue(dev);
-#endif
 	}
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 12990b1fe7e4..81b769093d22 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -252,21 +252,15 @@ static int ixgbe_set_tso(struct net_device *netdev, u32 data)
 		netdev->features |= NETIF_F_TSO;
 		netdev->features |= NETIF_F_TSO6;
 	} else {
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 		struct ixgbe_adapter *adapter = netdev_priv(netdev);
 		int i;
-#endif
 		netif_stop_queue(netdev);
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 		for (i = 0; i < adapter->num_tx_queues; i++)
 			netif_stop_subqueue(netdev, i);
-#endif
 		netdev->features &= ~NETIF_F_TSO;
 		netdev->features &= ~NETIF_F_TSO6;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 		for (i = 0; i < adapter->num_tx_queues; i++)
 			netif_start_subqueue(netdev, i);
-#endif
 		netif_start_queue(netdev);
 	}
 	return 0;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index b37d618d8e2a..10a1c8c5cda1 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -266,28 +266,16 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 		 * sees the new next_to_clean.
 		 */
 		smp_mb();
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
 			adapter->restart_queue++;
 		}
-#else
-		if (netif_queue_stopped(netdev) &&
-		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
-			netif_wake_queue(netdev);
-			adapter->restart_queue++;
-		}
-#endif
 	}
 
 	if (adapter->detect_tx_hung)
 		if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 			netif_stop_subqueue(netdev, tx_ring->queue_index);
-#else
-			netif_stop_queue(netdev);
-#endif
 
 	if (total_tx_packets >= tx_ring->work_limit)
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
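The hunk above is the completion (consumer) side of the usual lockless stop/wake handshake: after descriptors are reclaimed, a memory barrier is issued and the stopped state is re-checked before the subqueue is woken. A minimal sketch of that shape, using hypothetical `my_*` types and a `tx_free_count()` helper rather than ixgbe's actual structures:

```c
#include <linux/netdevice.h>

/* Hypothetical driver state used only for this sketch. */
struct my_ring { u16 queue_index; /* plus descriptor bookkeeping */ };
struct my_adapter { struct net_device *netdev; unsigned long state; };
#define __MY_DOWN		0
#define MY_TX_WAKE_THRESHOLD	32

/* Hypothetical helper: number of free descriptors in the ring. */
static unsigned int tx_free_count(struct my_ring *ring);

static void my_clean_tx_irq(struct my_adapter *adapter, struct my_ring *ring)
{
	struct net_device *netdev = adapter->netdev;

	/* ... reclaim completed descriptors, advance next_to_clean ... */

	if (tx_free_count(ring) >= MY_TX_WAKE_THRESHOLD) {
		/* Pairs with the barrier in the xmit path so the stop is
		 * visible before we decide whether to wake the queue. */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, ring->queue_index) &&
		    !test_bit(__MY_DOWN, &adapter->state))
			netif_wake_subqueue(netdev, ring->queue_index);
	}
}
```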
@@ -2192,11 +2180,7 @@ static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 	case (IXGBE_FLAG_RSS_ENABLED):
 		rss_m = 0xF;
 		nrq = rss_i;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 		ntq = rss_i;
-#else
-		ntq = 1;
-#endif
 		break;
 	case 0:
 	default:
@@ -2370,10 +2354,8 @@ try_msi:
 	}
 
 out:
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	/* Notify the stack of the (possibly) reduced Tx Queue count. */
 	adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
-#endif
 
 	return err;
 }
@@ -2910,9 +2892,7 @@ static void ixgbe_watchdog(unsigned long data)
 	struct net_device *netdev = adapter->netdev;
 	bool link_up;
 	u32 link_speed = 0;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	int i;
-#endif
 
 	adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up);
 
@@ -2934,10 +2914,8 @@ static void ixgbe_watchdog(unsigned long data)
 
 			netif_carrier_on(netdev);
 			netif_wake_queue(netdev);
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 			for (i = 0; i < adapter->num_tx_queues; i++)
 				netif_wake_subqueue(netdev, i);
-#endif
 		} else {
 			/* Force detection of hung controller */
 			adapter->detect_tx_hung = true;
@@ -3264,11 +3242,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	netif_stop_subqueue(netdev, tx_ring->queue_index);
-#else
-	netif_stop_queue(netdev);
-#endif
 	/* Herbert's original patch had:
 	 *  smp_mb__after_netif_stop_queue();
 	 * but since that doesn't exist yet, just open code it. */
@@ -3280,11 +3254,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
 		return -EBUSY;
 
 	/* A reprieve! - use start_queue because it doesn't call schedule */
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	netif_wake_subqueue(netdev, tx_ring->queue_index);
-#else
-	netif_wake_queue(netdev);
-#endif
 	++adapter->restart_queue;
 	return 0;
 }
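These two hunks are the producer side of the same handshake, which is why the in-code comment about open-coding a then-nonexistent smp_mb__after_netif_stop_queue() survives the patch: stop the subqueue, issue the barrier, re-check the free-descriptor count, and wake the queue again if the completion path raced in and made room. Sketched with the same hypothetical helpers as above:

```c
/* Producer side (xmit path), hypothetical driver: returns -EBUSY if the ring
 * really is full, 0 if the completion path freed space while we were stopping. */
static int my_maybe_stop_tx(struct net_device *netdev, struct my_ring *ring,
			    unsigned int needed)
{
	netif_stop_subqueue(netdev, ring->queue_index);

	/* Open-coded "barrier after stop": publish the stopped state before
	 * re-reading the free count that the clean path updates. */
	smp_mb();

	if (likely(tx_free_count(ring) < needed))
		return -EBUSY;

	/* A reprieve - the queue was refilled behind our back, so restart it. */
	netif_wake_subqueue(netdev, ring->queue_index);
	return 0;
}
```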
@@ -3312,9 +3282,7 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int f;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	len -= skb->data_len;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
-#endif
 	tx_ring = &adapter->tx_ring[r_idx];
 
 
@@ -3502,11 +3470,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	pci_set_master(pdev);
 	pci_save_state(pdev);
 
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
-#else
-	netdev = alloc_etherdev(sizeof(struct ixgbe_adapter));
-#endif
 	if (!netdev) {
 		err = -ENOMEM;
 		goto err_alloc_etherdev;
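Since the multiqueue-aware allocator is now always available, a driver simply sizes its net_device for its maximum TX queue count at probe time; alloc_etherdev() remains the single-queue special case. A brief sketch with hypothetical names:

```c
#include <linux/etherdevice.h>

#define MY_MAX_TX_QUEUES 8		/* hypothetical hardware limit */
struct my_priv { int placeholder;	/* driver private data */ };

static struct net_device *my_alloc_netdev(void)
{
	/* Reserves MY_MAX_TX_QUEUES TX subqueues alongside the private area. */
	return alloc_etherdev_mq(sizeof(struct my_priv), MY_MAX_TX_QUEUES);
}
```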
@@ -3598,9 +3562,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	netdev->features |= NETIF_F_MULTI_QUEUE;
-#endif
 
 	/* make sure the EEPROM is good */
 	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
@@ -3668,10 +3630,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
 	netif_carrier_off(netdev);
 	netif_stop_queue(netdev);
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	for (i = 0; i < adapter->num_tx_queues; i++)
 		netif_stop_subqueue(netdev, i);
-#endif
 
 	ixgbe_napi_add_all(adapter);
 
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index e7a3dbec674c..51a91154125d 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -546,13 +546,10 @@ static struct pci_driver s2io_driver = {
 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
 {
 	int i;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	if (sp->config.multiq) {
 		for (i = 0; i < sp->config.tx_fifo_num; i++)
 			netif_stop_subqueue(sp->dev, i);
-	} else
-#endif
-	{
+	} else {
 		for (i = 0; i < sp->config.tx_fifo_num; i++)
 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
 		netif_stop_queue(sp->dev);
@@ -561,12 +558,9 @@ static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
 
 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
 {
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	if (sp->config.multiq)
 		netif_stop_subqueue(sp->dev, fifo_no);
-	else
-#endif
-	{
+	else {
 		sp->mac_control.fifos[fifo_no].queue_state =
 			FIFO_QUEUE_STOP;
 		netif_stop_queue(sp->dev);
@@ -576,13 +570,10 @@ static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
 {
 	int i;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	if (sp->config.multiq) {
 		for (i = 0; i < sp->config.tx_fifo_num; i++)
 			netif_start_subqueue(sp->dev, i);
-	} else
-#endif
-	{
+	} else {
 		for (i = 0; i < sp->config.tx_fifo_num; i++)
 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
 		netif_start_queue(sp->dev);
@@ -591,12 +582,9 @@ static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
 
 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
 {
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	if (sp->config.multiq)
 		netif_start_subqueue(sp->dev, fifo_no);
-	else
-#endif
-	{
+	else {
 		sp->mac_control.fifos[fifo_no].queue_state =
 			FIFO_QUEUE_START;
 		netif_start_queue(sp->dev);
@@ -606,13 +594,10 @@ static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
 {
 	int i;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	if (sp->config.multiq) {
 		for (i = 0; i < sp->config.tx_fifo_num; i++)
 			netif_wake_subqueue(sp->dev, i);
-	} else
-#endif
-	{
+	} else {
 		for (i = 0; i < sp->config.tx_fifo_num; i++)
 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
 		netif_wake_queue(sp->dev);
@@ -623,13 +608,10 @@ static inline void s2io_wake_tx_queue(
 	struct fifo_info *fifo, int cnt, u8 multiq)
 {
 
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	if (multiq) {
 		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
 			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
-	} else
-#endif
-	if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
+	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
 		if (netif_queue_stopped(fifo->dev)) {
 			fifo->queue_state = FIFO_QUEUE_START;
 			netif_wake_queue(fifo->dev);
@@ -4189,15 +4171,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_LOCKED;
 	}
 
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	if (sp->config.multiq) {
 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
 			return NETDEV_TX_BUSY;
 		}
-	} else
-#endif
-	if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
+	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
 		if (netif_queue_stopped(dev)) {
 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
 			return NETDEV_TX_BUSY;
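s2io is the one driver here that turns the compile-time switch into a runtime one: when the multiq configuration flag is set it relies on the per-subqueue state the stack maintains, otherwise it keeps its own per-FIFO queue_state plus the whole-device queue. A reduced sketch of that dispatch (hypothetical types; FIFO_QUEUE_STOP stands in for s2io's constant):

```c
#include <linux/netdevice.h>

#define FIFO_QUEUE_STOP 0		/* illustrative value */
struct my_fifo { u16 fifo_no; int queue_state; };

/* Returns true if the chosen TX path is currently stopped and the caller
 * should report NETDEV_TX_BUSY, mirroring the shape of the hunk above. */
static bool my_tx_path_busy(struct net_device *dev, struct my_fifo *fifo,
			    bool multiq)
{
	if (multiq)
		return __netif_subqueue_stopped(dev, fifo->fifo_no);

	return unlikely(fifo->queue_state == FIFO_QUEUE_STOP) &&
	       netif_queue_stopped(dev);
}
```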
@@ -7633,12 +7612,6 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
 		DBG_PRINT(ERR_DBG, "tx fifos\n");
 	}
 
-#ifndef CONFIG_NETDEVICES_MULTIQUEUE
-	if (multiq) {
-		DBG_PRINT(ERR_DBG, "s2io: Multiqueue support not enabled\n");
-		multiq = 0;
-	}
-#endif
 	if (multiq)
 		*dev_multiq = multiq;
 
@@ -7783,12 +7756,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 		pci_disable_device(pdev);
 		return -ENODEV;
 	}
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	if (dev_multiq)
 		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
 	else
-#endif
-	dev = alloc_etherdev(sizeof(struct s2io_nic));
+		dev = alloc_etherdev(sizeof(struct s2io_nic));
 	if (dev == NULL) {
 		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
 		pci_disable_device(pdev);
@@ -7979,10 +7950,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 		dev->features |= NETIF_F_UFO;
 		dev->features |= NETIF_F_HW_CSUM;
 	}
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	if (config->multiq)
 		dev->features |= NETIF_F_MULTI_QUEUE;
-#endif
 	dev->tx_timeout = &s2io_tx_watchdog;
 	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
 	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);