author     Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>	2008-07-08 18:06:51 -0400
committer  Jeff Garzik <jgarzik@redhat.com>	2008-07-11 01:20:31 -0400
commit     661086df6cf35f62d0aec09ccb9164eb2baaaecd (patch)
tree       5e0419fc106b5663595678512236d207b870faa7 /drivers/net
parent     f731a9ef82c6728559b34743bca19d231e5e1b63 (diff)
igb: Introduce multiple TX queues with infrastructure
This code adds multiple Tx queue infrastructure, much like we
previously did in ixgbe. The bulk of the change is the MSI-X
vector mapping.
IAM (interrupt auto-mask) can now be safely enabled, and we have
verified that it works correctly. We can also eliminate the Tx ring lock.
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Mitch Williams <mitch.a.williams@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/igb/igb.h		7
-rw-r--r--	drivers/net/igb/igb_main.c	160
2 files changed, 114 insertions(+), 53 deletions(-)
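For context when reading the diff below: with CONFIG_NETDEVICES_MULTIQUEUE enabled, the patch steers each skb to a Tx ring by masking skb->queue_mapping and looking the ring up in the new multi_tx_table, which igb_setup_all_tx_resources() fills by wrapping table slots around the available rings. The stand-alone C sketch below restates that selection logic; the sketch_* names and the trimmed-down structures are illustrative stand-ins, not the real driver definitions.

/* Illustrative sketch only: how this patch steers a transmit skb to a Tx
 * ring when CONFIG_NETDEVICES_MULTIQUEUE is enabled.  The structures are
 * simplified stand-ins for the driver's real igb_adapter/igb_ring. */
#define IGB_MAX_TX_QUEUES 4

struct igb_ring {
	int queue_index;		/* which hardware Tx queue this is */
};

struct igb_adapter_sketch {
	struct igb_ring tx_ring[IGB_MAX_TX_QUEUES];	/* allocated rings */
	int num_tx_queues;				/* rings in use    */
	struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES];
};

/* Mirrors the loop added to igb_setup_all_tx_resources(): every table slot
 * maps onto a real ring, wrapping with modulo when fewer rings than
 * IGB_MAX_TX_QUEUES are in use (e.g. the MSI fallback path). */
static void sketch_build_tx_table(struct igb_adapter_sketch *a)
{
	int i;

	for (i = 0; i < IGB_MAX_TX_QUEUES; i++)
		a->multi_tx_table[i] = &a->tx_ring[i % a->num_tx_queues];
}

/* Mirrors the ring selection in igb_xmit_frame_adv(): the stack's queue
 * hint (skb->queue_mapping) is masked to the table size and used as a
 * direct index. */
static struct igb_ring *sketch_select_ring(struct igb_adapter_sketch *a,
					   unsigned int queue_mapping)
{
	return a->multi_tx_table[queue_mapping & (IGB_MAX_TX_QUEUES - 1)];
}

Because the table always holds IGB_MAX_TX_QUEUES entries, the masking in the transmit path stays valid even after igb_set_interrupt_capability() drops num_tx_queues to 1.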
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 2c48eec17660..a1431c8797b9 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -62,6 +62,7 @@ struct igb_adapter;
 
 /* Transmit and receive queues */
 #define IGB_MAX_RX_QUEUES 4
+#define IGB_MAX_TX_QUEUES 4
 
 /* RX descriptor control thresholds.
  * PTHRESH - MAC will consider prefetch if it has fewer than this number of
@@ -157,8 +158,6 @@ struct igb_ring {
 	union {
 		/* TX */
 		struct {
-			spinlock_t tx_clean_lock;
-			spinlock_t tx_lock;
 			bool detect_tx_hung;
 		};
 		/* RX */
@@ -277,6 +276,10 @@ struct igb_adapter {
 	/* for ioport free */
 	int bars;
 	int need_ioport;
+
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES];
+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
 };
 
 enum e1000_state_t {
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index afd4ce3f7b53..e11a5dae668a 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -103,7 +103,7 @@ static irqreturn_t igb_msix_rx(int irq, void *);
 static irqreturn_t igb_msix_tx(int irq, void *);
 static int igb_clean_rx_ring_msix(struct napi_struct *, int);
 static bool igb_clean_tx_irq(struct igb_ring *);
-static int igb_clean(struct napi_struct *, int);
+static int igb_poll(struct napi_struct *, int);
 static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
 static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
@@ -224,6 +224,11 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		return -ENOMEM;
 	}
 
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct igb_ring *ring = &(adapter->tx_ring[i]);
+		ring->adapter = adapter;
+		ring->queue_index = i;
+	}
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct igb_ring *ring = &(adapter->rx_ring[i]);
 		ring->adapter = adapter;
@@ -231,7 +236,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		ring->itr_register = E1000_ITR;
 
 		/* set a default napi handler for each rx_ring */
-		netif_napi_add(adapter->netdev, &ring->napi, igb_clean, 64);
+		netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
 	}
 	return 0;
 }
@@ -412,8 +417,14 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
 	/* If we can't do MSI-X, try MSI */
 msi_only:
 	adapter->num_rx_queues = 1;
+	adapter->num_tx_queues = 1;
 	if (!pci_enable_msi(adapter->pdev))
 		adapter->msi_enabled = 1;
+
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	/* Notify the stack of the (possibly) reduced Tx Queue count. */
+	adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
+#endif
 	return;
 }
 
@@ -693,6 +704,10 @@ void igb_down(struct igb_adapter *adapter)
 	/* flush and sleep below */
 
 	netif_stop_queue(netdev);
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		netif_stop_subqueue(netdev, i);
+#endif
 
 	/* disable transmits in the hardware */
 	tctl = rd32(E1000_TCTL);
@@ -895,7 +910,11 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	pci_save_state(pdev);
 
 	err = -ENOMEM;
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), IGB_MAX_TX_QUEUES);
+#else
 	netdev = alloc_etherdev(sizeof(struct igb_adapter));
+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
 	if (!netdev)
 		goto err_alloc_etherdev;
 
@@ -997,6 +1016,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	netdev->features |= NETIF_F_MULTI_QUEUE;
+#endif
+
 	netdev->features |= NETIF_F_LLTX;
 	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
 
@@ -1097,6 +1120,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	/* tell the stack to leave us alone until igb_open() is called */
 	netif_carrier_off(netdev);
 	netif_stop_queue(netdev);
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		netif_stop_subqueue(netdev, i);
+#endif
 
 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
@@ -1223,9 +1250,15 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 
 	/* Number of supported queues. */
 	/* Having more queues than CPUs doesn't make sense. */
+	adapter->num_rx_queues = min((u32)IGB_MAX_RX_QUEUES, (u32)num_online_cpus());
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	adapter->num_tx_queues = min(IGB_MAX_TX_QUEUES, num_online_cpus());
+#else
 	adapter->num_tx_queues = 1;
-	adapter->num_rx_queues = min(IGB_MAX_RX_QUEUES, num_online_cpus());
+#endif /* CONFIG_NET_MULTI_QUEUE_DEVICE */
 
+	/* This call may decrease the number of queues depending on
+	 * interrupt mode. */
 	igb_set_interrupt_capability(adapter);
 
 	if (igb_alloc_queues(adapter)) {
@@ -1386,8 +1419,6 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
 	tx_ring->adapter = adapter;
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
-	spin_lock_init(&tx_ring->tx_clean_lock);
-	spin_lock_init(&tx_ring->tx_lock);
 	return 0;
 
 err:
@@ -1407,6 +1438,9 @@ err:
 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
 {
 	int i, err = 0;
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	int r_idx;
+#endif
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
@@ -1419,6 +1453,12 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
 		}
 	}
 
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
+		r_idx = i % adapter->num_tx_queues;
+		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
+	}
+#endif
 	return err;
 }
 
@@ -2096,6 +2136,9 @@ static void igb_watchdog_task(struct work_struct *work)
 	struct e1000_mac_info *mac = &adapter->hw.mac;
 	u32 link;
 	s32 ret_val;
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	int i;
+#endif
 
 	if ((netif_carrier_ok(netdev)) &&
 	    (rd32(E1000_STATUS) & E1000_STATUS_LU))
@@ -2152,6 +2195,10 @@ static void igb_watchdog_task(struct work_struct *work)
 
 			netif_carrier_on(netdev);
 			netif_wake_queue(netdev);
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+			for (i = 0; i < adapter->num_tx_queues; i++)
+				netif_wake_subqueue(netdev, i);
+#endif
 
 			if (!test_bit(__IGB_DOWN, &adapter->state))
 				mod_timer(&adapter->phy_info_timer,
@@ -2164,6 +2211,10 @@ static void igb_watchdog_task(struct work_struct *work)
 			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
 			netif_carrier_off(netdev);
 			netif_stop_queue(netdev);
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+			for (i = 0; i < adapter->num_tx_queues; i++)
+				netif_stop_subqueue(netdev, i);
+#endif
 			if (!test_bit(__IGB_DOWN, &adapter->state))
 				mod_timer(&adapter->phy_info_timer,
 					  round_jiffies(jiffies + 2 * HZ));
@@ -2524,7 +2575,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
 	context_desc->seqnum_seed = 0;
 	context_desc->mss_l4len_idx =
-		cpu_to_le32(tx_ring->eims_value >> 4);
+		cpu_to_le32(tx_ring->queue_index << 4);
 
 	buffer_info->time_stamp = jiffies;
 	buffer_info->dma = 0;
@@ -2627,7 +2678,7 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
 
 	if (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
 			IGB_TX_FLAGS_VLAN))
-		olinfo_status |= tx_ring->eims_value >> 4;
+		olinfo_status |= tx_ring->queue_index << 4;
 
 	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
 
@@ -2663,7 +2714,12 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	netif_stop_subqueue(netdev, tx_ring->queue_index);
+#else
 	netif_stop_queue(netdev);
+#endif
+
 	/* Herbert's original patch had:
 	 * smp_mb__after_netif_stop_queue();
 	 * but since that doesn't exist yet, just open code it. */
@@ -2675,7 +2731,11 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
 		return -EBUSY;
 
 	/* A reprieve! */
-	netif_start_queue(netdev);
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	netif_wake_subqueue(netdev, tx_ring->queue_index);
+#else
+	netif_wake_queue(netdev);
+#endif
 	++adapter->restart_queue;
 	return 0;
 }
@@ -2697,7 +2757,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	unsigned int tx_flags = 0;
 	unsigned int len;
-	unsigned long irq_flags;
 	u8 hdr_len = 0;
 	int tso = 0;
 
@@ -2713,10 +2772,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}
 
-	if (!spin_trylock_irqsave(&tx_ring->tx_lock, irq_flags))
-		/* Collision - tell upper layer to requeue */
-		return NETDEV_TX_LOCKED;
-
 	/* need: 1 descriptor per page,
 	 *       + 2 desc gap to keep tail from touching head,
 	 *       + 1 desc for skb->data,
@@ -2724,7 +2779,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	 * otherwise try next time */
 	if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
 		/* this is a hard error */
-		spin_unlock_irqrestore(&tx_ring->tx_lock, irq_flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -2733,12 +2787,14 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
 		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
 	}
 
+	if (skb->protocol == htons(ETH_P_IP))
+		tx_flags |= IGB_TX_FLAGS_IPV4;
+
 	tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
 					    &hdr_len) : 0;
 
 	if (tso < 0) {
 		dev_kfree_skb_any(skb);
-		spin_unlock_irqrestore(&tx_ring->tx_lock, irq_flags);
 		return NETDEV_TX_OK;
 	}
 
@@ -2748,9 +2804,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		tx_flags |= IGB_TX_FLAGS_CSUM;
 
-	if (skb->protocol == htons(ETH_P_IP))
-		tx_flags |= IGB_TX_FLAGS_IPV4;
-
 	igb_tx_queue_adv(adapter, tx_ring, tx_flags,
 			 igb_tx_map_adv(adapter, tx_ring, skb),
 			 skb->len, hdr_len);
@@ -2760,14 +2813,22 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	/* Make sure there is space in the ring for the next send. */
 	igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
 
-	spin_unlock_irqrestore(&tx_ring->tx_lock, irq_flags);
 	return NETDEV_TX_OK;
 }
 
 static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	struct igb_ring *tx_ring = &adapter->tx_ring[0];
+	struct igb_ring *tx_ring;
+
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	int r_idx = 0;
+	r_idx = skb->queue_mapping & (IGB_MAX_TX_QUEUES - 1);
+	tx_ring = adapter->multi_tx_table[r_idx];
+#else
+	tx_ring = &adapter->tx_ring[0];
+#endif
+
 
 	/* This goes back to the question of how to logically map a tx queue
 	 * to a flow. Right now, performance is impacted slightly negatively
@@ -3035,7 +3096,7 @@ static irqreturn_t igb_msix_other(int irq, void *data)
 	/* guard against interrupt when we're going down */
 	if (!test_bit(__IGB_DOWN, &adapter->state))
 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
-	
+
 no_link_interrupt:
 	wr32(E1000_IMS, E1000_IMS_LSC);
 	wr32(E1000_EIMS, adapter->eims_other);
@@ -3054,12 +3115,15 @@ static irqreturn_t igb_msix_tx(int irq, void *data)
 
 	tx_ring->total_bytes = 0;
 	tx_ring->total_packets = 0;
+
+	/* auto mask will automatically reenable the interrupt when we write
+	 * EICS */
 	if (!igb_clean_tx_irq(tx_ring))
 		/* Ring was not completely cleaned, so fire another interrupt */
 		wr32(E1000_EICS, tx_ring->eims_value);
-
-	if (!tx_ring->itr_val)
+	else
 		wr32(E1000_EIMS, tx_ring->eims_value);
+
 	return IRQ_HANDLED;
 }
 
@@ -3163,42 +3227,24 @@ static irqreturn_t igb_intr(int irq, void *data)
 }
 
 /**
- * igb_clean - NAPI Rx polling callback
- * @adapter: board private structure
+ * igb_poll - NAPI Rx polling callback
+ * @napi: napi polling structure
+ * @budget: count of how many packets we should handle
  **/
-static int igb_clean(struct napi_struct *napi, int budget)
+static int igb_poll(struct napi_struct *napi, int budget)
 {
-	struct igb_adapter *adapter = container_of(napi, struct igb_adapter,
-						   napi);
+	struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
+	struct igb_adapter *adapter = rx_ring->adapter;
 	struct net_device *netdev = adapter->netdev;
-	int tx_clean_complete = 1, work_done = 0;
-	int i;
+	int tx_clean_complete, work_done = 0;
 
-	/* Must NOT use netdev_priv macro here. */
-	adapter = netdev->priv;
-
-	/* Keep link state information with original netdev */
-	if (!netif_carrier_ok(netdev))
-		goto quit_polling;
-
-	/* igb_clean is called per-cpu. This lock protects tx_ring[i] from
-	 * being cleaned by multiple cpus simultaneously. A failure obtaining
-	 * the lock means tx_ring[i] is currently being cleaned anyway. */
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		if (spin_trylock(&adapter->tx_ring[i].tx_clean_lock)) {
-			tx_clean_complete &= igb_clean_tx_irq(&adapter->tx_ring[i]);
-			spin_unlock(&adapter->tx_ring[i].tx_clean_lock);
-		}
-	}
-
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		igb_clean_rx_irq_adv(&adapter->rx_ring[i], &work_done,
-				     adapter->rx_ring[i].napi.weight);
+	/* this poll routine only supports one tx and one rx queue */
+	tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]);
+	igb_clean_rx_irq_adv(&adapter->rx_ring[0], &work_done, budget);
 
 	/* If no Tx and not enough Rx work done, exit the polling mode */
 	if ((tx_clean_complete && (work_done < budget)) ||
 	    !netif_running(netdev)) {
-quit_polling:
 		if (adapter->itr_setting & 3)
 			igb_set_itr(adapter, E1000_ITR, false);
 		netif_rx_complete(netdev, napi);
@@ -3327,11 +3373,19 @@ done_cleaning:
 		 * sees the new next_to_clean.
 		 */
 		smp_mb();
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+		    !(test_bit(__IGB_DOWN, &adapter->state))) {
+			netif_wake_subqueue(netdev, tx_ring->queue_index);
+			++adapter->restart_queue;
+		}
+#else
 		if (netif_queue_stopped(netdev) &&
 		    !(test_bit(__IGB_DOWN, &adapter->state))) {
 			netif_wake_queue(netdev);
 			++adapter->restart_queue;
 		}
+#endif
 	}
 
 	if (tx_ring->detect_tx_hung) {
@@ -3368,7 +3422,11 @@ done_cleaning:
 				tx_ring->buffer_info[i].time_stamp,
 				jiffies,
 				tx_desc->upper.fields.status);
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+			netif_stop_subqueue(netdev, tx_ring->queue_index);
+#else
 			netif_stop_queue(netdev);
+#endif
 		}
 	}
 	tx_ring->total_bytes += total_bytes;