diff options
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r-- | drivers/net/ixgbe/ixgbe.h | 9 | ||||
-rw-r--r-- | drivers/net/ixgbe/ixgbe_ethtool.c | 27 | ||||
-rw-r--r-- | drivers/net/ixgbe/ixgbe_main.c | 191 |
3 files changed, 134 insertions, 93 deletions
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index d98113472a89..956914a5028d 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/types.h> | 32 | #include <linux/types.h> |
33 | #include <linux/pci.h> | 33 | #include <linux/pci.h> |
34 | #include <linux/netdevice.h> | 34 | #include <linux/netdevice.h> |
35 | #include <linux/inet_lro.h> | ||
35 | 36 | ||
36 | #include "ixgbe_type.h" | 37 | #include "ixgbe_type.h" |
37 | #include "ixgbe_common.h" | 38 | #include "ixgbe_common.h" |
@@ -100,6 +101,9 @@ | |||
100 | #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 | 101 | #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 |
101 | #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 | 102 | #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 |
102 | 103 | ||
104 | #define IXGBE_MAX_LRO_DESCRIPTORS 8 | ||
105 | #define IXGBE_MAX_LRO_AGGREGATE 32 | ||
106 | |||
103 | /* wrapper around a pointer to a socket buffer, | 107 | /* wrapper around a pointer to a socket buffer, |
104 | * so a DMA handle can be stored along with the buffer */ | 108 | * so a DMA handle can be stored along with the buffer */ |
105 | struct ixgbe_tx_buffer { | 109 | struct ixgbe_tx_buffer { |
@@ -150,6 +154,8 @@ struct ixgbe_ring { | |||
150 | /* cpu for tx queue */ | 154 | /* cpu for tx queue */ |
151 | int cpu; | 155 | int cpu; |
152 | #endif | 156 | #endif |
157 | struct net_lro_mgr lro_mgr; | ||
158 | bool lro_used; | ||
153 | struct ixgbe_queue_stats stats; | 159 | struct ixgbe_queue_stats stats; |
154 | u8 v_idx; /* maps directly to the index for this ring in the hardware | 160 | u8 v_idx; /* maps directly to the index for this ring in the hardware |
155 | * vector array, can also be used for finding the bit in EICR | 161 | * vector array, can also be used for finding the bit in EICR |
@@ -287,6 +293,9 @@ struct ixgbe_adapter { | |||
287 | 293 | ||
288 | unsigned long state; | 294 | unsigned long state; |
289 | u64 tx_busy; | 295 | u64 tx_busy; |
296 | u64 lro_aggregated; | ||
297 | u64 lro_flushed; | ||
298 | u64 lro_no_desc; | ||
290 | }; | 299 | }; |
291 | 300 | ||
292 | enum ixbge_state_t { | 301 | enum ixbge_state_t { |
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 4e463778bcfd..3efe5dda10af 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -90,6 +90,8 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { | |||
90 | {"rx_header_split", IXGBE_STAT(rx_hdr_split)}, | 90 | {"rx_header_split", IXGBE_STAT(rx_hdr_split)}, |
91 | {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, | 91 | {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, |
92 | {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, | 92 | {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, |
93 | {"lro_aggregated", IXGBE_STAT(lro_aggregated)}, | ||
94 | {"lro_flushed", IXGBE_STAT(lro_flushed)}, | ||
93 | }; | 95 | }; |
94 | 96 | ||
95 | #define IXGBE_QUEUE_STATS_LEN \ | 97 | #define IXGBE_QUEUE_STATS_LEN \ |
@@ -250,22 +252,10 @@ static int ixgbe_set_tso(struct net_device *netdev, u32 data) | |||
250 | netdev->features |= NETIF_F_TSO; | 252 | netdev->features |= NETIF_F_TSO; |
251 | netdev->features |= NETIF_F_TSO6; | 253 | netdev->features |= NETIF_F_TSO6; |
252 | } else { | 254 | } else { |
253 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | 255 | netif_tx_stop_all_queues(netdev); |
254 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
255 | int i; | ||
256 | #endif | ||
257 | netif_stop_queue(netdev); | ||
258 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
259 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
260 | netif_stop_subqueue(netdev, i); | ||
261 | #endif | ||
262 | netdev->features &= ~NETIF_F_TSO; | 256 | netdev->features &= ~NETIF_F_TSO; |
263 | netdev->features &= ~NETIF_F_TSO6; | 257 | netdev->features &= ~NETIF_F_TSO6; |
264 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | 258 | netif_tx_start_all_queues(netdev); |
265 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
266 | netif_start_subqueue(netdev, i); | ||
267 | #endif | ||
268 | netif_start_queue(netdev); | ||
269 | } | 259 | } |
270 | return 0; | 260 | return 0; |
271 | } | 261 | } |
@@ -787,6 +777,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, | |||
787 | int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64); | 777 | int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64); |
788 | int j, k; | 778 | int j, k; |
789 | int i; | 779 | int i; |
780 | u64 aggregated = 0, flushed = 0, no_desc = 0; | ||
790 | 781 | ||
791 | ixgbe_update_stats(adapter); | 782 | ixgbe_update_stats(adapter); |
792 | for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { | 783 | for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { |
@@ -801,11 +792,17 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, | |||
801 | i += k; | 792 | i += k; |
802 | } | 793 | } |
803 | for (j = 0; j < adapter->num_rx_queues; j++) { | 794 | for (j = 0; j < adapter->num_rx_queues; j++) { |
795 | aggregated += adapter->rx_ring[j].lro_mgr.stats.aggregated; | ||
796 | flushed += adapter->rx_ring[j].lro_mgr.stats.flushed; | ||
797 | no_desc += adapter->rx_ring[j].lro_mgr.stats.no_desc; | ||
804 | queue_stat = (u64 *)&adapter->rx_ring[j].stats; | 798 | queue_stat = (u64 *)&adapter->rx_ring[j].stats; |
805 | for (k = 0; k < stat_count; k++) | 799 | for (k = 0; k < stat_count; k++) |
806 | data[i + k] = queue_stat[k]; | 800 | data[i + k] = queue_stat[k]; |
807 | i += k; | 801 | i += k; |
808 | } | 802 | } |
803 | adapter->lro_aggregated = aggregated; | ||
804 | adapter->lro_flushed = flushed; | ||
805 | adapter->lro_no_desc = no_desc; | ||
809 | } | 806 | } |
810 | 807 | ||
811 | static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, | 808 | static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, |
@@ -973,6 +970,8 @@ static struct ethtool_ops ixgbe_ethtool_ops = { | |||
973 | .get_ethtool_stats = ixgbe_get_ethtool_stats, | 970 | .get_ethtool_stats = ixgbe_get_ethtool_stats, |
974 | .get_coalesce = ixgbe_get_coalesce, | 971 | .get_coalesce = ixgbe_get_coalesce, |
975 | .set_coalesce = ixgbe_set_coalesce, | 972 | .set_coalesce = ixgbe_set_coalesce, |
973 | .get_flags = ethtool_op_get_flags, | ||
974 | .set_flags = ethtool_op_set_flags, | ||
976 | }; | 975 | }; |
977 | 976 | ||
978 | void ixgbe_set_ethtool_ops(struct net_device *netdev) | 977 | void ixgbe_set_ethtool_ops(struct net_device *netdev) |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 8f0460901153..be7b723c924f 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -266,28 +266,16 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter, | |||
266 | * sees the new next_to_clean. | 266 | * sees the new next_to_clean. |
267 | */ | 267 | */ |
268 | smp_mb(); | 268 | smp_mb(); |
269 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
270 | if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && | 269 | if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && |
271 | !test_bit(__IXGBE_DOWN, &adapter->state)) { | 270 | !test_bit(__IXGBE_DOWN, &adapter->state)) { |
272 | netif_wake_subqueue(netdev, tx_ring->queue_index); | 271 | netif_wake_subqueue(netdev, tx_ring->queue_index); |
273 | adapter->restart_queue++; | 272 | adapter->restart_queue++; |
274 | } | 273 | } |
275 | #else | ||
276 | if (netif_queue_stopped(netdev) && | ||
277 | !test_bit(__IXGBE_DOWN, &adapter->state)) { | ||
278 | netif_wake_queue(netdev); | ||
279 | adapter->restart_queue++; | ||
280 | } | ||
281 | #endif | ||
282 | } | 274 | } |
283 | 275 | ||
284 | if (adapter->detect_tx_hung) | 276 | if (adapter->detect_tx_hung) |
285 | if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc)) | 277 | if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc)) |
286 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
287 | netif_stop_subqueue(netdev, tx_ring->queue_index); | 278 | netif_stop_subqueue(netdev, tx_ring->queue_index); |
288 | #else | ||
289 | netif_stop_queue(netdev); | ||
290 | #endif | ||
291 | 279 | ||
292 | if (total_tx_packets >= tx_ring->work_limit) | 280 | if (total_tx_packets >= tx_ring->work_limit) |
293 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value); | 281 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value); |
@@ -389,24 +377,39 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) | |||
389 | * ixgbe_receive_skb - Send a completed packet up the stack | 377 | * ixgbe_receive_skb - Send a completed packet up the stack |
390 | * @adapter: board private structure | 378 | * @adapter: board private structure |
391 | * @skb: packet to send up | 379 | * @skb: packet to send up |
392 | * @is_vlan: packet has a VLAN tag | 380 | * @status: hardware indication of status of receive |
393 | * @tag: VLAN tag from descriptor | 381 | * @rx_ring: rx descriptor ring (for a specific queue) to setup |
382 | * @rx_desc: rx descriptor | ||
394 | **/ | 383 | **/ |
395 | static void ixgbe_receive_skb(struct ixgbe_adapter *adapter, | 384 | static void ixgbe_receive_skb(struct ixgbe_adapter *adapter, |
396 | struct sk_buff *skb, bool is_vlan, | 385 | struct sk_buff *skb, u8 status, |
397 | u16 tag) | 386 | struct ixgbe_ring *ring, |
387 | union ixgbe_adv_rx_desc *rx_desc) | ||
398 | { | 388 | { |
399 | if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { | 389 | bool is_vlan = (status & IXGBE_RXD_STAT_VP); |
400 | if (adapter->vlgrp && is_vlan) | 390 | u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); |
401 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag); | ||
402 | else | ||
403 | netif_receive_skb(skb); | ||
404 | } else { | ||
405 | 391 | ||
392 | if (adapter->netdev->features & NETIF_F_LRO && | ||
393 | skb->ip_summed == CHECKSUM_UNNECESSARY) { | ||
406 | if (adapter->vlgrp && is_vlan) | 394 | if (adapter->vlgrp && is_vlan) |
407 | vlan_hwaccel_rx(skb, adapter->vlgrp, tag); | 395 | lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb, |
396 | adapter->vlgrp, tag, | ||
397 | rx_desc); | ||
408 | else | 398 | else |
409 | netif_rx(skb); | 399 | lro_receive_skb(&ring->lro_mgr, skb, rx_desc); |
400 | ring->lro_used = true; | ||
401 | } else { | ||
402 | if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { | ||
403 | if (adapter->vlgrp && is_vlan) | ||
404 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag); | ||
405 | else | ||
406 | netif_receive_skb(skb); | ||
407 | } else { | ||
408 | if (adapter->vlgrp && is_vlan) | ||
409 | vlan_hwaccel_rx(skb, adapter->vlgrp, tag); | ||
410 | else | ||
411 | netif_rx(skb); | ||
412 | } | ||
410 | } | 413 | } |
411 | } | 414 | } |
412 | 415 | ||
@@ -546,8 +549,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, | |||
546 | struct sk_buff *skb; | 549 | struct sk_buff *skb; |
547 | unsigned int i; | 550 | unsigned int i; |
548 | u32 upper_len, len, staterr; | 551 | u32 upper_len, len, staterr; |
549 | u16 hdr_info, vlan_tag; | 552 | u16 hdr_info; |
550 | bool is_vlan, cleaned = false; | 553 | bool cleaned = false; |
551 | int cleaned_count = 0; | 554 | int cleaned_count = 0; |
552 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; | 555 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; |
553 | 556 | ||
@@ -556,8 +559,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, | |||
556 | rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); | 559 | rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); |
557 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); | 560 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); |
558 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; | 561 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; |
559 | is_vlan = (staterr & IXGBE_RXD_STAT_VP); | ||
560 | vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan); | ||
561 | 562 | ||
562 | while (staterr & IXGBE_RXD_STAT_DD) { | 563 | while (staterr & IXGBE_RXD_STAT_DD) { |
563 | if (*work_done >= work_to_do) | 564 | if (*work_done >= work_to_do) |
@@ -635,7 +636,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, | |||
635 | total_rx_packets++; | 636 | total_rx_packets++; |
636 | 637 | ||
637 | skb->protocol = eth_type_trans(skb, netdev); | 638 | skb->protocol = eth_type_trans(skb, netdev); |
638 | ixgbe_receive_skb(adapter, skb, is_vlan, vlan_tag); | 639 | ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc); |
639 | netdev->last_rx = jiffies; | 640 | netdev->last_rx = jiffies; |
640 | 641 | ||
641 | next_desc: | 642 | next_desc: |
@@ -652,8 +653,11 @@ next_desc: | |||
652 | rx_buffer_info = next_buffer; | 653 | rx_buffer_info = next_buffer; |
653 | 654 | ||
654 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); | 655 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); |
655 | is_vlan = (staterr & IXGBE_RXD_STAT_VP); | 656 | } |
656 | vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan); | 657 | |
658 | if (rx_ring->lro_used) { | ||
659 | lro_flush_all(&rx_ring->lro_mgr); | ||
660 | rx_ring->lro_used = false; | ||
657 | } | 661 | } |
658 | 662 | ||
659 | rx_ring->next_to_clean = i; | 663 | rx_ring->next_to_clean = i; |
@@ -1382,6 +1386,33 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) | |||
1382 | 1386 | ||
1383 | #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 | 1387 | #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 |
1384 | /** | 1388 | /** |
1389 | * ixgbe_get_skb_hdr - helper function for LRO header processing | ||
1390 | * @skb: pointer to sk_buff to be added to LRO packet | ||
1390 | * @iphdr: pointer to ip header structure | ||
1392 | * @tcph: pointer to tcp header structure | ||
1393 | * @hdr_flags: pointer to header flags | ||
1394 | * @priv: private data | ||
1395 | **/ | ||
1396 | static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph, | ||
1397 | u64 *hdr_flags, void *priv) | ||
1398 | { | ||
1399 | union ixgbe_adv_rx_desc *rx_desc = priv; | ||
1400 | |||
1401 | /* Verify that this is a valid IPv4 TCP packet */ | ||
1402 | if (!(rx_desc->wb.lower.lo_dword.pkt_info & | ||
1403 | (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP))) | ||
1404 | return -1; | ||
1405 | |||
1406 | /* Set network headers */ | ||
1407 | skb_reset_network_header(skb); | ||
1408 | skb_set_transport_header(skb, ip_hdrlen(skb)); | ||
1409 | *iphdr = ip_hdr(skb); | ||
1410 | *tcph = tcp_hdr(skb); | ||
1411 | *hdr_flags = LRO_IPV4 | LRO_TCP; | ||
1412 | return 0; | ||
1413 | } | ||
1414 | |||
1415 | /** | ||
1385 | * ixgbe_configure_rx - Configure 8254x Receive Unit after Reset | 1416 | * ixgbe_configure_rx - Configure 8254x Receive Unit after Reset |
1386 | * @adapter: board private structure | 1417 | * @adapter: board private structure |
1387 | * | 1418 | * |
@@ -1470,6 +1501,17 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
1470 | adapter->rx_ring[i].tail = IXGBE_RDT(i); | 1501 | adapter->rx_ring[i].tail = IXGBE_RDT(i); |
1471 | } | 1502 | } |
1472 | 1503 | ||
1504 | /* Initial LRO Settings */ | ||
1505 | adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE; | ||
1506 | adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS; | ||
1507 | adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr; | ||
1508 | adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID; | ||
1509 | if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) | ||
1510 | adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI; | ||
1511 | adapter->rx_ring[i].lro_mgr.dev = adapter->netdev; | ||
1512 | adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; | ||
1513 | adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; | ||
1514 | |||
1473 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | 1515 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { |
1474 | /* Fill out redirection table */ | 1516 | /* Fill out redirection table */ |
1475 | for (i = 0, j = 0; i < 128; i++, j++) { | 1517 | for (i = 0, j = 0; i < 128; i++, j++) { |
@@ -1532,7 +1574,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev, | |||
1532 | if (grp) { | 1574 | if (grp) { |
1533 | /* enable VLAN tag insert/strip */ | 1575 | /* enable VLAN tag insert/strip */ |
1534 | ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); | 1576 | ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); |
1535 | ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; | 1577 | ctrl |= IXGBE_VLNCTRL_VME; |
1536 | ctrl &= ~IXGBE_VLNCTRL_CFIEN; | 1578 | ctrl &= ~IXGBE_VLNCTRL_CFIEN; |
1537 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); | 1579 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); |
1538 | } | 1580 | } |
@@ -1603,11 +1645,15 @@ static void ixgbe_set_multi(struct net_device *netdev) | |||
1603 | 1645 | ||
1604 | if (netdev->flags & IFF_PROMISC) { | 1646 | if (netdev->flags & IFF_PROMISC) { |
1605 | fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); | 1647 | fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); |
1606 | } else if (netdev->flags & IFF_ALLMULTI) { | 1648 | fctrl &= ~IXGBE_VLNCTRL_VFE; |
1607 | fctrl |= IXGBE_FCTRL_MPE; | ||
1608 | fctrl &= ~IXGBE_FCTRL_UPE; | ||
1609 | } else { | 1649 | } else { |
1610 | fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); | 1650 | if (netdev->flags & IFF_ALLMULTI) { |
1651 | fctrl |= IXGBE_FCTRL_MPE; | ||
1652 | fctrl &= ~IXGBE_FCTRL_UPE; | ||
1653 | } else { | ||
1654 | fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); | ||
1655 | } | ||
1656 | fctrl |= IXGBE_VLNCTRL_VFE; | ||
1611 | } | 1657 | } |
1612 | 1658 | ||
1613 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); | 1659 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); |
@@ -1967,7 +2013,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
1967 | del_timer_sync(&adapter->watchdog_timer); | 2013 | del_timer_sync(&adapter->watchdog_timer); |
1968 | 2014 | ||
1969 | netif_carrier_off(netdev); | 2015 | netif_carrier_off(netdev); |
1970 | netif_stop_queue(netdev); | 2016 | netif_tx_stop_all_queues(netdev); |
1971 | 2017 | ||
1972 | if (!pci_channel_offline(adapter->pdev)) | 2018 | if (!pci_channel_offline(adapter->pdev)) |
1973 | ixgbe_reset(adapter); | 2019 | ixgbe_reset(adapter); |
@@ -2138,11 +2184,7 @@ static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter) | |||
2138 | case (IXGBE_FLAG_RSS_ENABLED): | 2184 | case (IXGBE_FLAG_RSS_ENABLED): |
2139 | rss_m = 0xF; | 2185 | rss_m = 0xF; |
2140 | nrq = rss_i; | 2186 | nrq = rss_i; |
2141 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
2142 | ntq = rss_i; | 2187 | ntq = rss_i; |
2143 | #else | ||
2144 | ntq = 1; | ||
2145 | #endif | ||
2146 | break; | 2188 | break; |
2147 | case 0: | 2189 | case 0: |
2148 | default: | 2190 | default: |
@@ -2316,10 +2358,8 @@ try_msi: | |||
2316 | } | 2358 | } |
2317 | 2359 | ||
2318 | out: | 2360 | out: |
2319 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
2320 | /* Notify the stack of the (possibly) reduced Tx Queue count. */ | 2361 | /* Notify the stack of the (possibly) reduced Tx Queue count. */ |
2321 | adapter->netdev->egress_subqueue_count = adapter->num_tx_queues; | 2362 | adapter->netdev->real_num_tx_queues = adapter->num_tx_queues; |
2322 | #endif | ||
2323 | 2363 | ||
2324 | return err; | 2364 | return err; |
2325 | } | 2365 | } |
@@ -2490,12 +2530,18 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, | |||
2490 | struct pci_dev *pdev = adapter->pdev; | 2530 | struct pci_dev *pdev = adapter->pdev; |
2491 | int size; | 2531 | int size; |
2492 | 2532 | ||
2533 | size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS; | ||
2534 | rxdr->lro_mgr.lro_arr = vmalloc(size); | ||
2535 | if (!rxdr->lro_mgr.lro_arr) | ||
2536 | return -ENOMEM; | ||
2537 | memset(rxdr->lro_mgr.lro_arr, 0, size); | ||
2538 | |||
2493 | size = sizeof(struct ixgbe_rx_buffer) * rxdr->count; | 2539 | size = sizeof(struct ixgbe_rx_buffer) * rxdr->count; |
2494 | rxdr->rx_buffer_info = vmalloc(size); | 2540 | rxdr->rx_buffer_info = vmalloc(size); |
2495 | if (!rxdr->rx_buffer_info) { | 2541 | if (!rxdr->rx_buffer_info) { |
2496 | DPRINTK(PROBE, ERR, | 2542 | DPRINTK(PROBE, ERR, |
2497 | "vmalloc allocation failed for the rx desc ring\n"); | 2543 | "vmalloc allocation failed for the rx desc ring\n"); |
2498 | return -ENOMEM; | 2544 | goto alloc_failed; |
2499 | } | 2545 | } |
2500 | memset(rxdr->rx_buffer_info, 0, size); | 2546 | memset(rxdr->rx_buffer_info, 0, size); |
2501 | 2547 | ||
@@ -2509,13 +2555,18 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, | |||
2509 | DPRINTK(PROBE, ERR, | 2555 | DPRINTK(PROBE, ERR, |
2510 | "Memory allocation failed for the rx desc ring\n"); | 2556 | "Memory allocation failed for the rx desc ring\n"); |
2511 | vfree(rxdr->rx_buffer_info); | 2557 | vfree(rxdr->rx_buffer_info); |
2512 | return -ENOMEM; | 2558 | goto alloc_failed; |
2513 | } | 2559 | } |
2514 | 2560 | ||
2515 | rxdr->next_to_clean = 0; | 2561 | rxdr->next_to_clean = 0; |
2516 | rxdr->next_to_use = 0; | 2562 | rxdr->next_to_use = 0; |
2517 | 2563 | ||
2518 | return 0; | 2564 | return 0; |
2565 | |||
2566 | alloc_failed: | ||
2567 | vfree(rxdr->lro_mgr.lro_arr); | ||
2568 | rxdr->lro_mgr.lro_arr = NULL; | ||
2569 | return -ENOMEM; | ||
2519 | } | 2570 | } |
2520 | 2571 | ||
2521 | /** | 2572 | /** |
@@ -2566,6 +2617,9 @@ static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, | |||
2566 | { | 2617 | { |
2567 | struct pci_dev *pdev = adapter->pdev; | 2618 | struct pci_dev *pdev = adapter->pdev; |
2568 | 2619 | ||
2620 | vfree(rx_ring->lro_mgr.lro_arr); | ||
2621 | rx_ring->lro_mgr.lro_arr = NULL; | ||
2622 | |||
2569 | ixgbe_clean_rx_ring(adapter, rx_ring); | 2623 | ixgbe_clean_rx_ring(adapter, rx_ring); |
2570 | 2624 | ||
2571 | vfree(rx_ring->rx_buffer_info); | 2625 | vfree(rx_ring->rx_buffer_info); |
@@ -2711,6 +2765,8 @@ static int ixgbe_open(struct net_device *netdev) | |||
2711 | if (err) | 2765 | if (err) |
2712 | goto err_up; | 2766 | goto err_up; |
2713 | 2767 | ||
2768 | netif_tx_start_all_queues(netdev); | ||
2769 | |||
2714 | return 0; | 2770 | return 0; |
2715 | 2771 | ||
2716 | err_up: | 2772 | err_up: |
@@ -2842,9 +2898,6 @@ static void ixgbe_watchdog(unsigned long data) | |||
2842 | struct net_device *netdev = adapter->netdev; | 2898 | struct net_device *netdev = adapter->netdev; |
2843 | bool link_up; | 2899 | bool link_up; |
2844 | u32 link_speed = 0; | 2900 | u32 link_speed = 0; |
2845 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
2846 | int i; | ||
2847 | #endif | ||
2848 | 2901 | ||
2849 | adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up); | 2902 | adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up); |
2850 | 2903 | ||
@@ -2865,11 +2918,7 @@ static void ixgbe_watchdog(unsigned long data) | |||
2865 | (FLOW_TX ? "TX" : "None")))); | 2918 | (FLOW_TX ? "TX" : "None")))); |
2866 | 2919 | ||
2867 | netif_carrier_on(netdev); | 2920 | netif_carrier_on(netdev); |
2868 | netif_wake_queue(netdev); | 2921 | netif_tx_wake_all_queues(netdev); |
2869 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
2870 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
2871 | netif_wake_subqueue(netdev, i); | ||
2872 | #endif | ||
2873 | } else { | 2922 | } else { |
2874 | /* Force detection of hung controller */ | 2923 | /* Force detection of hung controller */ |
2875 | adapter->detect_tx_hung = true; | 2924 | adapter->detect_tx_hung = true; |
@@ -2878,7 +2927,7 @@ static void ixgbe_watchdog(unsigned long data) | |||
2878 | if (netif_carrier_ok(netdev)) { | 2927 | if (netif_carrier_ok(netdev)) { |
2879 | DPRINTK(LINK, INFO, "NIC Link is Down\n"); | 2928 | DPRINTK(LINK, INFO, "NIC Link is Down\n"); |
2880 | netif_carrier_off(netdev); | 2929 | netif_carrier_off(netdev); |
2881 | netif_stop_queue(netdev); | 2930 | netif_tx_stop_all_queues(netdev); |
2882 | } | 2931 | } |
2883 | } | 2932 | } |
2884 | 2933 | ||
@@ -3196,11 +3245,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev, | |||
3196 | { | 3245 | { |
3197 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 3246 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
3198 | 3247 | ||
3199 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
3200 | netif_stop_subqueue(netdev, tx_ring->queue_index); | 3248 | netif_stop_subqueue(netdev, tx_ring->queue_index); |
3201 | #else | ||
3202 | netif_stop_queue(netdev); | ||
3203 | #endif | ||
3204 | /* Herbert's original patch had: | 3249 | /* Herbert's original patch had: |
3205 | * smp_mb__after_netif_stop_queue(); | 3250 | * smp_mb__after_netif_stop_queue(); |
3206 | * but since that doesn't exist yet, just open code it. */ | 3251 | * but since that doesn't exist yet, just open code it. */ |
@@ -3212,11 +3257,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev, | |||
3212 | return -EBUSY; | 3257 | return -EBUSY; |
3213 | 3258 | ||
3214 | /* A reprieve! - use start_queue because it doesn't call schedule */ | 3259 | /* A reprieve! - use start_queue because it doesn't call schedule */ |
3215 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
3216 | netif_wake_subqueue(netdev, tx_ring->queue_index); | 3260 | netif_wake_subqueue(netdev, tx_ring->queue_index); |
3217 | #else | ||
3218 | netif_wake_queue(netdev); | ||
3219 | #endif | ||
3220 | ++adapter->restart_queue; | 3261 | ++adapter->restart_queue; |
3221 | return 0; | 3262 | return 0; |
3222 | } | 3263 | } |
@@ -3244,9 +3285,7 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3244 | unsigned int f; | 3285 | unsigned int f; |
3245 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; | 3286 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; |
3246 | len -= skb->data_len; | 3287 | len -= skb->data_len; |
3247 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
3248 | r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping; | 3288 | r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping; |
3249 | #endif | ||
3250 | tx_ring = &adapter->tx_ring[r_idx]; | 3289 | tx_ring = &adapter->tx_ring[r_idx]; |
3251 | 3290 | ||
3252 | 3291 | ||
@@ -3434,11 +3473,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3434 | pci_set_master(pdev); | 3473 | pci_set_master(pdev); |
3435 | pci_save_state(pdev); | 3474 | pci_save_state(pdev); |
3436 | 3475 | ||
3437 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
3438 | netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES); | 3476 | netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES); |
3439 | #else | ||
3440 | netdev = alloc_etherdev(sizeof(struct ixgbe_adapter)); | ||
3441 | #endif | ||
3442 | if (!netdev) { | 3477 | if (!netdev) { |
3443 | err = -ENOMEM; | 3478 | err = -ENOMEM; |
3444 | goto err_alloc_etherdev; | 3479 | goto err_alloc_etherdev; |
@@ -3518,16 +3553,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3518 | NETIF_F_HW_VLAN_RX | | 3553 | NETIF_F_HW_VLAN_RX | |
3519 | NETIF_F_HW_VLAN_FILTER; | 3554 | NETIF_F_HW_VLAN_FILTER; |
3520 | 3555 | ||
3556 | netdev->features |= NETIF_F_LRO; | ||
3521 | netdev->features |= NETIF_F_TSO; | 3557 | netdev->features |= NETIF_F_TSO; |
3522 | |||
3523 | netdev->features |= NETIF_F_TSO6; | 3558 | netdev->features |= NETIF_F_TSO6; |
3559 | |||
3560 | netdev->vlan_features |= NETIF_F_TSO; | ||
3561 | netdev->vlan_features |= NETIF_F_TSO6; | ||
3562 | netdev->vlan_features |= NETIF_F_HW_CSUM; | ||
3563 | netdev->vlan_features |= NETIF_F_SG; | ||
3564 | |||
3524 | if (pci_using_dac) | 3565 | if (pci_using_dac) |
3525 | netdev->features |= NETIF_F_HIGHDMA; | 3566 | netdev->features |= NETIF_F_HIGHDMA; |
3526 | 3567 | ||
3527 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
3528 | netdev->features |= NETIF_F_MULTI_QUEUE; | ||
3529 | #endif | ||
3530 | |||
3531 | /* make sure the EEPROM is good */ | 3568 | /* make sure the EEPROM is good */ |
3532 | if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { | 3569 | if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { |
3533 | dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); | 3570 | dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); |
@@ -3593,11 +3630,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3593 | ixgbe_start_hw(hw); | 3630 | ixgbe_start_hw(hw); |
3594 | 3631 | ||
3595 | netif_carrier_off(netdev); | 3632 | netif_carrier_off(netdev); |
3596 | netif_stop_queue(netdev); | 3633 | netif_tx_stop_all_queues(netdev); |
3597 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
3598 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
3599 | netif_stop_subqueue(netdev, i); | ||
3600 | #endif | ||
3601 | 3634 | ||
3602 | ixgbe_napi_add_all(adapter); | 3635 | ixgbe_napi_add_all(adapter); |
3603 | 3636 | ||