aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ixgbe/ixgbe_main.c
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2009-04-27 18:42:37 -0400
committerDavid S. Miller <davem@davemloft.net>2009-04-28 04:53:16 -0400
commitf8212f979f777af2a8e3a9deb0c11a9fcf35e305 (patch)
tree4b92bdc66e3ed82208a8dcc28b3b9a0511621669 /drivers/net/ixgbe/ixgbe_main.c
parent45a5ead0220cc7cc70f6961879decffbd0a54cc0 (diff)
ixgbe: enable HW RSC for 82599
This patch enables hardware receive side coalescing for 82599 hardware. 82599 can merge multiple frames from the same TCP/IP flow into a single structure that can span one or more descriptors. The accumulated data is arranged similarly to how jumbo frames are arranged with the exception that other packets can be interleaved in between. To overcome this issue a next pointer is included in the written back descriptor which indicates the next descriptor in the writeback sequence. This feature sets the NETIF_F_LRO flag and clearing it via the ethtool set flags operation will also disable hardware RSC. Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c121
1 files changed, 110 insertions, 11 deletions
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index c3dff8f02e33..419ce472cef8 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -615,6 +615,40 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
615 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; 615 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
616} 616}
617 617
618static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
619{
620 return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
621 IXGBE_RXDADV_RSCCNT_MASK) >>
622 IXGBE_RXDADV_RSCCNT_SHIFT;
623}
624
625/**
626 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
627 * @skb: pointer to the last skb in the rsc queue
628 *
629 * This function changes a queue full of hw rsc buffers into a completed
630 * packet. It uses the ->prev pointers to find the first packet and then
631 * turns it into the frag list owner.
632 **/
633static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
634{
635 unsigned int frag_list_size = 0;
636
637 while (skb->prev) {
638 struct sk_buff *prev = skb->prev;
639 frag_list_size += skb->len;
640 skb->prev = NULL;
641 skb = prev;
642 }
643
644 skb_shinfo(skb)->frag_list = skb->next;
645 skb->next = NULL;
646 skb->len += frag_list_size;
647 skb->data_len += frag_list_size;
648 skb->truesize += frag_list_size;
649 return skb;
650}
651
618static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, 652static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
619 struct ixgbe_ring *rx_ring, 653 struct ixgbe_ring *rx_ring,
620 int *work_done, int work_to_do) 654 int *work_done, int work_to_do)
@@ -624,7 +658,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
624 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 658 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
625 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; 659 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
626 struct sk_buff *skb; 660 struct sk_buff *skb;
627 unsigned int i; 661 unsigned int i, rsc_count = 0;
628 u32 len, staterr; 662 u32 len, staterr;
629 u16 hdr_info; 663 u16 hdr_info;
630 bool cleaned = false; 664 bool cleaned = false;
@@ -690,20 +724,38 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
690 i++; 724 i++;
691 if (i == rx_ring->count) 725 if (i == rx_ring->count)
692 i = 0; 726 i = 0;
693 next_buffer = &rx_ring->rx_buffer_info[i];
694 727
695 next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i); 728 next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
696 prefetch(next_rxd); 729 prefetch(next_rxd);
697
698 cleaned_count++; 730 cleaned_count++;
731
732 if (adapter->flags & IXGBE_FLAG_RSC_CAPABLE)
733 rsc_count = ixgbe_get_rsc_count(rx_desc);
734
735 if (rsc_count) {
736 u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
737 IXGBE_RXDADV_NEXTP_SHIFT;
738 next_buffer = &rx_ring->rx_buffer_info[nextp];
739 rx_ring->rsc_count += (rsc_count - 1);
740 } else {
741 next_buffer = &rx_ring->rx_buffer_info[i];
742 }
743
699 if (staterr & IXGBE_RXD_STAT_EOP) { 744 if (staterr & IXGBE_RXD_STAT_EOP) {
745 if (skb->prev)
746 skb = ixgbe_transform_rsc_queue(skb);
700 rx_ring->stats.packets++; 747 rx_ring->stats.packets++;
701 rx_ring->stats.bytes += skb->len; 748 rx_ring->stats.bytes += skb->len;
702 } else { 749 } else {
703 rx_buffer_info->skb = next_buffer->skb; 750 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
704 rx_buffer_info->dma = next_buffer->dma; 751 rx_buffer_info->skb = next_buffer->skb;
705 next_buffer->skb = skb; 752 rx_buffer_info->dma = next_buffer->dma;
706 next_buffer->dma = 0; 753 next_buffer->skb = skb;
754 next_buffer->dma = 0;
755 } else {
756 skb->next = next_buffer->skb;
757 skb->next->prev = skb;
758 }
707 adapter->non_eop_descs++; 759 adapter->non_eop_descs++;
708 goto next_desc; 760 goto next_desc;
709 } 761 }
@@ -733,7 +785,7 @@ next_desc:
733 785
734 /* use prefetched values */ 786 /* use prefetched values */
735 rx_desc = next_rxd; 787 rx_desc = next_rxd;
736 rx_buffer_info = next_buffer; 788 rx_buffer_info = &rx_ring->rx_buffer_info[i];
737 789
738 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 790 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
739 } 791 }
@@ -1729,6 +1781,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1729 u32 fctrl, hlreg0; 1781 u32 fctrl, hlreg0;
1730 u32 reta = 0, mrqc = 0; 1782 u32 reta = 0, mrqc = 0;
1731 u32 rdrxctl; 1783 u32 rdrxctl;
1784 u32 rscctrl;
1732 int rx_buf_len; 1785 int rx_buf_len;
1733 1786
1734 /* Decide whether to use packet split mode or not */ 1787 /* Decide whether to use packet split mode or not */
@@ -1746,7 +1799,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1746 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 1799 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
1747 } 1800 }
1748 } else { 1801 } else {
1749 if (netdev->mtu <= ETH_DATA_LEN) 1802 if (!(adapter->flags & IXGBE_FLAG_RSC_ENABLED) &&
1803 (netdev->mtu <= ETH_DATA_LEN))
1750 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1804 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1751 else 1805 else
1752 rx_buf_len = ALIGN(max_frame, 1024); 1806 rx_buf_len = ALIGN(max_frame, 1024);
@@ -1868,8 +1922,38 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1868 if (hw->mac.type == ixgbe_mac_82599EB) { 1922 if (hw->mac.type == ixgbe_mac_82599EB) {
1869 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 1923 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1870 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; 1924 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
1925 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
1871 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 1926 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1872 } 1927 }
1928
1929 if (adapter->flags & IXGBE_FLAG_RSC_ENABLED) {
1930 /* Enable 82599 HW-RSC */
1931 for (i = 0; i < adapter->num_rx_queues; i++) {
1932 j = adapter->rx_ring[i].reg_idx;
1933 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
1934 rscctrl |= IXGBE_RSCCTL_RSCEN;
1935 /*
1936 * if packet split is enabled we can only support up
1937 * to max frags + 1 descriptors.
1938 */
1939 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
1940#if (MAX_SKB_FRAGS < 3)
1941 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
1942#elif (MAX_SKB_FRAGS < 7)
1943 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
1944#elif (MAX_SKB_FRAGS < 15)
1945 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
1946#else
1947 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
1948#endif
1949 else
1950 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
1951 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
1952 }
1953 /* Disable RSC for ACK packets */
1954 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
1955 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
1956 }
1873} 1957}
1874 1958
1875static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1959static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -2438,8 +2522,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
2438 rx_buffer_info->dma = 0; 2522 rx_buffer_info->dma = 0;
2439 } 2523 }
2440 if (rx_buffer_info->skb) { 2524 if (rx_buffer_info->skb) {
2441 dev_kfree_skb(rx_buffer_info->skb); 2525 struct sk_buff *skb = rx_buffer_info->skb;
2442 rx_buffer_info->skb = NULL; 2526 rx_buffer_info->skb = NULL;
2527 do {
2528 struct sk_buff *this = skb;
2529 skb = skb->prev;
2530 dev_kfree_skb(this);
2531 } while (skb);
2443 } 2532 }
2444 if (!rx_buffer_info->page) 2533 if (!rx_buffer_info->page)
2445 continue; 2534 continue;
@@ -3180,8 +3269,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
3180 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; 3269 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
3181 if (hw->mac.type == ixgbe_mac_82598EB) 3270 if (hw->mac.type == ixgbe_mac_82598EB)
3182 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; 3271 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
3183 else if (hw->mac.type == ixgbe_mac_82599EB) 3272 else if (hw->mac.type == ixgbe_mac_82599EB) {
3184 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 3273 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
3274 adapter->flags |= IXGBE_FLAG_RSC_CAPABLE;
3275 adapter->flags |= IXGBE_FLAG_RSC_ENABLED;
3276 }
3185 3277
3186#ifdef CONFIG_IXGBE_DCB 3278#ifdef CONFIG_IXGBE_DCB
3187 /* Configure DCB traffic classes */ 3279 /* Configure DCB traffic classes */
@@ -3765,9 +3857,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
3765 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; 3857 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
3766 3858
3767 if (hw->mac.type == ixgbe_mac_82599EB) { 3859 if (hw->mac.type == ixgbe_mac_82599EB) {
3860 u64 rsc_count = 0;
3768 for (i = 0; i < 16; i++) 3861 for (i = 0; i < 16; i++)
3769 adapter->hw_rx_no_dma_resources += 3862 adapter->hw_rx_no_dma_resources +=
3770 IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 3863 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3864 for (i = 0; i < adapter->num_rx_queues; i++)
3865 rsc_count += adapter->rx_ring[i].rsc_count;
3866 adapter->rsc_count = rsc_count;
3771 } 3867 }
3772 3868
3773 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 3869 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
@@ -4742,6 +4838,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
4742 if (pci_using_dac) 4838 if (pci_using_dac)
4743 netdev->features |= NETIF_F_HIGHDMA; 4839 netdev->features |= NETIF_F_HIGHDMA;
4744 4840
4841 if (adapter->flags & IXGBE_FLAG_RSC_ENABLED)
4842 netdev->features |= NETIF_F_LRO;
4843
4745 /* make sure the EEPROM is good */ 4844 /* make sure the EEPROM is good */
4746 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { 4845 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
4747 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); 4846 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");