author	Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>	2005-10-04 07:05:44 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-10-04 07:05:44 -0400
commit	e4c811c9d2f2728ce15440c99b3b44b72799b43f (patch)
tree	0806fd1239c088d970db38c9a5bfec9b35ce3477 /drivers/net
parent	2ae76d98fb9f0a9226dd62cf0a0b7547507d2862 (diff)
e1000: Fixes for packet split related issues
Fixes for packet-split related issues:

* On platforms where PAGE_SIZE > 4K, the driver now uses only the required
  number of pages instead of always using 3 pages.
* Packet split is not used if PAGE_SIZE is > 16K.
* Adds a statistics counter for header splits.
* Sets a non-NULL pointer for the zero-sized buffers, to resolve the packet
  split receive descriptor error.
* When the number of pages needed is calculated, the header buffer is not
  included for a given MTU.

Signed-off-by: Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>
Signed-off-by: Ganesh Venkatesan <ganesh.venkatesan@intel.com>
Signed-off-by: John Ronciak <john.ronciak@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
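For reference, the page-count logic described above can be illustrated with a minimal standalone sketch (not part of the patch). It mirrors the PAGE_USE_COUNT() macro and the eligibility check added to e1000_setup_rctl(); the page shift, the sample MTU, and the omission of the mac_type check are illustrative assumptions.

#include <stdio.h>

/* Illustrative sketch only -- mirrors the PAGE_USE_COUNT() macro added by
 * this patch: whole pages needed to hold S bytes (header buffer excluded). */
#define EX_PAGE_SHIFT 12                              /* assume 4 KB pages */
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)
#define PAGE_USE_COUNT(S) (((S) >> EX_PAGE_SHIFT) + \
                           (((S) & (EX_PAGE_SIZE - 1)) ? 1 : 0))

int main(void)
{
        unsigned int mtu = 9000;                  /* sample jumbo-frame MTU */
        unsigned int pages = PAGE_USE_COUNT(mtu); /* 9000 bytes -> 3 pages  */
        unsigned int rx_ps_pages;

        /* Packet split is enabled only when at most 3 pages are needed and
         * the page size does not exceed 16 KB (mac_type check omitted here). */
        if (pages <= 3 && EX_PAGE_SIZE <= 16384)
                rx_ps_pages = pages;
        else
                rx_ps_pages = 0;

        printf("mtu=%u -> pages=%u, rx_ps_pages=%u\n", mtu, pages, rx_ps_pages);
        return 0;
}

With 4 KB pages a 9000-byte MTU still qualifies for packet split (3 pages), whereas on a 64 KB-page platform PAGE_USE_COUNT() returns 1 but the 16 KB cap disables packet split, matching the intent of the patch.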
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/e1000/e1000.h	 7
-rw-r--r--	drivers/net/e1000/e1000_ethtool.c	 3
-rw-r--r--	drivers/net/e1000/e1000_main.c	88
3 files changed, 60 insertions(+), 38 deletions(-)
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 9b7274b111f3..3f653a93e1bc 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -169,8 +169,8 @@ struct e1000_buffer {
     uint16_t next_to_watch;
 };
 
-struct e1000_ps_page { struct page *ps_page[MAX_PS_BUFFERS]; };
-struct e1000_ps_page_dma { uint64_t ps_page_dma[MAX_PS_BUFFERS]; };
+struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
+struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
 
 struct e1000_tx_ring {
     /* pointer to the descriptor ring memory */
@@ -300,10 +300,11 @@ struct e1000_adapter {
 
     uint64_t hw_csum_err;
     uint64_t hw_csum_good;
+    uint64_t rx_hdr_split;
     uint32_t rx_int_delay;
     uint32_t rx_abs_int_delay;
     boolean_t rx_csum;
-    boolean_t rx_ps;
+    unsigned int rx_ps_pages;
     uint32_t gorcl;
     uint64_t gorcl_old;
     uint16_t rx_ps_bsize0;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 6e7e34f59a34..183b583df1d7 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -91,7 +91,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
     { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
     { "rx_long_byte_count", E1000_STAT(stats.gorcl) },
     { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
-    { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }
+    { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
+    { "rx_header_split", E1000_STAT(rx_hdr_split) },
 };
 #define E1000_STATS_LEN \
     sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ad92115f20a0..090229f7a95c 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1484,12 +1484,16 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
  * e1000_setup_rctl - configure the receive control registers
  * @adapter: Board private structure
  **/
-
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+            (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
 static void
 e1000_setup_rctl(struct e1000_adapter *adapter)
 {
     uint32_t rctl, rfctl;
     uint32_t psrctl = 0;
+#ifdef CONFIG_E1000_PACKET_SPLIT
+    uint32_t pages = 0;
+#endif
 
     rctl = E1000_READ_REG(&adapter->hw, RCTL);
 
@@ -1543,11 +1547,14 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
     * followed by the page buffers.  Therefore, skb->data is
     * sized to hold the largest protocol header.
     */
-    adapter->rx_ps = (adapter->hw.mac_type > e1000_82547_rev_2)
-              && (adapter->netdev->mtu
-                  < ((3 * PAGE_SIZE) + adapter->rx_ps_bsize0));
+    pages = PAGE_USE_COUNT(adapter->netdev->mtu);
+    if ((adapter->hw.mac_type > e1000_82547_rev_2) && (pages <= 3) &&
+        PAGE_SIZE <= 16384)
+        adapter->rx_ps_pages = pages;
+    else
+        adapter->rx_ps_pages = 0;
 #endif
-    if(adapter->rx_ps) {
+    if (adapter->rx_ps_pages) {
         /* Configure extra packet-split registers */
         rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
         rfctl |= E1000_RFCTL_EXTEN;
@@ -1559,12 +1566,19 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
 
         psrctl |= adapter->rx_ps_bsize0 >>
             E1000_PSRCTL_BSIZE0_SHIFT;
-        psrctl |= PAGE_SIZE >>
-            E1000_PSRCTL_BSIZE1_SHIFT;
-        psrctl |= PAGE_SIZE <<
-            E1000_PSRCTL_BSIZE2_SHIFT;
-        psrctl |= PAGE_SIZE <<
-            E1000_PSRCTL_BSIZE3_SHIFT;
+
+        switch (adapter->rx_ps_pages) {
+        case 3:
+            psrctl |= PAGE_SIZE <<
+                E1000_PSRCTL_BSIZE3_SHIFT;
+        case 2:
+            psrctl |= PAGE_SIZE <<
+                E1000_PSRCTL_BSIZE2_SHIFT;
+        case 1:
+            psrctl |= PAGE_SIZE >>
+                E1000_PSRCTL_BSIZE1_SHIFT;
+            break;
+        }
 
         E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
     }
@@ -1590,7 +1604,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
     int i;
 #endif
 
-    if(adapter->rx_ps) {
+    if (adapter->rx_ps_pages) {
         rdlen = adapter->rx_ring[0].count *
             sizeof(union e1000_rx_desc_packet_split);
         adapter->clean_rx = e1000_clean_rx_irq_ps;
@@ -1700,8 +1714,8 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 
         /* Enable 82571 IPv4 payload checksum for UDP fragments
          * Must be used in conjunction with packet-split. */
-        if((adapter->hw.mac_type > e1000_82547_rev_2) &&
-           (adapter->rx_ps)) {
+        if ((hw->mac_type >= e1000_82571) &&
+            (adapter->rx_ps_pages)) {
             rxcsum |= E1000_RXCSUM_IPPCSE;
         }
     } else {
@@ -1906,7 +1920,7 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
         dev_kfree_skb(buffer_info->skb);
         buffer_info->skb = NULL;
 
-        for(j = 0; j < PS_PAGE_BUFFERS; j++) {
+        for(j = 0; j < adapter->rx_ps_pages; j++) {
             if(!ps_page->ps_page[j]) break;
             pci_unmap_single(pdev,
                      ps_page_dma->ps_page_dma[j],
@@ -3551,7 +3565,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
         /* Good Receive */
         skb_put(skb, length);
 
-        for(j = 0; j < PS_PAGE_BUFFERS; j++) {
+        for(j = 0; j < adapter->rx_ps_pages; j++) {
             if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
                 break;
 
@@ -3572,11 +3586,13 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
         skb->protocol = eth_type_trans(skb, netdev);
 
-#ifdef HAVE_RX_ZERO_COPY
         if(likely(rx_desc->wb.upper.header_status &
-              E1000_RXDPS_HDRSTAT_HDRSP))
+              E1000_RXDPS_HDRSTAT_HDRSP)) {
+            adapter->rx_hdr_split++;
+#ifdef HAVE_RX_ZERO_COPY
             skb_shinfo(skb)->zero_copy = TRUE;
 #endif
+        }
 #ifdef CONFIG_E1000_NAPI
         if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
             vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
@@ -3740,22 +3756,26 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
         rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
 
         for(j = 0; j < PS_PAGE_BUFFERS; j++) {
-            if(unlikely(!ps_page->ps_page[j])) {
-                ps_page->ps_page[j] =
-                    alloc_page(GFP_ATOMIC);
-                if(unlikely(!ps_page->ps_page[j]))
-                    goto no_buffers;
-                ps_page_dma->ps_page_dma[j] =
-                    pci_map_page(pdev,
-                             ps_page->ps_page[j],
-                             0, PAGE_SIZE,
-                             PCI_DMA_FROMDEVICE);
-            }
-            /* Refresh the desc even if buffer_addrs didn't
-             * change because each write-back erases this info.
-             */
-            rx_desc->read.buffer_addr[j+1] =
-                cpu_to_le64(ps_page_dma->ps_page_dma[j]);
+            if (j < adapter->rx_ps_pages) {
+                if (likely(!ps_page->ps_page[j])) {
+                    ps_page->ps_page[j] =
+                        alloc_page(GFP_ATOMIC);
+                    if (unlikely(!ps_page->ps_page[j]))
+                        goto no_buffers;
+                    ps_page_dma->ps_page_dma[j] =
+                        pci_map_page(pdev,
+                            ps_page->ps_page[j],
+                            0, PAGE_SIZE,
+                            PCI_DMA_FROMDEVICE);
+                }
+                /* Refresh the desc even if buffer_addrs didn't
+                 * change because each write-back erases
+                 * this info.
+                 */
+                rx_desc->read.buffer_addr[j+1] =
+                    cpu_to_le64(ps_page_dma->ps_page_dma[j]);
+            } else
+                rx_desc->read.buffer_addr[j+1] = ~0;
         }
 
         skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);