author    Joe Perches <joe@perches.com>  2010-05-13 11:26:17 -0400
committer David S. Miller <davem@davemloft.net>  2010-05-14 06:06:19 -0400
commit    57bf6eef2f43ae810504753208b3a2c0bb2e4765
tree      54ea4e292e1a6279139580b7d3e9ea74f3d09c61
parent    621b99b6f6a8ae69ca9b69dec0fec3a68f774bb7
ixgb and e1000: Use new function for copybreak tests
There appears to be an off-by-one defect in the maximum packet size
copied when copybreak is specified in these modules.

The copybreak module parameters are specified as:
"Maximum size of packet that is copied to a new buffer on receive"

The tests are changed from "< copybreak" to "<= copybreak" and moved
into new static functions for readability.

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
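For illustration only (not part of the commit): the boundary change reduces to
the predicate below. The helper names are hypothetical; copybreak is documented
as the *maximum* packet size that gets copied, so a packet of exactly copybreak
bytes must qualify.

/* Hypothetical helpers sketching the off-by-one fix; not driver code. */
static bool should_copybreak_old(u32 length, unsigned int copybreak)
{
	return length < copybreak;	/* off by one: misses length == copybreak */
}

static bool should_copybreak_new(u32 length, unsigned int copybreak)
{
	return length <= copybreak;	/* matches the documented semantics */
}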
Diffstat (limited to 'drivers/net/ixgb')
-rw-r--r--	drivers/net/ixgb/ixgb_main.c	52
1 file changed, 31 insertions(+), 21 deletions(-)
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index d58ca6b578cc..c6b75c83100c 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1921,6 +1921,31 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
 	}
 }
 
+/*
+ * this should improve performance for small packets with large amounts
+ * of reassembly being done in the stack
+ */
+static void ixgb_check_copybreak(struct net_device *netdev,
+				 struct ixgb_buffer *buffer_info,
+				 u32 length, struct sk_buff **skb)
+{
+	struct sk_buff *new_skb;
+
+	if (length > copybreak)
+		return;
+
+	new_skb = netdev_alloc_skb_ip_align(netdev, length);
+	if (!new_skb)
+		return;
+
+	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
+				       (*skb)->data - NET_IP_ALIGN,
+				       length + NET_IP_ALIGN);
+	/* save the skb in buffer_info as good */
+	buffer_info->skb = *skb;
+	*skb = new_skb;
+}
+
 /**
  * ixgb_clean_rx_irq - Send received data up the network stack,
  * @adapter: board private structure
@@ -1957,11 +1982,14 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
 
 		prefetch(skb->data - NET_IP_ALIGN);
 
-		if (++i == rx_ring->count) i = 0;
+		if (++i == rx_ring->count)
+			i = 0;
 		next_rxd = IXGB_RX_DESC(*rx_ring, i);
 		prefetch(next_rxd);
 
-		if ((j = i + 1) == rx_ring->count) j = 0;
+		j = i + 1;
+		if (j == rx_ring->count)
+			j = 0;
 		next2_buffer = &rx_ring->buffer_info[j];
 		prefetch(next2_buffer);
 
@@ -1997,25 +2025,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
 			goto rxdesc_done;
 		}
 
-		/* code added for copybreak, this should improve
-		 * performance for small packets with large amounts
-		 * of reassembly being done in the stack */
-		if (length < copybreak) {
-			struct sk_buff *new_skb =
-			    netdev_alloc_skb_ip_align(netdev, length);
-			if (new_skb) {
-				skb_copy_to_linear_data_offset(new_skb,
-							       -NET_IP_ALIGN,
-							       (skb->data -
-							        NET_IP_ALIGN),
-							       (length +
-							        NET_IP_ALIGN));
-				/* save the skb in buffer_info as good */
-				buffer_info->skb = skb;
-				skb = new_skb;
-			}
-		}
-		/* end copybreak code */
+		ixgb_check_copybreak(netdev, buffer_info, length, &skb);
 
 		/* Good Receive */
 		skb_put(skb, length);