author     Joe Perches <joe@perches.com>           2010-05-13 11:26:17 -0400
committer  David S. Miller <davem@davemloft.net>   2010-05-14 06:06:19 -0400
commit     57bf6eef2f43ae810504753208b3a2c0bb2e4765 (patch)
tree       54ea4e292e1a6279139580b7d3e9ea74f3d09c61 /drivers
parent     621b99b6f6a8ae69ca9b69dec0fec3a68f774bb7 (diff)
ixgb and e1000: Use new function for copybreak tests
There appears to be an off-by-1 defect in the maximum packet size copied
when copybreak is specified in these modules.

The copybreak module params are specified as:
"Maximum size of packet that is copied to a new buffer on receive"

The tests are changed from "< copybreak" to "<= copybreak" and moved into
new static functions for readability.

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
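For reference, a minimal standalone C sketch of the boundary change (not driver
code; the helper names and the copybreak value of 256 are illustrative only):

/*
 * Illustration of the off-by-1 fix: the old test skipped the copy when
 * length == copybreak, the new helpers copy whenever length <= copybreak,
 * which matches the module parameter description "Maximum size of packet
 * that is copied to a new buffer on receive".
 */
#include <stdbool.h>
#include <stdio.h>

static bool old_should_copy(unsigned int length, unsigned int copybreak)
{
	return length < copybreak;	/* off-by-one at the limit */
}

static bool new_should_copy(unsigned int length, unsigned int copybreak)
{
	return !(length > copybreak);	/* early-return form used in the patch */
}

int main(void)
{
	unsigned int copybreak = 256;	/* example module parameter value */

	printf("length=255: old=%d new=%d\n",
	       old_should_copy(255, copybreak), new_should_copy(255, copybreak));
	printf("length=256: old=%d new=%d\n",	/* the boundary case */
	       old_should_copy(256, copybreak), new_should_copy(256, copybreak));
	return 0;
}

At exactly copybreak bytes the old predicate skips the copy while the new one
performs it; that boundary is the only behavioral change besides the refactor.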
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/e1000/e1000_main.c | 47
-rw-r--r--  drivers/net/ixgb/ixgb_main.c   | 52
2 files changed, 58 insertions(+), 41 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5de738a6d0e6..ebdea0891665 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3785,6 +3785,31 @@ next_desc:
 	return cleaned;
 }
 
+/*
+ * this should improve performance for small packets with large amounts
+ * of reassembly being done in the stack
+ */
+static void e1000_check_copybreak(struct net_device *netdev,
+				  struct e1000_buffer *buffer_info,
+				  u32 length, struct sk_buff **skb)
+{
+	struct sk_buff *new_skb;
+
+	if (length > copybreak)
+		return;
+
+	new_skb = netdev_alloc_skb_ip_align(netdev, length);
+	if (!new_skb)
+		return;
+
+	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
+				       (*skb)->data - NET_IP_ALIGN,
+				       length + NET_IP_ALIGN);
+	/* save the skb in buffer_info as good */
+	buffer_info->skb = *skb;
+	*skb = new_skb;
+}
+
 /**
  * e1000_clean_rx_irq - Send received data up the network stack; legacy
  * @adapter: board private structure
@@ -3883,26 +3908,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		total_rx_bytes += length;
 		total_rx_packets++;
 
-		/* code added for copybreak, this should improve
-		 * performance for small packets with large amounts
-		 * of reassembly being done in the stack */
-		if (length < copybreak) {
-			struct sk_buff *new_skb =
-			    netdev_alloc_skb_ip_align(netdev, length);
-			if (new_skb) {
-				skb_copy_to_linear_data_offset(new_skb,
-							       -NET_IP_ALIGN,
-							       (skb->data -
-								NET_IP_ALIGN),
-							       (length +
-								NET_IP_ALIGN));
-				/* save the skb in buffer_info as good */
-				buffer_info->skb = skb;
-				skb = new_skb;
-			}
-			/* else just continue with the old one */
-		}
-		/* end copybreak code */
+		e1000_check_copybreak(netdev, buffer_info, length, &skb);
+
 		skb_put(skb, length);
 
 		/* Receive Checksum Offload */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index d58ca6b578cc..c6b75c83100c 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1921,6 +1921,31 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
 	}
 }
 
+/*
+ * this should improve performance for small packets with large amounts
+ * of reassembly being done in the stack
+ */
+static void ixgb_check_copybreak(struct net_device *netdev,
+				 struct ixgb_buffer *buffer_info,
+				 u32 length, struct sk_buff **skb)
+{
+	struct sk_buff *new_skb;
+
+	if (length > copybreak)
+		return;
+
+	new_skb = netdev_alloc_skb_ip_align(netdev, length);
+	if (!new_skb)
+		return;
+
+	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
+				       (*skb)->data - NET_IP_ALIGN,
+				       length + NET_IP_ALIGN);
+	/* save the skb in buffer_info as good */
+	buffer_info->skb = *skb;
+	*skb = new_skb;
+}
+
 /**
  * ixgb_clean_rx_irq - Send received data up the network stack,
  * @adapter: board private structure
@@ -1957,11 +1982,14 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
 
 		prefetch(skb->data - NET_IP_ALIGN);
 
-		if (++i == rx_ring->count) i = 0;
+		if (++i == rx_ring->count)
+			i = 0;
 		next_rxd = IXGB_RX_DESC(*rx_ring, i);
 		prefetch(next_rxd);
 
-		if ((j = i + 1) == rx_ring->count) j = 0;
+		j = i + 1;
+		if (j == rx_ring->count)
+			j = 0;
 		next2_buffer = &rx_ring->buffer_info[j];
 		prefetch(next2_buffer);
 
@@ -1997,25 +2025,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
 			goto rxdesc_done;
 		}
 
-		/* code added for copybreak, this should improve
-		 * performance for small packets with large amounts
-		 * of reassembly being done in the stack */
-		if (length < copybreak) {
-			struct sk_buff *new_skb =
-			    netdev_alloc_skb_ip_align(netdev, length);
-			if (new_skb) {
-				skb_copy_to_linear_data_offset(new_skb,
-							       -NET_IP_ALIGN,
-							       (skb->data -
-								NET_IP_ALIGN),
-							       (length +
-								NET_IP_ALIGN));
-				/* save the skb in buffer_info as good */
-				buffer_info->skb = skb;
-				skb = new_skb;
-			}
-		}
-		/* end copybreak code */
+		ixgb_check_copybreak(netdev, buffer_info, length, &skb);
 
 		/* Good Receive */
 		skb_put(skb, length);