aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/e1000
diff options
context:
space:
mode:
authorJoe Perches <joe@perches.com>2010-05-13 11:26:17 -0400
committerDavid S. Miller <davem@davemloft.net>2010-05-14 06:06:19 -0400
commit57bf6eef2f43ae810504753208b3a2c0bb2e4765 (patch)
tree54ea4e292e1a6279139580b7d3e9ea74f3d09c61 /drivers/net/e1000
parent621b99b6f6a8ae69ca9b69dec0fec3a68f774bb7 (diff)
ixgb and e1000: Use new function for copybreak tests
There appears to be an off-by-1 defect in the maximum packet size copied when copybreak is specified in these modules. The copybreak module params are specified as: "Maximum size of packet that is copied to a new buffer on receive" The tests are changed from "< copybreak" to "<= copybreak" and moved into new static functions for readability. Signed-off-by: Joe Perches <joe@perches.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/e1000')
-rw-r--r--drivers/net/e1000/e1000_main.c47
1 files changed, 27 insertions, 20 deletions
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5de738a6d0e6..ebdea0891665 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3785,6 +3785,31 @@ next_desc:
3785 return cleaned; 3785 return cleaned;
3786} 3786}
3787 3787
3788/*
3789 * this should improve performance for small packets with large amounts
3790 * of reassembly being done in the stack
3791 */
3792static void e1000_check_copybreak(struct net_device *netdev,
3793 struct e1000_buffer *buffer_info,
3794 u32 length, struct sk_buff **skb)
3795{
3796 struct sk_buff *new_skb;
3797
3798 if (length > copybreak)
3799 return;
3800
3801 new_skb = netdev_alloc_skb_ip_align(netdev, length);
3802 if (!new_skb)
3803 return;
3804
3805 skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
3806 (*skb)->data - NET_IP_ALIGN,
3807 length + NET_IP_ALIGN);
3808 /* save the skb in buffer_info as good */
3809 buffer_info->skb = *skb;
3810 *skb = new_skb;
3811}
3812
3788/** 3813/**
3789 * e1000_clean_rx_irq - Send received data up the network stack; legacy 3814 * e1000_clean_rx_irq - Send received data up the network stack; legacy
3790 * @adapter: board private structure 3815 * @adapter: board private structure
@@ -3883,26 +3908,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3883 total_rx_bytes += length; 3908 total_rx_bytes += length;
3884 total_rx_packets++; 3909 total_rx_packets++;
3885 3910
3886 /* code added for copybreak, this should improve 3911 e1000_check_copybreak(netdev, buffer_info, length, &skb);
3887 * performance for small packets with large amounts 3912
3888 * of reassembly being done in the stack */
3889 if (length < copybreak) {
3890 struct sk_buff *new_skb =
3891 netdev_alloc_skb_ip_align(netdev, length);
3892 if (new_skb) {
3893 skb_copy_to_linear_data_offset(new_skb,
3894 -NET_IP_ALIGN,
3895 (skb->data -
3896 NET_IP_ALIGN),
3897 (length +
3898 NET_IP_ALIGN));
3899 /* save the skb in buffer_info as good */
3900 buffer_info->skb = skb;
3901 skb = new_skb;
3902 }
3903 /* else just continue with the old one */
3904 }
3905 /* end copybreak code */
3906 skb_put(skb, length); 3913 skb_put(skb, length);
3907 3914
3908 /* Receive Checksum Offload */ 3915 /* Receive Checksum Offload */