aboutsummaryrefslogtreecommitdiffstats
path: root/net/core/dev.c
diff options
context:
space:
mode:
authorJohn Fastabend <john.r.fastabend@intel.com>2010-06-16 10:18:12 -0400
committerDavid S. Miller <davem@davemloft.net>2010-06-23 15:58:41 -0400
commit6afff0caa721211e8c04bdc7627ee3bff95bcb95 (patch)
treeec0b2f5b2e15f847ffad3db9501bbd9661bf671b /net/core/dev.c
parent1dc8d8c06d4002be4d1373fc06f25cd589be47e1 (diff)
net: consolidate netif_needs_gso() checks
netif_needs_gso() is checked twice in the TX path once, before submitting the skb to the qdisc and once after it is dequeued from the qdisc just before calling ndo_hard_start(). This opens a window for a user to change the gso/tso or tx checksum settings that can cause netif_needs_gso to be true in one check and false in the other. Specifically, changing TX checksum setting may cause the warning in skb_gso_segment() to be triggered if the checksum is calculated earlier. This consolidates the netif_needs_gso() calls so that the stack only checks if gso is needed in dev_hard_start_xmit(). Signed-off-by: John Fastabend <john.r.fastabend@intel.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com> Acked-by: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--net/core/dev.c68
1 file changed, 32 insertions, 36 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 5902426ef585..7f390b52caab 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1895,6 +1895,22 @@ static inline void skb_orphan_try(struct sk_buff *skb)
1895 skb_orphan(skb); 1895 skb_orphan(skb);
1896} 1896}
1897 1897
1898/*
1899 * Returns true if either:
1900 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
1901 * 2. skb is fragmented and the device does not support SG, or if
1902 * at least one of fragments is in highmem and device does not
1903 * support DMA from it.
1904 */
1905static inline int skb_needs_linearize(struct sk_buff *skb,
1906 struct net_device *dev)
1907{
1908 return skb_is_nonlinear(skb) &&
1909 ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
1910 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
1911 illegal_highdma(dev, skb))));
1912}
1913
1898int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 1914int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1899 struct netdev_queue *txq) 1915 struct netdev_queue *txq)
1900{ 1916{
@@ -1919,6 +1935,22 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1919 goto out_kfree_skb; 1935 goto out_kfree_skb;
1920 if (skb->next) 1936 if (skb->next)
1921 goto gso; 1937 goto gso;
1938 } else {
1939 if (skb_needs_linearize(skb, dev) &&
1940 __skb_linearize(skb))
1941 goto out_kfree_skb;
1942
1943 /* If packet is not checksummed and device does not
1944 * support checksumming for this protocol, complete
1945 * checksumming here.
1946 */
1947 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1948 skb_set_transport_header(skb, skb->csum_start -
1949 skb_headroom(skb));
1950 if (!dev_can_checksum(dev, skb) &&
1951 skb_checksum_help(skb))
1952 goto out_kfree_skb;
1953 }
1922 } 1954 }
1923 1955
1924 rc = ops->ndo_start_xmit(skb, dev); 1956 rc = ops->ndo_start_xmit(skb, dev);
@@ -2089,22 +2121,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2089 return rc; 2121 return rc;
2090} 2122}
2091 2123
2092/*
2093 * Returns true if either:
2094 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2095 * 2. skb is fragmented and the device does not support SG, or if
2096 * at least one of fragments is in highmem and device does not
2097 * support DMA from it.
2098 */
2099static inline int skb_needs_linearize(struct sk_buff *skb,
2100 struct net_device *dev)
2101{
2102 return skb_is_nonlinear(skb) &&
2103 ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
2104 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
2105 illegal_highdma(dev, skb))));
2106}
2107
2108/** 2124/**
2109 * dev_queue_xmit - transmit a buffer 2125 * dev_queue_xmit - transmit a buffer
2110 * @skb: buffer to transmit 2126 * @skb: buffer to transmit
@@ -2137,25 +2153,6 @@ int dev_queue_xmit(struct sk_buff *skb)
2137 struct Qdisc *q; 2153 struct Qdisc *q;
2138 int rc = -ENOMEM; 2154 int rc = -ENOMEM;
2139 2155
2140 /* GSO will handle the following emulations directly. */
2141 if (netif_needs_gso(dev, skb))
2142 goto gso;
2143
2144 /* Convert a paged skb to linear, if required */
2145 if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
2146 goto out_kfree_skb;
2147
2148 /* If packet is not checksummed and device does not support
2149 * checksumming for this protocol, complete checksumming here.
2150 */
2151 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2152 skb_set_transport_header(skb, skb->csum_start -
2153 skb_headroom(skb));
2154 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
2155 goto out_kfree_skb;
2156 }
2157
2158gso:
2159 /* Disable soft irqs for various locks below. Also 2156 /* Disable soft irqs for various locks below. Also
2160 * stops preemption for RCU. 2157 * stops preemption for RCU.
2161 */ 2158 */
@@ -2214,7 +2211,6 @@ gso:
2214 rc = -ENETDOWN; 2211 rc = -ENETDOWN;
2215 rcu_read_unlock_bh(); 2212 rcu_read_unlock_bh();
2216 2213
2217out_kfree_skb:
2218 kfree_skb(skb); 2214 kfree_skb(skb);
2219 return rc; 2215 return rc;
2220out: 2216out: