author     Herbert Xu <herbert@gondor.apana.org.au>          2006-06-22 05:40:14 -0400
committer  David S. Miller <davem@sunset.davemloft.net>      2006-06-23 05:07:29 -0400
commit     7967168cefdbc63bf332d6b1548eca7cd65ebbcc
tree       c45759149ae0acdc89d746e556a0ae278d11776d
parent     d4828d85d188dc70ed172802e798d3978bb6e29e
[NET]: Merge TSO/UFO fields in sk_buff
Having separate fields in sk_buff for TSO/UFO (tso_size/ufo_size) is not
going to scale if we add any more segmentation methods (e.g., DCCP). So
let's merge them.
The separate fields were also used to tell which protocol a packet belongs to.  That role
has been subsumed by the new gso_type field.  This is essentially a set of netdev
feature bits (shifted left by 16 bits) that are required to process a specific
skb.  As such it's easy to tell whether a given device can process a GSO
skb: you just AND the gso_type field with the netdev's features field.
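
The check described above is exactly what the new netif_needs_gso() helper added to
include/linux/netdevice.h further down in this patch implements; restated here for
reference (the comment is added for this write-up, it is not part of the patch):

	/* A packet needs software GSO emulation when it carries a gso_size
	 * but the device lacks one of the feature bits named by its
	 * gso_type (shifted into netdev feature space).
	 */
	static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
	{
		int feature = skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT;

		return skb_shinfo(skb)->gso_size &&
		       (dev->features & feature) != feature;
	}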
I've made gso_type a conjunction.  The idea is that you have a base type
(e.g., SKB_GSO_TCPV4) that can be modified further to support new features.
For example, if we add a hardware TSO type that supports ECN, such a device
would declare NETIF_F_TSO | NETIF_F_TSO_ECN.  All TSO packets with CWR set
would then have a gso_type of SKB_GSO_TCPV4 | SKB_GSO_TCPV4_ECN, while all
other TSO packets would be SKB_GSO_TCPV4.  This means that only the CWR
packets need to be emulated in software.
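
A sketch of how that conjunction would play out, using the hypothetical
NETIF_F_TSO_ECN / SKB_GSO_TCPV4_ECN names from the example above (neither
exists in this patch):

	/* Hypothetical device: advertises plain TSO plus an ECN-capable TSO. */
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;

	/* A CWR-marked TSO packet would carry both type bits ... */
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4 | SKB_GSO_TCPV4_ECN;

	/* ... so netif_needs_gso() is false for this device, while a device
	 * advertising only NETIF_F_TSO is missing the ECN feature bit and
	 * such packets fall back to software GSO.
	 */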
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/8139cp.c            |  2
-rw-r--r--  drivers/net/bnx2.c              |  4
-rw-r--r--  drivers/net/chelsio/sge.c       |  4
-rw-r--r--  drivers/net/e1000/e1000_main.c  | 10
-rw-r--r--  drivers/net/forcedeth.c         |  4
-rw-r--r--  drivers/net/ixgb/ixgb_main.c    |  4
-rw-r--r--  drivers/net/loopback.c          |  4
-rw-r--r--  drivers/net/myri10ge/myri10ge.c |  4
-rw-r--r--  drivers/net/r8169.c             |  2
-rw-r--r--  drivers/net/s2io.c              | 16
-rw-r--r--  drivers/net/sky2.c              |  4
-rw-r--r--  drivers/net/tg3.c               |  4
-rw-r--r--  drivers/net/typhoon.c           |  2
-rw-r--r--  drivers/s390/net/qeth_eddp.c    | 12
-rw-r--r--  drivers/s390/net/qeth_main.c    |  4
-rw-r--r--  drivers/s390/net/qeth_tso.h     |  2
-rw-r--r--  include/linux/netdevice.h       | 14
-rw-r--r--  include/linux/skbuff.h          | 12
-rw-r--r--  include/net/tcp.h               |  4
-rw-r--r--  net/bridge/br_forward.c         |  4
-rw-r--r--  net/bridge/br_netfilter.c       |  2
-rw-r--r--  net/core/skbuff.c               | 16
-rw-r--r--  net/ipv4/ip_output.c            | 16
-rw-r--r--  net/ipv4/tcp.c                  |  4
-rw-r--r--  net/ipv4/tcp_input.c            |  2
-rw-r--r--  net/ipv4/tcp_output.c           | 47
-rw-r--r--  net/ipv6/ip6_output.c           |  7
27 files changed, 120 insertions, 90 deletions
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index a26077a175a..0cdc830449d 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -797,7 +797,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 	entry = cp->tx_head;
 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 	if (dev->features & NETIF_F_TSO)
-		mss = skb_shinfo(skb)->tso_size;
+		mss = skb_shinfo(skb)->gso_size;
 
 	if (skb_shinfo(skb)->nr_frags == 0) {
 		struct cp_desc *txd = &cp->tx_ring[entry];
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 702d546567a..7635736cc79 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1640,7 +1640,7 @@ bnx2_tx_int(struct bnx2 *bp)
 		skb = tx_buf->skb;
 #ifdef BCM_TSO
 		/* partial BD completions possible with TSO packets */
-		if (skb_shinfo(skb)->tso_size) {
+		if (skb_shinfo(skb)->gso_size) {
 			u16 last_idx, last_ring_idx;
 
 			last_idx = sw_cons +
@@ -4428,7 +4428,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
 	}
 #ifdef BCM_TSO
-	if ((mss = skb_shinfo(skb)->tso_size) &&
+	if ((mss = skb_shinfo(skb)->gso_size) &&
 		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
 		u32 tcp_opt_len, ip_tcp_len;
 
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 4391bf4bf57..53efff6da78 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1418,7 +1418,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct cpl_tx_pkt *cpl;
 
 #ifdef NETIF_F_TSO
-	if (skb_shinfo(skb)->tso_size) {
+	if (skb_shinfo(skb)->gso_size) {
 		int eth_type;
 		struct cpl_tx_pkt_lso *hdr;
 
@@ -1433,7 +1433,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		hdr->ip_hdr_words = skb->nh.iph->ihl;
 		hdr->tcp_hdr_words = skb->h.th->doff;
 		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
-						skb_shinfo(skb)->tso_size));
+						skb_shinfo(skb)->gso_size));
 		hdr->len = htonl(skb->len - sizeof(*hdr));
 		cpl = (struct cpl_tx_pkt *)hdr;
 		sge->stats.tx_lso_pkts++;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index a373ccb308d..32b7d444b37 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2394,7 +2394,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
 	int err;
 
-	if (skb_shinfo(skb)->tso_size) {
+	if (skb_shinfo(skb)->gso_size) {
 		if (skb_header_cloned(skb)) {
 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 			if (err)
@@ -2402,7 +2402,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 		}
 
 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
-		mss = skb_shinfo(skb)->tso_size;
+		mss = skb_shinfo(skb)->gso_size;
 		if (skb->protocol == htons(ETH_P_IP)) {
 			skb->nh.iph->tot_len = 0;
 			skb->nh.iph->check = 0;
@@ -2519,7 +2519,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 		 * tso gets written back prematurely before the data is fully
 		 * DMA'd to the controller */
 		if (!skb->data_len && tx_ring->last_tx_tso &&
-		    !skb_shinfo(skb)->tso_size) {
+		    !skb_shinfo(skb)->gso_size) {
 			tx_ring->last_tx_tso = 0;
 			size -= 4;
 		}
@@ -2757,7 +2757,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 #ifdef NETIF_F_TSO
-	mss = skb_shinfo(skb)->tso_size;
+	mss = skb_shinfo(skb)->gso_size;
 	/* The controller does a simple calculation to
 	 * make sure there is enough room in the FIFO before
 	 * initiating the DMA for each buffer.  The calc is:
@@ -2807,7 +2807,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 #ifdef NETIF_F_TSO
 	/* Controller Erratum workaround */
 	if (!skb->data_len && tx_ring->last_tx_tso &&
-	    !skb_shinfo(skb)->tso_size)
+	    !skb_shinfo(skb)->gso_size)
 		count++;
 #endif
 
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 191383d461d..21be4fa071b 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1495,8 +1495,8 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	np->tx_skbuff[nr] = skb;
 
 #ifdef NETIF_F_TSO
-	if (skb_shinfo(skb)->tso_size)
-		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
+	if (skb_shinfo(skb)->gso_size)
+		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
 	else
 #endif
 		tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 57006fb8840..8bb32f94699 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1173,7 +1173,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 	uint16_t ipcse, tucse, mss;
 	int err;
 
-	if(likely(skb_shinfo(skb)->tso_size)) {
+	if(likely(skb_shinfo(skb)->gso_size)) {
 		if (skb_header_cloned(skb)) {
 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 			if (err)
@@ -1181,7 +1181,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 		}
 
 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
-		mss = skb_shinfo(skb)->tso_size;
+		mss = skb_shinfo(skb)->gso_size;
 		skb->nh.iph->tot_len = 0;
 		skb->nh.iph->check = 0;
 		skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index b79d6e8d304..43fef7de8cb 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -74,7 +74,7 @@ static void emulate_large_send_offload(struct sk_buff *skb)
 	struct iphdr *iph = skb->nh.iph;
 	struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4));
 	unsigned int doffset = (iph->ihl + th->doff) * 4;
-	unsigned int mtu = skb_shinfo(skb)->tso_size + doffset;
+	unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
 	unsigned int offset = 0;
 	u32 seq = ntohl(th->seq);
 	u16 id = ntohs(iph->id);
@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif
 
 #ifdef LOOPBACK_TSO
-	if (skb_shinfo(skb)->tso_size) {
+	if (skb_shinfo(skb)->gso_size) {
 		BUG_ON(skb->protocol != htons(ETH_P_IP));
 		BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
 
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index b983e1e0434..dbdf189436f 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1879,7 +1879,7 @@ again:
 
 #ifdef NETIF_F_TSO
 	if (skb->len > (dev->mtu + ETH_HLEN)) {
-		mss = skb_shinfo(skb)->tso_size;
+		mss = skb_shinfo(skb)->gso_size;
 		if (mss != 0)
 			max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
 	}
@@ -2112,7 +2112,7 @@ abort_linearize:
 		}
 		idx = (idx + 1) & tx->mask;
 	} while (idx != last_idx);
-	if (skb_shinfo(skb)->tso_size) {
+	if (skb_shinfo(skb)->gso_size) {
 		printk(KERN_ERR
 		       "myri10ge: %s: TSO but wanted to linearize?!?!?\n",
 		       mgp->dev->name);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 985afe0e627..12d1cb289bb 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2172,7 +2172,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
 static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
 {
 	if (dev->features & NETIF_F_TSO) {
-		u32 mss = skb_shinfo(skb)->tso_size;
+		u32 mss = skb_shinfo(skb)->gso_size;
 
 		if (mss)
 			return LargeSend | ((mss & MSSMask) << MSSShift);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 11daed495b9..3defe5d4f7d 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -3959,8 +3959,8 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	txdp->Control_1 = 0;
 	txdp->Control_2 = 0;
 #ifdef NETIF_F_TSO
-	mss = skb_shinfo(skb)->tso_size;
-	if (mss) {
+	mss = skb_shinfo(skb)->gso_size;
+	if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) {
 		txdp->Control_1 |= TXD_TCP_LSO_EN;
 		txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
 	}
@@ -3980,10 +3980,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	frg_len = skb->len - skb->data_len;
-	if (skb_shinfo(skb)->ufo_size) {
+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) {
 		int ufo_size;
 
-		ufo_size = skb_shinfo(skb)->ufo_size;
+		ufo_size = skb_shinfo(skb)->gso_size;
 		ufo_size &= ~7;
 		txdp->Control_1 |= TXD_UFO_EN;
 		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
@@ -4009,7 +4009,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	txdp->Host_Control = (unsigned long) skb;
 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
 
-	if (skb_shinfo(skb)->ufo_size)
+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
 		txdp->Control_1 |= TXD_UFO_EN;
 
 	frg_cnt = skb_shinfo(skb)->nr_frags;
@@ -4024,12 +4024,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 			(sp->pdev, frag->page, frag->page_offset,
 			 frag->size, PCI_DMA_TODEVICE);
 		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
-		if (skb_shinfo(skb)->ufo_size)
+		if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
 			txdp->Control_1 |= TXD_UFO_EN;
 	}
 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
 
-	if (skb_shinfo(skb)->ufo_size)
+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
 		frg_cnt++; /* as Txd0 was used for inband header */
 
 	tx_fifo = mac_control->tx_FIFO_start[queue];
@@ -4043,7 +4043,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (mss)
 		val64 |= TX_FIFO_SPECIAL_FUNC;
 #endif
-	if (skb_shinfo(skb)->ufo_size)
+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
 		val64 |= TX_FIFO_SPECIAL_FUNC;
 	writeq(val64, &tx_fifo->List_Control);
 
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index fba1e4d4d83..d3577871be2 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1160,7 +1160,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
 	count = sizeof(dma_addr_t) / sizeof(u32);
 	count += skb_shinfo(skb)->nr_frags * count;
 
-	if (skb_shinfo(skb)->tso_size)
+	if (skb_shinfo(skb)->gso_size)
 		++count;
 
 	if (skb->ip_summed == CHECKSUM_HW)
@@ -1232,7 +1232,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* Check for TCP Segmentation Offload */
-	mss = skb_shinfo(skb)->tso_size;
+	mss = skb_shinfo(skb)->gso_size;
 	if (mss != 0) {
 		/* just drop the packet if non-linear expansion fails */
 		if (skb_header_cloned(skb) &&
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index b2ddd4522a8..e3e380f90f8 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3780,7 +3780,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 #if TG3_TSO_SUPPORT != 0
 	mss = 0;
 	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
-	    (mss = skb_shinfo(skb)->tso_size) != 0) {
+	    (mss = skb_shinfo(skb)->gso_size) != 0) {
 		int tcp_opt_len, ip_tcp_len;
 
 		if (skb_header_cloned(skb) &&
@@ -3905,7 +3905,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 #if TG3_TSO_SUPPORT != 0
 	mss = 0;
 	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
-	    (mss = skb_shinfo(skb)->tso_size) != 0) {
+	    (mss = skb_shinfo(skb)->gso_size) != 0) {
 		int tcp_opt_len, ip_tcp_len;
 
 		if (skb_header_cloned(skb) &&
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index d9258d42090..e49e8b520c2 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -340,7 +340,7 @@ enum state_values {
 #endif
 
 #if defined(NETIF_F_TSO)
-#define skb_tso_size(x)		(skb_shinfo(x)->tso_size)
+#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 #define TSO_NUM_DESCRIPTORS	2
 #define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 #else
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
index 0bab60a2030..38aad832145 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -420,7 +420,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
 	}
 	tcph = eddp->skb->h.th;
 	while (eddp->skb_offset < eddp->skb->len) {
-		data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
+		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
 			       (int)(eddp->skb->len - eddp->skb_offset));
 		/* prepare qdio hdr */
 		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
@@ -515,20 +515,20 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
 
 	QETH_DBF_TEXT(trace, 5, "eddpcanp");
 	/* can we put multiple skbs in one page? */
-	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
+	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
 	if (skbs_per_page > 1){
-		ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
+		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
 				 skbs_per_page + 1;
 		ctx->elements_per_skb = 1;
 	} else {
 		/* no -> how many elements per skb? */
-		ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
+		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
 				 PAGE_SIZE) >> PAGE_SHIFT;
 		ctx->num_pages = ctx->elements_per_skb *
-				 (skb_shinfo(skb)->tso_segs + 1);
+				 (skb_shinfo(skb)->gso_segs + 1);
 	}
 	ctx->num_elements = ctx->elements_per_skb *
-			    (skb_shinfo(skb)->tso_segs + 1);
+			    (skb_shinfo(skb)->gso_segs + 1);
 }
 
 static inline struct qeth_eddp_context *
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 9e671a48cd2..56009d76832 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -4417,7 +4417,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
 	struct qeth_eddp_context *ctx = NULL;
 	int tx_bytes = skb->len;
 	unsigned short nr_frags = skb_shinfo(skb)->nr_frags;
-	unsigned short tso_size = skb_shinfo(skb)->tso_size;
+	unsigned short tso_size = skb_shinfo(skb)->gso_size;
 	int rc;
 
 	QETH_DBF_TEXT(trace, 6, "sendpkt");
@@ -4453,7 +4453,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
 	queue = card->qdio.out_qs
 		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
 
-	if (skb_shinfo(skb)->tso_size)
+	if (skb_shinfo(skb)->gso_size)
 		large_send = card->options.large_send;
 
 	/*are we able to do TSO ? If so ,prepare and send it from here */
diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h
index 24ef40ca956..593f298142c 100644
--- a/drivers/s390/net/qeth_tso.h
+++ b/drivers/s390/net/qeth_tso.h
@@ -51,7 +51,7 @@ qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
 	hdr->ext.hdr_version = 1;
 	hdr->ext.hdr_len     = 28;
 	/*insert non-fix values */
-	hdr->ext.mss = skb_shinfo(skb)->tso_size;
+	hdr->ext.mss = skb_shinfo(skb)->gso_size;
 	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
 	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
 				       sizeof(struct qeth_hdr_tso));
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cead6be467e..fa5671307b9 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -308,9 +308,12 @@ struct net_device
 #define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
 #define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
 #define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
-#define NETIF_F_TSO		2048	/* Can offload TCP/IP segmentation */
 #define NETIF_F_LLTX		4096	/* LockLess TX */
-#define NETIF_F_UFO		8192	/* Can offload UDP Large Send*/
+
+/* Segmentation offload features */
+#define NETIF_F_GSO_SHIFT	16
+#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
+#define NETIF_F_UFO		(SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
 
 #define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
 #define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
@@ -979,6 +982,13 @@ extern void dev_seq_stop(struct seq_file *seq, void *v);
 
 extern void linkwatch_run_queue(void);
 
+static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
+{
+	int feature = skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT;
+	return skb_shinfo(skb)->gso_size &&
+	       (dev->features & feature) != feature;
+}
+
 #endif /* __KERNEL__ */
 
 #endif	/* _LINUX_DEV_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f8c7eb79a27..97b0d2d1a6b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -134,9 +134,10 @@ struct skb_frag_struct {
 struct skb_shared_info {
 	atomic_t	dataref;
 	unsigned short	nr_frags;
-	unsigned short	tso_size;
-	unsigned short	tso_segs;
-	unsigned short	ufo_size;
+	unsigned short	gso_size;
+	/* Warning: this field is not always filled in (UFO)! */
+	unsigned short	gso_segs;
+	unsigned short	gso_type;
 	unsigned int	ip6_frag_id;
 	struct sk_buff	*frag_list;
 	skb_frag_t	frags[MAX_SKB_FRAGS];
@@ -168,6 +169,11 @@ enum {
 	SKB_FCLONE_CLONE,
 };
 
+enum {
+	SKB_GSO_TCPV4 = 1 << 0,
+	SKB_GSO_UDPV4 = 1 << 1,
+};
+
 /**
  *	struct sk_buff - socket buffer
  *	@next: Next buffer in list
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5f4eb5c7968..b197a9e615c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -569,13 +569,13 @@ struct tcp_skb_cb {
  */
 static inline int tcp_skb_pcount(const struct sk_buff *skb)
 {
-	return skb_shinfo(skb)->tso_segs;
+	return skb_shinfo(skb)->gso_segs;
 }
 
 /* This is valid iff tcp_skb_pcount() > 1. */
 static inline int tcp_skb_mss(const struct sk_buff *skb)
 {
-	return skb_shinfo(skb)->tso_size;
+	return skb_shinfo(skb)->gso_size;
 }
 
 static inline void tcp_dec_pcount_approx(__u32 *count,
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 0dca027ceb8..8be9f2123e5 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -34,8 +34,8 @@ static inline unsigned packet_length(const struct sk_buff *skb)
 
 int br_dev_queue_push_xmit(struct sk_buff *skb)
 {
-	/* drop mtu oversized packets except tso */
-	if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->tso_size)
+	/* drop mtu oversized packets except gso */
+	if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
 		kfree_skb(skb);
 	else {
 #ifdef CONFIG_BRIDGE_NETFILTER
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 3e41f9d6d51..8298a5179ae 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -761,7 +761,7 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
 	if (skb->protocol == htons(ETH_P_IP) &&
 	    skb->len > skb->dev->mtu &&
-	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
+	    !skb_shinfo(skb)->gso_size)
 		return ip_fragment(skb, br_dev_queue_push_xmit);
 	else
 		return br_dev_queue_push_xmit(skb);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index fe63d4efbd4..368d98578c1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -172,9 +172,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	shinfo = skb_shinfo(skb);
 	atomic_set(&shinfo->dataref, 1);
 	shinfo->nr_frags  = 0;
-	shinfo->tso_size = 0;
-	shinfo->tso_segs = 0;
-	shinfo->ufo_size = 0;
+	shinfo->gso_size = 0;
+	shinfo->gso_segs = 0;
+	shinfo->gso_type = 0;
 	shinfo->ip6_frag_id = 0;
 	shinfo->frag_list = NULL;
 
@@ -238,8 +238,9 @@ struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
 
 	atomic_set(&(skb_shinfo(skb)->dataref), 1);
 	skb_shinfo(skb)->nr_frags = 0;
-	skb_shinfo(skb)->tso_size = 0;
-	skb_shinfo(skb)->tso_segs = 0;
+	skb_shinfo(skb)->gso_size = 0;
+	skb_shinfo(skb)->gso_segs = 0;
+	skb_shinfo(skb)->gso_type = 0;
 	skb_shinfo(skb)->frag_list = NULL;
 out:
 	return skb;
@@ -528,8 +529,9 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 #endif
 	skb_copy_secmark(new, old);
 	atomic_set(&new->users, 1);
-	skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size;
-	skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs;
+	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
+	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
+	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
 }
 
 /**
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 8538aac3d14..7624fd1d8f9 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -210,8 +210,7 @@ static inline int ip_finish_output(struct sk_buff *skb)
 		return dst_output(skb);
 	}
 #endif
-	if (skb->len > dst_mtu(skb->dst) &&
-	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
+	if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
 		return ip_fragment(skb, ip_finish_output2);
 	else
 		return ip_finish_output2(skb);
@@ -362,7 +361,7 @@ packet_routed:
 	}
 
 	ip_select_ident_more(iph, &rt->u.dst, sk,
-			     (skb_shinfo(skb)->tso_segs ?: 1) - 1);
+			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 
 	/* Add an IP checksum. */
 	ip_send_check(iph);
@@ -744,7 +743,8 @@ static inline int ip_ufo_append_data(struct sock *sk,
 			       (length - transhdrlen));
 	if (!err) {
 		/* specify the length of each IP datagram fragment*/
-		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
+		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
+		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
 		__skb_queue_tail(&sk->sk_write_queue, skb);
 
 		return 0;
@@ -1087,14 +1087,16 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
 
 	inet->cork.length += size;
 	if ((sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->u.dst.dev->features & NETIF_F_UFO))
-		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
+	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
+		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
+		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
+	}
 
 
 	while (size > 0) {
 		int i;
 
-		if (skb_shinfo(skb)->ufo_size)
+		if (skb_shinfo(skb)->gso_size)
 			len = size;
 		else {
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 74998f25007..062dd1a0d8a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -571,7 +571,7 @@ new_segment:
 		skb->ip_summed = CHECKSUM_HW;
 		tp->write_seq += copy;
 		TCP_SKB_CB(skb)->end_seq += copy;
-		skb_shinfo(skb)->tso_segs = 0;
+		skb_shinfo(skb)->gso_segs = 0;
 
 		if (!copied)
 			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
@@ -818,7 +818,7 @@ new_segment:
 
 		tp->write_seq += copy;
 		TCP_SKB_CB(skb)->end_seq += copy;
-		skb_shinfo(skb)->tso_segs = 0;
+		skb_shinfo(skb)->gso_segs = 0;
 
 		from += copy;
 		copied += copy;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e08245bdda3..94fe5b1f9dc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1073,7 +1073,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 				else
 					pkt_len = (end_seq -
 						   TCP_SKB_CB(skb)->seq);
-				if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size))
+				if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size))
 					break;
 				pcount = tcp_skb_pcount(skb);
 			}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 07bb5a2b375..bdd71db8bf9 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -515,15 +515,17 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned
 		/* Avoid the costly divide in the normal
 		 * non-TSO case.
 		 */
-		skb_shinfo(skb)->tso_segs = 1;
-		skb_shinfo(skb)->tso_size = 0;
+		skb_shinfo(skb)->gso_segs = 1;
+		skb_shinfo(skb)->gso_size = 0;
+		skb_shinfo(skb)->gso_type = 0;
 	} else {
 		unsigned int factor;
 
 		factor = skb->len + (mss_now - 1);
 		factor /= mss_now;
-		skb_shinfo(skb)->tso_segs = factor;
-		skb_shinfo(skb)->tso_size = mss_now;
+		skb_shinfo(skb)->gso_segs = factor;
+		skb_shinfo(skb)->gso_size = mss_now;
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 	}
 }
 
@@ -914,7 +916,7 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int
 
 	if (!tso_segs ||
 	    (tso_segs > 1 &&
-	     skb_shinfo(skb)->tso_size != mss_now)) {
+	     tcp_skb_mss(skb) != mss_now)) {
 		tcp_set_skb_tso_segs(sk, skb, mss_now);
 		tso_segs = tcp_skb_pcount(skb);
 	}
@@ -1724,8 +1726,9 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	     tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
 		if (!pskb_trim(skb, 0)) {
 			TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
-			skb_shinfo(skb)->tso_segs = 1;
-			skb_shinfo(skb)->tso_size = 0;
+			skb_shinfo(skb)->gso_segs = 1;
+			skb_shinfo(skb)->gso_size = 0;
+			skb_shinfo(skb)->gso_type = 0;
 			skb->ip_summed = CHECKSUM_NONE;
 			skb->csum = 0;
 		}
@@ -1930,8 +1933,9 @@ void tcp_send_fin(struct sock *sk)
 		skb->csum = 0;
 		TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
 		TCP_SKB_CB(skb)->sacked = 0;
-		skb_shinfo(skb)->tso_segs = 1;
-		skb_shinfo(skb)->tso_size = 0;
+		skb_shinfo(skb)->gso_segs = 1;
+		skb_shinfo(skb)->gso_size = 0;
+		skb_shinfo(skb)->gso_type = 0;
 
 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
 		TCP_SKB_CB(skb)->seq = tp->write_seq;
@@ -1963,8 +1967,9 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 	skb->csum = 0;
 	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
 	TCP_SKB_CB(skb)->sacked = 0;
-	skb_shinfo(skb)->tso_segs = 1;
-	skb_shinfo(skb)->tso_size = 0;
+	skb_shinfo(skb)->gso_segs = 1;
+	skb_shinfo(skb)->gso_size = 0;
+	skb_shinfo(skb)->gso_type = 0;
 
 	/* Send it off. */
 	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
@@ -2047,8 +2052,9 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
 	TCP_SKB_CB(skb)->sacked = 0;
-	skb_shinfo(skb)->tso_segs = 1;
-	skb_shinfo(skb)->tso_size = 0;
+	skb_shinfo(skb)->gso_segs = 1;
+	skb_shinfo(skb)->gso_size = 0;
+	skb_shinfo(skb)->gso_type = 0;
 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
 	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
@@ -2152,8 +2158,9 @@ int tcp_connect(struct sock *sk)
 	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
 	TCP_ECN_send_syn(sk, tp, buff);
 	TCP_SKB_CB(buff)->sacked = 0;
-	skb_shinfo(buff)->tso_segs = 1;
-	skb_shinfo(buff)->tso_size = 0;
+	skb_shinfo(buff)->gso_segs = 1;
+	skb_shinfo(buff)->gso_size = 0;
+	skb_shinfo(buff)->gso_type = 0;
 	buff->csum = 0;
 	TCP_SKB_CB(buff)->seq = tp->write_seq++;
 	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
@@ -2257,8 +2264,9 @@ void tcp_send_ack(struct sock *sk)
 		buff->csum = 0;
 		TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
 		TCP_SKB_CB(buff)->sacked = 0;
-		skb_shinfo(buff)->tso_segs = 1;
-		skb_shinfo(buff)->tso_size = 0;
+		skb_shinfo(buff)->gso_segs = 1;
+		skb_shinfo(buff)->gso_size = 0;
+		skb_shinfo(buff)->gso_type = 0;
 
 		/* Send it off, this clears delayed acks for us. */
 		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
@@ -2293,8 +2301,9 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
 	skb->csum = 0;
 	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
 	TCP_SKB_CB(skb)->sacked = urgent;
-	skb_shinfo(skb)->tso_segs = 1;
-	skb_shinfo(skb)->tso_size = 0;
+	skb_shinfo(skb)->gso_segs = 1;
+	skb_shinfo(skb)->gso_size = 0;
+	skb_shinfo(skb)->gso_type = 0;
 
 	/* Use a previous sequence.  This should cause the other
 	 * end to send an ack.  Don't queue or clone SKB, just
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d29620f4910..abb94de3376 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -148,7 +148,7 @@ static int ip6_output2(struct sk_buff *skb)
 
 int ip6_output(struct sk_buff *skb)
 {
-	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->ufo_size) ||
+	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
 	    dst_allfrag(skb->dst))
 		return ip6_fragment(skb, ip6_output2);
 	else
@@ -833,8 +833,9 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 		struct frag_hdr fhdr;
 
 		/* specify the length of each IP datagram fragment*/
-		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen) -
-					    sizeof(struct frag_hdr);
+		skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
+					    sizeof(struct frag_hdr);
+		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
 		ipv6_select_ident(skb, &fhdr);
 		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
 		__skb_queue_tail(&sk->sk_write_queue, skb);