path: root/include/linux/skbuff.h
Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--	include/linux/skbuff.h	75
1 file changed, 69 insertions(+), 6 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f89e7fd59a4c..77eb60d2b496 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -169,6 +169,7 @@ struct skb_shared_hwtstamps {
  * @software: generate software time stamp
  * @in_progress: device driver is going to provide
  *		hardware time stamp
+ * @prevent_sk_orphan: make sk reference available on driver level
  * @flags: all shared_tx flags
  *
  * These flags are attached to packets as part of the
@@ -178,7 +179,8 @@ union skb_shared_tx {
 	struct {
 		__u8	hardware:1,
 			software:1,
-			in_progress:1;
+			in_progress:1,
+			prevent_sk_orphan:1;
 	};
 	__u8	flags;
 };
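
The new prevent_sk_orphan bit asks the stack to keep skb->sk attached so the owning socket is still reachable at the driver level. As a rough illustration of how a caller might set it, via the skb_tx() accessor this file already provides (the helper below is hypothetical, not part of the patch):

#include <linux/skbuff.h>

/* Hypothetical helper, for illustration only: keep the owning socket
 * reference on the skb so it is still visible at the driver level.
 */
static inline void example_keep_sk_for_driver(struct sk_buff *skb)
{
	union skb_shared_tx *shtx = skb_tx(skb);

	shtx->prevent_sk_orphan = 1;
}
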
@@ -202,10 +204,11 @@ struct skb_shared_info {
 	 */
 	atomic_t	dataref;

-	skb_frag_t	frags[MAX_SKB_FRAGS];
 	/* Intermediate layers must ensure that destructor_arg
 	 * remains valid until skb destructor */
 	void *		destructor_arg;
+	/* must be last field, see pskb_expand_head() */
+	skb_frag_t	frags[MAX_SKB_FRAGS];
 };

 /* We divide dataref into two halves.  The higher 16 bits hold references
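
Moving frags[] to the end of skb_shared_info lets pskb_expand_head() copy only the portion of the structure that is actually in use, i.e. everything up to the last active fragment. A sketch of that idea (a hypothetical helper, not the real pskb_expand_head() body):

#include <linux/skbuff.h>
#include <linux/stddef.h>
#include <linux/string.h>

/* Sketch of the optimization the new layout enables: with frags[]
 * last, only offsetof(..., frags[nr_frags]) bytes need copying when
 * the head is reallocated.  Hypothetical helper for illustration.
 */
static void example_copy_shinfo(struct skb_shared_info *dst,
				const struct skb_shared_info *src)
{
	memcpy(dst, src, offsetof(struct skb_shared_info,
				  frags[src->nr_frags]));
}
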
@@ -1376,6 +1379,11 @@ static inline int skb_network_offset(const struct sk_buff *skb)
 	return skb_network_header(skb) - skb->data;
 }

+static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
+{
+	return pskb_may_pull(skb, skb_network_offset(skb) + len);
+}
+
 /*
  * CPUs often take a performance hit when accessing unaligned memory
  * locations. The actual performance hit varies, it can be small if the
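
pskb_network_may_pull() combines the network-header offset with pskb_may_pull(), guaranteeing that len bytes starting at the network header are in the linear data area before they are dereferenced. A usage sketch, assuming an illustrative IPv4 validation function:

#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

/* Illustrative caller: make sure the full IPv4 header is linear
 * before reading it through ip_hdr().
 */
static int example_check_ipv4(struct sk_buff *skb)
{
	const struct iphdr *iph;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return -EINVAL;

	iph = ip_hdr(skb);
	if (iph->version != 4 || iph->ihl < 5)
		return -EINVAL;

	return 0;
}
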
@@ -1414,12 +1422,14 @@ static inline int skb_network_offset(const struct sk_buff *skb)
  *
  * Various parts of the networking layer expect at least 32 bytes of
  * headroom, you should not reduce this.
- * With RPS, we raised NET_SKB_PAD to 64 so that get_rps_cpus() fetches span
- * a 64 bytes aligned block to fit modern (>= 64 bytes) cache line sizes
+ *
+ * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
+ * to reduce the average number of cache lines per packet.
+ * get_rps_cpus(), for example, accesses only one 64-byte aligned block:
  * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
  */
 #ifndef NET_SKB_PAD
-#define NET_SKB_PAD	64
+#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
 #endif

 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
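
NET_SKB_PAD is the headroom reserved in front of received frames; tying it to L1_CACHE_BYTES keeps the fields that get_rps_cpus() hashes within a single cache line on machines with large cache lines. Allocation helpers such as __dev_alloc_skb() apply the pad internally; open-coded, the pattern looks roughly like this (function name is illustrative):

#include <linux/gfp.h>
#include <linux/skbuff.h>

/* Illustrative RX allocation: reserve NET_SKB_PAD of headroom, plus
 * NET_IP_ALIGN so the IP header lands on an aligned address.
 */
static struct sk_buff *example_rx_alloc(unsigned int frame_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(frame_len + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	return skb;
}
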
@@ -1931,6 +1941,36 @@ static inline ktime_t net_invalid_timestamp(void)
 	return ktime_set(0, 0);
 }

+extern void skb_timestamping_init(void);
+
+#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
+
+extern void skb_clone_tx_timestamp(struct sk_buff *skb);
+extern bool skb_defer_rx_timestamp(struct sk_buff *skb);
+
+#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
+
+static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
+{
+}
+
+static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
+{
+	return false;
+}
+
+#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
+
+/**
+ * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
+ *
+ * @skb: clone of the original outgoing packet
+ * @hwtstamps: hardware time stamps
+ *
+ */
+void skb_complete_tx_timestamp(struct sk_buff *skb,
+			       struct skb_shared_hwtstamps *hwtstamps);
+
 /**
  * skb_tstamp_tx - queue clone of skb with send time stamps
  * @orig_skb: the original outgoing packet
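
With CONFIG_NETWORK_PHY_TIMESTAMPING, skb_clone_tx_timestamp() hands a clone of an outgoing packet to the PHY layer; once the PHY reports the transmit time, the driver returns the clone through skb_complete_tx_timestamp(). A hedged sketch of that completion path (the handler name and raw timestamp source are assumptions):

#include <linux/ktime.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Illustrative PHY completion handler: attach the hardware transmit
 * time to the cloned skb and deliver it to the socket's error queue.
 */
static void example_phy_txtstamp_done(struct sk_buff *clone, ktime_t hw)
{
	struct skb_shared_hwtstamps hwts;

	memset(&hwts, 0, sizeof(hwts));
	hwts.hwtstamp = hw;
	skb_complete_tx_timestamp(clone, &hwts);
}
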
@@ -1945,6 +1985,28 @@ static inline ktime_t net_invalid_timestamp(void)
 extern void skb_tstamp_tx(struct sk_buff *orig_skb,
 			struct skb_shared_hwtstamps *hwtstamps);

+static inline void sw_tx_timestamp(struct sk_buff *skb)
+{
+	union skb_shared_tx *shtx = skb_tx(skb);
+	if (shtx->software && !shtx->in_progress)
+		skb_tstamp_tx(skb, NULL);
+}
+
+/**
+ * skb_tx_timestamp() - Driver hook for transmit timestamping
+ *
+ * Ethernet MAC drivers should call this function in their hard_xmit()
+ * function as soon as possible after giving the sk_buff to the MAC
+ * hardware, but before freeing the sk_buff.
+ *
+ * @skb: A socket buffer.
+ */
+static inline void skb_tx_timestamp(struct sk_buff *skb)
+{
+	skb_clone_tx_timestamp(skb);
+	sw_tx_timestamp(skb);
+}
+
 extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
 extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

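
Taken together, a MAC driver only needs the single skb_tx_timestamp() call in its transmit path: the helper clones the skb for PHY timestamping and queues a software timestamp if the socket requested one. A minimal sketch of the documented call site (driver names are illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative ndo_start_xmit: timestamp right after the frame is
 * handed to hardware, before the skb can be freed by TX completion.
 */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* ... map buffers and post the TX descriptor here ... */

	skb_tx_timestamp(skb);

	/* ... ring the doorbell; skb is freed on TX completion ... */
	return NETDEV_TX_OK;
}
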
@@ -2132,7 +2194,8 @@ static inline bool skb_warn_if_lro(const struct sk_buff *skb)
 	/* LRO sets gso_size but not gso_type, whereas if GSO is really
 	 * wanted then gso_type will be set. */
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
-	if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
+	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
+	    unlikely(shinfo->gso_type == 0)) {
 		__skb_warn_lro_forwarding(skb);
 		return true;
 	}
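
The added skb_is_nonlinear() test restricts the warning to skbs that can actually be LRO aggregates; a linear skb with a stale gso_size no longer trips it. On forwarding paths the function is used as a drop test, roughly like this (wrapper name is illustrative):

#include <linux/errno.h>
#include <linux/skbuff.h>

/* Illustrative forwarding-path check: warn and tell the caller to
 * drop packets that look like unsplittable LRO aggregates.
 */
static int example_forward_check(struct sk_buff *skb)
{
	if (skb_warn_if_lro(skb))
		return -EINVAL;	/* caller drops the packet */
	return 0;
}
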