aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/skbuff.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--  include/linux/skbuff.h | 95
1 file changed, 94 insertions(+), 1 deletion(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b8292d8cc9fa..2e0ced1af3b1 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -32,6 +32,7 @@
32#include <linux/hrtimer.h> 32#include <linux/hrtimer.h>
33#include <linux/dma-mapping.h> 33#include <linux/dma-mapping.h>
34#include <linux/netdev_features.h> 34#include <linux/netdev_features.h>
35#include <net/flow_keys.h>
35 36
36/* Don't change this without changing skb_csum_unnecessary! */ 37/* Don't change this without changing skb_csum_unnecessary! */
37#define CHECKSUM_NONE 0 38#define CHECKSUM_NONE 0
@@ -316,6 +317,8 @@ enum {
316 SKB_GSO_FCOE = 1 << 5, 317 SKB_GSO_FCOE = 1 << 5,
317 318
318 SKB_GSO_GRE = 1 << 6, 319 SKB_GSO_GRE = 1 << 6,
320
321 SKB_GSO_UDP_TUNNEL = 1 << 7,
319}; 322};
320 323
321#if BITS_PER_LONG > 32 324#if BITS_PER_LONG > 32
@@ -384,9 +387,11 @@ typedef unsigned char *sk_buff_data_t;
384 * @secmark: security marking 387 * @secmark: security marking
385 * @mark: Generic packet mark 388 * @mark: Generic packet mark
386 * @dropcount: total number of sk_receive_queue overflows 389 * @dropcount: total number of sk_receive_queue overflows
390 * @vlan_proto: vlan encapsulation protocol
387 * @vlan_tci: vlan tag control information 391 * @vlan_tci: vlan tag control information
388 * @inner_transport_header: Inner transport layer header (encapsulation) 392 * @inner_transport_header: Inner transport layer header (encapsulation)
389 * @inner_network_header: Network layer header (encapsulation) 393 * @inner_network_header: Network layer header (encapsulation)
394 * @inner_mac_header: Link layer header (encapsulation)
390 * @transport_header: Transport layer header 395 * @transport_header: Transport layer header
391 * @network_header: Network layer header 396 * @network_header: Network layer header
392 * @mac_header: Link layer header 397 * @mac_header: Link layer header
@@ -461,6 +466,7 @@ struct sk_buff {
461 466
462 __u32 rxhash; 467 __u32 rxhash;
463 468
469 __be16 vlan_proto;
464 __u16 vlan_tci; 470 __u16 vlan_tci;
465 471
466#ifdef CONFIG_NET_SCHED 472#ifdef CONFIG_NET_SCHED
@@ -505,6 +511,7 @@ struct sk_buff {
505 511
506 sk_buff_data_t inner_transport_header; 512 sk_buff_data_t inner_transport_header;
507 sk_buff_data_t inner_network_header; 513 sk_buff_data_t inner_network_header;
514 sk_buff_data_t inner_mac_header;
508 sk_buff_data_t transport_header; 515 sk_buff_data_t transport_header;
509 sk_buff_data_t network_header; 516 sk_buff_data_t network_header;
510 sk_buff_data_t mac_header; 517 sk_buff_data_t mac_header;
@@ -570,7 +577,40 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
570 skb->_skb_refdst = (unsigned long)dst; 577 skb->_skb_refdst = (unsigned long)dst;
571} 578}
572 579
573extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst); 580extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
581 bool force);
582
583/**
584 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
585 * @skb: buffer
586 * @dst: dst entry
587 *
588 * Sets skb dst, assuming a reference was not taken on dst.
589 * If dst entry is cached, we do not take reference and dst_release
590 * will be avoided by refdst_drop. If dst entry is not cached, we take
591 * reference, so that last dst_release can destroy the dst immediately.
592 */
593static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
594{
595 __skb_dst_set_noref(skb, dst, false);
596}
597
598/**
599 * skb_dst_set_noref_force - sets skb dst, without taking reference
600 * @skb: buffer
601 * @dst: dst entry
602 *
603 * Sets skb dst, assuming a reference was not taken on dst.
604 * No reference is taken and no dst_release will be called. While for
605 * cached dsts deferred reclaim is a basic feature, for entries that are
606 * not cached it is caller's job to guarantee that last dst_release for
607 * provided dst happens when nobody uses it, eg. after a RCU grace period.
608 */
609static inline void skb_dst_set_noref_force(struct sk_buff *skb,
610 struct dst_entry *dst)
611{
612 __skb_dst_set_noref(skb, dst, true);
613}
574 614
575/** 615/**
576 * skb_dst_is_noref - Test if skb dst isn't refcounted 616 * skb_dst_is_noref - Test if skb dst isn't refcounted
@@ -611,6 +651,12 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
611 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE); 651 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
612} 652}
613 653
654extern struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
655static inline struct sk_buff *alloc_skb_head(gfp_t priority)
656{
657 return __alloc_skb_head(priority, -1);
658}
659
614extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); 660extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
615extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); 661extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
616extern struct sk_buff *skb_clone(struct sk_buff *skb, 662extern struct sk_buff *skb_clone(struct sk_buff *skb,
@@ -1471,6 +1517,7 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
1471 1517
1472static inline void skb_reset_inner_headers(struct sk_buff *skb) 1518static inline void skb_reset_inner_headers(struct sk_buff *skb)
1473{ 1519{
1520 skb->inner_mac_header = skb->mac_header;
1474 skb->inner_network_header = skb->network_header; 1521 skb->inner_network_header = skb->network_header;
1475 skb->inner_transport_header = skb->transport_header; 1522 skb->inner_transport_header = skb->transport_header;
1476} 1523}
@@ -1516,6 +1563,22 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb,
1516 skb->inner_network_header += offset; 1563 skb->inner_network_header += offset;
1517} 1564}
1518 1565
1566static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
1567{
1568 return skb->head + skb->inner_mac_header;
1569}
1570
1571static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
1572{
1573 skb->inner_mac_header = skb->data - skb->head;
1574}
1575
1576static inline void skb_set_inner_mac_header(struct sk_buff *skb,
1577 const int offset)
1578{
1579 skb_reset_inner_mac_header(skb);
1580 skb->inner_mac_header += offset;
1581}
1519static inline bool skb_transport_header_was_set(const struct sk_buff *skb) 1582static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
1520{ 1583{
1521 return skb->transport_header != ~0U; 1584 return skb->transport_header != ~0U;
@@ -1609,6 +1672,21 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb,
1609 skb->inner_network_header = skb->data + offset; 1672 skb->inner_network_header = skb->data + offset;
1610} 1673}
1611 1674
1675static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
1676{
1677 return skb->inner_mac_header;
1678}
1679
1680static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
1681{
1682 skb->inner_mac_header = skb->data;
1683}
1684
1685static inline void skb_set_inner_mac_header(struct sk_buff *skb,
1686 const int offset)
1687{
1688 skb->inner_mac_header = skb->data + offset;
1689}
1612static inline bool skb_transport_header_was_set(const struct sk_buff *skb) 1690static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
1613{ 1691{
1614 return skb->transport_header != NULL; 1692 return skb->transport_header != NULL;
@@ -1666,6 +1744,19 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1666} 1744}
1667#endif /* NET_SKBUFF_DATA_USES_OFFSET */ 1745#endif /* NET_SKBUFF_DATA_USES_OFFSET */
1668 1746
1747static inline void skb_probe_transport_header(struct sk_buff *skb,
1748 const int offset_hint)
1749{
1750 struct flow_keys keys;
1751
1752 if (skb_transport_header_was_set(skb))
1753 return;
1754 else if (skb_flow_dissect(skb, &keys))
1755 skb_set_transport_header(skb, keys.thoff);
1756 else
1757 skb_set_transport_header(skb, offset_hint);
1758}
1759
1669static inline void skb_mac_header_rebuild(struct sk_buff *skb) 1760static inline void skb_mac_header_rebuild(struct sk_buff *skb)
1670{ 1761{
1671 if (skb_mac_header_was_set(skb)) { 1762 if (skb_mac_header_was_set(skb)) {
@@ -2811,6 +2902,8 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
2811 2902
2812bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); 2903bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
2813 2904
2905u32 __skb_get_poff(const struct sk_buff *skb);
2906
2814/** 2907/**
2815 * skb_head_is_locked - Determine if the skb->head is locked down 2908 * skb_head_is_locked - Determine if the skb->head is locked down
2816 * @skb: skb to check 2909 * @skb: skb to check