Diffstat (limited to 'include/linux/skbuff.h')

 include/linux/skbuff.h | 46 +++++++++++++++++++++++++++++++++++++---------
 1 file changed, 37 insertions(+), 9 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3597b4f14389..19c96d498e20 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -604,12 +604,17 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
 	return list_->qlen;
 }
 
-extern struct lock_class_key skb_queue_lock_key;
-
+/*
+ * This function creates a split out lock class for each invocation;
+ * this is needed for now since a whole lot of users of the skb-queue
+ * infrastructure in drivers have different locking usage (in hardirq)
+ * than the networking core (in softirq only). In the long run either the
+ * network layer or drivers should need annotation to consolidate the
+ * main types of usage into 3 classes.
+ */
 static inline void skb_queue_head_init(struct sk_buff_head *list)
 {
 	spin_lock_init(&list->lock);
-	lockdep_set_class(&list->lock, &skb_queue_lock_key);
 	list->prev = list->next = (struct sk_buff *)list;
 	list->qlen = 0;
 }
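Review note: the hunk above works because, with lockdep enabled, spin_lock_init() expands to declare a static struct lock_class_key at each expansion site, so every caller of this inline init function gets its own lock class. A minimal sketch of the effect (the driver names and variables below are hypothetical, not from this patch):

	#include <linux/skbuff.h>

	/* Driver A takes its queue lock from hardirq context. */
	static struct sk_buff_head a_txq;

	static void a_setup(void)
	{
		skb_queue_head_init(&a_txq);	/* inlined here: lock class #1 */
	}

	/* The networking core locks its queues in softirq context only. */
	static struct sk_buff_head b_rxq;

	static void b_setup(void)
	{
		skb_queue_head_init(&b_rxq);	/* inlined here: lock class #2 */
	}

Before this change both queues were forced into the single shared skb_queue_lock_key class, so lockdep reported the hardirq/softirq usage mismatch as a (false) deadlock candidate.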
@@ -1066,9 +1071,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
 		kfree_skb(skb);
 }
 
-#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB
 /**
- *	__dev_alloc_skb - allocate an skbuff for sending
+ *	__dev_alloc_skb - allocate an skbuff for receiving
  *	@length: length to allocate
  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
  *
@@ -1087,12 +1091,9 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
 	skb_reserve(skb, NET_SKB_PAD);
 	return skb;
 }
-#else
-extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
-#endif
 
 /**
- *	dev_alloc_skb - allocate an skbuff for sending
+ *	dev_alloc_skb - allocate an skbuff for receiving
  *	@length: length to allocate
  *
  *	Allocate a new &sk_buff and assign it a usage count of one. The
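The "sending" to "receiving" corrections above match actual usage: these helpers reserve NET_SKB_PAD bytes of headroom and are meant for rx buffers. A hypothetical allocation in a receive path (RX_BUF_LEN is a made-up constant; NET_IP_ALIGN is the usual IP-header alignment pad):

	struct sk_buff *skb;

	skb = dev_alloc_skb(RX_BUF_LEN + NET_IP_ALIGN);	/* GFP_ATOMIC under the hood */
	if (skb == NULL)
		return -ENOMEM;		/* no free memory; caller may be in irq context */

	skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header past the 14-byte MAC header */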
@@ -1108,6 +1109,28 @@ static inline struct sk_buff *dev_alloc_skb(unsigned int length)
 	return __dev_alloc_skb(length, GFP_ATOMIC);
 }
 
+extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+		unsigned int length, gfp_t gfp_mask);
+
+/**
+ *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ *	@dev: network device to receive on
+ *	@length: length to allocate
+ *
+ *	Allocate a new &sk_buff and assign it a usage count of one. The
+ *	buffer has unspecified headroom built in. Users should allocate
+ *	the headroom they think they need without accounting for the
+ *	built in space. The built in space is used for optimisations.
+ *
+ *	%NULL is returned if there is no free memory. Although this function
+ *	allocates memory it can be called from an interrupt.
+ */
+static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
+		unsigned int length)
+{
+	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
+}
+
 /**
  *	skb_cow - copy header of skb when it is required
  *	@skb: buffer to cow
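A note on the new helper: unlike dev_alloc_skb(), netdev_alloc_skb() also associates the buffer with the receiving device (the out-of-line __netdev_alloc_skb() sets skb->dev, as I read the companion net/core patch). A sketch of an rx-ring refill using it, with made-up driver names:

	static int my_nic_refill_one(struct net_device *dev)
	{
		struct sk_buff *skb;

		skb = netdev_alloc_skb(dev, RX_BUF_LEN + NET_IP_ALIGN);
		if (skb == NULL)
			return -ENOMEM;	/* GFP_ATOMIC is used, so this is irq-safe */

		skb_reserve(skb, NET_IP_ALIGN);
		/* map skb->data for DMA and post the descriptor to the hardware ring */
		return 0;
	}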
@@ -1455,5 +1478,10 @@ static inline void skb_init_secmark(struct sk_buff *skb)
 { }
 #endif
 
+static inline int skb_is_gso(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_size;
+}
+
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
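Last, skb_is_gso() reads like a predicate but returns skb_shinfo(skb)->gso_size itself; any non-zero segment size marks the skb as GSO. Drivers that open-code the gso_size test can switch to it, e.g. in a transmit path (the my_hw_* and my_sw_* helpers are illustrative only):

	if (skb_is_gso(skb) && !my_hw_can_tso(dev, skb))
		return my_sw_segment_xmit(dev, skb);	/* fall back to software segmentation */

	return my_hw_xmit(dev, skb);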