author    Andy Fleming <afleming@freescale.com>    2011-10-13 00:33:54 -0400
committer David S. Miller <davem@davemloft.net>    2011-10-19 15:59:45 -0400
commit    3d153a7c8b23031df35e61377c0600a6c96a8b0b (patch)
tree      f5c7f622331728ba12f40c2ebb66666ce3d924fd /include/linux
parent    1e5c22cde3b85737921d3ec6ecf2c356e5b64ea7 (diff)
net: Allow skb_recycle_check to be done in stages
skb_recycle_check() resets the skb if it is eligible for recycling. However, there are times when a driver might want to do some work on the skb data after it has determined eligibility, but before resetting the skb. We allow this by splitting the eligibility check from the skb reset, creating two functions to accomplish that task.

Signed-off-by: Andy Fleming <afleming@freescale.com>
Acked-by: David Daney <david.daney@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
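For illustration only, a driver's receive-completion path might use the split API roughly as sketched below. This is not part of the patch; my_rx_ring, ring->rx_buffer_size and my_rx_ring_refill() are hypothetical placeholders, and only skb_is_recycleable()/skb_recycle() come from this change.

/* Hypothetical driver path: test eligibility first, read whatever is
 * still needed out of the old buffer, then reset the skb and return
 * it to the RX ring.
 */
static void my_driver_rx_complete(struct my_rx_ring *ring, struct sk_buff *skb)
{
	if (!skb_is_recycleable(skb, ring->rx_buffer_size)) {
		dev_kfree_skb_any(skb);		/* not reusable, free normally */
		return;
	}

	/* Eligibility is known, but skb->data is still intact, so the
	 * driver can harvest timestamps or statistics from it here.
	 */

	skb_recycle(skb);			/* reset the skb for reuse */
	my_rx_ring_refill(ring, skb);		/* hypothetical: back onto the ring */
}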
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/skbuff.h | 21 +
1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 6fcbbbd12ceb..77ddf2de712f 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -550,6 +550,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
 }
 
+extern void skb_recycle(struct sk_buff *skb);
 extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
 
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
@@ -2484,5 +2485,25 @@ static inline void skb_checksum_none_assert(struct sk_buff *skb)
 
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
 
+static inline bool skb_is_recycleable(struct sk_buff *skb, int skb_size)
+{
+	if (irqs_disabled())
+		return false;
+
+	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
+		return false;
+
+	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+		return false;
+
+	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
+	if (skb_end_pointer(skb) - skb->head < skb_size)
+		return false;
+
+	if (skb_shared(skb) || skb_cloned(skb))
+		return false;
+
+	return true;
+}
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
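The matching net/core/skbuff.c change is not shown here, since this diffstat is limited to include/linux. Presumably skb_recycle_check() is now expressed in terms of the two new helpers, roughly along these lines:

bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	/* Eligibility test and reset, back to back, for callers that
	 * have no work to do in between.
	 */
	if (!skb_is_recycleable(skb, skb_size))
		return false;

	skb_recycle(skb);

	return true;
}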