-rw-r--r--	include/linux/skbuff.h	24
-rw-r--r--	net/core/skbuff.c	47
2 files changed, 71 insertions, 0 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 320e976..e401875 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -602,6 +602,9 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
 }
 
+extern void skb_recycle(struct sk_buff *skb);
+extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
+
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
@@ -2738,6 +2741,27 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
 
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
 
+static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
+{
+	if (irqs_disabled())
+		return false;
+
+	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
+		return false;
+
+	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+		return false;
+
+	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
+	if (skb_end_offset(skb) < skb_size)
+		return false;
+
+	if (skb_shared(skb) || skb_cloned(skb))
+		return false;
+
+	return true;
+}
+
 /**
  * skb_head_is_locked - Determine if the skb->head is locked down
  * @skb: skb to check
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 32443eb..f4a73cb 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -675,6 +675,53 @@ void consume_skb(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(consume_skb);
 
+/**
+ * skb_recycle - clean up an skb for reuse
+ * @skb: buffer
+ *
+ * Recycles the skb to be reused as a receive buffer. This
+ * function does any necessary reference count dropping, and
+ * cleans up the skbuff as if it just came from __alloc_skb().
+ */
+void skb_recycle(struct sk_buff *skb)
+{
+	struct skb_shared_info *shinfo;
+
+	skb_release_head_state(skb);
+
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	skb->data = skb->head + NET_SKB_PAD;
+	skb_reset_tail_pointer(skb);
+}
+EXPORT_SYMBOL(skb_recycle);
+
+/**
+ * skb_recycle_check - check if skb can be reused for receive
+ * @skb: buffer
+ * @skb_size: minimum receive buffer size
+ *
+ * Checks that the skb passed in is not shared or cloned, and
+ * that it is linear and its head portion at least as large as
+ * skb_size so that it can be recycled as a receive buffer.
+ * If these conditions are met, this function does any necessary
+ * reference count dropping and cleans up the skbuff as if it
+ * just came from __alloc_skb().
+ */
+bool skb_recycle_check(struct sk_buff *skb, int skb_size)
+{
+	if (!skb_is_recycleable(skb, skb_size))
+		return false;
+
+	skb_recycle(skb);
+
+	return true;
+}
+EXPORT_SYMBOL(skb_recycle_check);
+
 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 {
 	new->tstamp = old->tstamp;
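
The patch itself does not show a caller, but the intended user of skb_recycle_check() is an Ethernet driver's transmit-completion path: a clean, just-transmitted skb is parked on a driver-private queue and reused when the receive ring is refilled, instead of being freed and reallocated. The sketch below is illustrative only; struct my_priv, its rx_recycle queue, MY_RX_BUF_SIZE and MY_RECYCLE_MAX are hypothetical driver-side names, not part of this patch, and the unlocked __skb_queue_head()/__skb_dequeue() helpers assume the tx-completion and rx-refill paths are serialized (for example, both run from the same NAPI poll).

/*
 * Illustrative sketch only: my_priv, rx_recycle, MY_RX_BUF_SIZE and
 * MY_RECYCLE_MAX are hypothetical names.  The only interface taken
 * from this patch is skb_recycle_check(); everything else is the
 * usual skbuff/queue API.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_RX_BUF_SIZE	1536	/* hypothetical receive buffer size */
#define MY_RECYCLE_MAX	64	/* hypothetical cap on the recycle pool */

struct my_priv {
	struct net_device	*ndev;
	struct sk_buff_head	rx_recycle;	/* pool of recyclable skbs */
};

/* Called once at open time: set up the recycle pool. */
static void my_recycle_init(struct my_priv *priv)
{
	skb_queue_head_init(&priv->rx_recycle);
}

/* Tx-completion path: keep a reusable skb instead of freeing it. */
static void my_tx_complete(struct my_priv *priv, struct sk_buff *skb)
{
	if (skb_queue_len(&priv->rx_recycle) < MY_RECYCLE_MAX &&
	    skb_recycle_check(skb, MY_RX_BUF_SIZE))
		__skb_queue_head(&priv->rx_recycle, skb);
	else
		dev_kfree_skb_any(skb);
}

/* Rx-refill path: prefer a recycled skb over a fresh allocation. */
static struct sk_buff *my_new_rx_skb(struct my_priv *priv)
{
	struct sk_buff *skb = __skb_dequeue(&priv->rx_recycle);

	if (!skb)
		skb = netdev_alloc_skb(priv->ndev, MY_RX_BUF_SIZE);
	return skb;
}

On the recycle path skb_recycle() resets skb->data to skb->head + NET_SKB_PAD, i.e. the buffer ends up with the same headroom a fresh netdev_alloc_skb() would reserve, so the refill code can treat recycled and newly allocated skbs identically.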