Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r-- | net/core/skbuff.c | 219
1 file changed, 77 insertions, 142 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 95294d8..0effa7b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -476,18 +476,6 @@ void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
 }
 EXPORT_SYMBOL(skb_add_rx_frag);
 
-void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
-			  unsigned int truesize)
-{
-	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-	skb_frag_size_add(frag, size);
-	skb->len += size;
-	skb->data_len += size;
-	skb->truesize += truesize;
-}
-EXPORT_SYMBOL(skb_coalesce_rx_frag);
-
 static void skb_drop_list(struct sk_buff **listp)
 {
 	kfree_skb_list(*listp);
@@ -592,6 +580,9 @@ static void skb_release_head_state(struct sk_buff *skb)
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	nf_conntrack_put(skb->nfct);
 #endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
+	nf_conntrack_put_reasm(skb->nfct_reasm);
+#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	nf_bridge_put(skb->nf_bridge);
 #endif
@@ -961,9 +952,6 @@ EXPORT_SYMBOL(skb_clone);
 
 static void skb_headers_offset_update(struct sk_buff *skb, int off)
 {
-	/* Only adjust this if it actually is csum_start rather than csum */
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
-		skb->csum_start += off;
 	/* {transport,network,mac}_header and tail are relative to skb->head */
 	skb->transport_header += off;
 	skb->network_header += off;
@@ -1097,8 +1085,8 @@ EXPORT_SYMBOL(__pskb_copy);
  *	@ntail: room to add at tail
  *	@gfp_mask: allocation priority
  *
- *	Expands (or creates identical copy, if @nhead and @ntail are zero)
- *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
+ *	Expands (or creates identical copy, if &nhead and &ntail are zero)
+ *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
  *	reference count of 1. Returns zero in the case of success or error,
  *	if expansion failed. In the last case, &sk_buff is not changed.
  *
@@ -1170,6 +1158,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 #endif
 	skb->tail += off;
 	skb_headers_offset_update(skb, nhead);
+	/* Only adjust this if it actually is csum_start rather than csum */
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		skb->csum_start += nhead;
 	skb->cloned = 0;
 	skb->hdr_len = 0;
 	skb->nohdr = 0;
@@ -1234,6 +1225,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 					NUMA_NO_NODE);
 	int oldheadroom = skb_headroom(skb);
 	int head_copy_len, head_copy_off;
+	int off;
 
 	if (!n)
 		return NULL;
@@ -1257,7 +1249,11 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 
 	copy_skb_header(n, skb);
 
-	skb_headers_offset_update(n, newheadroom - oldheadroom);
+	off = newheadroom - oldheadroom;
+	if (n->ip_summed == CHECKSUM_PARTIAL)
+		n->csum_start += off;
+
+	skb_headers_offset_update(n, off);
 
 	return n;
 }
@@ -1310,29 +1306,6 @@ free_skb:
 EXPORT_SYMBOL(skb_pad);
 
 /**
- *	pskb_put - add data to the tail of a potentially fragmented buffer
- *	@skb: start of the buffer to use
- *	@tail: tail fragment of the buffer to use
- *	@len: amount of data to add
- *
- *	This function extends the used data area of the potentially
- *	fragmented buffer. @tail must be the last fragment of @skb -- or
- *	@skb itself. If this would exceed the total buffer size the kernel
- *	will panic. A pointer to the first byte of the extra data is
- *	returned.
- */
-
-unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
-{
-	if (tail != skb) {
-		skb->data_len += len;
-		skb->len += len;
-	}
-	return skb_put(tail, len);
-}
-EXPORT_SYMBOL_GPL(pskb_put);
-
-/**
  *	skb_put - add data to a buffer
  *	@skb: buffer to use
  *	@len: amount of data to add
@@ -2009,8 +1982,9 @@ fault:
 EXPORT_SYMBOL(skb_store_bits);
 
 /* Checksum skb data. */
-__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
-		      __wsum csum, const struct skb_checksum_ops *ops)
+
+__wsum skb_checksum(const struct sk_buff *skb, int offset,
+		    int len, __wsum csum)
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
@@ -2021,7 +1995,7 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
 	if (copy > 0) {
 		if (copy > len)
 			copy = len;
-		csum = ops->update(skb->data + offset, copy, csum);
+		csum = csum_partial(skb->data + offset, copy, csum);
 		if ((len -= copy) == 0)
 			return csum;
 		offset += copy;
@@ -2042,10 +2016,10 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
 			if (copy > len)
 				copy = len;
 			vaddr = kmap_atomic(skb_frag_page(frag));
-			csum2 = ops->update(vaddr + frag->page_offset +
-					    offset - start, copy, 0);
+			csum2 = csum_partial(vaddr + frag->page_offset +
+					     offset - start, copy, 0);
 			kunmap_atomic(vaddr);
-			csum = ops->combine(csum, csum2, pos, copy);
+			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
 				return csum;
 			offset += copy;
@@ -2064,9 +2038,9 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
 			__wsum csum2;
 			if (copy > len)
 				copy = len;
-			csum2 = __skb_checksum(frag_iter, offset - start,
-					       copy, 0, ops);
-			csum = ops->combine(csum, csum2, pos, copy);
+			csum2 = skb_checksum(frag_iter, offset - start,
+					     copy, 0);
+			csum = csum_block_add(csum, csum2, pos);
 			if ((len -= copy) == 0)
 				return csum;
 			offset += copy;
@@ -2078,18 +2052,6 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
 
 	return csum;
 }
-EXPORT_SYMBOL(__skb_checksum);
-
-__wsum skb_checksum(const struct sk_buff *skb, int offset,
-		    int len, __wsum csum)
-{
-	const struct skb_checksum_ops ops = {
-		.update  = csum_partial_ext,
-		.combine = csum_block_add_ext,
-	};
-
-	return __skb_checksum(skb, offset, len, csum, &ops);
-}
 EXPORT_SYMBOL(skb_checksum);
 
 /* Both of above in one bottle. */
@@ -2609,14 +2571,14 @@ EXPORT_SYMBOL(skb_prepare_seq_read);
  * @data: destination pointer for data to be returned
  * @st: state variable
  *
- * Reads a block of skb data at @consumed relative to the
+ * Reads a block of skb data at &consumed relative to the
  * lower offset specified to skb_prepare_seq_read(). Assigns
- * the head of the data block to @data and returns the length
+ * the head of the data block to &data and returns the length
  * of the block or 0 if the end of the skb data or the upper
  * offset has been reached.
  *
  * The caller is not required to consume all of the data
- * returned, i.e. @consumed is typically set to the number
+ * returned, i.e. &consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
@@ -2845,7 +2807,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 	struct sk_buff *segs = NULL;
 	struct sk_buff *tail = NULL;
 	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
-	skb_frag_t *skb_frag = skb_shinfo(skb)->frags;
 	unsigned int mss = skb_shinfo(skb)->gso_size;
 	unsigned int doffset = skb->data - skb_mac_header(skb);
 	unsigned int offset = doffset;
@@ -2885,38 +2846,16 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 		if (hsize > len || !sg)
 			hsize = len;
 
-		if (!hsize && i >= nfrags && skb_headlen(fskb) &&
-		    (skb_headlen(fskb) == len || sg)) {
-			BUG_ON(skb_headlen(fskb) > len);
-
-			i = 0;
-			nfrags = skb_shinfo(fskb)->nr_frags;
-			skb_frag = skb_shinfo(fskb)->frags;
-			pos += skb_headlen(fskb);
-
-			while (pos < offset + len) {
-				BUG_ON(i >= nfrags);
-
-				size = skb_frag_size(skb_frag);
-				if (pos + size > offset + len)
-					break;
-
-				i++;
-				pos += size;
-				skb_frag++;
-			}
-
+		if (!hsize && i >= nfrags) {
+			BUG_ON(fskb->len != len);
+
+			pos += len;
 			nskb = skb_clone(fskb, GFP_ATOMIC);
 			fskb = fskb->next;
 
 			if (unlikely(!nskb))
 				goto err;
 
-			if (unlikely(pskb_trim(nskb, len))) {
-				kfree_skb(nskb);
-				goto err;
-			}
-
 			hsize = skb_end_offset(nskb);
 			if (skb_cow_head(nskb, doffset + headroom)) {
 				kfree_skb(nskb);
@@ -2947,13 +2886,20 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 		__copy_skb_header(nskb, skb);
 		nskb->mac_len = skb->mac_len;
 
-		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
+		/* nskb and skb might have different headroom */
+		if (nskb->ip_summed == CHECKSUM_PARTIAL)
+			nskb->csum_start += skb_headroom(nskb) - headroom;
+
+		skb_reset_mac_header(nskb);
+		skb_set_network_header(nskb, skb->mac_len);
+		nskb->transport_header = (nskb->network_header +
+					  skb_network_header_len(skb));
 
 		skb_copy_from_linear_data_offset(skb, -tnl_hlen,
 						 nskb->data - tnl_hlen,
 						 doffset + tnl_hlen);
 
-		if (nskb->len == len + doffset)
+		if (fskb != skb_shinfo(skb)->frag_list)
 			goto perform_csum_check;
 
 		if (!sg) {
@@ -2971,28 +2917,8 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 		skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags &
 			SKBTX_SHARED_FRAG;
 
-		while (pos < offset + len) {
-			if (i >= nfrags) {
-				BUG_ON(skb_headlen(fskb));
-
-				i = 0;
-				nfrags = skb_shinfo(fskb)->nr_frags;
-				skb_frag = skb_shinfo(fskb)->frags;
-
-				BUG_ON(!nfrags);
-
-				fskb = fskb->next;
-			}
-
-			if (unlikely(skb_shinfo(nskb)->nr_frags >=
-				     MAX_SKB_FRAGS)) {
-				net_warn_ratelimited(
-					"skb_segment: too many frags: %u %u\n",
-					pos, mss);
-				goto err;
-			}
-
-			*frag = *skb_frag;
+		while (pos < offset + len && i < nfrags) {
+			*frag = skb_shinfo(skb)->frags[i];
 			__skb_frag_ref(frag);
 			size = skb_frag_size(frag);
@@ -3005,7 +2931,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 
 			if (pos + size <= offset + len) {
 				i++;
-				skb_frag++;
 				pos += size;
 			} else {
 				skb_frag_size_sub(frag, pos + size - (offset + len));
@@ -3015,6 +2940,25 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 			frag++;
 		}
 
+		if (pos < offset + len) {
+			struct sk_buff *fskb2 = fskb;
+
+			BUG_ON(pos + fskb->len != offset + len);
+
+			pos += fskb->len;
+			fskb = fskb->next;
+
+			if (fskb2->next) {
+				fskb2 = skb_clone(fskb2, GFP_ATOMIC);
+				if (!fskb2)
+					goto err;
+			} else
+				skb_get(fskb2);
+
+			SKB_FRAG_ASSERT(nskb);
+			skb_shinfo(nskb)->frag_list = fskb2;
+		}
+
 skip_fraglist:
 		nskb->data_len = len - hsize;
 		nskb->len += nskb->data_len;
@@ -3041,30 +2985,32 @@
 EXPORT_SYMBOL_GPL(skb_segment);
 
 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 {
-	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
+	struct sk_buff *p = *head;
+	struct sk_buff *nskb;
+	struct skb_shared_info *skbinfo = skb_shinfo(skb);
+	struct skb_shared_info *pinfo = skb_shinfo(p);
+	unsigned int headroom;
+	unsigned int len = skb_gro_len(skb);
 	unsigned int offset = skb_gro_offset(skb);
 	unsigned int headlen = skb_headlen(skb);
-	struct sk_buff *nskb, *lp, *p = *head;
-	unsigned int len = skb_gro_len(skb);
 	unsigned int delta_truesize;
-	unsigned int headroom;
 
-	if (unlikely(p->len + len >= 65536))
+	if (p->len + len >= 65536)
 		return -E2BIG;
 
-	lp = NAPI_GRO_CB(p)->last ?: p;
-	pinfo = skb_shinfo(lp);
-
-	if (headlen <= offset) {
+	if (pinfo->frag_list)
+		goto merge;
+	else if (headlen <= offset) {
 		skb_frag_t *frag;
 		skb_frag_t *frag2;
 		int i = skbinfo->nr_frags;
 		int nr_frags = pinfo->nr_frags + i;
 
+		offset -= headlen;
+
 		if (nr_frags > MAX_SKB_FRAGS)
-			goto merge;
+			return -E2BIG;
 
-		offset -= headlen;
 		pinfo->nr_frags = nr_frags;
 		skbinfo->nr_frags = 0;
@@ -3095,7 +3041,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 		unsigned int first_offset;
 
 		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
-			goto merge;
+			return -E2BIG;
 
 		first_offset = skb->data -
 			       (unsigned char *)page_address(page) +
@@ -3113,10 +3059,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 		delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
 		goto done;
-	}
-	if (pinfo->frag_list)
-		goto merge;
-	if (skb_gro_len(p) != pinfo->gso_size)
+	} else if (skb_gro_len(p) != pinfo->gso_size)
 		return -E2BIG;
 
 	headroom = skb_headroom(p);
@@ -3168,24 +3111,16 @@ merge:
 
 	__skb_pull(skb, offset);
 
-	if (!NAPI_GRO_CB(p)->last)
-		skb_shinfo(p)->frag_list = skb;
-	else
-		NAPI_GRO_CB(p)->last->next = skb;
+	NAPI_GRO_CB(p)->last->next = skb;
 	NAPI_GRO_CB(p)->last = skb;
 	skb_header_release(skb);
-	lp = p;
 
 done:
 	NAPI_GRO_CB(p)->count++;
 	p->data_len += len;
 	p->truesize += delta_truesize;
 	p->len += len;
-	if (lp != p) {
-		lp->data_len += len;
-		lp->truesize += delta_truesize;
-		lp->len += len;
-	}
+
 	NAPI_GRO_CB(skb)->same_flow = 1;
 	return 0;
 }
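
A side note for readers comparing the checksum hunks above: the removed __skb_checksum() routed the per-chunk arithmetic through a small ops table (.update/.combine callbacks), while the restored skb_checksum() calls csum_partial() and csum_block_add() directly. The stand-alone C sketch below only mimics that ops-table shape over a flat user-space buffer; struct chunk_ops, sum_update(), sum_combine() and checksum_chunks() are illustrative names invented here, the arithmetic is a toy byte sum rather than the kernel's ones-complement checksum, and none of this is kernel API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the removed struct skb_checksum_ops: one
 * callback folds a chunk into a partial sum, the other combines two
 * partial sums into the running total. */
struct chunk_ops {
	uint32_t (*update)(const uint8_t *data, size_t len, uint32_t sum);
	uint32_t (*combine)(uint32_t sum, uint32_t sum2);
};

/* Toy "checksum": byte-wise additive sum, not the Internet checksum. */
static uint32_t sum_update(const uint8_t *data, size_t len, uint32_t sum)
{
	while (len--)
		sum += *data++;
	return sum;
}

static uint32_t sum_combine(uint32_t sum, uint32_t sum2)
{
	return sum + sum2;
}

/* Walk the buffer in fixed-size chunks through the ops table, loosely
 * mirroring how __skb_checksum() walked the linear area, page frags and
 * frag_list via ops->update()/ops->combine(). */
static uint32_t checksum_chunks(const uint8_t *buf, size_t len, size_t chunk,
				const struct chunk_ops *ops)
{
	uint32_t sum = 0;

	while (len) {
		size_t n = len < chunk ? len : chunk;
		uint32_t part = ops->update(buf, n, 0);

		sum = ops->combine(sum, part);
		buf += n;
		len -= n;
	}
	return sum;
}

int main(void)
{
	static const uint8_t payload[] = "abcdefghij";
	const struct chunk_ops ops = {
		.update  = sum_update,
		.combine = sum_combine,
	};

	printf("sum = %u\n",
	       (unsigned)checksum_chunks(payload, sizeof(payload) - 1, 4, &ops));
	return 0;
}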