/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef _NET_IPV6_GRO_H
#define _NET_IPV6_GRO_H

#include <linux/indirect_call_wrapper.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/skbuff.h>
#include <net/udp.h>

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void	*frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int	data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated. */
	u16	count;

	/* Start offset for remote checksum offload */
	u16	gro_remcsum_start;

	/* jiffies when first packet was created/queued */
	unsigned long age;

	/* Used in ipv6_gro_receive() and foo-over-udp */
	u16	proto;

	/* This is non-zero if the packet may be of the same flow. */
	u8	same_flow:1;

	/* Used in tunnel GRO receive */
	u8	encap_mark:1;

	/* GRO checksum is valid */
	u8	csum_valid:1;

	/* Number of checksums via CHECKSUM_UNNECESSARY */
	u8	csum_cnt:3;

	/* Free the skb? */
	u8	free:2;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* Used in foo-over-udp, set in udp[46]_gro_receive */
	u8	is_ipv6:1;

	/* Used in GRE, set in fou/gue_gro_receive */
	u8	is_fou:1;

	/* Used to determine if flush_id can be ignored */
	u8	is_atomic:1;

	/* Number of gro_receive callbacks this packet already went through */
	u8	recursion_counter:4;

	/* GRO is done by frag_list pointer chaining. */
	u8	is_flist:1;

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
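
/*
 * Usage sketch (not part of this file): a gro_receive handler typically
 * walks the per-bucket list and uses NAPI_GRO_CB() state to decide which
 * held packets can still match. "foo_hdr" below is a hypothetical helper;
 * only same_flow is a real field of struct napi_gro_cb.
 *
 *	list_for_each_entry(p, head, list) {
 *		if (!NAPI_GRO_CB(p)->same_flow)
 *			continue;
 *		if (foo_hdr(p) != foo_hdr(skb))
 *			NAPI_GRO_CB(p)->same_flow = 0;
 *	}
 */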

#define GRO_RECURSION_LIMIT 15
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}

typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
					       struct list_head *head,
					       struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(head, skb);
}
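
/*
 * Usage sketch: an encapsulation handler hands the inner packet to the next
 * protocol's gro_receive via call_gro_receive() so the shared recursion
 * counter is honoured. "foo_gro_receive", "struct foohdr" and
 * "inner_proto_gro_receive" are hypothetical names, not part of this API.
 *
 *	static struct sk_buff *foo_gro_receive(struct list_head *head,
 *					       struct sk_buff *skb)
 *	{
 *		skb_gro_pull(skb, sizeof(struct foohdr));
 *		return call_gro_receive(inner_proto_gro_receive, head, skb);
 *	}
 */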

typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
					    struct sk_buff *);
static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
						  struct sock *sk,
						  struct list_head *head,
						  struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(sk, head, skb);
}

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	skb_gro_frag0_invalidate(skb);
	return skb->data + offset;
}
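
/*
 * Usage sketch: headers are normally fetched from frag0 with
 * skb_gro_header_fast(); only when frag0 is too short does the caller fall
 * back to skb_gro_header_slow(), which pulls the data into the linear area.
 * udp_gro_udphdr() later in this file follows exactly this pattern; "hdr",
 * "off" and "hlen" are placeholders here.
 *
 *	hlen = off + sizeof(*hdr);
 *	hdr = skb_gro_header_fast(skb, off);
 *	if (skb_gro_header_hard(skb, hlen)) {
 *		hdr = skb_gro_header_slow(skb, hlen, off);
 *		if (!hdr)
 *			return NULL;
 *	}
 */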

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					  const void *start, unsigned int len)
{
	if (NAPI_GRO_CB(skb)->csum_valid)
		NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len,
						wsum_negate(NAPI_GRO_CB(skb)->csum)));
}
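
/*
 * Usage sketch: when a handler advances past a header it has consumed, the
 * CHECKSUM_COMPLETE value must be reduced by that header's contribution, so
 * skb_gro_pull() is paired with skb_gro_postpull_rcsum(). "hdr" and "hlen"
 * are hypothetical names for the consumed header and its length.
 *
 *	skb_gro_pull(skb, hlen);
 *	skb_gro_postpull_rcsum(skb, hdr, hlen);
 */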

/* GRO checksum functions. These are logical equivalents of the normal
 * checksum functions (in skbuff.h) except that they operate on the GRO
 * offsets and fields in sk_buff.
 */

__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);

static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}

static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						      bool zero_okay,
						      __sum16 check)
{
	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
		skb_checksum_start_offset(skb) <
		 skb_gro_offset(skb)) &&
		!skb_at_gro_remcsum_start(skb) &&
		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		(!zero_okay || check));
}

static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
							   __wsum psum)
{
	if (NAPI_GRO_CB(skb)->csum_valid &&
	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
		return 0;

	NAPI_GRO_CB(skb)->csum = psum;

	return __skb_gro_checksum_complete(skb);
}

static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* Consume a checksum from CHECKSUM_UNNECESSARY */
		NAPI_GRO_CB(skb)->csum_cnt--;
	} else {
		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
		 * verified a new top level checksum or an encapsulated one
		 * during GRO. This saves work if we fallback to normal path.
		 */
		__skb_incr_checksum_unnecessary(skb);
	}
}

#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
	if (!__ret)							\
		skb_gro_incr_csum_unnecessary(skb);			\
	__ret;								\
})

#define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

#define skb_gro_checksum_validate_zero_check(skb, proto, check,	\
					     compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

#define skb_gro_checksum_simple_validate(skb)				\
	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
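
/*
 * Usage sketch: a transport gro_receive path validates the checksum against
 * the pseudo-header before attempting to aggregate, and flushes on failure.
 * This mirrors how the UDP offload uses these macros, though the surrounding
 * code ("uh", the flush label) is illustrative only.
 *
 *	if (NAPI_GRO_CB(skb)->flush)
 *		goto flush;
 *	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
 *						 inet_gro_compute_pseudo))
 *		goto flush;
 */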

static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		!NAPI_GRO_CB(skb)->csum_valid);
}

static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __wsum pseudo)
{
	NAPI_GRO_CB(skb)->csum = ~pseudo;
	NAPI_GRO_CB(skb)->csum_valid = 1;
}

#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb,				\
					   compute_pseudo(skb, proto));	\
} while (0)
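
/*
 * Usage sketch: after a zero-checksum check has passed, a handler may
 * opportunistically convert CHECKSUM_UNNECESSARY into a complete checksum
 * for the benefit of inner protocols. Illustrative call only; "uh" is a
 * placeholder for a previously fetched UDP header.
 *
 *	if (uh->check)
 *		skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
 *					     inet_gro_compute_pseudo);
 */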

struct gro_remcsum {
	int offset;
	__wsum delta;
};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
	grc->offset = 0;
	grc->delta = 0;
}

static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}

	ptr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, off + plen)) {
		ptr = skb_gro_header_slow(skb, off + plen, off);
		if (!ptr)
			return NULL;
	}

	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);

	/* Adjust skb->csum since we changed the packet */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}

static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
					   struct gro_remcsum *grc)
{
	void *ptr;
	size_t plen = grc->offset + sizeof(u16);

	if (!grc->delta)
		return;

	ptr = skb_gro_header_fast(skb, grc->offset);
	if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
		ptr = skb_gro_header_slow(skb, plen, grc->offset);
		if (!ptr)
			return;
	}

	remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
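
/*
 * Usage sketch of the remote-checksum-offload lifecycle, loosely modelled on
 * the fou/gue and vxlan receive paths (variable names and offsets are
 * hypothetical): init a gro_remcsum on the stack, let
 * skb_gro_remcsum_process() patch the packet, then undo the edit via
 * skb_gro_flush_final_remcsum() if the packet is not consumed by GRO.
 *
 *	struct gro_remcsum grc;
 *
 *	skb_gro_remcsum_init(&grc);
 *	ptr = skb_gro_remcsum_process(skb, ptr, off, hdrlen,
 *				      start, offset, &grc, nopartial);
 *	if (!ptr)
 *		goto flush;
 *	...
 *	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 */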

#ifdef CONFIG_XFRM_OFFLOAD
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	if (PTR_ERR(pp) != -EINPROGRESS)
		NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	if (PTR_ERR(pp) != -EINPROGRESS) {
		NAPI_GRO_CB(skb)->flush |= flush;
		skb_gro_remcsum_cleanup(skb, grc);
		skb->remcsum_offload = 0;
	}
}
#else
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, grc);
	skb->remcsum_offload = 0;
}
#endif

INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));

#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_INET(cb, f2, f1, head, skb);	\
})
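
/*
 * Usage sketch: callers dispatch through this macro so that, with
 * retpolines enabled, the common IPv6/IPv4 targets are reached via direct
 * calls while the recursion counter is still enforced. The call below is
 * illustrative; "ops" stands for a looked-up offload callbacks structure.
 *
 *	pp = indirect_call_gro_receive_inet(ops->callbacks.gro_receive,
 *					    ipv6_gro_receive,
 *					    inet_gro_receive, head, skb);
 */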

struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		uh = skb_gro_header_slow(skb, hlen, off);

	return uh;
}

static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
					    skb_gro_len(skb), proto, 0));
}

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);

/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static inline void gro_normal_list(struct napi_struct *napi)
{
	if (!napi->rx_count)
		return;
	netif_receive_skb_list_internal(&napi->rx_list);
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
}

/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
 * pass the whole batch up to the stack.
 */
static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
{
	list_add_tail(&skb->list, &napi->rx_list);
	napi->rx_count += segs;
	if (napi->rx_count >= gro_normal_batch)
		gro_normal_list(napi);
}
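
/*
 * Usage sketch: GRO calls gro_normal_one() for each skb it decides to pass
 * up unaggregated (GRO_NORMAL), so delivery happens in batches of
 * gro_normal_batch segments; any remainder on napi->rx_list is flushed when
 * the NAPI poll completes. Illustrative call only, with one segment:
 *
 *	gro_normal_one(napi, skb, 1);
 */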

#endif /* _NET_IPV6_GRO_H */