net/core/gro.c
// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>
#include <linux/skbuff_ref.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);
/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&packet_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &net_hotdata.offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
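
/* A minimal usage sketch. The ETH_P_FOO protocol and the foo_* handlers are
 * hypothetical, shown only to illustrate how a protocol registers its GRO
 * callbacks; they are not part of this file.
 *
 *	static struct packet_offload foo_offload __read_mostly = {
 *		.type		= cpu_to_be16(ETH_P_FOO),
 *		.callbacks	= {
 *			.gro_receive	= foo_gro_receive,
 *			.gro_complete	= foo_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&foo_offload);
 */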

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &packet_offload
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &packet_offload is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

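/* Try to coalesce @skb into the pending GRO packet @p: append its payload as
 * page frags when possible (stealing a page-backed head if needed), otherwise
 * fall through to frag_list chaining at the merge: label below. Returns 0 on
 * success with ->same_flow set, -E2BIG when the merged packet would exceed
 * the device GRO size limits, or -ETOOMANYREFS when page-pool and
 * non-page-pool skbs would be mixed.
 */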
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int gro_max_size;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* Do not splice page pool based packets w/ non-page pool
	 * packets. This can result in reference count issues as page
	 * pool pages will not decrement the reference count and will
	 * instead be immediately returned to the pool or have frag
	 * count decremented.
	 */
	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;

	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
	gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
			READ_ONCE(p->dev->gro_max_size) :
			READ_ONCE(p->dev->gro_ipv4_max_size);

	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
		    (p->protocol == htons(ETH_P_IPV6) &&
		     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);
	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments' truesize: remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		skb_frag_fill_page_desc(frag, page, first_offset, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

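/* Neither frag-append path applied: chain the whole skb onto the aggregate
 * packet's frag_list instead.
 */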
merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}

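/* Append @skb to @p's frag_list without coalescing payload into page frags,
 * as used by callers like the UDP/TCP fraglist GRO paths; the aggregate
 * length is capped below 64K.
 */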
int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
{
	if (unlikely(p->len + skb->len >= 65536))
		return -E2BIG;

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;

	skb_pull(skb, skb_gro_offset(skb));

	NAPI_GRO_CB(p)->last = skb;
	NAPI_GRO_CB(p)->count++;
	p->data_len += skb->len;

	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	p->truesize += skb->truesize;
	p->len += skb->len;

	NAPI_GRO_CB(skb)->same_flow = 1;

	return 0;
}

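/* Hand a finished GRO packet to the stack: let the matching ->gro_complete()
 * callback finalize the aggregated headers (skipped when nothing was merged),
 * then deliver via gro_normal_one().
 */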
static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

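/* Flush one GRO hash bucket, oldest packets first. With @flush_old, stop at
 * the first packet whose age is the current jiffy, since younger entries may
 * still grow.
 */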
static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age, with the youngest
 * packets at the head of the list.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);

static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
					     const struct sk_buff *p,
					     unsigned long diffs)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *skb_ext;
	struct tc_skb_ext *p_ext;

	skb_ext = skb_ext_find(skb, TC_SKB_EXT);
	p_ext = skb_ext_find(p, TC_SKB_EXT);

	diffs |= (!!p_ext) ^ (!!skb_ext);
	if (!diffs && unlikely(skb_ext))
		diffs |= p_ext->chain ^ skb_ext->chain;
#endif
	return diffs;
}

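/* Walk one hash bucket and mark each held packet's ->same_flow according to
 * whether it could belong to the same flow as @skb: same device, VLAN tag,
 * metadata and MAC header, plus the rarer socket/dst/conntrack/tc checks on
 * the slow_gro path.
 */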
static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* In the most common scenarios 'slow_gro' is 0; otherwise
		 * we are already on some slower paths, so either skip all
		 * the infrequent tests altogether or avoid trying too hard
		 * to skip each of them individually.
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

			diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

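/* Prime NAPI_GRO_CB() for a freshly received skb. When the linear area is
 * empty, map the first page fragment as 'frag0' so that GRO header parsing
 * can read headers directly from the fragment instead of pulling them into
 * skb->data up front.
 */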
static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo;
	const skb_frag_t *frag0;
	unsigned int headlen;

	NAPI_GRO_CB(skb)->network_offset = 0;
	NAPI_GRO_CB(skb)->data_offset = 0;
	headlen = skb_headlen(skb);
	NAPI_GRO_CB(skb)->frag0 = skb->data;
	NAPI_GRO_CB(skb)->frag0_len = headlen;
	if (headlen)
		return;

	pinfo = skb_shinfo(skb);
	frag0 = &pinfo->frags[0];

	if (pinfo->nr_frags && !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

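/* Copy @grow bytes of headers out of frag0 into the linear area, shrinking
 * (and releasing, if emptied) the first fragment. GRO parsed the headers in
 * place from frag0, but the rest of the stack expects them in skb->data.
 */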
static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
	int grow = skb_gro_offset(skb) - skb_headlen(skb);

	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

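/* Core GRO entry point. Match @skb against the packets held in its hash
 * bucket, run the protocol's ->gro_receive() callback, and depending on the
 * outcome merge the skb into an existing packet, hold it for later merging,
 * flush a now-complete packet, or hand the skb to the normal receive path.
 */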
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
				 sizeof(u32))); /* Avoid slow unaligned access */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	/* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
	gro_try_pull_from_frag0(skb);
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	gro_try_pull_from_frag0(skb);
	goto ok;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);

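/* Act on dev_gro_receive()'s verdict: deliver unmerged skbs up the stack,
 * free skbs whose data now lives inside an aggregated packet, and leave
 * held/consumed skbs untouched.
 */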
static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__napi_kfree_skb(skb, SKB_CONSUMED);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);

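/* A minimal sketch of the expected caller context: a driver's NAPI poll
 * handler feeding received skbs to GRO. The foo_* names are hypothetical and
 * only illustrate the calling convention; they are not part of this file.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = 0;
 *		struct sk_buff *skb;
 *
 *		while (done < budget && (skb = foo_rx_next_skb())) {
 *			napi_gro_receive(napi, skb);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}
 */

/* Recycle an skb whose payload was merged elsewhere so that napi_get_frags()
 * can hand it out again: pull it back to an empty, pristine state.
 */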
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* The upper GRO stack assumes the network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so we
 * copy the Ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(!skb_gro_may_pull(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;

		if (NAPI_GRO_CB(skb)->frag0 != skb->data)
			gro_pull_from_frag0(skb, hlen);

		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish().
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

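/* Entry point for drivers that attach received data as page frags to the skb
 * obtained from napi_get_frags(): rebuild the Ethernet header from frag0,
 * run GRO, and act on the verdict.
 */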
gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);