[linux.git] / net / core / gro.c
// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);
static struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&packet_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all CPUs
 *	that are in the middle of receiving packets will see the new offload
 *	handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
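/*
 * Illustrative sketch (not part of gro.c): how a protocol typically
 * registers its offload handlers with dev_add_offload(), loosely modeled
 * on the IPv4 offload registration in net/ipv4/af_inet.c.  All "my_*"
 * names are hypothetical callbacks defined elsewhere.
 */
static struct packet_offload my_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),		/* protocol this entry handles; IPv4 shown as an example */
	.callbacks = {
		.gso_segment = my_gso_segment,	/* hypothetical segmentation callback */
		.gro_receive = my_gro_receive,	/* hypothetical GRO receive callback */
		.gro_complete = my_gro_complete,/* hypothetical GRO complete callback */
	},
};

static int __init my_offload_init(void)
{
	/* The entry is linked into offload_base, kept sorted by ->priority
	 * (lowest values are matched first; 0 here).
	 */
	dev_add_offload(&my_packet_offload);
	return 0;
}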

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &packet_offload
 *	is removed from the kernel lists.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &packet_offload is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
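/*
 * Illustrative sketch (not part of gro.c): matching teardown for the
 * hypothetical registration shown earlier.  Because dev_remove_offload()
 * calls synchronize_net(), the entry may be freed or reused once it
 * returns.
 */
static void __exit my_offload_exit(void)
{
	dev_remove_offload(&my_packet_offload);
}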

/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);
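/*
 * Illustrative sketch (not part of gro.c): the general caller-side pattern
 * for a segmentation helper such as the one above.  The helper returns a
 * list of segments linked through skb->next (or an ERR_PTR), which the
 * caller walks and transmits one by one; my_xmit_one() is a hypothetical
 * stand-in for the real transmit step.
 */
static int my_segment_and_xmit(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs, *next;

	segs = skb_mac_gso_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)
		return my_xmit_one(skb);	/* nothing to split */

	consume_skb(skb);			/* original skb no longer needed */
	for (; segs; segs = next) {
		next = segs->next;
		segs->next = NULL;
		my_xmit_one(segs);
	}
	return 0;
}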

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int gro_max_size;
	unsigned int new_truesize;
	struct sk_buff *lp;

	/* pairs with WRITE_ONCE() in netif_set_gro_max_size() */
	gro_max_size = READ_ONCE(p->dev->gro_max_size);

	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments' truesize: remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		__skb_frag_set_page(frag, page);
		skb_frag_off_set(frag, first_offset);
		skb_frag_size_set(frag, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count++;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}
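/*
 * Illustrative sketch (not part of gro.c): the rough shape of a protocol
 * gro_receive callback that feeds skb_gro_receive() above, loosely based
 * on the TCP/UDP handlers.  my_hdr_matches() is a hypothetical helper
 * standing in for real header parsing; real callbacks also set
 * NAPI_GRO_CB(skb)->flush for packets that must not be held.
 */
static struct sk_buff *my_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		if (!my_hdr_matches(p, skb)) {
			/* same rxhash but a different flow after all */
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* Merge skb into the held aggregate p.  A non-zero return
		 * (e.g. -E2BIG) asks dev_gro_receive() to flush 'p', which we
		 * request by returning it.
		 */
		if (skb_gro_receive(p, skb))
			pp = p;
		break;
	}

	return pp;
}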

static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age;
 * the youngest packets are at its head.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);
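/*
 * Worked example of the ffs() walk above: with gro_bitmask == 0x15
 * (buckets 0, 2 and 4 non-empty) the loop visits:
 *
 *   bitmask = 0b10101, base = ~0U
 *   ffs() = 1 -> bitmask = 0b1010, base = 0 -> flush bucket 0
 *   ffs() = 2 -> bitmask = 0b10,   base = 2 -> flush bucket 2
 *   ffs() = 2 -> bitmask = 0b0,    base = 4 -> flush bucket 4
 *   ffs() = 0 -> loop ends
 *
 * Starting base at ~0U turns the 1-based ffs() result into a 0-based
 * bucket index, and shifting by the full ffs() count skips runs of empty
 * buckets in a single step.
 */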

static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
		if (skb_vlan_tag_present(p))
			diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* In the most common scenarios 'slow_gro' is 0;
		 * otherwise we are already on some slower paths, so
		 * either skip all the infrequent tests altogether or
		 * avoid trying too hard to skip each of them individually.
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			struct tc_skb_ext *skb_ext;
			struct tc_skb_ext *p_ext;
#endif

			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			skb_ext = skb_ext_find(skb, TC_SKB_EXT);
			p_ext = skb_ext_find(p, TC_SKB_EXT);

			diffs |= (!!p_ext) ^ (!!skb_ext);
			if (!diffs && unlikely(skb_ext))
				diffs |= p_ext->chain ^ skb_ext->chain;
#endif
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (!skb_headlen(skb) && pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}
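/*
 * Illustrative sketch (not part of gro.c): how a gro_receive callback
 * typically consumes the frag0 fast path set up above, in the style of
 * inet_gro_receive().  "struct my_hdr" is a hypothetical protocol header.
 */
static struct sk_buff *my_gro_receive_hdr(struct list_head *head, struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct my_hdr);
	struct my_hdr *hdr;

	/* Fast path: the header lies inside the mapped frag0 area. */
	hdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		/* Slow path: pull enough of the header into the linear area. */
		hdr = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!hdr))
			return NULL;
	}

	/* ... parse hdr, adjust same_flow/flush, possibly merge ... */
	return NULL;
}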

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;
	int grow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->encap_mark = 0;
		NAPI_GRO_CB(skb)->recursion_counter = 0;
		NAPI_GRO_CB(skb)->is_fou = 0;
		NAPI_GRO_CB(skb)->is_atomic = 1;
		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			break;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			break;
		default:
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;
		}

		pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
					ipv6_gro_receive, inet_gro_receive,
					&gro_list->list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
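/*
 * Illustrative sketch (not part of gro.c): how an encapsulation handler
 * chains to the inner protocol using the lookup helpers above, roughly in
 * the style of eth_gro_complete() in net/ethernet/eth.c (simplified; the
 * error code and omitted inner-header bookkeeping are assumptions).
 */
static int my_encap_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);
	struct packet_offload *ptype;
	int err = -ENOENT;

	/* Hand the aggregated packet to the inner protocol's gro_complete. */
	ptype = gro_find_complete_by_type(eh->h_proto);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*eh));

	return err;
}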

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__kfree_skb_defer(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
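/*
 * Illustrative sketch (not part of gro.c): the usual driver-side call site.
 * A NAPI poll handler builds an skb per received descriptor and hands it to
 * GRO instead of netif_receive_skb(); my_build_rx_skb() is hypothetical.
 */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct sk_buff *skb;

	while (work_done < budget && (skb = my_build_rx_skb(napi))) {
		skb->protocol = eth_type_trans(skb, napi->dev);
		napi_gro_receive(napi, skb);	/* may merge, hold or deliver */
		work_done++;
	}

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}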

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* The upper GRO stack assumes the network header starts at gro_offset = 0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so we
 * copy the Ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish().
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
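/*
 * Illustrative sketch (not part of gro.c): the napi_get_frags() /
 * napi_gro_frags() pattern used by drivers that receive into pages rather
 * than into a linear skb (tun and several NICs work this way).  The page,
 * offset, len and truesize parameters are assumed to come from the
 * driver's own RX descriptor handling.
 */
static void my_receive_frags(struct napi_struct *napi, struct page *page,
			     unsigned int offset, unsigned int len,
			     unsigned int truesize)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb))
		return;		/* allocation failure: drop */

	/* Attach the payload (including the Ethernet header) as a fragment. */
	skb_fill_page_desc(skb, 0, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += truesize;

	/* napi_frags_skb() will pull the Ethernet header and set skb->protocol. */
	napi_gro_frags(napi);
}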

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);