// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014 Mahesh Bandewar <[email protected]>
 */

#include <net/inet_dscp.h>
#include <net/ip.h>

#include "ipvlan.h"

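/* Secret seed for the address and mac-filter hashes below; filled in
 * lazily, on first use, by ipvlan_init_secret().
 */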
static u32 ipvlan_jhash_secret __read_mostly;

void ipvlan_init_secret(void)
{
        net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}

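/* Update a slave's per-cpu RX counters: successful receives bump
 * rx_pkts/rx_bytes (and rx_mcast when @mcast is set) under the u64-stats
 * syncp; failures only bump rx_errs.
 */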
void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
                     unsigned int len, bool success, bool mcast)
{
        if (likely(success)) {
                struct ipvl_pcpu_stats *pcptr;

                pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
                u64_stats_update_begin(&pcptr->syncp);
                u64_stats_inc(&pcptr->rx_pkts);
                u64_stats_add(&pcptr->rx_bytes, len);
                if (mcast)
                        u64_stats_inc(&pcptr->rx_mcast);
                u64_stats_update_end(&pcptr->syncp);
        } else {
                this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
        }
}
EXPORT_SYMBOL_GPL(ipvlan_count_rx);

#if IS_ENABLED(CONFIG_IPV6)
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
        const struct in6_addr *ip6_addr = iaddr;

        return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
               IPVLAN_HASH_MASK;
}
#else
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
        return 0;
}
#endif

static u8 ipvlan_get_v4_hash(const void *iaddr)
{
        const struct in_addr *ip4_addr = iaddr;

        return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
               IPVLAN_HASH_MASK;
}

static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr)
{
        if (!is_v6 && addr->atype == IPVL_IPV4) {
                struct in_addr *i4addr = (struct in_addr *)iaddr;

                return addr->ip4addr.s_addr == i4addr->s_addr;
#if IS_ENABLED(CONFIG_IPV6)
        } else if (is_v6 && addr->atype == IPVL_IPV6) {
                struct in6_addr *i6addr = (struct in6_addr *)iaddr;

                return ipv6_addr_equal(&addr->ip6addr, i6addr);
#endif
        }

        return false;
}

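/* Look up an L3 address in the port-wide hash table. The traversal is
 * RCU-protected, so callers must hold the RCU read lock.
 */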
static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
                                               const void *iaddr, bool is_v6)
{
        struct ipvl_addr *addr;
        u8 hash;

        hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
               ipvlan_get_v4_hash(iaddr);
        hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode)
                if (addr_equal(is_v6, addr, iaddr))
                        return addr;
        return NULL;
}

void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
        struct ipvl_port *port = ipvlan->port;
        u8 hash;

        hash = (addr->atype == IPVL_IPV6) ?
               ipvlan_get_v6_hash(&addr->ip6addr) :
               ipvlan_get_v4_hash(&addr->ip4addr);
        if (hlist_unhashed(&addr->hlnode))
                hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}

void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
        hlist_del_init_rcu(&addr->hlnode);
}

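/* Search one slave's address list for a matching L3 address. */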
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
                                   const void *iaddr, bool is_v6)
{
        struct ipvl_addr *addr, *ret = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) {
                if (addr_equal(is_v6, addr, iaddr)) {
                        ret = addr;
                        break;
                }
        }
        rcu_read_unlock();
        return ret;
}

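/* Return true if any slave on @port already owns @iaddr. */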
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
        struct ipvl_dev *ipvlan;
        bool ret = false;

        rcu_read_lock();
        list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
                if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) {
                        ret = true;
                        break;
                }
        }
        rcu_read_unlock();
        return ret;
}

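/* Validate the packet's L3 header (ARP, IPv4 or IPv6) and return a
 * pointer to it, setting *type to the matching IPVL_* constant. IPv6
 * Neighbour Solicitations are reported as IPVL_ICMPV6, since the address
 * of interest sits in the ICMPv6 body. Returns NULL for anything
 * malformed or unrecognized.
 */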
void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
        void *lyr3h = NULL;

        switch (skb->protocol) {
        case htons(ETH_P_ARP): {
                struct arphdr *arph;

                if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
                        return NULL;

                arph = arp_hdr(skb);
                *type = IPVL_ARP;
                lyr3h = arph;
                break;
        }
        case htons(ETH_P_IP): {
                u32 pktlen;
                struct iphdr *ip4h;

                if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
                        return NULL;

                ip4h = ip_hdr(skb);
                pktlen = skb_ip_totlen(skb);
                if (ip4h->ihl < 5 || ip4h->version != 4)
                        return NULL;
                if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
                        return NULL;

                *type = IPVL_IPV4;
                lyr3h = ip4h;
                break;
        }
#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6): {
                struct ipv6hdr *ip6h;

                if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
                        return NULL;

                ip6h = ipv6_hdr(skb);
                if (ip6h->version != 6)
                        return NULL;

                *type = IPVL_IPV6;
                lyr3h = ip6h;
                /* Only Neighbour Solicitation pkts need different treatment */
                if (ipv6_addr_any(&ip6h->saddr) &&
                    ip6h->nexthdr == NEXTHDR_ICMP) {
                        struct icmp6hdr *icmph;

                        if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
                                return NULL;

                        ip6h = ipv6_hdr(skb);
                        icmph = (struct icmp6hdr *)(ip6h + 1);

                        if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
                                /* Need to access the ipv6 address in body */
                                if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
                                                + sizeof(struct in6_addr))))
                                        return NULL;

                                ip6h = ipv6_hdr(skb);
                                icmph = (struct icmp6hdr *)(ip6h + 1);
                        }

                        *type = IPVL_ICMPV6;
                        lyr3h = icmph;
                }
                break;
        }
#endif
        default:
                return NULL;
        }

        return lyr3h;
}

unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
        u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2),
                               ipvlan_jhash_secret);

        return hash & IPVLAN_MAC_FILTER_MASK;
}

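/* Work-queue handler that drains the port's multicast backlog. Each
 * queued skb is cloned to every slave that is up and whose mac-filter
 * accepts it; a TX-originated packet is then also sent out through the
 * master device, while an RX-originated one is freed once distributed.
 */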
void ipvlan_process_multicast(struct work_struct *work)
{
        struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
        struct ethhdr *ethh;
        struct ipvl_dev *ipvlan;
        struct sk_buff *skb, *nskb;
        struct sk_buff_head list;
        unsigned int len;
        unsigned int mac_hash;
        int ret;
        u8 pkt_type;
        bool tx_pkt;

        __skb_queue_head_init(&list);

        spin_lock_bh(&port->backlog.lock);
        skb_queue_splice_tail_init(&port->backlog, &list);
        spin_unlock_bh(&port->backlog.lock);

        while ((skb = __skb_dequeue(&list)) != NULL) {
                struct net_device *dev = skb->dev;
                bool consumed = false;

                ethh = eth_hdr(skb);
                tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
                mac_hash = ipvlan_mac_hash(ethh->h_dest);

                if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
                        pkt_type = PACKET_BROADCAST;
                else
                        pkt_type = PACKET_MULTICAST;

                rcu_read_lock();
                list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
                        if (tx_pkt && (ipvlan->dev == skb->dev))
                                continue;
                        if (!test_bit(mac_hash, ipvlan->mac_filters))
                                continue;
                        if (!(ipvlan->dev->flags & IFF_UP))
                                continue;
                        ret = NET_RX_DROP;
                        len = skb->len + ETH_HLEN;
                        nskb = skb_clone(skb, GFP_ATOMIC);
                        local_bh_disable();
                        if (nskb) {
                                consumed = true;
                                nskb->pkt_type = pkt_type;
                                nskb->dev = ipvlan->dev;
                                if (tx_pkt)
                                        ret = dev_forward_skb(ipvlan->dev, nskb);
                                else
                                        ret = netif_rx(nskb);
                        }
                        ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
                        local_bh_enable();
                }
                rcu_read_unlock();

                if (tx_pkt) {
                        /* If the packet originated here, send it out. */
                        skb->dev = port->dev;
                        skb->pkt_type = pkt_type;
                        dev_queue_xmit(skb);
                } else {
                        if (consumed)
                                consume_skb(skb);
                        else
                                kfree_skb(skb);
                }
                dev_put(dev);
                cond_resched();
        }
}

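/* Scrub the skb before handing it over to @dev; the scrub is a netns
 * crossing unless both devices live in the same netns.
 */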
static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
        bool xnet = true;

        if (dev)
                xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

        skb_scrub_packet(skb, xnet);
        if (dev)
                skb->dev = dev;
}

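/* Deliver the skb to the slave owning @addr. Packets exchanged locally
 * between two slaves are injected with dev_forward_skb(); packets coming
 * off the wire are retagged to the slave and RX_HANDLER_ANOTHER tells
 * the caller to re-run the RX path for the new device.
 */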
static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
                            bool local)
{
        struct ipvl_dev *ipvlan = addr->master;
        struct net_device *dev = ipvlan->dev;
        unsigned int len;
        rx_handler_result_t ret = RX_HANDLER_CONSUMED;
        bool success = false;
        struct sk_buff *skb = *pskb;

        len = skb->len + ETH_HLEN;
        /* Only packets exchanged between two local slaves need the
         * device-up check as well as the skb-share check.
         */
        if (local) {
                if (unlikely(!(dev->flags & IFF_UP))) {
                        kfree_skb(skb);
                        goto out;
                }

                skb = skb_share_check(skb, GFP_ATOMIC);
                if (!skb)
                        goto out;

                *pskb = skb;
        }

        if (local) {
                skb->pkt_type = PACKET_HOST;
                if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
                        success = true;
        } else {
                skb->dev = dev;
                ret = RX_HANDLER_ANOTHER;
                success = true;
        }

out:
        ipvlan_count_rx(ipvlan, len, success, false);
        return ret;
}

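/* Extract the source or destination L3 address from @lyr3h according to
 * @addr_type and look it up in the port hash table. ARP addresses are
 * read out of the ARP payload; for Neighbour Solicitations the target
 * address in the ICMPv6 body is used.
 */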
struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
                                     int addr_type, bool use_dest)
{
        struct ipvl_addr *addr = NULL;

        switch (addr_type) {
#if IS_ENABLED(CONFIG_IPV6)
        case IPVL_IPV6: {
                struct ipv6hdr *ip6h;
                struct in6_addr *i6addr;

                ip6h = (struct ipv6hdr *)lyr3h;
                i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
                addr = ipvlan_ht_addr_lookup(port, i6addr, true);
                break;
        }
        case IPVL_ICMPV6: {
                struct nd_msg *ndmh;
                struct in6_addr *i6addr;

                /* Make sure that Neighbour Solicitation ICMPv6 packets
                 * are handled to avoid DAD issues.
                 */
                ndmh = (struct nd_msg *)lyr3h;
                if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
                        i6addr = &ndmh->target;
                        addr = ipvlan_ht_addr_lookup(port, i6addr, true);
                }
                break;
        }
#endif
        case IPVL_IPV4: {
                struct iphdr *ip4h;
                __be32 *i4addr;

                ip4h = (struct iphdr *)lyr3h;
                i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
                addr = ipvlan_ht_addr_lookup(port, i4addr, false);
                break;
        }
        case IPVL_ARP: {
                struct arphdr *arph;
                unsigned char *arp_ptr;
                __be32 dip;

                arph = (struct arphdr *)lyr3h;
                arp_ptr = (unsigned char *)(arph + 1);
                if (use_dest)
                        arp_ptr += (2 * port->dev->addr_len) + 4;
                else
                        arp_ptr += port->dev->addr_len;

                memcpy(&dip, arp_ptr, 4);
                addr = ipvlan_ht_addr_lookup(port, &dip, false);
                break;
        }
        }

        return addr;
}

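/* Route an IPv4 packet in the master's namespace and transmit it via
 * ip_local_out(), dropping it (and bumping tx_errors) when no unicast
 * or local route exists.
 */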
static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
        const struct iphdr *ip4h = ip_hdr(skb);
        struct net_device *dev = skb->dev;
        struct net *net = dev_net(dev);
        struct rtable *rt;
        int err, ret = NET_XMIT_DROP;
        struct flowi4 fl4 = {
                .flowi4_oif = dev->ifindex,
                .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)),
                .flowi4_flags = FLOWI_FLAG_ANYSRC,
                .flowi4_mark = skb->mark,
                .daddr = ip4h->daddr,
                .saddr = ip4h->saddr,
        };

        rt = ip_route_output_flow(net, &fl4, NULL);
        if (IS_ERR(rt))
                goto err;

        if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
                ip_rt_put(rt);
                goto err;
        }
        skb_dst_set(skb, &rt->dst);

        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

        err = ip_local_out(net, NULL, skb);
        if (unlikely(net_xmit_eval(err)))
                DEV_STATS_INC(dev, tx_errors);
        else
                ret = NET_XMIT_SUCCESS;
        goto out;
err:
        DEV_STATS_INC(dev, tx_errors);
        kfree_skb(skb);
out:
        return ret;
}

#if IS_ENABLED(CONFIG_IPV6)

static noinline_for_stack int
ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb)
{
        const struct ipv6hdr *ip6h = ipv6_hdr(skb);
        struct flowi6 fl6 = {
                .flowi6_oif = dev->ifindex,
                .daddr = ip6h->daddr,
                .saddr = ip6h->saddr,
                .flowi6_flags = FLOWI_FLAG_ANYSRC,
                .flowlabel = ip6_flowinfo(ip6h),
                .flowi6_mark = skb->mark,
                .flowi6_proto = ip6h->nexthdr,
        };
        struct dst_entry *dst;
        int err;

        dst = ip6_route_output(dev_net(dev), NULL, &fl6);
        err = dst->error;
        if (err) {
                dst_release(dst);
                return err;
        }
        skb_dst_set(skb, dst);
        return 0;
}

static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        int err, ret = NET_XMIT_DROP;

        err = ipvlan_route_v6_outbound(dev, skb);
        if (unlikely(err)) {
                DEV_STATS_INC(dev, tx_errors);
                kfree_skb(skb);
                return err;
        }

        memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));

        err = ip6_local_out(dev_net(dev), NULL, skb);
        if (unlikely(net_xmit_eval(err)))
                DEV_STATS_INC(dev, tx_errors);
        else
                ret = NET_XMIT_SUCCESS;
        return ret;
}
#else
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
        return NET_XMIT_DROP;
}
#endif

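/* Egress path used by l3/l3s modes: discard any L2 header the stack may
 * have built and hand the packet to the v4/v6 output routine so that it
 * is routed in the main device's namespace.
 */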
static int ipvlan_process_outbound(struct sk_buff *skb)
{
        int ret = NET_XMIT_DROP;

        /* The ipvlan is a pseudo-L2 device, so the packets that we receive
         * will have an L2 header, which needs to be discarded before the
         * packet is processed further in the net-ns of the main device.
         */
        if (skb_mac_header_was_set(skb)) {
                /* In this mode we don't care about
                 * multicast and broadcast traffic */
                struct ethhdr *ethh = eth_hdr(skb);

                if (is_multicast_ether_addr(ethh->h_dest)) {
                        pr_debug_ratelimited(
                                "Dropped {multi|broad}cast of type=[%x]\n",
                                ntohs(skb->protocol));
                        kfree_skb(skb);
                        goto out;
                }

                skb_pull(skb, sizeof(*ethh));
                skb->mac_header = (typeof(skb->mac_header))~0U;
                skb_reset_network_header(skb);
        }

        if (skb->protocol == htons(ETH_P_IPV6))
                ret = ipvlan_process_v6_outbound(skb);
        else if (skb->protocol == htons(ETH_P_IP))
                ret = ipvlan_process_v4_outbound(skb);
        else {
                pr_warn_ratelimited("Dropped outbound packet type=%x\n",
                                    ntohs(skb->protocol));
                kfree_skb(skb);
        }
out:
        return ret;
}

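/* Defer a multicast/broadcast skb to the per-port work queue, dropping
 * it when the backlog is full. PAUSE frames are never queued.
 */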
static void ipvlan_multicast_enqueue(struct ipvl_port *port,
                                     struct sk_buff *skb, bool tx_pkt)
{
        if (skb->protocol == htons(ETH_P_PAUSE)) {
                kfree_skb(skb);
                return;
        }

        /* Record whether the deferred packet came down the TX path or
         * arrived on the RX path; deciding that from the mac-addresses
         * in the packet would lead to erroneous decisions. (This would
         * be true for a loopback-mode on the master device or a
         * hair-pin mode of the switch.)
         */
        IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;

        spin_lock(&port->backlog.lock);
        if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
                dev_hold(skb->dev);
                __skb_queue_tail(&port->backlog, skb);
                spin_unlock(&port->backlog.lock);
                schedule_work(&port->wq);
        } else {
                spin_unlock(&port->backlog.lock);
                dev_core_stats_rx_dropped_inc(skb->dev);
                kfree_skb(skb);
        }
}

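/* TX path for l3/l3s modes: traffic destined to another slave on the
 * same port is delivered directly (unless the port is in private mode),
 * everything else crosses into the master's namespace to be routed.
 */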
static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);
        void *lyr3h;
        struct ipvl_addr *addr;
        int addr_type;

        lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
        if (!lyr3h)
                goto out;

        if (!ipvlan_is_vepa(ipvlan->port)) {
                addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
                if (addr) {
                        if (ipvlan_is_private(ipvlan->port)) {
                                consume_skb(skb);
                                return NET_XMIT_DROP;
                        }
                        ipvlan_rcv_frame(addr, &skb, true);
                        return NET_XMIT_SUCCESS;
                }
        }
out:
        ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
        return ipvlan_process_outbound(skb);
}

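/* TX path for l2 mode: a destination mac equal to our own (i.e. the
 * master's) means local delivery to a sibling slave or to the main
 * device, multicast destinations are deferred to the work queue, and
 * anything else is transmitted through the master device.
 */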
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ethhdr *eth = skb_eth_hdr(skb);
        struct ipvl_addr *addr;
        void *lyr3h;
        int addr_type;

        if (!ipvlan_is_vepa(ipvlan->port) &&
            ether_addr_equal(eth->h_dest, eth->h_source)) {
                lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
                if (lyr3h) {
                        addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
                        if (addr) {
                                if (ipvlan_is_private(ipvlan->port)) {
                                        consume_skb(skb);
                                        return NET_XMIT_DROP;
                                }
                                ipvlan_rcv_frame(addr, &skb, true);
                                return NET_XMIT_SUCCESS;
                        }
                }
                skb = skb_share_check(skb, GFP_ATOMIC);
                if (!skb)
                        return NET_XMIT_DROP;

                /* The packet definitely does not belong to any of the
                 * virtual devices, but the dest is local. So forward
                 * the skb to the main-dev. At the RX side we just
                 * return RX_HANDLER_PASS for it to be processed further
                 * on the stack.
                 */
                dev_forward_skb(ipvlan->phy_dev, skb);
                return NET_XMIT_SUCCESS;

        } else if (is_multicast_ether_addr(eth->h_dest)) {
                skb_reset_mac_header(skb);
                ipvlan_skb_crossing_ns(skb, NULL);
                ipvlan_multicast_enqueue(ipvlan->port, skb, true);
                return NET_XMIT_SUCCESS;
        }

        skb->dev = ipvlan->phy_dev;
        return dev_queue_xmit(skb);
}

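/* Common TX entry point: validate that an ethernet header is present
 * and dispatch to the mode-specific TX handler, dropping the packet if
 * the port has gone away.
 */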
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);

        if (!port)
                goto out;

        if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
                goto out;

        switch (port->mode) {
        case IPVLAN_MODE_L2:
                return ipvlan_xmit_mode_l2(skb, dev);
        case IPVLAN_MODE_L3:
#ifdef CONFIG_IPVLAN_L3S
        case IPVLAN_MODE_L3S:
#endif
                return ipvlan_xmit_mode_l3(skb, dev);
        }

        /* Should not reach here */
        WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
out:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

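/* Return false only when the packet's source mac matches the master's
 * and its source L3 address belongs to one of the slaves, i.e. when the
 * frame was generated by an ipvlan device on this port.
 */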
static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
        struct ethhdr *eth = eth_hdr(skb);
        struct ipvl_addr *addr;
        void *lyr3h;
        int addr_type;

        if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
                lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
                if (!lyr3h)
                        return true;

                addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
                if (addr)
                        return false;
        }

        return true;
}

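/* RX path for l3 mode: deliver the packet to the slave owning the
 * destination L3 address, or return RX_HANDLER_PASS so the master's
 * stack processes it.
 */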
static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
                                                 struct ipvl_port *port)
{
        void *lyr3h;
        int addr_type;
        struct ipvl_addr *addr;
        struct sk_buff *skb = *pskb;
        rx_handler_result_t ret = RX_HANDLER_PASS;

        lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
        if (!lyr3h)
                goto out;

        addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
        if (addr)
                ret = ipvlan_rcv_frame(addr, pskb, false);

out:
        return ret;
}

static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
                                                 struct ipvl_port *port)
{
        struct sk_buff *skb = *pskb;
        struct ethhdr *eth = eth_hdr(skb);
        rx_handler_result_t ret = RX_HANDLER_PASS;

        if (is_multicast_ether_addr(eth->h_dest)) {
                if (ipvlan_external_frame(skb, port)) {
                        struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

                        /* External frames are queued for device local
                         * distribution, but a copy is given to master
                         * straight away to avoid sending duplicates later
                         * when work-queue processes this frame. This is
                         * achieved by returning RX_HANDLER_PASS.
                         */
                        if (nskb) {
                                ipvlan_skb_crossing_ns(nskb, NULL);
                                ipvlan_multicast_enqueue(port, nskb, false);
                        }
                }
        } else {
                /* Handle non-multicast packets just like l3 mode */
                ret = ipvlan_handle_mode_l3(pskb, port);
        }

        return ret;
}

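/* rx_handler installed on the master device; dispatches to the
 * mode-specific RX handler. In l3s mode packets are simply passed up
 * here, with delivery done from the driver's l3s hooks instead.
 */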
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);

        if (!port)
                return RX_HANDLER_PASS;

        switch (port->mode) {
        case IPVLAN_MODE_L2:
                return ipvlan_handle_mode_l2(pskb, port);
        case IPVLAN_MODE_L3:
                return ipvlan_handle_mode_l3(pskb, port);
#ifdef CONFIG_IPVLAN_L3S
        case IPVLAN_MODE_L3S:
                return RX_HANDLER_PASS;
#endif
        }

        /* Should not reach here */
        WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
        kfree_skb(skb);
        return RX_HANDLER_CONSUMED;
}