1 // SPDX-License-Identifier: GPL-2.0-or-later
4 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/module.h>
7 #include <linux/skbuff.h>
9 #include <linux/jhash.h>
10 #include <linux/if_tunnel.h>
11 #include <linux/net.h>
12 #include <linux/igmp.h>
13 #include <linux/workqueue.h>
14 #include <net/net_namespace.h>
17 #include <net/udp_tunnel.h>
21 #include <uapi/linux/amt.h>
22 #include <linux/security.h>
23 #include <net/gro_cells.h>
25 #include <net/if_inet6.h>
26 #include <net/ndisc.h>
27 #include <net/addrconf.h>
28 #include <net/ip6_route.h>
29 #include <net/inet_common.h>
30 #include <net/ip6_checksum.h>
32 static struct workqueue_struct *amt_wq;
34 static HLIST_HEAD(source_gc_list);
35 /* Lock for source_gc_list */
36 static spinlock_t source_gc_lock;
37 static struct delayed_work source_gc_wq;
38 static char *status_str[] = {
40 "AMT_STATUS_SENT_DISCOVERY",
41 "AMT_STATUS_RECEIVED_DISCOVERY",
42 "AMT_STATUS_SENT_ADVERTISEMENT",
43 "AMT_STATUS_RECEIVED_ADVERTISEMENT",
44 "AMT_STATUS_SENT_REQUEST",
45 "AMT_STATUS_RECEIVED_REQUEST",
46 "AMT_STATUS_SENT_QUERY",
47 "AMT_STATUS_RECEIVED_QUERY",
48 "AMT_STATUS_SENT_UPDATE",
49 "AMT_STATUS_RECEIVED_UPDATE",
52 static char *type_str[] = {
54 "AMT_MSG_ADVERTISEMENT",
56 "AMT_MSG_MEMBERSHIP_QUERY",
57 "AMT_MSG_MEMBERSHIP_UPDATE",
58 "AMT_MSG_MULTICAST_DATA",
62 static char *action_str[] = {
66 "AMT_ACT_STATUS_FWD_NEW",
67 "AMT_ACT_STATUS_D_FWD_NEW",
68 "AMT_ACT_STATUS_NONE_NEW",
71 static struct igmpv3_grec igmpv3_zero_grec;
73 #if IS_ENABLED(CONFIG_IPV6)
74 #define MLD2_ALL_NODE_INIT { { { 0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01 } } }
75 static struct in6_addr mld2_all_node = MLD2_ALL_NODE_INIT;
76 static struct mld2_grec mldv2_zero_grec;
79 static struct amt_skb_cb *amt_skb_cb(struct sk_buff *skb)
81 BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct qdisc_skb_cb) >
82 sizeof_field(struct sk_buff, cb));
84 return (struct amt_skb_cb *)((void *)skb->cb +
85 sizeof(struct qdisc_skb_cb));
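/* Reclaim deleted source nodes: splice the global gc list under
 * source_gc_lock and free each entry after an RCU grace period.
 */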
88 static void __amt_source_gc_work(void)
90 struct amt_source_node *snode;
91 struct hlist_head gc_list;
94 spin_lock_bh(&source_gc_lock);
95 hlist_move_list(&source_gc_list, &gc_list);
96 spin_unlock_bh(&source_gc_lock);
98 hlist_for_each_entry_safe(snode, t, &gc_list, node) {
99 hlist_del_rcu(&snode->node);
100 kfree_rcu(snode, rcu);
104 static void amt_source_gc_work(struct work_struct *work)
106 __amt_source_gc_work();
108 spin_lock_bh(&source_gc_lock);
109 mod_delayed_work(amt_wq, &source_gc_wq,
110 msecs_to_jiffies(AMT_GC_INTERVAL));
111 spin_unlock_bh(&source_gc_lock);
114 static bool amt_addr_equal(union amt_addr *a, union amt_addr *b)
116 return !memcmp(a, b, sizeof(union amt_addr));
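/* Map a source address to a hash bucket using the per-device hash seed. */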
119 static u32 amt_source_hash(struct amt_tunnel_list *tunnel, union amt_addr *src)
121 u32 hash = jhash(src, sizeof(*src), tunnel->amt->hash_seed);
123 return reciprocal_scale(hash, tunnel->amt->hash_buckets);
126 static bool amt_status_filter(struct amt_source_node *snode,
127 enum amt_filter filter)
133 if (snode->status == AMT_SOURCE_STATUS_FWD &&
134 snode->flags == AMT_SOURCE_OLD)
137 case AMT_FILTER_D_FWD:
138 if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
139 snode->flags == AMT_SOURCE_OLD)
142 case AMT_FILTER_FWD_NEW:
143 if (snode->status == AMT_SOURCE_STATUS_FWD &&
144 snode->flags == AMT_SOURCE_NEW)
147 case AMT_FILTER_D_FWD_NEW:
148 if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
149 snode->flags == AMT_SOURCE_NEW)
155 case AMT_FILTER_NONE_NEW:
156 if (snode->status == AMT_SOURCE_STATUS_NONE &&
157 snode->flags == AMT_SOURCE_NEW)
160 case AMT_FILTER_BOTH:
161 if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
162 snode->status == AMT_SOURCE_STATUS_FWD) &&
163 snode->flags == AMT_SOURCE_OLD)
166 case AMT_FILTER_BOTH_NEW:
167 if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
168 snode->status == AMT_SOURCE_STATUS_FWD) &&
169 snode->flags == AMT_SOURCE_NEW)
180 static struct amt_source_node *amt_lookup_src(struct amt_tunnel_list *tunnel,
181 struct amt_group_node *gnode,
182 enum amt_filter filter,
185 u32 hash = amt_source_hash(tunnel, src);
186 struct amt_source_node *snode;
188 hlist_for_each_entry_rcu(snode, &gnode->sources[hash], node)
189 if (amt_status_filter(snode, filter) &&
190 amt_addr_equal(&snode->source_addr, src))
196 static u32 amt_group_hash(struct amt_tunnel_list *tunnel, union amt_addr *group)
198 u32 hash = jhash(group, sizeof(*group), tunnel->amt->hash_seed);
200 return reciprocal_scale(hash, tunnel->amt->hash_buckets);
203 static struct amt_group_node *amt_lookup_group(struct amt_tunnel_list *tunnel,
204 union amt_addr *group,
205 union amt_addr *host,
208 u32 hash = amt_group_hash(tunnel, group);
209 struct amt_group_node *gnode;
211 hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash], node) {
212 if (amt_addr_equal(&gnode->group_addr, group) &&
213 amt_addr_equal(&gnode->host_addr, host) &&
221 static void amt_destroy_source(struct amt_source_node *snode)
223 struct amt_group_node *gnode = snode->gnode;
224 struct amt_tunnel_list *tunnel;
226 tunnel = gnode->tunnel_list;
229 netdev_dbg(snode->gnode->amt->dev,
230 "Delete source %pI4 from %pI4\n",
231 &snode->source_addr.ip4,
232 &gnode->group_addr.ip4);
233 #if IS_ENABLED(CONFIG_IPV6)
235 netdev_dbg(snode->gnode->amt->dev,
236 "Delete source %pI6 from %pI6\n",
237 &snode->source_addr.ip6,
238 &gnode->group_addr.ip6);
242 cancel_delayed_work(&snode->source_timer);
243 hlist_del_init_rcu(&snode->node);
244 tunnel->nr_sources--;
246 spin_lock_bh(&source_gc_lock);
247 hlist_add_head_rcu(&snode->node, &source_gc_list);
248 spin_unlock_bh(&source_gc_lock);
251 static void amt_del_group(struct amt_dev *amt, struct amt_group_node *gnode)
253 struct amt_source_node *snode;
254 struct hlist_node *t;
257 if (cancel_delayed_work(&gnode->group_timer))
259 hlist_del_rcu(&gnode->node);
260 gnode->tunnel_list->nr_groups--;
263 netdev_dbg(amt->dev, "Leave group %pI4\n",
264 &gnode->group_addr.ip4);
265 #if IS_ENABLED(CONFIG_IPV6)
267 netdev_dbg(amt->dev, "Leave group %pI6\n",
268 &gnode->group_addr.ip6);
270 for (i = 0; i < amt->hash_buckets; i++)
271 hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node)
272 amt_destroy_source(snode);
/* tunnel->lock was acquired outside of amt_del_group(),
 * but rcu_read_lock() is also held, so it's safe to free here.
 */
277 kfree_rcu(gnode, rcu);
280 /* If a source timer expires with a router filter-mode for the group of
281 * INCLUDE, the router concludes that traffic from this particular
282 * source is no longer desired on the attached network, and deletes the
283 * associated source record.
285 static void amt_source_work(struct work_struct *work)
287 struct amt_source_node *snode = container_of(to_delayed_work(work),
288 struct amt_source_node,
290 struct amt_group_node *gnode = snode->gnode;
291 struct amt_dev *amt = gnode->amt;
292 struct amt_tunnel_list *tunnel;
294 tunnel = gnode->tunnel_list;
295 spin_lock_bh(&tunnel->lock);
297 if (gnode->filter_mode == MCAST_INCLUDE) {
298 amt_destroy_source(snode);
299 if (!gnode->nr_sources)
300 amt_del_group(amt, gnode);
302 /* When a router filter-mode for a group is EXCLUDE,
303 * source records are only deleted when the group timer expires
305 snode->status = AMT_SOURCE_STATUS_D_FWD;
308 spin_unlock_bh(&tunnel->lock);
311 static void amt_act_src(struct amt_tunnel_list *tunnel,
312 struct amt_group_node *gnode,
313 struct amt_source_node *snode,
316 struct amt_dev *amt = tunnel->amt;
320 mod_delayed_work(amt_wq, &snode->source_timer,
321 msecs_to_jiffies(amt_gmi(amt)));
323 case AMT_ACT_GMI_ZERO:
324 cancel_delayed_work(&snode->source_timer);
327 mod_delayed_work(amt_wq, &snode->source_timer,
328 gnode->group_timer.timer.expires);
330 case AMT_ACT_STATUS_FWD_NEW:
331 snode->status = AMT_SOURCE_STATUS_FWD;
332 snode->flags = AMT_SOURCE_NEW;
334 case AMT_ACT_STATUS_D_FWD_NEW:
335 snode->status = AMT_SOURCE_STATUS_D_FWD;
336 snode->flags = AMT_SOURCE_NEW;
338 case AMT_ACT_STATUS_NONE_NEW:
339 cancel_delayed_work(&snode->source_timer);
340 snode->status = AMT_SOURCE_STATUS_NONE;
341 snode->flags = AMT_SOURCE_NEW;
349 netdev_dbg(amt->dev, "Source %pI4 from %pI4 Acted %s\n",
350 &snode->source_addr.ip4,
351 &gnode->group_addr.ip4,
353 #if IS_ENABLED(CONFIG_IPV6)
355 netdev_dbg(amt->dev, "Source %pI6 from %pI6 Acted %s\n",
356 &snode->source_addr.ip6,
357 &gnode->group_addr.ip6,
362 static struct amt_source_node *amt_alloc_snode(struct amt_group_node *gnode,
365 struct amt_source_node *snode;
367 snode = kzalloc(sizeof(*snode), GFP_ATOMIC);
371 memcpy(&snode->source_addr, src, sizeof(union amt_addr));
372 snode->gnode = gnode;
373 snode->status = AMT_SOURCE_STATUS_NONE;
374 snode->flags = AMT_SOURCE_NEW;
375 INIT_HLIST_NODE(&snode->node);
376 INIT_DELAYED_WORK(&snode->source_timer, amt_source_work);
/* RFC 3810 - 7.2.2. Definition of Filter Timers
 *
 *  Router Mode          Filter Timer         Actions/Comments
 *  -----------       -----------------       ----------------
 *    INCLUDE             Not Used            All listeners in
 *                                            INCLUDE mode.
 *    EXCLUDE             Timer > 0           At least one listener
 *                                            in EXCLUDE mode.
 *    EXCLUDE             Timer == 0          No more listeners in
 *                                            EXCLUDE mode for the
 *                                            multicast address.
 *                                            If the Requested List
 *                                            is empty, delete
 *                                            Multicast Address
 *                                            Record.  If not, switch
 *                                            to INCLUDE filter mode;
 *                                            the sources in the
 *                                            Requested List are
 *                                            moved to the Include
 *                                            List, and the Exclude
 *                                            List is deleted.
 */
406 static void amt_group_work(struct work_struct *work)
408 struct amt_group_node *gnode = container_of(to_delayed_work(work),
409 struct amt_group_node,
411 struct amt_tunnel_list *tunnel = gnode->tunnel_list;
412 struct amt_dev *amt = gnode->amt;
413 struct amt_source_node *snode;
414 bool delete_group = true;
415 struct hlist_node *t;
418 buckets = amt->hash_buckets;
420 spin_lock_bh(&tunnel->lock);
421 if (gnode->filter_mode == MCAST_INCLUDE) {
423 spin_unlock_bh(&tunnel->lock);
428 for (i = 0; i < buckets; i++) {
429 hlist_for_each_entry_safe(snode, t,
430 &gnode->sources[i], node) {
431 if (!delayed_work_pending(&snode->source_timer) ||
432 snode->status == AMT_SOURCE_STATUS_D_FWD) {
433 amt_destroy_source(snode);
435 delete_group = false;
436 snode->status = AMT_SOURCE_STATUS_FWD;
441 amt_del_group(amt, gnode);
443 gnode->filter_mode = MCAST_INCLUDE;
445 spin_unlock_bh(&tunnel->lock);
/* Non-existent group is created as INCLUDE {empty}:
452 * RFC 3376 - 5.1. Action on Change of Interface State
454 * If no interface state existed for that multicast address before
455 * the change (i.e., the change consisted of creating a new
456 * per-interface record), or if no state exists after the change
457 * (i.e., the change consisted of deleting a per-interface record),
458 * then the "non-existent" state is considered to have a filter mode
459 * of INCLUDE and an empty source list.
461 static struct amt_group_node *amt_add_group(struct amt_dev *amt,
462 struct amt_tunnel_list *tunnel,
463 union amt_addr *group,
464 union amt_addr *host,
467 struct amt_group_node *gnode;
471 if (tunnel->nr_groups >= amt->max_groups)
472 return ERR_PTR(-ENOSPC);
474 gnode = kzalloc(sizeof(*gnode) +
475 (sizeof(struct hlist_head) * amt->hash_buckets),
477 if (unlikely(!gnode))
478 return ERR_PTR(-ENOMEM);
481 gnode->group_addr = *group;
482 gnode->host_addr = *host;
484 gnode->tunnel_list = tunnel;
485 gnode->filter_mode = MCAST_INCLUDE;
486 INIT_HLIST_NODE(&gnode->node);
487 INIT_DELAYED_WORK(&gnode->group_timer, amt_group_work);
488 for (i = 0; i < amt->hash_buckets; i++)
489 INIT_HLIST_HEAD(&gnode->sources[i]);
491 hash = amt_group_hash(tunnel, group);
492 hlist_add_head_rcu(&gnode->node, &tunnel->groups[hash]);
496 netdev_dbg(amt->dev, "Join group %pI4\n",
497 &gnode->group_addr.ip4);
498 #if IS_ENABLED(CONFIG_IPV6)
500 netdev_dbg(amt->dev, "Join group %pI6\n",
501 &gnode->group_addr.ip6);
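/* Build an IGMPv3 general query; amt_send_igmp_gq() tags it with the tunnel
 * so the xmit path can encapsulate it into an AMT Membership Query.
 */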
507 static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
509 u8 ra[AMT_IPHDR_OPTS] = { IPOPT_RA, 4, 0, 0 };
510 int hlen = LL_RESERVED_SPACE(amt->dev);
511 int tlen = amt->dev->needed_tailroom;
512 struct igmpv3_query *ihv3;
513 void *csum_start = NULL;
514 __sum16 *csum = NULL;
521 len = hlen + tlen + sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3);
522 skb = netdev_alloc_skb_ip_align(amt->dev, len);
526 skb_reserve(skb, hlen);
527 skb_push(skb, sizeof(*eth));
528 skb->protocol = htons(ETH_P_IP);
529 skb_reset_mac_header(skb);
530 skb->priority = TC_PRIO_CONTROL;
531 skb_put(skb, sizeof(*iph));
532 skb_put_data(skb, ra, sizeof(ra));
533 skb_put(skb, sizeof(*ihv3));
534 skb_pull(skb, sizeof(*eth));
535 skb_reset_network_header(skb);
539 iph->ihl = (sizeof(struct iphdr) + AMT_IPHDR_OPTS) >> 2;
541 iph->tot_len = htons(sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3));
542 iph->frag_off = htons(IP_DF);
545 iph->protocol = IPPROTO_IGMP;
546 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
547 iph->saddr = htonl(INADDR_ANY);
551 ether_addr_copy(eth->h_source, amt->dev->dev_addr);
552 ip_eth_mc_map(htonl(INADDR_ALLHOSTS_GROUP), eth->h_dest);
553 eth->h_proto = htons(ETH_P_IP);
555 ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
556 skb_reset_transport_header(skb);
557 ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
560 ihv3->qqic = amt->qi;
563 ihv3->suppress = false;
564 ihv3->qrv = amt->net->ipv4.sysctl_igmp_qrv;
567 csum_start = (void *)ihv3;
568 *csum = ip_compute_csum(csum_start, sizeof(*ihv3));
569 offset = skb_transport_offset(skb);
570 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
571 skb->ip_summed = CHECKSUM_NONE;
573 skb_push(skb, sizeof(*eth) + sizeof(*iph) + AMT_IPHDR_OPTS);
578 static void __amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
581 if (validate && amt->status >= status)
583 netdev_dbg(amt->dev, "Update GW status %s -> %s",
584 status_str[amt->status], status_str[status]);
585 amt->status = status;
588 static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
589 enum amt_status status,
592 if (validate && tunnel->status >= status)
594 netdev_dbg(tunnel->amt->dev,
595 "Update Tunnel(IP = %pI4, PORT = %u) status %s -> %s",
596 &tunnel->ip4, ntohs(tunnel->source_port),
597 status_str[tunnel->status], status_str[status]);
598 tunnel->status = status;
601 static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
604 spin_lock_bh(&amt->lock);
605 __amt_update_gw_status(amt, status, validate);
606 spin_unlock_bh(&amt->lock);
609 static void amt_update_relay_status(struct amt_tunnel_list *tunnel,
610 enum amt_status status, bool validate)
612 spin_lock_bh(&tunnel->lock);
613 __amt_update_relay_status(tunnel, status, validate);
614 spin_unlock_bh(&tunnel->lock);
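/* Gateway side: send an AMT Discovery message carrying the current nonce to
 * the configured relay discovery address.
 */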
617 static void amt_send_discovery(struct amt_dev *amt)
619 struct amt_header_discovery *amtd;
620 int hlen, tlen, offset;
631 sock = rcu_dereference(amt->sock);
635 if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
638 rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
639 amt->discovery_ip, amt->local_ip,
640 amt->gw_port, amt->relay_port,
642 amt->stream_dev->ifindex);
644 amt->dev->stats.tx_errors++;
648 hlen = LL_RESERVED_SPACE(amt->dev);
649 tlen = amt->dev->needed_tailroom;
650 len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
651 skb = netdev_alloc_skb_ip_align(amt->dev, len);
654 amt->dev->stats.tx_errors++;
658 skb->priority = TC_PRIO_CONTROL;
659 skb_dst_set(skb, &rt->dst);
661 len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
662 skb_reset_network_header(skb);
664 amtd = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
666 amtd->type = AMT_MSG_DISCOVERY;
668 amtd->nonce = amt->nonce;
669 skb_push(skb, sizeof(*udph));
670 skb_reset_transport_header(skb);
672 udph->source = amt->gw_port;
673 udph->dest = amt->relay_port;
674 udph->len = htons(sizeof(*udph) + sizeof(*amtd));
676 offset = skb_transport_offset(skb);
677 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
678 udph->check = csum_tcpudp_magic(amt->local_ip, amt->discovery_ip,
679 sizeof(*udph) + sizeof(*amtd),
680 IPPROTO_UDP, skb->csum);
682 skb_push(skb, sizeof(*iph));
685 iph->ihl = (sizeof(struct iphdr)) >> 2;
688 iph->ttl = ip4_dst_hoplimit(&rt->dst);
689 iph->daddr = amt->discovery_ip;
690 iph->saddr = amt->local_ip;
691 iph->protocol = IPPROTO_UDP;
692 iph->tot_len = htons(len);
694 skb->ip_summed = CHECKSUM_NONE;
695 ip_select_ident(amt->net, skb, NULL);
697 err = ip_local_out(amt->net, sock->sk, skb);
698 if (unlikely(net_xmit_eval(err)))
699 amt->dev->stats.tx_errors++;
701 spin_lock_bh(&amt->lock);
702 __amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
703 spin_unlock_bh(&amt->lock);
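/* Gateway side: send an AMT Request message (with the current nonce) to the
 * discovered relay address; it is sent once for IGMP and once for MLD
 * (see amt_req_work()).
 */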
708 static void amt_send_request(struct amt_dev *amt, bool v6)
710 struct amt_header_request *amtrh;
711 int hlen, tlen, offset;
722 sock = rcu_dereference(amt->sock);
726 if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
729 rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
730 amt->remote_ip, amt->local_ip,
731 amt->gw_port, amt->relay_port,
733 amt->stream_dev->ifindex);
735 amt->dev->stats.tx_errors++;
739 hlen = LL_RESERVED_SPACE(amt->dev);
740 tlen = amt->dev->needed_tailroom;
741 len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
742 skb = netdev_alloc_skb_ip_align(amt->dev, len);
745 amt->dev->stats.tx_errors++;
749 skb->priority = TC_PRIO_CONTROL;
750 skb_dst_set(skb, &rt->dst);
752 len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
753 skb_reset_network_header(skb);
755 amtrh = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
757 amtrh->type = AMT_MSG_REQUEST;
758 amtrh->reserved1 = 0;
760 amtrh->reserved2 = 0;
761 amtrh->nonce = amt->nonce;
762 skb_push(skb, sizeof(*udph));
763 skb_reset_transport_header(skb);
765 udph->source = amt->gw_port;
766 udph->dest = amt->relay_port;
767 udph->len = htons(sizeof(*amtrh) + sizeof(*udph));
769 offset = skb_transport_offset(skb);
770 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
771 udph->check = csum_tcpudp_magic(amt->local_ip, amt->remote_ip,
772 sizeof(*udph) + sizeof(*amtrh),
773 IPPROTO_UDP, skb->csum);
775 skb_push(skb, sizeof(*iph));
778 iph->ihl = (sizeof(struct iphdr)) >> 2;
781 iph->ttl = ip4_dst_hoplimit(&rt->dst);
782 iph->daddr = amt->remote_ip;
783 iph->saddr = amt->local_ip;
784 iph->protocol = IPPROTO_UDP;
785 iph->tot_len = htons(len);
787 skb->ip_summed = CHECKSUM_NONE;
788 ip_select_ident(amt->net, skb, NULL);
790 err = ip_local_out(amt->net, sock->sk, skb);
791 if (unlikely(net_xmit_eval(err)))
792 amt->dev->stats.tx_errors++;
798 static void amt_send_igmp_gq(struct amt_dev *amt,
799 struct amt_tunnel_list *tunnel)
803 skb = amt_build_igmp_gq(amt);
807 amt_skb_cb(skb)->tunnel = tunnel;
811 #if IS_ENABLED(CONFIG_IPV6)
812 static struct sk_buff *amt_build_mld_gq(struct amt_dev *amt)
814 u8 ra[AMT_IP6HDR_OPTS] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
815 2, 0, 0, IPV6_TLV_PAD1, IPV6_TLV_PAD1 };
816 int hlen = LL_RESERVED_SPACE(amt->dev);
817 int tlen = amt->dev->needed_tailroom;
818 struct mld2_query *mld2q;
819 void *csum_start = NULL;
820 struct ipv6hdr *ip6h;
825 len = hlen + tlen + sizeof(*ip6h) + sizeof(ra) + sizeof(*mld2q);
826 skb = netdev_alloc_skb_ip_align(amt->dev, len);
830 skb_reserve(skb, hlen);
831 skb_push(skb, sizeof(*eth));
832 skb_reset_mac_header(skb);
834 skb->priority = TC_PRIO_CONTROL;
835 skb->protocol = htons(ETH_P_IPV6);
836 skb_put_zero(skb, sizeof(*ip6h));
837 skb_put_data(skb, ra, sizeof(ra));
838 skb_put_zero(skb, sizeof(*mld2q));
839 skb_pull(skb, sizeof(*eth));
840 skb_reset_network_header(skb);
841 ip6h = ipv6_hdr(skb);
842 ip6h->payload_len = htons(sizeof(ra) + sizeof(*mld2q));
843 ip6h->nexthdr = NEXTHDR_HOP;
845 ip6h->daddr = mld2_all_node;
846 ip6_flow_hdr(ip6h, 0, 0);
848 if (ipv6_dev_get_saddr(amt->net, amt->dev, &ip6h->daddr, 0,
850 amt->dev->stats.tx_errors++;
855 eth->h_proto = htons(ETH_P_IPV6);
856 ether_addr_copy(eth->h_source, amt->dev->dev_addr);
857 ipv6_eth_mc_map(&mld2_all_node, eth->h_dest);
859 skb_pull(skb, sizeof(*ip6h) + sizeof(ra));
860 skb_reset_transport_header(skb);
861 mld2q = (struct mld2_query *)icmp6_hdr(skb);
862 mld2q->mld2q_mrc = htons(1);
863 mld2q->mld2q_type = ICMPV6_MGM_QUERY;
864 mld2q->mld2q_code = 0;
865 mld2q->mld2q_cksum = 0;
866 mld2q->mld2q_resv1 = 0;
867 mld2q->mld2q_resv2 = 0;
868 mld2q->mld2q_suppress = 0;
869 mld2q->mld2q_qrv = amt->qrv;
870 mld2q->mld2q_nsrcs = 0;
871 mld2q->mld2q_qqic = amt->qi;
872 csum_start = (void *)mld2q;
873 mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
876 csum_partial(csum_start,
879 skb->ip_summed = CHECKSUM_NONE;
880 skb_push(skb, sizeof(*eth) + sizeof(*ip6h) + sizeof(ra));
884 static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
888 skb = amt_build_mld_gq(amt);
892 amt_skb_cb(skb)->tunnel = tunnel;
896 static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
901 static void amt_secret_work(struct work_struct *work)
903 struct amt_dev *amt = container_of(to_delayed_work(work),
907 spin_lock_bh(&amt->lock);
908 get_random_bytes(&amt->key, sizeof(siphash_key_t));
909 spin_unlock_bh(&amt->lock);
910 mod_delayed_work(amt_wq, &amt->secret_wq,
911 msecs_to_jiffies(AMT_SECRET_TIMEOUT));
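/* Gateway side: periodically generate a fresh nonce and (re)send Discovery
 * until an Advertisement has been received.
 */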
914 static void amt_discovery_work(struct work_struct *work)
916 struct amt_dev *amt = container_of(to_delayed_work(work),
920 spin_lock_bh(&amt->lock);
921 if (amt->status > AMT_STATUS_SENT_DISCOVERY)
923 get_random_bytes(&amt->nonce, sizeof(__be32));
924 spin_unlock_bh(&amt->lock);
926 amt_send_discovery(amt);
927 spin_lock_bh(&amt->lock);
929 mod_delayed_work(amt_wq, &amt->discovery_wq,
930 msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
931 spin_unlock_bh(&amt->lock);
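/* Gateway side: (re)send Request messages with exponential backoff and fall
 * back to the initial state if the relay does not answer after
 * AMT_MAX_REQ_COUNT attempts.
 */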
934 static void amt_req_work(struct work_struct *work)
936 struct amt_dev *amt = container_of(to_delayed_work(work),
941 spin_lock_bh(&amt->lock);
942 if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT)
945 if (amt->req_cnt++ > AMT_MAX_REQ_COUNT) {
946 netdev_dbg(amt->dev, "Gateway is not ready");
947 amt->qi = AMT_INIT_REQ_TIMEOUT;
951 __amt_update_gw_status(amt, AMT_STATUS_INIT, false);
954 spin_unlock_bh(&amt->lock);
956 amt_send_request(amt, false);
957 amt_send_request(amt, true);
958 amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
959 spin_lock_bh(&amt->lock);
961 exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
962 mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
963 spin_unlock_bh(&amt->lock);
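/* Gateway side: encapsulate a locally generated IGMP/MLD report into an AMT
 * Membership Update and send it to the relay over the UDP tunnel.
 */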
966 static bool amt_send_membership_update(struct amt_dev *amt,
970 struct amt_header_membership_update *amtmu;
977 sock = rcu_dereference_bh(amt->sock);
981 err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmu) +
982 sizeof(*iph) + sizeof(struct udphdr));
986 skb_reset_inner_headers(skb);
987 memset(&fl4, 0, sizeof(struct flowi4));
988 fl4.flowi4_oif = amt->stream_dev->ifindex;
989 fl4.daddr = amt->remote_ip;
990 fl4.saddr = amt->local_ip;
991 fl4.flowi4_tos = AMT_TOS;
992 fl4.flowi4_proto = IPPROTO_UDP;
993 rt = ip_route_output_key(amt->net, &fl4);
995 netdev_dbg(amt->dev, "no route to %pI4\n", &amt->remote_ip);
999 amtmu = skb_push(skb, sizeof(*amtmu));
1001 amtmu->type = AMT_MSG_MEMBERSHIP_UPDATE;
1002 amtmu->reserved = 0;
1003 amtmu->nonce = amt->nonce;
1004 amtmu->response_mac = amt->mac;
1007 skb_set_inner_protocol(skb, htons(ETH_P_IP));
1009 skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
1010 udp_tunnel_xmit_skb(rt, sock->sk, skb,
1014 ip4_dst_hoplimit(&rt->dst),
1020 amt_update_gw_status(amt, AMT_STATUS_SENT_UPDATE, true);
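/* Relay side: copy a multicast data packet and encapsulate it as an AMT
 * Multicast Data message toward a single gateway tunnel.
 */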
1024 static void amt_send_multicast_data(struct amt_dev *amt,
1025 const struct sk_buff *oskb,
1026 struct amt_tunnel_list *tunnel,
1029 struct amt_header_mcast_data *amtmd;
1030 struct socket *sock;
1031 struct sk_buff *skb;
1036 sock = rcu_dereference_bh(amt->sock);
1040 skb = skb_copy_expand(oskb, sizeof(*amtmd) + sizeof(*iph) +
1041 sizeof(struct udphdr), 0, GFP_ATOMIC);
1045 skb_reset_inner_headers(skb);
1046 memset(&fl4, 0, sizeof(struct flowi4));
1047 fl4.flowi4_oif = amt->stream_dev->ifindex;
1048 fl4.daddr = tunnel->ip4;
1049 fl4.saddr = amt->local_ip;
1050 fl4.flowi4_proto = IPPROTO_UDP;
1051 rt = ip_route_output_key(amt->net, &fl4);
1053 netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
1058 amtmd = skb_push(skb, sizeof(*amtmd));
1060 amtmd->reserved = 0;
1061 amtmd->type = AMT_MSG_MULTICAST_DATA;
1064 skb_set_inner_protocol(skb, htons(ETH_P_IP));
1066 skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
1067 udp_tunnel_xmit_skb(rt, sock->sk, skb,
1071 ip4_dst_hoplimit(&rt->dst),
1074 tunnel->source_port,
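/* Relay side: encapsulate an IGMP/MLD general query into an AMT Membership
 * Query carrying the tunnel's nonce and response MAC.
 */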
1079 static bool amt_send_membership_query(struct amt_dev *amt,
1080 struct sk_buff *skb,
1081 struct amt_tunnel_list *tunnel,
1084 struct amt_header_membership_query *amtmq;
1085 struct socket *sock;
1090 sock = rcu_dereference_bh(amt->sock);
1094 err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmq) +
1095 sizeof(struct iphdr) + sizeof(struct udphdr));
1099 skb_reset_inner_headers(skb);
1100 memset(&fl4, 0, sizeof(struct flowi4));
1101 fl4.flowi4_oif = amt->stream_dev->ifindex;
1102 fl4.daddr = tunnel->ip4;
1103 fl4.saddr = amt->local_ip;
1104 fl4.flowi4_tos = AMT_TOS;
1105 fl4.flowi4_proto = IPPROTO_UDP;
1106 rt = ip_route_output_key(amt->net, &fl4);
1108 netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
1112 amtmq = skb_push(skb, sizeof(*amtmq));
1114 amtmq->type = AMT_MSG_MEMBERSHIP_QUERY;
1115 amtmq->reserved = 0;
1118 amtmq->nonce = tunnel->nonce;
1119 amtmq->response_mac = tunnel->mac;
1122 skb_set_inner_protocol(skb, htons(ETH_P_IP));
1124 skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
1125 udp_tunnel_xmit_skb(rt, sock->sk, skb,
1129 ip4_dst_hoplimit(&rt->dst),
1132 tunnel->source_port,
1135 amt_update_relay_status(tunnel, AMT_STATUS_SENT_QUERY, true);
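/* ndo_start_xmit handler.  In gateway mode, IGMP/MLD reports and leaves are
 * sent as Membership Updates; in relay mode, general queries are sent as
 * Membership Queries and multicast data is replicated to every tunnel that
 * has joined the destination group.
 */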
1139 static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev)
1141 struct amt_dev *amt = netdev_priv(dev);
1142 struct amt_tunnel_list *tunnel;
1143 struct amt_group_node *gnode;
1144 union amt_addr group = {0,};
1145 #if IS_ENABLED(CONFIG_IPV6)
1146 struct ipv6hdr *ip6h;
1147 struct mld_msg *mld;
1149 bool report = false;
1158 if (iph->version == 4) {
1159 if (!ipv4_is_multicast(iph->daddr))
1162 if (!ip_mc_check_igmp(skb)) {
1165 case IGMPV3_HOST_MEMBERSHIP_REPORT:
1166 case IGMP_HOST_MEMBERSHIP_REPORT:
1169 case IGMP_HOST_MEMBERSHIP_QUERY:
1179 group.ip4 = iph->daddr;
1180 #if IS_ENABLED(CONFIG_IPV6)
1181 } else if (iph->version == 6) {
1182 ip6h = ipv6_hdr(skb);
1183 if (!ipv6_addr_is_multicast(&ip6h->daddr))
1186 if (!ipv6_mc_check_mld(skb)) {
1187 mld = (struct mld_msg *)skb_transport_header(skb);
1188 switch (mld->mld_type) {
1189 case ICMPV6_MGM_REPORT:
1190 case ICMPV6_MLD2_REPORT:
1193 case ICMPV6_MGM_QUERY:
1203 group.ip6 = ip6h->daddr;
1206 dev->stats.tx_errors++;
1210 if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
1213 skb_pull(skb, sizeof(struct ethhdr));
1215 if (amt->mode == AMT_MODE_GATEWAY) {
1216 /* Gateway only passes IGMP/MLD packets */
1219 if ((!v6 && !amt->ready4) || (v6 && !amt->ready6))
1221 if (amt_send_membership_update(amt, skb, v6))
1224 } else if (amt->mode == AMT_MODE_RELAY) {
1226 tunnel = amt_skb_cb(skb)->tunnel;
1232 /* Do not forward unexpected query */
1233 if (amt_send_membership_query(amt, skb, tunnel, v6))
1240 list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
1241 hash = amt_group_hash(tunnel, &group);
1242 hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash],
1245 if (gnode->group_addr.ip4 == iph->daddr)
1247 #if IS_ENABLED(CONFIG_IPV6)
1249 if (ipv6_addr_equal(&gnode->group_addr.ip6,
1257 amt_send_multicast_data(amt, skb, tunnel, v6);
1262 return NETDEV_TX_OK;
1266 dev->stats.tx_dropped++;
1267 return NETDEV_TX_OK;
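/* Validate the AMT common header behind the UDP header and return the
 * message type, or a negative value if the packet is malformed.
 */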
1270 static int amt_parse_type(struct sk_buff *skb)
1272 struct amt_header *amth;
1274 if (!pskb_may_pull(skb, sizeof(struct udphdr) +
1275 sizeof(struct amt_header)))
1278 amth = (struct amt_header *)(udp_hdr(skb) + 1);
1280 if (amth->version != 0)
1283 if (amth->type >= __AMT_MSG_MAX || !amth->type)
1288 static void amt_clear_groups(struct amt_tunnel_list *tunnel)
1290 struct amt_dev *amt = tunnel->amt;
1291 struct amt_group_node *gnode;
1292 struct hlist_node *t;
1295 spin_lock_bh(&tunnel->lock);
1297 for (i = 0; i < amt->hash_buckets; i++)
1298 hlist_for_each_entry_safe(gnode, t, &tunnel->groups[i], node)
1299 amt_del_group(amt, gnode);
1301 spin_unlock_bh(&tunnel->lock);
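/* Per-tunnel garbage collection: when a gateway stops refreshing its
 * membership state, unlink the tunnel and release all of its groups.
 */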
1304 static void amt_tunnel_expire(struct work_struct *work)
1306 struct amt_tunnel_list *tunnel = container_of(to_delayed_work(work),
1307 struct amt_tunnel_list,
1309 struct amt_dev *amt = tunnel->amt;
1311 spin_lock_bh(&amt->lock);
1313 list_del_rcu(&tunnel->list);
1315 amt_clear_groups(tunnel);
1317 spin_unlock_bh(&amt->lock);
1318 kfree_rcu(tunnel, rcu);
1321 static void amt_cleanup_srcs(struct amt_dev *amt,
1322 struct amt_tunnel_list *tunnel,
1323 struct amt_group_node *gnode)
1325 struct amt_source_node *snode;
1326 struct hlist_node *t;
1329 /* Delete old sources */
1330 for (i = 0; i < amt->hash_buckets; i++) {
1331 hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node) {
1332 if (snode->flags == AMT_SOURCE_OLD)
1333 amt_destroy_source(snode);
1337 /* switch from new to old */
1338 for (i = 0; i < amt->hash_buckets; i++) {
1339 hlist_for_each_entry_rcu(snode, &gnode->sources[i], node) {
1340 snode->flags = AMT_SOURCE_OLD;
1342 netdev_dbg(snode->gnode->amt->dev,
1343 "Add source as OLD %pI4 from %pI4\n",
1344 &snode->source_addr.ip4,
1345 &gnode->group_addr.ip4);
1346 #if IS_ENABLED(CONFIG_IPV6)
1348 netdev_dbg(snode->gnode->amt->dev,
1349 "Add source as OLD %pI6 from %pI6\n",
1350 &snode->source_addr.ip6,
1351 &gnode->group_addr.ip6);
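/* Parse the sources of an IGMPv3/MLDv2 group record and add any unknown
 * ones as NEW source nodes, bounded by the per-tunnel source limit.
 */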
1357 static void amt_add_srcs(struct amt_dev *amt, struct amt_tunnel_list *tunnel,
1358 struct amt_group_node *gnode, void *grec,
1361 struct igmpv3_grec *igmp_grec;
1362 struct amt_source_node *snode;
1363 #if IS_ENABLED(CONFIG_IPV6)
1364 struct mld2_grec *mld_grec;
1366 union amt_addr src = {0,};
1372 igmp_grec = (struct igmpv3_grec *)grec;
1373 nsrcs = ntohs(igmp_grec->grec_nsrcs);
1375 #if IS_ENABLED(CONFIG_IPV6)
1376 mld_grec = (struct mld2_grec *)grec;
1377 nsrcs = ntohs(mld_grec->grec_nsrcs);
1382 for (i = 0; i < nsrcs; i++) {
1383 if (tunnel->nr_sources >= amt->max_sources)
1386 src.ip4 = igmp_grec->grec_src[i];
1387 #if IS_ENABLED(CONFIG_IPV6)
1389 memcpy(&src.ip6, &mld_grec->grec_src[i],
1390 sizeof(struct in6_addr));
1392 if (amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL, &src))
1395 snode = amt_alloc_snode(gnode, &src);
1397 hash = amt_source_hash(tunnel, &snode->source_addr);
1398 hlist_add_head_rcu(&snode->node, &gnode->sources[hash]);
1399 tunnel->nr_sources++;
1400 gnode->nr_sources++;
1403 netdev_dbg(snode->gnode->amt->dev,
1404 "Add source as NEW %pI4 from %pI4\n",
1405 &snode->source_addr.ip4,
1406 &gnode->group_addr.ip4);
1407 #if IS_ENABLED(CONFIG_IPV6)
1409 netdev_dbg(snode->gnode->amt->dev,
1410 "Add source as NEW %pI6 from %pI6\n",
1411 &snode->source_addr.ip6,
1412 &gnode->group_addr.ip6);
1418 /* Router State Report Rec'd New Router State
1419 * ------------ ------------ ----------------
1420 * EXCLUDE (X,Y) IS_IN (A) EXCLUDE (X+A,Y-A)
 * -----------+-----------+-----------+
 *            |    OLD    |    NEW    |
 * -----------+-----------+-----------+
 *    FWD     |     X     |    X+A    |
 * -----------+-----------+-----------+
 *    D_FWD   |     Y     |    Y-A    |
 * -----------+-----------+-----------+
 *    NONE    |           |     A     |
 * -----------+-----------+-----------+
1432 * a) Received sources are NONE/NEW
1433 * b) All NONE will be deleted by amt_cleanup_srcs().
1434 * c) All OLD will be deleted by amt_cleanup_srcs().
1435 * d) After delete, NEW source will be switched to OLD.
1437 static void amt_lookup_act_srcs(struct amt_tunnel_list *tunnel,
1438 struct amt_group_node *gnode,
1441 enum amt_filter filter,
1445 struct amt_dev *amt = tunnel->amt;
1446 struct amt_source_node *snode;
1447 struct igmpv3_grec *igmp_grec;
1448 #if IS_ENABLED(CONFIG_IPV6)
1449 struct mld2_grec *mld_grec;
1451 union amt_addr src = {0,};
1452 struct hlist_node *t;
1457 igmp_grec = (struct igmpv3_grec *)grec;
1458 nsrcs = ntohs(igmp_grec->grec_nsrcs);
1460 #if IS_ENABLED(CONFIG_IPV6)
1461 mld_grec = (struct mld2_grec *)grec;
1462 nsrcs = ntohs(mld_grec->grec_nsrcs);
1468 memset(&src, 0, sizeof(union amt_addr));
1472 for (i = 0; i < nsrcs; i++) {
1474 src.ip4 = igmp_grec->grec_src[i];
1475 #if IS_ENABLED(CONFIG_IPV6)
1477 memcpy(&src.ip6, &mld_grec->grec_src[i],
1478 sizeof(struct in6_addr));
1480 snode = amt_lookup_src(tunnel, gnode, filter, &src);
1483 amt_act_src(tunnel, gnode, snode, act);
1488 for (i = 0; i < amt->hash_buckets; i++) {
1489 hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
1491 if (amt_status_filter(snode, filter))
1492 amt_act_src(tunnel, gnode, snode, act);
1495 for (i = 0; i < nsrcs; i++) {
1497 src.ip4 = igmp_grec->grec_src[i];
1498 #if IS_ENABLED(CONFIG_IPV6)
1500 memcpy(&src.ip6, &mld_grec->grec_src[i],
1501 sizeof(struct in6_addr));
1503 snode = amt_lookup_src(tunnel, gnode, filter, &src);
1506 amt_act_src(tunnel, gnode, snode, act);
1511 for (i = 0; i < amt->hash_buckets; i++) {
1512 hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
1514 if (!amt_status_filter(snode, filter))
1516 for (j = 0; j < nsrcs; j++) {
1518 src.ip4 = igmp_grec->grec_src[j];
1519 #if IS_ENABLED(CONFIG_IPV6)
1522 &mld_grec->grec_src[j],
1523 sizeof(struct in6_addr));
1525 if (amt_addr_equal(&snode->source_addr,
1529 amt_act_src(tunnel, gnode, snode, act);
1535 case AMT_OPS_SUB_REV:
1537 for (i = 0; i < nsrcs; i++) {
1539 src.ip4 = igmp_grec->grec_src[i];
1540 #if IS_ENABLED(CONFIG_IPV6)
1542 memcpy(&src.ip6, &mld_grec->grec_src[i],
1543 sizeof(struct in6_addr));
1545 snode = amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL,
1548 snode = amt_lookup_src(tunnel, gnode,
1551 amt_act_src(tunnel, gnode, snode, act);
1556 netdev_dbg(amt->dev, "Invalid type\n");
1561 static void amt_mcast_is_in_handler(struct amt_dev *amt,
1562 struct amt_tunnel_list *tunnel,
1563 struct amt_group_node *gnode,
1564 void *grec, void *zero_grec, bool v6)
1566 if (gnode->filter_mode == MCAST_INCLUDE) {
1567 /* Router State Report Rec'd New Router State Actions
1568 * ------------ ------------ ---------------- -------
1569 * INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI
1571 /* Update IS_IN (B) as FWD/NEW */
1572 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1573 AMT_FILTER_NONE_NEW,
1574 AMT_ACT_STATUS_FWD_NEW,
1576 /* Update INCLUDE (A) as NEW */
1577 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1579 AMT_ACT_STATUS_FWD_NEW,
1582 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1588 * ------------ ------------ ---------------- -------
1589 * EXCLUDE (X,Y) IS_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
1591 /* Update (A) in (X, Y) as NONE/NEW */
1592 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1594 AMT_ACT_STATUS_NONE_NEW,
1596 /* Update FWD/OLD as FWD/NEW */
1597 amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1599 AMT_ACT_STATUS_FWD_NEW,
1601 /* Update IS_IN (A) as FWD/NEW */
1602 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1603 AMT_FILTER_NONE_NEW,
1604 AMT_ACT_STATUS_FWD_NEW,
1606 /* Update EXCLUDE (, Y-A) as D_FWD_NEW */
1607 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
1609 AMT_ACT_STATUS_D_FWD_NEW,
1614 static void amt_mcast_is_ex_handler(struct amt_dev *amt,
1615 struct amt_tunnel_list *tunnel,
1616 struct amt_group_node *gnode,
1617 void *grec, void *zero_grec, bool v6)
1619 if (gnode->filter_mode == MCAST_INCLUDE) {
1620 /* Router State Report Rec'd New Router State Actions
1621 * ------------ ------------ ---------------- -------
1622 * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
1626 /* EXCLUDE(A*B, ) */
1627 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1629 AMT_ACT_STATUS_FWD_NEW,
1631 /* EXCLUDE(, B-A) */
1632 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1634 AMT_ACT_STATUS_D_FWD_NEW,
1637 amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1638 AMT_FILTER_D_FWD_NEW,
1641 /* Group Timer=GMI */
1642 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1643 msecs_to_jiffies(amt_gmi(amt))))
1645 gnode->filter_mode = MCAST_EXCLUDE;
/* Deleting (A-B) is handled by amt_cleanup_srcs(). */
1648 /* Router State Report Rec'd New Router State Actions
1649 * ------------ ------------ ---------------- -------
1650 * EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
1655 /* EXCLUDE (A-Y, ) */
1656 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1658 AMT_ACT_STATUS_FWD_NEW,
1660 /* EXCLUDE (, Y*A ) */
1661 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1663 AMT_ACT_STATUS_D_FWD_NEW,
1666 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1667 AMT_FILTER_BOTH_NEW,
1670 /* Group Timer=GMI */
1671 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1672 msecs_to_jiffies(amt_gmi(amt))))
/* Deleting (X-A) and (Y-A) is handled by amt_cleanup_srcs(). */
1678 static void amt_mcast_to_in_handler(struct amt_dev *amt,
1679 struct amt_tunnel_list *tunnel,
1680 struct amt_group_node *gnode,
1681 void *grec, void *zero_grec, bool v6)
1683 if (gnode->filter_mode == MCAST_INCLUDE) {
1684 /* Router State Report Rec'd New Router State Actions
1685 * ------------ ------------ ---------------- -------
1686 * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI
1689 /* Update TO_IN (B) sources as FWD/NEW */
1690 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1691 AMT_FILTER_NONE_NEW,
1692 AMT_ACT_STATUS_FWD_NEW,
1694 /* Update INCLUDE (A) sources as NEW */
1695 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1697 AMT_ACT_STATUS_FWD_NEW,
1700 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1705 /* Router State Report Rec'd New Router State Actions
1706 * ------------ ------------ ---------------- -------
1707 * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
1711 /* Update TO_IN (A) sources as FWD/NEW */
1712 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1713 AMT_FILTER_NONE_NEW,
1714 AMT_ACT_STATUS_FWD_NEW,
1716 /* Update EXCLUDE(X,) sources as FWD/NEW */
1717 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1719 AMT_ACT_STATUS_FWD_NEW,
1722 * (A) are already switched to FWD_NEW.
1723 * So, D_FWD/OLD -> D_FWD/NEW is okay.
1725 amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1727 AMT_ACT_STATUS_D_FWD_NEW,
1730 * Only FWD_NEW will have (A) sources.
1732 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1739 static void amt_mcast_to_ex_handler(struct amt_dev *amt,
1740 struct amt_tunnel_list *tunnel,
1741 struct amt_group_node *gnode,
1742 void *grec, void *zero_grec, bool v6)
1744 if (gnode->filter_mode == MCAST_INCLUDE) {
1745 /* Router State Report Rec'd New Router State Actions
1746 * ------------ ------------ ---------------- -------
1747 * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
1752 /* EXCLUDE (A*B, ) */
1753 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1755 AMT_ACT_STATUS_FWD_NEW,
1757 /* EXCLUDE (, B-A) */
1758 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1760 AMT_ACT_STATUS_D_FWD_NEW,
1763 amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1764 AMT_FILTER_D_FWD_NEW,
1767 /* Group Timer=GMI */
1768 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1769 msecs_to_jiffies(amt_gmi(amt))))
1771 gnode->filter_mode = MCAST_EXCLUDE;
/* Deleting (A-B) is handled by amt_cleanup_srcs(). */
1774 /* Router State Report Rec'd New Router State Actions
1775 * ------------ ------------ ---------------- -------
1776 * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
1782 /* Update (A-X-Y) as NONE/OLD */
1783 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1787 /* EXCLUDE (A-Y, ) */
1788 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1790 AMT_ACT_STATUS_FWD_NEW,
1792 /* EXCLUDE (, Y*A) */
1793 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1795 AMT_ACT_STATUS_D_FWD_NEW,
1797 /* Group Timer=GMI */
1798 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1799 msecs_to_jiffies(amt_gmi(amt))))
/* Deleting (X-A) and (Y-A) is handled by amt_cleanup_srcs(). */
1805 static void amt_mcast_allow_handler(struct amt_dev *amt,
1806 struct amt_tunnel_list *tunnel,
1807 struct amt_group_node *gnode,
1808 void *grec, void *zero_grec, bool v6)
1810 if (gnode->filter_mode == MCAST_INCLUDE) {
1811 /* Router State Report Rec'd New Router State Actions
1812 * ------------ ------------ ---------------- -------
1813 * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI
1816 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1818 AMT_ACT_STATUS_FWD_NEW,
1821 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1826 /* Router State Report Rec'd New Router State Actions
1827 * ------------ ------------ ---------------- -------
1828 * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI
1830 /* EXCLUDE (X+A, ) */
1831 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1833 AMT_ACT_STATUS_FWD_NEW,
1835 /* EXCLUDE (, Y-A) */
1836 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
1838 AMT_ACT_STATUS_D_FWD_NEW,
* All (A) sources are now in FWD/NEW status.
1843 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1850 static void amt_mcast_block_handler(struct amt_dev *amt,
1851 struct amt_tunnel_list *tunnel,
1852 struct amt_group_node *gnode,
1853 void *grec, void *zero_grec, bool v6)
1855 if (gnode->filter_mode == MCAST_INCLUDE) {
1856 /* Router State Report Rec'd New Router State Actions
1857 * ------------ ------------ ---------------- -------
1858 * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B)
1861 amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1863 AMT_ACT_STATUS_FWD_NEW,
1866 /* Router State Report Rec'd New Router State Actions
1867 * ------------ ------------ ---------------- -------
1868 * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
1871 /* (A-X-Y)=Group Timer */
1872 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1877 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1879 AMT_ACT_STATUS_FWD_NEW,
1881 /* EXCLUDE (X+(A-Y) */
1882 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1884 AMT_ACT_STATUS_FWD_NEW,
1887 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1889 AMT_ACT_STATUS_D_FWD_NEW,
1895 * 7.3.2. In the Presence of Older Version Group Members
1897 * When Group Compatibility Mode is IGMPv2, a router internally
1898 * translates the following IGMPv2 messages for that group to their
1899 * IGMPv3 equivalents:
1901 * IGMPv2 Message IGMPv3 Equivalent
1902 * -------------- -----------------
1903 * Report IS_EX( {} )
1906 static void amt_igmpv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
1907 struct amt_tunnel_list *tunnel)
1909 struct igmphdr *ih = igmp_hdr(skb);
1910 struct iphdr *iph = ip_hdr(skb);
1911 struct amt_group_node *gnode;
1912 union amt_addr group, host;
1914 memset(&group, 0, sizeof(union amt_addr));
1915 group.ip4 = ih->group;
1916 memset(&host, 0, sizeof(union amt_addr));
1917 host.ip4 = iph->saddr;
1919 gnode = amt_lookup_group(tunnel, &group, &host, false);
1921 gnode = amt_add_group(amt, tunnel, &group, &host, false);
1922 if (!IS_ERR(gnode)) {
1923 gnode->filter_mode = MCAST_EXCLUDE;
1924 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1925 msecs_to_jiffies(amt_gmi(amt))))
1932 * 7.3.2. In the Presence of Older Version Group Members
1934 * When Group Compatibility Mode is IGMPv2, a router internally
1935 * translates the following IGMPv2 messages for that group to their
1936 * IGMPv3 equivalents:
1938 * IGMPv2 Message IGMPv3 Equivalent
1939 * -------------- -----------------
1940 * Report IS_EX( {} )
1943 static void amt_igmpv2_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
1944 struct amt_tunnel_list *tunnel)
1946 struct igmphdr *ih = igmp_hdr(skb);
1947 struct iphdr *iph = ip_hdr(skb);
1948 struct amt_group_node *gnode;
1949 union amt_addr group, host;
1951 memset(&group, 0, sizeof(union amt_addr));
1952 group.ip4 = ih->group;
1953 memset(&host, 0, sizeof(union amt_addr));
1954 host.ip4 = iph->saddr;
1956 gnode = amt_lookup_group(tunnel, &group, &host, false);
1958 amt_del_group(amt, gnode);
1961 static void amt_igmpv3_report_handler(struct amt_dev *amt, struct sk_buff *skb,
1962 struct amt_tunnel_list *tunnel)
1964 struct igmpv3_report *ihrv3 = igmpv3_report_hdr(skb);
1965 int len = skb_transport_offset(skb) + sizeof(*ihrv3);
1966 void *zero_grec = (void *)&igmpv3_zero_grec;
1967 struct iphdr *iph = ip_hdr(skb);
1968 struct amt_group_node *gnode;
1969 union amt_addr group, host;
1970 struct igmpv3_grec *grec;
1974 for (i = 0; i < ntohs(ihrv3->ngrec); i++) {
1975 len += sizeof(*grec);
1976 if (!ip_mc_may_pull(skb, len))
1979 grec = (void *)(skb->data + len - sizeof(*grec));
1980 nsrcs = ntohs(grec->grec_nsrcs);
1982 len += nsrcs * sizeof(__be32);
1983 if (!ip_mc_may_pull(skb, len))
1986 memset(&group, 0, sizeof(union amt_addr));
1987 group.ip4 = grec->grec_mca;
1988 memset(&host, 0, sizeof(union amt_addr));
1989 host.ip4 = iph->saddr;
1990 gnode = amt_lookup_group(tunnel, &group, &host, false);
1992 gnode = amt_add_group(amt, tunnel, &group, &host,
1998 amt_add_srcs(amt, tunnel, gnode, grec, false);
1999 switch (grec->grec_type) {
2000 case IGMPV3_MODE_IS_INCLUDE:
2001 amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
2004 case IGMPV3_MODE_IS_EXCLUDE:
2005 amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
2008 case IGMPV3_CHANGE_TO_INCLUDE:
2009 amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
2012 case IGMPV3_CHANGE_TO_EXCLUDE:
2013 amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
2016 case IGMPV3_ALLOW_NEW_SOURCES:
2017 amt_mcast_allow_handler(amt, tunnel, gnode, grec,
2020 case IGMPV3_BLOCK_OLD_SOURCES:
2021 amt_mcast_block_handler(amt, tunnel, gnode, grec,
2027 amt_cleanup_srcs(amt, tunnel, gnode);
/* Caller must hold tunnel->lock. */
2032 static void amt_igmp_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2033 struct amt_tunnel_list *tunnel)
2035 struct igmphdr *ih = igmp_hdr(skb);
2038 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2039 amt_igmpv3_report_handler(amt, skb, tunnel);
2041 case IGMPV2_HOST_MEMBERSHIP_REPORT:
2042 amt_igmpv2_report_handler(amt, skb, tunnel);
2044 case IGMP_HOST_LEAVE_MESSAGE:
2045 amt_igmpv2_leave_handler(amt, skb, tunnel);
2052 #if IS_ENABLED(CONFIG_IPV6)
2054 * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
2056 * When Multicast Address Compatibility Mode is MLDv2, a router acts
2057 * using the MLDv2 protocol for that multicast address. When Multicast
2058 * Address Compatibility Mode is MLDv1, a router internally translates
2059 * the following MLDv1 messages for that multicast address to their
2060 * MLDv2 equivalents:
2062 * MLDv1 Message MLDv2 Equivalent
2063 * -------------- -----------------
2064 * Report IS_EX( {} )
2067 static void amt_mldv1_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2068 struct amt_tunnel_list *tunnel)
2070 struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
2071 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2072 struct amt_group_node *gnode;
2073 union amt_addr group, host;
2075 memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
2076 memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr));
2078 gnode = amt_lookup_group(tunnel, &group, &host, true);
2080 gnode = amt_add_group(amt, tunnel, &group, &host, true);
2081 if (!IS_ERR(gnode)) {
2082 gnode->filter_mode = MCAST_EXCLUDE;
2083 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
2084 msecs_to_jiffies(amt_gmi(amt))))
2091 * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
2093 * When Multicast Address Compatibility Mode is MLDv2, a router acts
2094 * using the MLDv2 protocol for that multicast address. When Multicast
2095 * Address Compatibility Mode is MLDv1, a router internally translates
2096 * the following MLDv1 messages for that multicast address to their
2097 * MLDv2 equivalents:
2099 * MLDv1 Message MLDv2 Equivalent
2100 * -------------- -----------------
2101 * Report IS_EX( {} )
2104 static void amt_mldv1_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
2105 struct amt_tunnel_list *tunnel)
2107 struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
struct ipv6hdr *ip6h = ipv6_hdr(skb);
2109 struct amt_group_node *gnode;
2110 union amt_addr group, host;
2112 memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
2113 memset(&host, 0, sizeof(union amt_addr));
/* Match the IPv6 source address recorded by amt_mldv1_report_handler(). */
memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr));
2116 gnode = amt_lookup_group(tunnel, &group, &host, true);
2118 amt_del_group(amt, gnode);
2123 static void amt_mldv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2124 struct amt_tunnel_list *tunnel)
2126 struct mld2_report *mld2r = (struct mld2_report *)icmp6_hdr(skb);
2127 int len = skb_transport_offset(skb) + sizeof(*mld2r);
2128 void *zero_grec = (void *)&mldv2_zero_grec;
2129 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2130 struct amt_group_node *gnode;
2131 union amt_addr group, host;
2132 struct mld2_grec *grec;
2136 for (i = 0; i < ntohs(mld2r->mld2r_ngrec); i++) {
2137 len += sizeof(*grec);
2138 if (!ipv6_mc_may_pull(skb, len))
2141 grec = (void *)(skb->data + len - sizeof(*grec));
2142 nsrcs = ntohs(grec->grec_nsrcs);
2144 len += nsrcs * sizeof(struct in6_addr);
2145 if (!ipv6_mc_may_pull(skb, len))
2148 memset(&group, 0, sizeof(union amt_addr));
2149 group.ip6 = grec->grec_mca;
2150 memset(&host, 0, sizeof(union amt_addr));
2151 host.ip6 = ip6h->saddr;
2152 gnode = amt_lookup_group(tunnel, &group, &host, true);
2154 gnode = amt_add_group(amt, tunnel, &group, &host,
2160 amt_add_srcs(amt, tunnel, gnode, grec, true);
2161 switch (grec->grec_type) {
2162 case MLD2_MODE_IS_INCLUDE:
2163 amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
2166 case MLD2_MODE_IS_EXCLUDE:
2167 amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
2170 case MLD2_CHANGE_TO_INCLUDE:
2171 amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
2174 case MLD2_CHANGE_TO_EXCLUDE:
2175 amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
2178 case MLD2_ALLOW_NEW_SOURCES:
2179 amt_mcast_allow_handler(amt, tunnel, gnode, grec,
2182 case MLD2_BLOCK_OLD_SOURCES:
2183 amt_mcast_block_handler(amt, tunnel, gnode, grec,
2189 amt_cleanup_srcs(amt, tunnel, gnode);
/* Caller must hold tunnel->lock. */
2194 static void amt_mld_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2195 struct amt_tunnel_list *tunnel)
2197 struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
2199 switch (mld->mld_type) {
2200 case ICMPV6_MGM_REPORT:
2201 amt_mldv1_report_handler(amt, skb, tunnel);
2203 case ICMPV6_MLD2_REPORT:
2204 amt_mldv2_report_handler(amt, skb, tunnel);
2206 case ICMPV6_MGM_REDUCTION:
2207 amt_mldv1_leave_handler(amt, skb, tunnel);
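/* Gateway side: handle an AMT Advertisement.  Record the advertised relay
 * address and kick the request worker immediately.
 */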
2215 static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
2217 struct amt_header_advertisement *amta;
2220 hdr_size = sizeof(*amta) - sizeof(struct amt_header);
2222 if (!pskb_may_pull(skb, hdr_size))
2225 amta = (struct amt_header_advertisement *)(udp_hdr(skb) + 1);
2229 if (amta->reserved || amta->version)
2232 if (ipv4_is_loopback(amta->ip4) || ipv4_is_multicast(amta->ip4) ||
2233 ipv4_is_zeronet(amta->ip4))
2236 amt->remote_ip = amta->ip4;
2237 netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip);
2238 mod_delayed_work(amt_wq, &amt->req_wq, 0);
2240 amt_update_gw_status(amt, AMT_STATUS_RECEIVED_ADVERTISEMENT, true);
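/* Gateway side: decapsulate an AMT Multicast Data message and hand the inner
 * IPv4/IPv6 multicast packet to the stack via gro_cells.
 */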
2244 static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
2246 struct amt_header_mcast_data *amtmd;
2247 int hdr_size, len, err;
2251 amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1);
2252 if (amtmd->reserved || amtmd->version)
2255 hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
2256 if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false))
2258 skb_reset_network_header(skb);
2259 skb_push(skb, sizeof(*eth));
2260 skb_reset_mac_header(skb);
2261 skb_pull(skb, sizeof(*eth));
2264 if (iph->version == 4) {
2265 if (!ipv4_is_multicast(iph->daddr))
2267 skb->protocol = htons(ETH_P_IP);
2268 eth->h_proto = htons(ETH_P_IP);
2269 ip_eth_mc_map(iph->daddr, eth->h_dest);
2270 #if IS_ENABLED(CONFIG_IPV6)
2271 } else if (iph->version == 6) {
2272 struct ipv6hdr *ip6h;
2274 ip6h = ipv6_hdr(skb);
2275 if (!ipv6_addr_is_multicast(&ip6h->daddr))
2277 skb->protocol = htons(ETH_P_IPV6);
2278 eth->h_proto = htons(ETH_P_IPV6);
2279 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
2285 skb->pkt_type = PACKET_MULTICAST;
2286 skb->ip_summed = CHECKSUM_NONE;
2288 err = gro_cells_receive(&amt->gro_cells, skb);
2289 if (likely(err == NET_RX_SUCCESS))
2290 dev_sw_netstats_rx_add(amt->dev, len);
2292 amt->dev->stats.rx_dropped++;
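/* Gateway side: handle an AMT Membership Query.  Cache the response MAC and
 * QQIC from the embedded IGMPv3/MLDv2 query, then inject the query locally
 * so the stack answers with a report.
 */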
2297 static bool amt_membership_query_handler(struct amt_dev *amt,
2298 struct sk_buff *skb)
2300 struct amt_header_membership_query *amtmq;
2301 struct igmpv3_query *ihv3;
2302 struct ethhdr *eth, *oeth;
2306 hdr_size = sizeof(*amtmq) - sizeof(struct amt_header);
2308 if (!pskb_may_pull(skb, hdr_size))
2311 amtmq = (struct amt_header_membership_query *)(udp_hdr(skb) + 1);
2312 if (amtmq->reserved || amtmq->version)
2315 hdr_size = sizeof(*amtmq) + sizeof(struct udphdr) - sizeof(*eth);
2316 if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
2318 oeth = eth_hdr(skb);
2319 skb_reset_mac_header(skb);
2320 skb_pull(skb, sizeof(*eth));
2321 skb_reset_network_header(skb);
2324 if (iph->version == 4) {
2325 if (!ipv4_is_multicast(iph->daddr))
2327 if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
2331 ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
2332 skb_reset_transport_header(skb);
2333 skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
2334 spin_lock_bh(&amt->lock);
2336 amt->mac = amtmq->response_mac;
2338 amt->qi = ihv3->qqic;
2339 spin_unlock_bh(&amt->lock);
2340 skb->protocol = htons(ETH_P_IP);
2341 eth->h_proto = htons(ETH_P_IP);
2342 ip_eth_mc_map(iph->daddr, eth->h_dest);
2343 #if IS_ENABLED(CONFIG_IPV6)
2344 } else if (iph->version == 6) {
2345 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2346 struct mld2_query *mld2q;
2348 if (!ipv6_addr_is_multicast(&ip6h->daddr))
2350 if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
2354 mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
2355 skb_reset_transport_header(skb);
2356 skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
2357 spin_lock_bh(&amt->lock);
2359 amt->mac = amtmq->response_mac;
2361 amt->qi = mld2q->mld2q_qqic;
2362 spin_unlock_bh(&amt->lock);
2363 skb->protocol = htons(ETH_P_IPV6);
2364 eth->h_proto = htons(ETH_P_IPV6);
2365 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
2371 ether_addr_copy(eth->h_source, oeth->h_source);
2372 skb->pkt_type = PACKET_MULTICAST;
2373 skb->ip_summed = CHECKSUM_NONE;
2375 if (netif_rx(skb) == NET_RX_SUCCESS) {
2376 amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
2377 dev_sw_netstats_rx_add(amt->dev, len);
2379 amt->dev->stats.rx_dropped++;
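/* Relay side: handle an AMT Membership Update.  Verify the nonce and
 * response MAC against the tunnel, refresh the tunnel GC timer and feed the
 * embedded IGMP/MLD report into the group/source state machine.
 */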
2385 static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
2387 struct amt_header_membership_update *amtmu;
2388 struct amt_tunnel_list *tunnel;
2389 struct udphdr *udph;
2395 udph = udp_hdr(skb);
2397 if (__iptunnel_pull_header(skb, sizeof(*udph), skb->protocol,
2401 amtmu = (struct amt_header_membership_update *)skb->data;
2402 if (amtmu->reserved || amtmu->version)
2405 skb_pull(skb, sizeof(*amtmu));
2406 skb_reset_network_header(skb);
2408 list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
2409 if (tunnel->ip4 == iph->saddr) {
2410 if ((amtmu->nonce == tunnel->nonce &&
2411 amtmu->response_mac == tunnel->mac)) {
2412 mod_delayed_work(amt_wq, &tunnel->gc_wq,
2413 msecs_to_jiffies(amt_gmi(amt))
2417 netdev_dbg(amt->dev, "Invalid MAC\n");
2427 if (iph->version == 4) {
2428 if (ip_mc_check_igmp(skb)) {
2429 netdev_dbg(amt->dev, "Invalid IGMP\n");
2433 spin_lock_bh(&tunnel->lock);
2434 amt_igmp_report_handler(amt, skb, tunnel);
2435 spin_unlock_bh(&tunnel->lock);
2437 skb_push(skb, sizeof(struct ethhdr));
2438 skb_reset_mac_header(skb);
2440 skb->protocol = htons(ETH_P_IP);
2441 eth->h_proto = htons(ETH_P_IP);
2442 ip_eth_mc_map(iph->daddr, eth->h_dest);
2443 #if IS_ENABLED(CONFIG_IPV6)
2444 } else if (iph->version == 6) {
2445 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2447 if (ipv6_mc_check_mld(skb)) {
2448 netdev_dbg(amt->dev, "Invalid MLD\n");
2452 spin_lock_bh(&tunnel->lock);
2453 amt_mld_report_handler(amt, skb, tunnel);
2454 spin_unlock_bh(&tunnel->lock);
2456 skb_push(skb, sizeof(struct ethhdr));
2457 skb_reset_mac_header(skb);
2459 skb->protocol = htons(ETH_P_IPV6);
2460 eth->h_proto = htons(ETH_P_IPV6);
2461 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
2464 netdev_dbg(amt->dev, "Unsupported Protocol\n");
2468 skb_pull(skb, sizeof(struct ethhdr));
2469 skb->pkt_type = PACKET_MULTICAST;
2470 skb->ip_summed = CHECKSUM_NONE;
2472 if (netif_rx(skb) == NET_RX_SUCCESS) {
2473 amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE,
2475 dev_sw_netstats_rx_add(amt->dev, len);
2477 amt->dev->stats.rx_dropped++;
2483 static void amt_send_advertisement(struct amt_dev *amt, __be32 nonce,
2484 __be32 daddr, __be16 dport)
2486 struct amt_header_advertisement *amta;
2487 int hlen, tlen, offset;
2488 struct socket *sock;
2489 struct udphdr *udph;
2490 struct sk_buff *skb;
2498 sock = rcu_dereference(amt->sock);
2502 if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
2505 rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
2506 daddr, amt->local_ip,
2507 dport, amt->relay_port,
2509 amt->stream_dev->ifindex);
2511 amt->dev->stats.tx_errors++;
2515 hlen = LL_RESERVED_SPACE(amt->dev);
2516 tlen = amt->dev->needed_tailroom;
2517 len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
2518 skb = netdev_alloc_skb_ip_align(amt->dev, len);
2521 amt->dev->stats.tx_errors++;
2525 skb->priority = TC_PRIO_CONTROL;
2526 skb_dst_set(skb, &rt->dst);
2528 len = sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
2529 skb_reset_network_header(skb);
2531 amta = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
2533 amta->type = AMT_MSG_ADVERTISEMENT;
2535 amta->nonce = nonce;
2536 amta->ip4 = amt->local_ip;
2537 skb_push(skb, sizeof(*udph));
2538 skb_reset_transport_header(skb);
2539 udph = udp_hdr(skb);
2540 udph->source = amt->relay_port;
2542 udph->len = htons(sizeof(*amta) + sizeof(*udph));
2544 offset = skb_transport_offset(skb);
2545 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
2546 udph->check = csum_tcpudp_magic(amt->local_ip, daddr,
2547 sizeof(*udph) + sizeof(*amta),
2548 IPPROTO_UDP, skb->csum);
2550 skb_push(skb, sizeof(*iph));
2553 iph->ihl = (sizeof(struct iphdr)) >> 2;
2556 iph->ttl = ip4_dst_hoplimit(&rt->dst);
2558 iph->saddr = amt->local_ip;
2559 iph->protocol = IPPROTO_UDP;
2560 iph->tot_len = htons(len);
2562 skb->ip_summed = CHECKSUM_NONE;
2563 ip_select_ident(amt->net, skb, NULL);
2565 err = ip_local_out(amt->net, sock->sk, skb);
2566 if (unlikely(net_xmit_eval(err)))
2567 amt->dev->stats.tx_errors++;
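/* Relay-side handler for an AMT Relay Discovery: validate the header and
 * answer the discovering gateway with an advertisement.
 */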
2573 static bool amt_discovery_handler(struct amt_dev *amt, struct sk_buff *skb)
2575 struct amt_header_discovery *amtd;
2576 struct udphdr *udph;
2579 if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtd)))
2583 udph = udp_hdr(skb);
2584 amtd = (struct amt_header_discovery *)(udp_hdr(skb) + 1);
2586 if (amtd->reserved || amtd->version)
2589 amt_send_advertisement(amt, amtd->nonce, iph->saddr, udph->source);
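/* Relay-side handler for an AMT Request. A tunnel entry is looked up or
 * allocated for the requesting gateway, a response MAC is derived with
 * siphash from the gateway address, source port and nonce, and an IGMP or
 * MLD general query is sent back as the AMT Membership Query.
 */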
2594 static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
2596 struct amt_header_request *amtrh;
2597 struct amt_tunnel_list *tunnel;
2598 unsigned long long key;
2599 struct udphdr *udph;
2604 if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtrh)))
2608 udph = udp_hdr(skb);
2609 amtrh = (struct amt_header_request *)(udp_hdr(skb) + 1);
2611 if (amtrh->reserved1 || amtrh->reserved2 || amtrh->version)
2614 list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list)
2615 if (tunnel->ip4 == iph->saddr)
2618 if (amt->nr_tunnels >= amt->max_tunnels) {
2619 icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
2623 tunnel = kzalloc(sizeof(*tunnel) +
2624 (sizeof(struct hlist_head) * amt->hash_buckets),
2629 tunnel->source_port = udph->source;
2630 tunnel->ip4 = iph->saddr;
2632 memcpy(&key, &tunnel->key, sizeof(unsigned long long));
2634 spin_lock_init(&tunnel->lock);
2635 for (i = 0; i < amt->hash_buckets; i++)
2636 INIT_HLIST_HEAD(&tunnel->groups[i]);
2638 INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire);
2640 spin_lock_bh(&amt->lock);
2641 list_add_tail_rcu(&tunnel->list, &amt->tunnel_list);
2642 tunnel->key = amt->key;
2643 amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
2645 mod_delayed_work(amt_wq, &tunnel->gc_wq,
2646 msecs_to_jiffies(amt_gmi(amt)));
2647 spin_unlock_bh(&amt->lock);
2650 tunnel->nonce = amtrh->nonce;
2651 mac = siphash_3u32((__force u32)tunnel->ip4,
2652 (__force u32)tunnel->source_port,
2653 (__force u32)tunnel->nonce,
2655 tunnel->mac = mac >> 16;
2657 if (!netif_running(amt->dev) || !netif_running(amt->stream_dev))
2661 amt_send_igmp_gq(amt, tunnel);
2663 amt_send_mld_gq(amt, tunnel);
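/* encap_rcv callback of the UDP tunnel socket: dispatch incoming AMT
 * messages by type. A gateway accepts advertisements, membership queries
 * and multicast data only from the configured relay addresses; a relay
 * accepts discovery, request and membership update messages. Everything
 * else is counted as rx_dropped.
 */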
2668 static int amt_rcv(struct sock *sk, struct sk_buff *skb)
2670 struct amt_dev *amt;
2676 amt = rcu_dereference_sk_user_data(sk);
2682 skb->dev = amt->dev;
2684 type = amt_parse_type(skb);
2690 if (amt->mode == AMT_MODE_GATEWAY) {
2692 case AMT_MSG_ADVERTISEMENT:
2693 if (iph->saddr != amt->discovery_ip) {
2694 netdev_dbg(amt->dev, "Invalid Relay IP\n");
2698 if (amt_advertisement_handler(amt, skb))
2699 amt->dev->stats.rx_dropped++;
2701 case AMT_MSG_MULTICAST_DATA:
2702 if (iph->saddr != amt->remote_ip) {
2703 netdev_dbg(amt->dev, "Invalid Relay IP\n");
2707 err = amt_multicast_data_handler(amt, skb);
2712 case AMT_MSG_MEMBERSHIP_QUERY:
2713 if (iph->saddr != amt->remote_ip) {
2714 netdev_dbg(amt->dev, "Invalid Relay IP\n");
2718 err = amt_membership_query_handler(amt, skb);
2725 netdev_dbg(amt->dev, "Invalid type of Gateway\n");
2730 case AMT_MSG_DISCOVERY:
2731 err = amt_discovery_handler(amt, skb);
2733 case AMT_MSG_REQUEST:
2734 err = amt_request_handler(amt, skb);
2736 case AMT_MSG_MEMBERSHIP_UPDATE:
2737 err = amt_update_handler(amt, skb);
2744 netdev_dbg(amt->dev, "Invalid type of relay\n");
2750 amt->dev->stats.rx_dropped++;
2756 rcu_read_unlock_bh();
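/* encap_err_lookup callback: called for ICMP errors received on the tunnel
 * socket. In gateway mode, an unreachable reported for a request or
 * membership update reschedules the request work immediately so the
 * handshake with the relay is retried.
 */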
2760 static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
2762 struct amt_dev *amt;
2766 amt = rcu_dereference_sk_user_data(sk);
2770 if (amt->mode != AMT_MODE_GATEWAY)
2773 type = amt_parse_type(skb);
2777 netdev_dbg(amt->dev, "Received IGMP Unreachable of %s\n",
2780 case AMT_MSG_DISCOVERY:
2782 case AMT_MSG_REQUEST:
2783 case AMT_MSG_MEMBERSHIP_UPDATE:
2784 if (amt->status >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
2785 mod_delayed_work(amt_wq, &amt->req_wq, 0);
2791 rcu_read_unlock_bh();
2794 rcu_read_unlock_bh();
2795 amt->dev->stats.rx_dropped++;
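/* Create the kernel UDP socket used for AMT encapsulation, bound to the
 * given port on INADDR_ANY.
 */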
2799 static struct socket *amt_create_sock(struct net *net, __be16 port)
2801 struct udp_port_cfg udp_conf;
2802 struct socket *sock;
2805 memset(&udp_conf, 0, sizeof(udp_conf));
2806 udp_conf.family = AF_INET;
2807 udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
2809 udp_conf.local_udp_port = port;
2811 err = udp_sock_create(net, &udp_conf, &sock);
2813 return ERR_PTR(err);
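/* Mark the UDP socket as an encapsulation socket and install amt_rcv() and
 * amt_err_lookup() as its encap callbacks.
 */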
2818 static int amt_socket_create(struct amt_dev *amt)
2820 struct udp_tunnel_sock_cfg tunnel_cfg;
2821 struct socket *sock;
2823 sock = amt_create_sock(amt->net, amt->relay_port);
2825 return PTR_ERR(sock);
2827 /* Mark socket as an encapsulation socket */
2828 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
2829 tunnel_cfg.sk_user_data = amt;
2830 tunnel_cfg.encap_type = 1;
2831 tunnel_cfg.encap_rcv = amt_rcv;
2832 tunnel_cfg.encap_err_lookup = amt_err_lookup;
2833 tunnel_cfg.encap_destroy = NULL;
2834 setup_udp_tunnel_sock(amt->net, sock, &tunnel_cfg);
2836 rcu_assign_pointer(amt->sock, sock);
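/* ndo_open: create the encap socket, generate a fresh secret key and start
 * the mode-specific work: discovery and request timers for a gateway,
 * periodic secret regeneration for a relay.
 */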
2840 static int amt_dev_open(struct net_device *dev)
2842 struct amt_dev *amt = netdev_priv(dev);
2845 amt->ready4 = false;
2846 amt->ready6 = false;
2848 err = amt_socket_create(amt);
2854 get_random_bytes(&amt->key, sizeof(siphash_key_t));
2856 amt->status = AMT_STATUS_INIT;
2857 if (amt->mode == AMT_MODE_GATEWAY) {
2858 mod_delayed_work(amt_wq, &amt->discovery_wq, 0);
2859 mod_delayed_work(amt_wq, &amt->req_wq, 0);
2860 } else if (amt->mode == AMT_MODE_RELAY) {
2861 mod_delayed_work(amt_wq, &amt->secret_wq,
2862 msecs_to_jiffies(AMT_SECRET_TIMEOUT));
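/* ndo_stop: cancel pending work, release the encap socket and tear down all
 * relay-side tunnels together with their group state.
 */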
2867 static int amt_dev_stop(struct net_device *dev)
2869 struct amt_dev *amt = netdev_priv(dev);
2870 struct amt_tunnel_list *tunnel, *tmp;
2871 struct socket *sock;
2873 cancel_delayed_work_sync(&amt->req_wq);
2874 cancel_delayed_work_sync(&amt->discovery_wq);
2875 cancel_delayed_work_sync(&amt->secret_wq);
2878 sock = rtnl_dereference(amt->sock);
2879 RCU_INIT_POINTER(amt->sock, NULL);
2882 udp_tunnel_sock_release(sock);
2884 amt->ready4 = false;
2885 amt->ready6 = false;
2889 list_for_each_entry_safe(tunnel, tmp, &amt->tunnel_list, list) {
2890 list_del_rcu(&tunnel->list);
2892 cancel_delayed_work_sync(&tunnel->gc_wq);
2893 amt_clear_groups(tunnel);
2894 kfree_rcu(tunnel, rcu);
2900 static const struct device_type amt_type = {
2904 static int amt_dev_init(struct net_device *dev)
2906 struct amt_dev *amt = netdev_priv(dev);
2910 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2914 err = gro_cells_init(&amt->gro_cells, dev);
2916 free_percpu(dev->tstats);
2923 static void amt_dev_uninit(struct net_device *dev)
2925 struct amt_dev *amt = netdev_priv(dev);
2927 gro_cells_destroy(&amt->gro_cells);
2928 free_percpu(dev->tstats);
2931 static const struct net_device_ops amt_netdev_ops = {
2932 .ndo_init = amt_dev_init,
2933 .ndo_uninit = amt_dev_uninit,
2934 .ndo_open = amt_dev_open,
2935 .ndo_stop = amt_dev_stop,
2936 .ndo_start_xmit = amt_dev_xmit,
2937 .ndo_get_stats64 = dev_get_tstats64,
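/* Default setup for the "amt" rtnl link type: a point-to-point, ARP-less
 * virtual device kept in its own netns, with software GSO and no transmit
 * queue of its own.
 */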
2940 static void amt_link_setup(struct net_device *dev)
2942 dev->netdev_ops = &amt_netdev_ops;
2943 dev->needs_free_netdev = true;
2944 SET_NETDEV_DEVTYPE(dev, &amt_type);
2945 dev->min_mtu = ETH_MIN_MTU;
2946 dev->max_mtu = ETH_MAX_MTU;
2947 dev->type = ARPHRD_NONE;
2948 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
2949 dev->hard_header_len = 0;
2951 dev->priv_flags |= IFF_NO_QUEUE;
2952 dev->features |= NETIF_F_LLTX;
2953 dev->features |= NETIF_F_GSO_SOFTWARE;
2954 dev->features |= NETIF_F_NETNS_LOCAL;
2955 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2956 dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
2957 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2958 eth_hw_addr_random(dev);
2959 eth_zero_addr(dev->broadcast);
2963 static const struct nla_policy amt_policy[IFLA_AMT_MAX + 1] = {
2964 [IFLA_AMT_MODE] = { .type = NLA_U32 },
2965 [IFLA_AMT_RELAY_PORT] = { .type = NLA_U16 },
2966 [IFLA_AMT_GATEWAY_PORT] = { .type = NLA_U16 },
2967 [IFLA_AMT_LINK] = { .type = NLA_U32 },
2968 [IFLA_AMT_LOCAL_IP] = { .len = sizeof_field(struct iphdr, daddr) },
2969 [IFLA_AMT_REMOTE_IP] = { .len = sizeof_field(struct iphdr, daddr) },
2970 [IFLA_AMT_DISCOVERY_IP] = { .len = sizeof_field(struct iphdr, daddr) },
2971 [IFLA_AMT_MAX_TUNNELS] = { .type = NLA_U32 },
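/* Validate netlink attributes: a lower link, a mode and a local address are
 * always required, and gateway mode additionally needs a discovery address.
 */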
2974 static int amt_validate(struct nlattr *tb[], struct nlattr *data[],
2975 struct netlink_ext_ack *extack)
2980 if (!data[IFLA_AMT_LINK]) {
2981 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LINK],
2982 "Link attribute is required");
2986 if (!data[IFLA_AMT_MODE]) {
2987 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
2988 "Mode attribute is required");
2992 if (nla_get_u32(data[IFLA_AMT_MODE]) > AMT_MODE_MAX) {
2993 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
2994 "Mode attribute is not valid");
2998 if (!data[IFLA_AMT_LOCAL_IP]) {
2999 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LOCAL_IP],
3000 "Local attribute is required");
3004 if (!data[IFLA_AMT_DISCOVERY_IP] &&
3005 nla_get_u32(data[IFLA_AMT_MODE]) == AMT_MODE_GATEWAY) {
3006 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_DISCOVERY_IP],
3007 "Discovery attribute is required");
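/* Create a new AMT device on top of the given stream device. Typical usage
 * from user space (assuming an iproute2 build with AMT support; addresses
 * are examples only):
 *
 *   ip link add amt0 type amt mode relay local 10.0.0.1 dev eth0
 *   ip link add amt0 type amt mode gateway local 10.0.0.2 \
 *           discovery 10.0.0.1 dev eth0
 *
 * MTU and needed headroom are derived from the stream device minus the
 * per-mode encapsulation overhead (AMT_RELAY_HLEN / AMT_GW_HLEN).
 */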
3014 static int amt_newlink(struct net *net, struct net_device *dev,
3015 struct nlattr *tb[], struct nlattr *data[],
3016 struct netlink_ext_ack *extack)
3018 struct amt_dev *amt = netdev_priv(dev);
3022 amt->mode = nla_get_u32(data[IFLA_AMT_MODE]);
3024 if (data[IFLA_AMT_MAX_TUNNELS] &&
3025 nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]))
3026 amt->max_tunnels = nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]);
3028 amt->max_tunnels = AMT_MAX_TUNNELS;
3030 spin_lock_init(&amt->lock);
3031 amt->max_groups = AMT_MAX_GROUP;
3032 amt->max_sources = AMT_MAX_SOURCE;
3033 amt->hash_buckets = AMT_HSIZE;
3034 amt->nr_tunnels = 0;
3035 get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed));
3036 amt->stream_dev = dev_get_by_index(net,
3037 nla_get_u32(data[IFLA_AMT_LINK]));
3038 if (!amt->stream_dev) {
3039 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
3040 "Can't find stream device");
3044 if (amt->stream_dev->type != ARPHRD_ETHER) {
3045 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
3046 "Invalid stream device type");
3050 amt->local_ip = nla_get_in_addr(data[IFLA_AMT_LOCAL_IP]);
3051 if (ipv4_is_loopback(amt->local_ip) ||
3052 ipv4_is_zeronet(amt->local_ip) ||
3053 ipv4_is_multicast(amt->local_ip)) {
3054 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LOCAL_IP],
3055 "Invalid Local address");
3059 if (data[IFLA_AMT_RELAY_PORT])
3060 amt->relay_port = nla_get_be16(data[IFLA_AMT_RELAY_PORT]);
3062 amt->relay_port = htons(IANA_AMT_UDP_PORT);
3064 if (data[IFLA_AMT_GATEWAY_PORT])
3065 amt->gw_port = nla_get_be16(data[IFLA_AMT_GATEWAY_PORT]);
3067 amt->gw_port = htons(IANA_AMT_UDP_PORT);
3069 if (!amt->relay_port) {
3070 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
3071 "relay port must not be 0");
3074 if (amt->mode == AMT_MODE_RELAY) {
3075 amt->qrv = amt->net->ipv4.sysctl_igmp_qrv;
3077 dev->needed_headroom = amt->stream_dev->needed_headroom +
3079 dev->mtu = amt->stream_dev->mtu - AMT_RELAY_HLEN;
3080 dev->max_mtu = dev->mtu;
3081 dev->min_mtu = ETH_MIN_MTU + AMT_RELAY_HLEN;
3083 if (!data[IFLA_AMT_DISCOVERY_IP]) {
3084 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
3085 "discovery must be set in gateway mode");
3088 if (!amt->gw_port) {
3089 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
3090 "gateway port must not be 0");
3094 amt->discovery_ip = nla_get_in_addr(data[IFLA_AMT_DISCOVERY_IP]);
3095 if (ipv4_is_loopback(amt->discovery_ip) ||
3096 ipv4_is_zeronet(amt->discovery_ip) ||
3097 ipv4_is_multicast(amt->discovery_ip)) {
3098 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
3099 "discovery must be unicast");
3103 dev->needed_headroom = amt->stream_dev->needed_headroom +
3105 dev->mtu = amt->stream_dev->mtu - AMT_GW_HLEN;
3106 dev->max_mtu = dev->mtu;
3107 dev->min_mtu = ETH_MIN_MTU + AMT_GW_HLEN;
3109 amt->qi = AMT_INIT_QUERY_INTERVAL;
3111 err = register_netdevice(dev);
3113 netdev_dbg(dev, "failed to register new netdev %d\n", err);
3117 err = netdev_upper_dev_link(amt->stream_dev, dev, extack);
3119 unregister_netdevice(dev);
3123 INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work);
3124 INIT_DELAYED_WORK(&amt->req_wq, amt_req_work);
3125 INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work);
3126 INIT_LIST_HEAD(&amt->tunnel_list);
3130 dev_put(amt->stream_dev);
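/* Tear down an AMT device: unregister it, unlink it from the stream device
 * and drop the reference taken in amt_newlink().
 */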
3134 static void amt_dellink(struct net_device *dev, struct list_head *head)
3136 struct amt_dev *amt = netdev_priv(dev);
3138 unregister_netdevice_queue(dev, head);
3139 netdev_upper_dev_unlink(amt->stream_dev, dev);
3140 dev_put(amt->stream_dev);
3143 static size_t amt_get_size(const struct net_device *dev)
3145 return nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MODE */
3146 nla_total_size(sizeof(__u16)) + /* IFLA_AMT_RELAY_PORT */
3147 nla_total_size(sizeof(__u16)) + /* IFLA_AMT_GATEWAY_PORT */
3148 nla_total_size(sizeof(__u32)) + /* IFLA_AMT_LINK */
3149 nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MAX_TUNNELS */
3150 nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_DISCOVERY_IP */
3151 nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_REMOTE_IP */
3152 nla_total_size(sizeof(struct iphdr)); /* IFLA_AMT_LOCAL_IP */
3155 static int amt_fill_info(struct sk_buff *skb, const struct net_device *dev)
3157 struct amt_dev *amt = netdev_priv(dev);
3159 if (nla_put_u32(skb, IFLA_AMT_MODE, amt->mode))
3160 goto nla_put_failure;
3161 if (nla_put_be16(skb, IFLA_AMT_RELAY_PORT, amt->relay_port))
3162 goto nla_put_failure;
3163 if (nla_put_be16(skb, IFLA_AMT_GATEWAY_PORT, amt->gw_port))
3164 goto nla_put_failure;
3165 if (nla_put_u32(skb, IFLA_AMT_LINK, amt->stream_dev->ifindex))
3166 goto nla_put_failure;
3167 if (nla_put_in_addr(skb, IFLA_AMT_LOCAL_IP, amt->local_ip))
3168 goto nla_put_failure;
3169 if (nla_put_in_addr(skb, IFLA_AMT_DISCOVERY_IP, amt->discovery_ip))
3170 goto nla_put_failure;
3172 if (nla_put_in_addr(skb, IFLA_AMT_REMOTE_IP, amt->remote_ip))
3173 goto nla_put_failure;
3174 if (nla_put_u32(skb, IFLA_AMT_MAX_TUNNELS, amt->max_tunnels))
3175 goto nla_put_failure;
3183 static struct rtnl_link_ops amt_link_ops __read_mostly = {
3185 .maxtype = IFLA_AMT_MAX,
3186 .policy = amt_policy,
3187 .priv_size = sizeof(struct amt_dev),
3188 .setup = amt_link_setup,
3189 .validate = amt_validate,
3190 .newlink = amt_newlink,
3191 .dellink = amt_dellink,
3192 .get_size = amt_get_size,
3193 .fill_info = amt_fill_info,
3196 static struct net_device *amt_lookup_upper_dev(struct net_device *dev)
3198 struct net_device *upper_dev;
3199 struct amt_dev *amt;
3201 for_each_netdev(dev_net(dev), upper_dev) {
3202 if (netif_is_amt(upper_dev)) {
3203 amt = netdev_priv(upper_dev);
3204 if (amt->stream_dev == dev)
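/* Notifier for events on the underlying stream device: when the lower
 * device is unregistered the stacked AMT device is removed as well, and MTU
 * changes are propagated minus the encapsulation overhead.
 */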
3212 static int amt_device_event(struct notifier_block *unused,
3213 unsigned long event, void *ptr)
3215 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3216 struct net_device *upper_dev;
3217 struct amt_dev *amt;
3221 upper_dev = amt_lookup_upper_dev(dev);
3224 amt = netdev_priv(upper_dev);
3227 case NETDEV_UNREGISTER:
3228 amt_dellink(amt->dev, &list);
3229 unregister_netdevice_many(&list);
3231 case NETDEV_CHANGEMTU:
3232 if (amt->mode == AMT_MODE_RELAY)
3233 new_mtu = dev->mtu - AMT_RELAY_HLEN;
3235 new_mtu = dev->mtu - AMT_GW_HLEN;
3237 dev_set_mtu(amt->dev, new_mtu);
3244 static struct notifier_block amt_notifier_block __read_mostly = {
3245 .notifier_call = amt_device_event,
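/* Module init: register the netdevice notifier and rtnl link ops, create
 * the amt workqueue and arm the periodic source garbage collection.
 */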
3248 static int __init amt_init(void)
3252 err = register_netdevice_notifier(&amt_notifier_block);
3256 err = rtnl_link_register(&amt_link_ops);
3258 goto unregister_notifier;
3260 amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 1);
3263 goto rtnl_unregister;
3266 spin_lock_init(&source_gc_lock);
3267 spin_lock_bh(&source_gc_lock);
3268 INIT_DELAYED_WORK(&source_gc_wq, amt_source_gc_work);
3269 mod_delayed_work(amt_wq, &source_gc_wq,
3270 msecs_to_jiffies(AMT_GC_INTERVAL));
3271 spin_unlock_bh(&source_gc_lock);
3276 rtnl_link_unregister(&amt_link_ops);
3277 unregister_notifier:
3278 unregister_netdevice_notifier(&amt_notifier_block);
3280 pr_err("error loading AMT module\n");
3283 late_initcall(amt_init);
3285 static void __exit amt_fini(void)
3287 rtnl_link_unregister(&amt_link_ops);
3288 unregister_netdevice_notifier(&amt_notifier_block);
3289 cancel_delayed_work_sync(&source_gc_wq);
3290 __amt_source_gc_work();
3291 destroy_workqueue(amt_wq);
3293 module_exit(amt_fini);
3295 MODULE_LICENSE("GPL");
3297 MODULE_ALIAS_RTNL_LINK("amt");