// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Multicast support for IPv6
 *	Linux INET6 implementation
 *
 *	Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
 */

/*	Changes:
 *
 *	yoshfuji	: fix format of router-alert option
 *	YOSHIFUJI Hideaki @USAGI:
 *		Fixed source address for MLD message based on
 *		<draft-ietf-magma-mld-source-05.txt>.
 *	YOSHIFUJI Hideaki @USAGI:
 *		- Ignore Queries for invalid addresses.
 *		- MLD for link-local addresses.
 */
25 #include <linux/module.h>
26 #include <linux/errno.h>
27 #include <linux/types.h>
28 #include <linux/string.h>
29 #include <linux/socket.h>
30 #include <linux/sockios.h>
31 #include <linux/jiffies.h>
32 #include <linux/times.h>
33 #include <linux/net.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/if_arp.h>
38 #include <linux/route.h>
39 #include <linux/init.h>
40 #include <linux/proc_fs.h>
41 #include <linux/seq_file.h>
42 #include <linux/slab.h>
43 #include <linux/pkt_sched.h>
46 #include <linux/netfilter.h>
47 #include <linux/netfilter_ipv6.h>
49 #include <net/net_namespace.h>
54 #include <net/protocol.h>
55 #include <net/if_inet6.h>
56 #include <net/ndisc.h>
57 #include <net/addrconf.h>
58 #include <net/ip6_route.h>
59 #include <net/inet_common.h>
61 #include <net/ip6_checksum.h>
63 /* Ensure that we have struct in6_addr aligned on 32bit word. */
64 static int __mld2_query_bugs[] __attribute__((__unused__)) = {
65 BUILD_BUG_ON_ZERO(offsetof(struct mld2_query, mld2q_srcs) % 4),
66 BUILD_BUG_ON_ZERO(offsetof(struct mld2_report, mld2r_grec) % 4),
	BUILD_BUG_ON_ZERO(offsetof(struct mld2_grec, grec_mca) % 4)
};
70 static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
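/* mld2_all_mcr above is the "all MLDv2-capable routers" address ff02::16
 * (RFC3810); every MLDv2 report built in this file is sent to it,
 * regardless of which group is being reported.
 */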
72 static void igmp6_join_group(struct ifmcaddr6 *ma);
73 static void igmp6_leave_group(struct ifmcaddr6 *ma);
74 static void igmp6_timer_handler(struct timer_list *t);
76 static void mld_gq_timer_expire(struct timer_list *t);
77 static void mld_ifc_timer_expire(struct timer_list *t);
78 static void mld_ifc_event(struct inet6_dev *idev);
79 static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
80 static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
81 static void mld_clear_delrec(struct inet6_dev *idev);
82 static bool mld_in_v1_mode(const struct inet6_dev *idev);
83 static int sf_setstate(struct ifmcaddr6 *pmc);
84 static void sf_markstate(struct ifmcaddr6 *pmc);
85 static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
86 static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
87 int sfmode, int sfcount, const struct in6_addr *psfsrc,
89 static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
90 int sfmode, int sfcount, const struct in6_addr *psfsrc,
92 static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
93 struct inet6_dev *idev);
94 static int __ipv6_dev_mc_inc(struct net_device *dev,
95 const struct in6_addr *addr, unsigned int mode);
97 #define MLD_QRV_DEFAULT 2
98 /* RFC3810, 9.2. Query Interval */
99 #define MLD_QI_DEFAULT (125 * HZ)
100 /* RFC3810, 9.3. Query Response Interval */
101 #define MLD_QRI_DEFAULT (10 * HZ)
103 /* RFC3810, 8.1 Query Version Distinctions */
104 #define MLD_V1_QUERY_LEN 24
105 #define MLD_V2_QUERY_LEN_MIN 28
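/* These minimums follow from the wire format: an MLDv1 query is an 8-byte
 * ICMPv6 header (type, code, checksum, maximum response delay, reserved)
 * plus a 16-byte multicast address, i.e. 24 octets; an MLDv2 query adds at
 * least the Resv/S/QRV byte, the QQIC byte and a 2-byte source count, for
 * a 28-octet minimum before any source addresses.
 */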
107 #define IPV6_MLD_MAX_MSF 64
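/* Per-socket cap on source filters for a single group.  The effective
 * limit is the sysctl declared just below; in mainline kernels it is
 * exposed as net.ipv6.mld_max_msf (default 64).
 */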
109 int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
110 int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;
113 * socket join on multicast group
#define for_each_pmc_rcu(np, pmc)				\
	for (pmc = rcu_dereference(np->ipv6_mc_list);		\
	     pmc != NULL;					\
	     pmc = rcu_dereference(pmc->next))
static int unsolicited_report_interval(struct inet6_dev *idev)
{
	int iv;

	if (mld_in_v1_mode(idev))
		iv = idev->cnf.mldv1_unsolicited_report_interval;
	else
		iv = idev->cnf.mldv2_unsolicited_report_interval;
	return iv > 0 ? iv : 1;
}
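/* The two per-device intervals consulted above default to the values the
 * RFCs recommend (10 seconds for MLDv1 per RFC2710, 1 second for MLDv2
 * per RFC3810); a non-positive setting is clamped to one jiffy above.
 */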
133 static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
134 const struct in6_addr *addr, unsigned int mode)
136 struct net_device *dev = NULL;
137 struct ipv6_mc_socklist *mc_lst;
138 struct ipv6_pinfo *np = inet6_sk(sk);
139 struct net *net = sock_net(sk);
144 if (!ipv6_addr_is_multicast(addr))
148 for_each_pmc_rcu(np, mc_lst) {
149 if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
150 ipv6_addr_equal(&mc_lst->addr, addr)) {
157 mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
163 mc_lst->addr = *addr;
167 rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
173 dev = __dev_get_by_index(net, ifindex);
176 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
180 mc_lst->ifindex = dev->ifindex;
181 mc_lst->sfmode = mode;
182 rwlock_init(&mc_lst->sflock);
183 mc_lst->sflist = NULL;
186 * now add/increase the group membership on the device
189 err = __ipv6_dev_mc_inc(dev, addr, mode);
192 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
196 mc_lst->next = np->ipv6_mc_list;
197 rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
202 int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
204 return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
206 EXPORT_SYMBOL(ipv6_sock_mc_join);
208 int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
209 const struct in6_addr *addr, unsigned int mode)
211 return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
215 * socket leave on multicast group
217 int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
219 struct ipv6_pinfo *np = inet6_sk(sk);
220 struct ipv6_mc_socklist *mc_lst;
221 struct ipv6_mc_socklist __rcu **lnk;
222 struct net *net = sock_net(sk);
226 if (!ipv6_addr_is_multicast(addr))
229 for (lnk = &np->ipv6_mc_list;
230 (mc_lst = rtnl_dereference(*lnk)) != NULL;
231 lnk = &mc_lst->next) {
232 if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
233 ipv6_addr_equal(&mc_lst->addr, addr)) {
234 struct net_device *dev;
238 dev = __dev_get_by_index(net, mc_lst->ifindex);
240 struct inet6_dev *idev = __in6_dev_get(dev);
242 (void) ip6_mc_leave_src(sk, mc_lst, idev);
244 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
246 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
248 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
249 kfree_rcu(mc_lst, rcu);
254 return -EADDRNOTAVAIL;
256 EXPORT_SYMBOL(ipv6_sock_mc_drop);
258 /* called with rcu_read_lock() */
259 static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
260 const struct in6_addr *group,
263 struct net_device *dev = NULL;
264 struct inet6_dev *idev = NULL;
267 struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, NULL, 0);
274 dev = dev_get_by_index_rcu(net, ifindex);
278 idev = __in6_dev_get(dev);
281 read_lock_bh(&idev->lock);
283 read_unlock_bh(&idev->lock);
289 void __ipv6_sock_mc_close(struct sock *sk)
291 struct ipv6_pinfo *np = inet6_sk(sk);
292 struct ipv6_mc_socklist *mc_lst;
293 struct net *net = sock_net(sk);
297 while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) {
298 struct net_device *dev;
300 np->ipv6_mc_list = mc_lst->next;
302 dev = __dev_get_by_index(net, mc_lst->ifindex);
304 struct inet6_dev *idev = __in6_dev_get(dev);
306 (void) ip6_mc_leave_src(sk, mc_lst, idev);
308 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
310 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
312 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
313 kfree_rcu(mc_lst, rcu);
317 void ipv6_sock_mc_close(struct sock *sk)
319 struct ipv6_pinfo *np = inet6_sk(sk);
321 if (!rcu_access_pointer(np->ipv6_mc_list))
324 __ipv6_sock_mc_close(sk);
328 int ip6_mc_source(int add, int omode, struct sock *sk,
329 struct group_source_req *pgsr)
331 struct in6_addr *source, *group;
332 struct ipv6_mc_socklist *pmc;
333 struct inet6_dev *idev;
334 struct ipv6_pinfo *inet6 = inet6_sk(sk);
335 struct ip6_sf_socklist *psl;
336 struct net *net = sock_net(sk);
342 source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
343 group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;
345 if (!ipv6_addr_is_multicast(group))
349 idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface);
355 err = -EADDRNOTAVAIL;
357 for_each_pmc_rcu(inet6, pmc) {
358 if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
360 if (ipv6_addr_equal(&pmc->addr, group))
363 if (!pmc) { /* must have a prior join */
367 /* if a source filter was set, must be the same mode as before */
369 if (pmc->sfmode != omode) {
373 } else if (pmc->sfmode != omode) {
374 /* allow mode switches for empty-set filters */
375 ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
376 ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
380 write_lock(&pmc->sflock);
386 goto done; /* err = -EADDRNOTAVAIL */
388 for (i = 0; i < psl->sl_count; i++) {
389 rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
393 if (rv) /* source not found */
394 goto done; /* err = -EADDRNOTAVAIL */
396 /* special case - (INCLUDE, empty) == LEAVE_GROUP */
397 if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
402 /* update the interface filter */
403 ip6_mc_del_src(idev, group, omode, 1, source, 1);
405 for (j = i+1; j < psl->sl_count; j++)
406 psl->sl_addr[j-1] = psl->sl_addr[j];
411 /* else, add a new source to the filter */
413 if (psl && psl->sl_count >= sysctl_mld_max_msf) {
417 if (!psl || psl->sl_count == psl->sl_max) {
418 struct ip6_sf_socklist *newpsl;
419 int count = IP6_SFBLOCK;
422 count += psl->sl_max;
423 newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);
428 newpsl->sl_max = count;
429 newpsl->sl_count = count - IP6_SFBLOCK;
431 for (i = 0; i < psl->sl_count; i++)
432 newpsl->sl_addr[i] = psl->sl_addr[i];
433 sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
435 pmc->sflist = psl = newpsl;
437 rv = 1; /* > 0 for insert logic below if sl_count is 0 */
438 for (i = 0; i < psl->sl_count; i++) {
439 rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
440 if (rv == 0) /* There is an error in the address. */
443 for (j = psl->sl_count-1; j >= i; j--)
444 psl->sl_addr[j+1] = psl->sl_addr[j];
445 psl->sl_addr[i] = *source;
448 /* update the interface list */
449 ip6_mc_add_src(idev, group, omode, 1, source, 1);
452 write_unlock(&pmc->sflock);
453 read_unlock_bh(&idev->lock);
456 err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
460 int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
461 struct sockaddr_storage *list)
463 const struct in6_addr *group;
464 struct ipv6_mc_socklist *pmc;
465 struct inet6_dev *idev;
466 struct ipv6_pinfo *inet6 = inet6_sk(sk);
467 struct ip6_sf_socklist *newpsl, *psl;
468 struct net *net = sock_net(sk);
472 group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
474 if (!ipv6_addr_is_multicast(group))
476 if (gsf->gf_fmode != MCAST_INCLUDE &&
477 gsf->gf_fmode != MCAST_EXCLUDE)
481 idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
490 if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
495 for_each_pmc_rcu(inet6, pmc) {
496 if (pmc->ifindex != gsf->gf_interface)
498 if (ipv6_addr_equal(&pmc->addr, group))
501 if (!pmc) { /* must have a prior join */
505 if (gsf->gf_numsrc) {
506 newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
512 newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
513 for (i = 0; i < newpsl->sl_count; ++i, ++list) {
514 struct sockaddr_in6 *psin6;
516 psin6 = (struct sockaddr_in6 *)list;
517 newpsl->sl_addr[i] = psin6->sin6_addr;
519 err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
520 newpsl->sl_count, newpsl->sl_addr, 0);
522 sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max));
527 (void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
530 write_lock(&pmc->sflock);
533 (void) ip6_mc_del_src(idev, group, pmc->sfmode,
534 psl->sl_count, psl->sl_addr, 0);
535 sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
537 (void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
538 pmc->sflist = newpsl;
539 pmc->sfmode = gsf->gf_fmode;
540 write_unlock(&pmc->sflock);
543 read_unlock_bh(&idev->lock);
546 err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
550 int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
551 struct sockaddr_storage *p)
553 int err, i, count, copycount;
554 const struct in6_addr *group;
555 struct ipv6_mc_socklist *pmc;
556 struct inet6_dev *idev;
557 struct ipv6_pinfo *inet6 = inet6_sk(sk);
558 struct ip6_sf_socklist *psl;
559 struct net *net = sock_net(sk);
561 group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
563 if (!ipv6_addr_is_multicast(group))
567 idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
574 err = -EADDRNOTAVAIL;
575 /* changes to the ipv6_mc_list require the socket lock and
576 * rtnl lock. We have the socket lock and rcu read lock,
	 * so reading the list is safe.
	 */
580 for_each_pmc_rcu(inet6, pmc) {
581 if (pmc->ifindex != gsf->gf_interface)
583 if (ipv6_addr_equal(group, &pmc->addr))
586 if (!pmc) /* must have a prior join */
588 gsf->gf_fmode = pmc->sfmode;
590 count = psl ? psl->sl_count : 0;
591 read_unlock_bh(&idev->lock);
594 copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
595 gsf->gf_numsrc = count;
596 /* changes to psl require the socket lock, and a write lock
	 * on pmc->sflock. We have the socket lock so reading here is safe.
	 */
599 for (i = 0; i < copycount; i++, p++) {
600 struct sockaddr_in6 *psin6;
601 struct sockaddr_storage ss;
603 psin6 = (struct sockaddr_in6 *)&ss;
604 memset(&ss, 0, sizeof(ss));
605 psin6->sin6_family = AF_INET6;
606 psin6->sin6_addr = psl->sl_addr[i];
607 if (copy_to_user(p, &ss, sizeof(ss)))
612 read_unlock_bh(&idev->lock);
617 bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
618 const struct in6_addr *src_addr)
620 struct ipv6_pinfo *np = inet6_sk(sk);
621 struct ipv6_mc_socklist *mc;
622 struct ip6_sf_socklist *psl;
626 for_each_pmc_rcu(np, mc) {
627 if (ipv6_addr_equal(&mc->addr, mc_addr))
634 read_lock(&mc->sflock);
637 rv = mc->sfmode == MCAST_EXCLUDE;
641 for (i = 0; i < psl->sl_count; i++) {
642 if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
645 if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
647 if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
650 read_unlock(&mc->sflock);
656 static void igmp6_group_added(struct ifmcaddr6 *mc)
658 struct net_device *dev = mc->idev->dev;
659 char buf[MAX_ADDR_LEN];
661 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
662 IPV6_ADDR_SCOPE_LINKLOCAL)
665 spin_lock_bh(&mc->mca_lock);
666 if (!(mc->mca_flags&MAF_LOADED)) {
667 mc->mca_flags |= MAF_LOADED;
668 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
669 dev_mc_add(dev, buf);
671 spin_unlock_bh(&mc->mca_lock);
673 if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
676 if (mld_in_v1_mode(mc->idev)) {
677 igmp6_join_group(mc);
682 /* Based on RFC3810 6.1, for newly added INCLUDE SSM, we
683 * should not send filter-mode change record as the mode
	 * should be from IN() to IN(A).
	 */
686 if (mc->mca_sfmode == MCAST_EXCLUDE)
687 mc->mca_crcount = mc->idev->mc_qrv;
689 mld_ifc_event(mc->idev);
692 static void igmp6_group_dropped(struct ifmcaddr6 *mc)
694 struct net_device *dev = mc->idev->dev;
695 char buf[MAX_ADDR_LEN];
697 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
698 IPV6_ADDR_SCOPE_LINKLOCAL)
701 spin_lock_bh(&mc->mca_lock);
702 if (mc->mca_flags&MAF_LOADED) {
703 mc->mca_flags &= ~MAF_LOADED;
704 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
705 dev_mc_del(dev, buf);
708 spin_unlock_bh(&mc->mca_lock);
709 if (mc->mca_flags & MAF_NOREPORT)
713 igmp6_leave_group(mc);
715 spin_lock_bh(&mc->mca_lock);
716 if (del_timer(&mc->mca_timer))
717 refcount_dec(&mc->mca_refcnt);
718 spin_unlock_bh(&mc->mca_lock);
722 * deleted ifmcaddr6 manipulation
724 static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
726 struct ifmcaddr6 *pmc;
728 /* this is an "ifmcaddr6" for convenience; only the fields below
729 * are actually used. In particular, the refcnt and users are not
730 * used for management of the delete list. Using the same structure
731 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
734 pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);
738 spin_lock_bh(&im->mca_lock);
739 spin_lock_init(&pmc->mca_lock);
740 pmc->idev = im->idev;
742 pmc->mca_addr = im->mca_addr;
743 pmc->mca_crcount = idev->mc_qrv;
744 pmc->mca_sfmode = im->mca_sfmode;
745 if (pmc->mca_sfmode == MCAST_INCLUDE) {
746 struct ip6_sf_list *psf;
748 pmc->mca_tomb = im->mca_tomb;
749 pmc->mca_sources = im->mca_sources;
750 im->mca_tomb = im->mca_sources = NULL;
751 for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
752 psf->sf_crcount = pmc->mca_crcount;
754 spin_unlock_bh(&im->mca_lock);
756 spin_lock_bh(&idev->mc_lock);
757 pmc->next = idev->mc_tomb;
759 spin_unlock_bh(&idev->mc_lock);
762 static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
764 struct ifmcaddr6 *pmc, *pmc_prev;
765 struct ip6_sf_list *psf;
766 struct in6_addr *pmca = &im->mca_addr;
768 spin_lock_bh(&idev->mc_lock);
770 for (pmc = idev->mc_tomb; pmc; pmc = pmc->next) {
771 if (ipv6_addr_equal(&pmc->mca_addr, pmca))
777 pmc_prev->next = pmc->next;
779 idev->mc_tomb = pmc->next;
781 spin_unlock_bh(&idev->mc_lock);
783 spin_lock_bh(&im->mca_lock);
785 im->idev = pmc->idev;
786 if (im->mca_sfmode == MCAST_INCLUDE) {
787 swap(im->mca_tomb, pmc->mca_tomb);
788 swap(im->mca_sources, pmc->mca_sources);
789 for (psf = im->mca_sources; psf; psf = psf->sf_next)
790 psf->sf_crcount = idev->mc_qrv;
792 im->mca_crcount = idev->mc_qrv;
794 in6_dev_put(pmc->idev);
795 ip6_mc_clear_src(pmc);
798 spin_unlock_bh(&im->mca_lock);
801 static void mld_clear_delrec(struct inet6_dev *idev)
803 struct ifmcaddr6 *pmc, *nextpmc;
805 spin_lock_bh(&idev->mc_lock);
807 idev->mc_tomb = NULL;
808 spin_unlock_bh(&idev->mc_lock);
810 for (; pmc; pmc = nextpmc) {
812 ip6_mc_clear_src(pmc);
813 in6_dev_put(pmc->idev);
817 /* clear dead sources, too */
818 read_lock_bh(&idev->lock);
819 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
820 struct ip6_sf_list *psf, *psf_next;
822 spin_lock_bh(&pmc->mca_lock);
824 pmc->mca_tomb = NULL;
825 spin_unlock_bh(&pmc->mca_lock);
826 for (; psf; psf = psf_next) {
827 psf_next = psf->sf_next;
831 read_unlock_bh(&idev->lock);
834 static void mca_get(struct ifmcaddr6 *mc)
836 refcount_inc(&mc->mca_refcnt);
839 static void ma_put(struct ifmcaddr6 *mc)
841 if (refcount_dec_and_test(&mc->mca_refcnt)) {
842 in6_dev_put(mc->idev);
847 static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
848 const struct in6_addr *addr,
851 struct ifmcaddr6 *mc;
853 mc = kzalloc(sizeof(*mc), GFP_ATOMIC);
857 timer_setup(&mc->mca_timer, igmp6_timer_handler, 0);
859 mc->mca_addr = *addr;
860 mc->idev = idev; /* reference taken by caller */
862 /* mca_stamp should be updated upon changes */
863 mc->mca_cstamp = mc->mca_tstamp = jiffies;
864 refcount_set(&mc->mca_refcnt, 1);
865 spin_lock_init(&mc->mca_lock);
867 mc->mca_sfmode = mode;
868 mc->mca_sfcount[mode] = 1;
870 if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
871 IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
872 mc->mca_flags |= MAF_NOREPORT;
878 * device multicast group inc (add if not found)
880 static int __ipv6_dev_mc_inc(struct net_device *dev,
881 const struct in6_addr *addr, unsigned int mode)
883 struct ifmcaddr6 *mc;
884 struct inet6_dev *idev;
888 /* we need to take a reference on idev */
889 idev = in6_dev_get(dev);
894 write_lock_bh(&idev->lock);
896 write_unlock_bh(&idev->lock);
901 for (mc = idev->mc_list; mc; mc = mc->next) {
902 if (ipv6_addr_equal(&mc->mca_addr, addr)) {
904 write_unlock_bh(&idev->lock);
905 ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
911 mc = mca_alloc(idev, addr, mode);
913 write_unlock_bh(&idev->lock);
918 mc->next = idev->mc_list;
921 /* Hold this for the code below before we unlock,
	 * it is already exposed via idev->mc_list.
	 */
925 write_unlock_bh(&idev->lock);
927 mld_del_delrec(idev, mc);
928 igmp6_group_added(mc);
933 int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
935 return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
937 EXPORT_SYMBOL(ipv6_dev_mc_inc);
940 * device multicast group del
942 int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
944 struct ifmcaddr6 *ma, **map;
948 write_lock_bh(&idev->lock);
949 for (map = &idev->mc_list; (ma = *map) != NULL; map = &ma->next) {
950 if (ipv6_addr_equal(&ma->mca_addr, addr)) {
951 if (--ma->mca_users == 0) {
953 write_unlock_bh(&idev->lock);
955 igmp6_group_dropped(ma);
956 ip6_mc_clear_src(ma);
961 write_unlock_bh(&idev->lock);
965 write_unlock_bh(&idev->lock);
970 int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
972 struct inet6_dev *idev;
977 idev = __in6_dev_get(dev);
981 err = __ipv6_dev_mc_dec(idev, addr);
985 EXPORT_SYMBOL(ipv6_dev_mc_dec);
988 * check if the interface/address pair is valid
990 bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
991 const struct in6_addr *src_addr)
993 struct inet6_dev *idev;
994 struct ifmcaddr6 *mc;
998 idev = __in6_dev_get(dev);
1000 read_lock_bh(&idev->lock);
1001 for (mc = idev->mc_list; mc; mc = mc->next) {
1002 if (ipv6_addr_equal(&mc->mca_addr, group))
1006 if (src_addr && !ipv6_addr_any(src_addr)) {
1007 struct ip6_sf_list *psf;
1009 spin_lock_bh(&mc->mca_lock);
1010 for (psf = mc->mca_sources; psf; psf = psf->sf_next) {
1011 if (ipv6_addr_equal(&psf->sf_addr, src_addr))
1015 rv = psf->sf_count[MCAST_INCLUDE] ||
1016 psf->sf_count[MCAST_EXCLUDE] !=
1017 mc->mca_sfcount[MCAST_EXCLUDE];
1019 rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
1020 spin_unlock_bh(&mc->mca_lock);
1022 rv = true; /* don't filter unspecified source */
1024 read_unlock_bh(&idev->lock);
1030 static void mld_gq_start_timer(struct inet6_dev *idev)
1032 unsigned long tv = prandom_u32() % idev->mc_maxdelay;
1034 idev->mc_gq_running = 1;
1035 if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
1039 static void mld_gq_stop_timer(struct inet6_dev *idev)
1041 idev->mc_gq_running = 0;
1042 if (del_timer(&idev->mc_gq_timer))
1043 __in6_dev_put(idev);
1046 static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay)
1048 unsigned long tv = prandom_u32() % delay;
1050 if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
1054 static void mld_ifc_stop_timer(struct inet6_dev *idev)
1056 idev->mc_ifc_count = 0;
1057 if (del_timer(&idev->mc_ifc_timer))
1058 __in6_dev_put(idev);
1061 static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay)
1063 unsigned long tv = prandom_u32() % delay;
1065 if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2))
1069 static void mld_dad_stop_timer(struct inet6_dev *idev)
1071 if (del_timer(&idev->mc_dad_timer))
1072 __in6_dev_put(idev);
1076 * IGMP handling (alias multicast ICMPv6 messages)
1079 static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
1081 unsigned long delay = resptime;
1083 /* Do not start timer for these addresses */
1084 if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
1085 IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
1088 if (del_timer(&ma->mca_timer)) {
1089 refcount_dec(&ma->mca_refcnt);
1090 delay = ma->mca_timer.expires - jiffies;
1093 if (delay >= resptime)
1094 delay = prandom_u32() % resptime;
1096 ma->mca_timer.expires = jiffies + delay;
1097 if (!mod_timer(&ma->mca_timer, jiffies + delay))
1098 refcount_inc(&ma->mca_refcnt);
1099 ma->mca_flags |= MAF_TIMER_RUNNING;
1102 /* mark EXCLUDE-mode sources */
1103 static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1104 const struct in6_addr *srcs)
1106 struct ip6_sf_list *psf;
1110 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
1111 if (scount == nsrcs)
1113 for (i = 0; i < nsrcs; i++) {
1114 /* skip inactive filters */
1115 if (psf->sf_count[MCAST_INCLUDE] ||
1116 pmc->mca_sfcount[MCAST_EXCLUDE] !=
1117 psf->sf_count[MCAST_EXCLUDE])
1119 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1125 pmc->mca_flags &= ~MAF_GSQUERY;
1126 if (scount == nsrcs) /* all sources excluded */
1131 static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1132 const struct in6_addr *srcs)
1134 struct ip6_sf_list *psf;
1137 if (pmc->mca_sfmode == MCAST_EXCLUDE)
1138 return mld_xmarksources(pmc, nsrcs, srcs);
1140 /* mark INCLUDE-mode sources */
1143 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
1144 if (scount == nsrcs)
1146 for (i = 0; i < nsrcs; i++) {
1147 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1155 pmc->mca_flags &= ~MAF_GSQUERY;
1158 pmc->mca_flags |= MAF_GSQUERY;
1162 static int mld_force_mld_version(const struct inet6_dev *idev)
	/* Normally, both are 0 here. If enforcement of a particular MLD
	 * version is configured, the per-device setting has lower
	 * precedence than the 'all' setting (.../conf/all/force_mld_version).
	 */
1169 if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
1170 return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
1172 return idev->cnf.force_mld_version;
1175 static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
1177 return mld_force_mld_version(idev) == 2;
1180 static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
1182 return mld_force_mld_version(idev) == 1;
1185 static bool mld_in_v1_mode(const struct inet6_dev *idev)
1187 if (mld_in_v2_mode_only(idev))
1189 if (mld_in_v1_mode_only(idev))
1191 if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
1197 static void mld_set_v1_mode(struct inet6_dev *idev)
1199 /* RFC3810, relevant sections:
1200 * - 9.1. Robustness Variable
1201 * - 9.2. Query Interval
1202 * - 9.3. Query Response Interval
	 * - 9.12. Older Version Querier Present Timeout
	 */
1205 unsigned long switchback;
1207 switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;
1209 idev->mc_v1_seen = jiffies + switchback;
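/* Worked example with the defaults above: RFC3810, 9.12 defines the Older
 * Version Querier Present Timeout as [Robustness Variable] * [Query
 * Interval] + [Query Response Interval], i.e. 2 * 125s + 10s = 260s, so
 * the interface stays in MLDv1 compatibility mode for 260 seconds after
 * the last MLDv1 query was heard.
 */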
1212 static void mld_update_qrv(struct inet6_dev *idev,
1213 const struct mld2_query *mlh2)
1215 /* RFC3810, relevant sections:
1216 * - 5.1.8. QRV (Querier's Robustness Variable)
	 * - 9.1. Robustness Variable
	 */
1220 /* The value of the Robustness Variable MUST NOT be zero,
1221 * and SHOULD NOT be one. Catch this here if we ever run
	 * into such a case in future.
	 */
1224 const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv);
1225 WARN_ON(idev->mc_qrv == 0);
1227 if (mlh2->mld2q_qrv > 0)
1228 idev->mc_qrv = mlh2->mld2q_qrv;
1230 if (unlikely(idev->mc_qrv < min_qrv)) {
1231 net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
1232 idev->mc_qrv, min_qrv);
1233 idev->mc_qrv = min_qrv;
1237 static void mld_update_qi(struct inet6_dev *idev,
1238 const struct mld2_query *mlh2)
1240 /* RFC3810, relevant sections:
1241 * - 5.1.9. QQIC (Querier's Query Interval Code)
1242 * - 9.2. Query Interval
1243 * - 9.12. Older Version Querier Present Timeout
	 * (the [Query Interval] in the last Query received)
	 */
1246 unsigned long mc_qqi;
1248 if (mlh2->mld2q_qqic < 128) {
1249 mc_qqi = mlh2->mld2q_qqic;
1251 unsigned long mc_man, mc_exp;
1253 mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
1254 mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);
1256 mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
1259 idev->mc_qi = mc_qqi * HZ;
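/* QQIC decoding example: a value below 128 is the interval in seconds
 * itself; 0x8F has the high bit set, giving exp = 0 and mant = 15, so the
 * interval becomes (15 | 0x10) << (0 + 3) = 248 seconds.
 */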
1262 static void mld_update_qri(struct inet6_dev *idev,
1263 const struct mld2_query *mlh2)
1265 /* RFC3810, relevant sections:
1266 * - 5.1.3. Maximum Response Code
	 * - 9.3. Query Response Interval
	 */
1269 idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
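/* mldv2_mrc() applies the analogous exponential scheme to the Maximum
 * Response Code (RFC3810, 5.1.3): an MRC below 32768 is the delay in
 * milliseconds, otherwise it decodes as (mant | 0x1000) << (exp + 3),
 * e.g. 0x9000 -> exp = 1, mant = 0 -> 65536 ms.
 */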
1272 static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
1273 unsigned long *max_delay, bool v1_query)
1275 unsigned long mldv1_md;
1277 /* Ignore v1 queries */
1278 if (mld_in_v2_mode_only(idev))
1281 mldv1_md = ntohs(mld->mld_maxdelay);
1283 /* When in MLDv1 fallback and a MLDv2 router start-up being
1284 * unaware of current MLDv1 operation, the MRC == MRD mapping
1285 * only works when the exponential algorithm is not being
1286 * used (as MLDv1 is unaware of such things).
1288 * According to the RFC author, the MLDv2 implementations
1289 * he's aware of all use a MRC < 32768 on start up queries.
1291 * Thus, should we *ever* encounter something else larger
	 * than that, just assume the maximum possible within our
	 * reach.
	 */
1296 mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT);
1298 *max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);
1300 /* MLDv1 router present: we need to go into v1 mode *only*
1301 * when an MLDv1 query is received as per section 9.12. of
1302 * RFC3810! And we know from RFC2710 section 3.7 that MLDv1
1303 * queries MUST be of exactly 24 octets.
1306 mld_set_v1_mode(idev);
1308 /* cancel MLDv2 report timer */
1309 mld_gq_stop_timer(idev);
1310 /* cancel the interface change timer */
1311 mld_ifc_stop_timer(idev);
1312 /* clear deleted report items */
1313 mld_clear_delrec(idev);
1318 static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
1319 unsigned long *max_delay)
1321 *max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);
1323 mld_update_qrv(idev, mld);
1324 mld_update_qi(idev, mld);
1325 mld_update_qri(idev, mld);
1327 idev->mc_maxdelay = *max_delay;
1332 /* called with rcu_read_lock() */
1333 int igmp6_event_query(struct sk_buff *skb)
1335 struct mld2_query *mlh2 = NULL;
1336 struct ifmcaddr6 *ma;
1337 const struct in6_addr *group;
1338 unsigned long max_delay;
1339 struct inet6_dev *idev;
1340 struct mld_msg *mld;
1345 if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
1348 /* compute payload length excluding extension headers */
1349 len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
1350 len -= skb_network_header_len(skb);
	/*
	 * Upon reception of an MLD message that contains a Query, the node
	 * checks if the source address of the message is a valid link-local
	 * address, if the Hop Limit is set to 1, and if the Router Alert
	 * option is present in the Hop-By-Hop Options header of the IPv6
	 * packet. If any of these checks fails, the packet is dropped.
	 */
1359 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
1360 ipv6_hdr(skb)->hop_limit != 1 ||
1361 !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
1362 IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
1365 idev = __in6_dev_get(skb->dev);
1369 mld = (struct mld_msg *)icmp6_hdr(skb);
1370 group = &mld->mld_mca;
1371 group_type = ipv6_addr_type(group);
1373 if (group_type != IPV6_ADDR_ANY &&
1374 !(group_type&IPV6_ADDR_MULTICAST))
1377 if (len < MLD_V1_QUERY_LEN) {
1379 } else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
1380 err = mld_process_v1(idev, mld, &max_delay,
1381 len == MLD_V1_QUERY_LEN);
1384 } else if (len >= MLD_V2_QUERY_LEN_MIN) {
1385 int srcs_offset = sizeof(struct mld2_query) -
1386 sizeof(struct icmp6hdr);
1388 if (!pskb_may_pull(skb, srcs_offset))
1391 mlh2 = (struct mld2_query *)skb_transport_header(skb);
1393 err = mld_process_v2(idev, mlh2, &max_delay);
1397 if (group_type == IPV6_ADDR_ANY) { /* general query */
1398 if (mlh2->mld2q_nsrcs)
1399 return -EINVAL; /* no sources allowed */
1401 mld_gq_start_timer(idev);
1404 /* mark sources to include, if group & source-specific */
1405 if (mlh2->mld2q_nsrcs != 0) {
1406 if (!pskb_may_pull(skb, srcs_offset +
1407 ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
1410 mlh2 = (struct mld2_query *)skb_transport_header(skb);
1417 read_lock_bh(&idev->lock);
1418 if (group_type == IPV6_ADDR_ANY) {
1419 for (ma = idev->mc_list; ma; ma = ma->next) {
1420 spin_lock_bh(&ma->mca_lock);
1421 igmp6_group_queried(ma, max_delay);
1422 spin_unlock_bh(&ma->mca_lock);
1425 for (ma = idev->mc_list; ma; ma = ma->next) {
1426 if (!ipv6_addr_equal(group, &ma->mca_addr))
1428 spin_lock_bh(&ma->mca_lock);
1429 if (ma->mca_flags & MAF_TIMER_RUNNING) {
1430 /* gsquery <- gsquery && mark */
1432 ma->mca_flags &= ~MAF_GSQUERY;
1434 /* gsquery <- mark */
1436 ma->mca_flags |= MAF_GSQUERY;
1438 ma->mca_flags &= ~MAF_GSQUERY;
1440 if (!(ma->mca_flags & MAF_GSQUERY) ||
1441 mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
1442 igmp6_group_queried(ma, max_delay);
1443 spin_unlock_bh(&ma->mca_lock);
1447 read_unlock_bh(&idev->lock);
1452 /* called with rcu_read_lock() */
1453 int igmp6_event_report(struct sk_buff *skb)
1455 struct ifmcaddr6 *ma;
1456 struct inet6_dev *idev;
1457 struct mld_msg *mld;
1460 /* Our own report looped back. Ignore it. */
1461 if (skb->pkt_type == PACKET_LOOPBACK)
1464 /* send our report if the MC router may not have heard this report */
1465 if (skb->pkt_type != PACKET_MULTICAST &&
1466 skb->pkt_type != PACKET_BROADCAST)
1469 if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
1472 mld = (struct mld_msg *)icmp6_hdr(skb);
	/* Drop reports whose source is neither link-local nor unspecified */
1475 addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
1476 if (addr_type != IPV6_ADDR_ANY &&
1477 !(addr_type&IPV6_ADDR_LINKLOCAL))
1480 idev = __in6_dev_get(skb->dev);
1485 * Cancel the timer for this group
1488 read_lock_bh(&idev->lock);
1489 for (ma = idev->mc_list; ma; ma = ma->next) {
1490 if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
1491 spin_lock(&ma->mca_lock);
1492 if (del_timer(&ma->mca_timer))
1493 refcount_dec(&ma->mca_refcnt);
1494 ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING);
1495 spin_unlock(&ma->mca_lock);
1499 read_unlock_bh(&idev->lock);
1503 static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
1504 int gdeleted, int sdeleted)
1507 case MLD2_MODE_IS_INCLUDE:
1508 case MLD2_MODE_IS_EXCLUDE:
1509 if (gdeleted || sdeleted)
1511 if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
1512 if (pmc->mca_sfmode == MCAST_INCLUDE)
1514 /* don't include if this source is excluded
1517 if (psf->sf_count[MCAST_INCLUDE])
1518 return type == MLD2_MODE_IS_INCLUDE;
1519 return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1520 psf->sf_count[MCAST_EXCLUDE];
1523 case MLD2_CHANGE_TO_INCLUDE:
1524 if (gdeleted || sdeleted)
1526 return psf->sf_count[MCAST_INCLUDE] != 0;
1527 case MLD2_CHANGE_TO_EXCLUDE:
1528 if (gdeleted || sdeleted)
1530 if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
1531 psf->sf_count[MCAST_INCLUDE])
1533 return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1534 psf->sf_count[MCAST_EXCLUDE];
1535 case MLD2_ALLOW_NEW_SOURCES:
1536 if (gdeleted || !psf->sf_crcount)
1538 return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
1539 case MLD2_BLOCK_OLD_SOURCES:
1540 if (pmc->mca_sfmode == MCAST_INCLUDE)
1541 return gdeleted || (psf->sf_crcount && sdeleted);
1542 return psf->sf_crcount && !gdeleted && !sdeleted;
1548 mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
1550 struct ip6_sf_list *psf;
1553 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
1554 if (!is_in(pmc, psf, type, gdeleted, sdeleted))
1561 static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
1562 struct net_device *dev,
1563 const struct in6_addr *saddr,
1564 const struct in6_addr *daddr,
1567 struct ipv6hdr *hdr;
1569 skb->protocol = htons(ETH_P_IPV6);
1572 skb_reset_network_header(skb);
1573 skb_put(skb, sizeof(struct ipv6hdr));
1574 hdr = ipv6_hdr(skb);
1576 ip6_flow_hdr(hdr, 0, 0);
1578 hdr->payload_len = htons(len);
1579 hdr->nexthdr = proto;
1580 hdr->hop_limit = inet6_sk(sk)->hop_limit;
1582 hdr->saddr = *saddr;
1583 hdr->daddr = *daddr;
1586 static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
1588 struct net_device *dev = idev->dev;
1589 struct net *net = dev_net(dev);
1590 struct sock *sk = net->ipv6.igmp_sk;
1591 struct sk_buff *skb;
1592 struct mld2_report *pmr;
1593 struct in6_addr addr_buf;
1594 const struct in6_addr *saddr;
1595 int hlen = LL_RESERVED_SPACE(dev);
1596 int tlen = dev->needed_tailroom;
1597 unsigned int size = mtu + hlen + tlen;
1599 u8 ra[8] = { IPPROTO_ICMPV6, 0,
1600 IPV6_TLV_ROUTERALERT, 2, 0, 0,
1603 /* we assume size > sizeof(ra) here */
1604 /* limit our allocations to order-0 page */
1605 size = min_t(int, size, SKB_MAX_ORDER(0, 0));
1606 skb = sock_alloc_send_skb(sk, size, 1, &err);
1611 skb->priority = TC_PRIO_CONTROL;
1612 skb_reserve(skb, hlen);
1613 skb_tailroom_reserve(skb, mtu, tlen);
1615 if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
1616 /* <draft-ietf-magma-mld-source-05.txt>:
1617 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
1620 saddr = &in6addr_any;
1624 ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
1626 skb_put_data(skb, ra, sizeof(ra));
1628 skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
1629 skb_put(skb, sizeof(*pmr));
1630 pmr = (struct mld2_report *)skb_transport_header(skb);
1631 pmr->mld2r_type = ICMPV6_MLD2_REPORT;
1632 pmr->mld2r_resv1 = 0;
1633 pmr->mld2r_cksum = 0;
1634 pmr->mld2r_resv2 = 0;
1635 pmr->mld2r_ngrec = 0;
1639 static void mld_sendpack(struct sk_buff *skb)
1641 struct ipv6hdr *pip6 = ipv6_hdr(skb);
1642 struct mld2_report *pmr =
1643 (struct mld2_report *)skb_transport_header(skb);
1644 int payload_len, mldlen;
1645 struct inet6_dev *idev;
1646 struct net *net = dev_net(skb->dev);
1649 struct dst_entry *dst;
1652 idev = __in6_dev_get(skb->dev);
1653 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
1655 payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
1657 mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
1658 pip6->payload_len = htons(payload_len);
1660 pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
1662 csum_partial(skb_transport_header(skb),
1665 icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
1666 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1668 dst = icmp6_dst_alloc(skb->dev, &fl6);
1675 skb_dst_set(skb, dst);
1679 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
1680 net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
1684 ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
1685 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1687 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1698 static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
1700 return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
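/* Each reported source appends a 16-byte IPv6 address to the 20-byte
 * group record header (type, aux data len, source count, group address),
 * so a record carrying e.g. three sources needs 20 + 3 * 16 = 68 octets.
 */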
1703 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1704 int type, struct mld2_grec **ppgr, unsigned int mtu)
1706 struct mld2_report *pmr;
1707 struct mld2_grec *pgr;
1710 skb = mld_newpack(pmc->idev, mtu);
1714 pgr = skb_put(skb, sizeof(struct mld2_grec));
1715 pgr->grec_type = type;
1716 pgr->grec_auxwords = 0;
1717 pgr->grec_nsrcs = 0;
1718 pgr->grec_mca = pmc->mca_addr; /* structure copy */
1719 pmr = (struct mld2_report *)skb_transport_header(skb);
1720 pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
1725 #define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)
1727 static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1728 int type, int gdeleted, int sdeleted, int crsend)
1730 struct inet6_dev *idev = pmc->idev;
1731 struct net_device *dev = idev->dev;
1732 struct mld2_report *pmr;
1733 struct mld2_grec *pgr = NULL;
1734 struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
1735 int scount, stotal, first, isquery, truncate;
1738 if (pmc->mca_flags & MAF_NOREPORT)
1741 mtu = READ_ONCE(dev->mtu);
1742 if (mtu < IPV6_MIN_MTU)
1745 isquery = type == MLD2_MODE_IS_INCLUDE ||
1746 type == MLD2_MODE_IS_EXCLUDE;
1747 truncate = type == MLD2_MODE_IS_EXCLUDE ||
1748 type == MLD2_CHANGE_TO_EXCLUDE;
1750 stotal = scount = 0;
1752 psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;
1757 pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;
1759 /* EX and TO_EX get a fresh packet, if needed */
1761 if (pmr && pmr->mld2r_ngrec &&
1762 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
1765 skb = mld_newpack(idev, mtu);
1770 for (psf = *psf_list; psf; psf = psf_next) {
1771 struct in6_addr *psrc;
1773 psf_next = psf->sf_next;
1775 if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
1780 /* Based on RFC3810 6.1. Should not send source-list change
		 * records when there is a filter mode change.
		 */
1783 if (((gdeleted && pmc->mca_sfmode == MCAST_EXCLUDE) ||
1784 (!gdeleted && pmc->mca_crcount)) &&
1785 (type == MLD2_ALLOW_NEW_SOURCES ||
1786 type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount)
1787 goto decrease_sf_crcount;
1789 /* clear marks on query responses */
1793 if (AVAILABLE(skb) < sizeof(*psrc) +
1794 first*sizeof(struct mld2_grec)) {
1795 if (truncate && !first)
1796 break; /* truncate these */
1798 pgr->grec_nsrcs = htons(scount);
1801 skb = mld_newpack(idev, mtu);
1806 skb = add_grhead(skb, pmc, type, &pgr, mtu);
1811 psrc = skb_put(skb, sizeof(*psrc));
1812 *psrc = psf->sf_addr;
1814 if ((type == MLD2_ALLOW_NEW_SOURCES ||
1815 type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
1816 decrease_sf_crcount:
1818 if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
1820 psf_prev->sf_next = psf->sf_next;
1822 *psf_list = psf->sf_next;
1832 if (type == MLD2_ALLOW_NEW_SOURCES ||
1833 type == MLD2_BLOCK_OLD_SOURCES)
1835 if (pmc->mca_crcount || isquery || crsend) {
1836 /* make sure we have room for group header */
1837 if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
1839 skb = NULL; /* add_grhead will get a new one */
1841 skb = add_grhead(skb, pmc, type, &pgr, mtu);
1845 pgr->grec_nsrcs = htons(scount);
1848 pmc->mca_flags &= ~MAF_GSQUERY; /* clear query state */
1852 static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
1854 struct sk_buff *skb = NULL;
1857 read_lock_bh(&idev->lock);
1859 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
1860 if (pmc->mca_flags & MAF_NOREPORT)
1862 spin_lock_bh(&pmc->mca_lock);
1863 if (pmc->mca_sfcount[MCAST_EXCLUDE])
1864 type = MLD2_MODE_IS_EXCLUDE;
1866 type = MLD2_MODE_IS_INCLUDE;
1867 skb = add_grec(skb, pmc, type, 0, 0, 0);
1868 spin_unlock_bh(&pmc->mca_lock);
1871 spin_lock_bh(&pmc->mca_lock);
1872 if (pmc->mca_sfcount[MCAST_EXCLUDE])
1873 type = MLD2_MODE_IS_EXCLUDE;
1875 type = MLD2_MODE_IS_INCLUDE;
1876 skb = add_grec(skb, pmc, type, 0, 0, 0);
1877 spin_unlock_bh(&pmc->mca_lock);
1879 read_unlock_bh(&idev->lock);
1885 * remove zero-count source records from a source filter list
1887 static void mld_clear_zeros(struct ip6_sf_list **ppsf)
1889 struct ip6_sf_list *psf_prev, *psf_next, *psf;
1892 for (psf = *ppsf; psf; psf = psf_next) {
1893 psf_next = psf->sf_next;
1894 if (psf->sf_crcount == 0) {
1896 psf_prev->sf_next = psf->sf_next;
1898 *ppsf = psf->sf_next;
1905 static void mld_send_cr(struct inet6_dev *idev)
1907 struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
1908 struct sk_buff *skb = NULL;
1911 read_lock_bh(&idev->lock);
1912 spin_lock(&idev->mc_lock);
1916 for (pmc = idev->mc_tomb; pmc; pmc = pmc_next) {
1917 pmc_next = pmc->next;
1918 if (pmc->mca_sfmode == MCAST_INCLUDE) {
1919 type = MLD2_BLOCK_OLD_SOURCES;
1920 dtype = MLD2_BLOCK_OLD_SOURCES;
1921 skb = add_grec(skb, pmc, type, 1, 0, 0);
1922 skb = add_grec(skb, pmc, dtype, 1, 1, 0);
1924 if (pmc->mca_crcount) {
1925 if (pmc->mca_sfmode == MCAST_EXCLUDE) {
1926 type = MLD2_CHANGE_TO_INCLUDE;
1927 skb = add_grec(skb, pmc, type, 1, 0, 0);
1930 if (pmc->mca_crcount == 0) {
1931 mld_clear_zeros(&pmc->mca_tomb);
1932 mld_clear_zeros(&pmc->mca_sources);
1935 if (pmc->mca_crcount == 0 && !pmc->mca_tomb &&
1936 !pmc->mca_sources) {
1938 pmc_prev->next = pmc_next;
1940 idev->mc_tomb = pmc_next;
1941 in6_dev_put(pmc->idev);
1946 spin_unlock(&idev->mc_lock);
1949 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
1950 spin_lock_bh(&pmc->mca_lock);
1951 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
1952 type = MLD2_BLOCK_OLD_SOURCES;
1953 dtype = MLD2_ALLOW_NEW_SOURCES;
1955 type = MLD2_ALLOW_NEW_SOURCES;
1956 dtype = MLD2_BLOCK_OLD_SOURCES;
1958 skb = add_grec(skb, pmc, type, 0, 0, 0);
1959 skb = add_grec(skb, pmc, dtype, 0, 1, 0); /* deleted sources */
1961 /* filter mode changes */
1962 if (pmc->mca_crcount) {
1963 if (pmc->mca_sfmode == MCAST_EXCLUDE)
1964 type = MLD2_CHANGE_TO_EXCLUDE;
1966 type = MLD2_CHANGE_TO_INCLUDE;
1967 skb = add_grec(skb, pmc, type, 0, 0, 0);
1970 spin_unlock_bh(&pmc->mca_lock);
1972 read_unlock_bh(&idev->lock);
1975 (void) mld_sendpack(skb);
1978 static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1980 struct net *net = dev_net(dev);
1981 struct sock *sk = net->ipv6.igmp_sk;
1982 struct inet6_dev *idev;
1983 struct sk_buff *skb;
1984 struct mld_msg *hdr;
1985 const struct in6_addr *snd_addr, *saddr;
1986 struct in6_addr addr_buf;
1987 int hlen = LL_RESERVED_SPACE(dev);
1988 int tlen = dev->needed_tailroom;
1989 int err, len, payload_len, full_len;
1990 u8 ra[8] = { IPPROTO_ICMPV6, 0,
1991 IPV6_TLV_ROUTERALERT, 2, 0, 0,
1994 struct dst_entry *dst;
1996 if (type == ICMPV6_MGM_REDUCTION)
		snd_addr = &in6addr_linklocal_allrouters;
	else
		snd_addr = addr;
2001 len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
2002 payload_len = len + sizeof(ra);
2003 full_len = sizeof(struct ipv6hdr) + payload_len;
2006 IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
2007 IPSTATS_MIB_OUT, full_len);
2010 skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
2014 IP6_INC_STATS(net, __in6_dev_get(dev),
2015 IPSTATS_MIB_OUTDISCARDS);
2019 skb->priority = TC_PRIO_CONTROL;
2020 skb_reserve(skb, hlen);
2022 if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
2023 /* <draft-ietf-magma-mld-source-05.txt>:
2024 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
2027 saddr = &in6addr_any;
2031 ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
2033 skb_put_data(skb, ra, sizeof(ra));
2035 hdr = skb_put_zero(skb, sizeof(struct mld_msg));
2036 hdr->mld_type = type;
2037 hdr->mld_mca = *addr;
2039 hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
2041 csum_partial(hdr, len, 0));
2044 idev = __in6_dev_get(skb->dev);
2046 icmpv6_flow_init(sk, &fl6, type,
2047 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
2049 dst = icmp6_dst_alloc(skb->dev, &fl6);
2055 skb_dst_set(skb, dst);
2056 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
2057 net, sk, skb, NULL, skb->dev,
2061 ICMP6MSGOUT_INC_STATS(net, idev, type);
2062 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
2064 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
2074 static void mld_send_initial_cr(struct inet6_dev *idev)
2076 struct sk_buff *skb;
2077 struct ifmcaddr6 *pmc;
2080 if (mld_in_v1_mode(idev))
2084 read_lock_bh(&idev->lock);
2085 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
2086 spin_lock_bh(&pmc->mca_lock);
2087 if (pmc->mca_sfcount[MCAST_EXCLUDE])
2088 type = MLD2_CHANGE_TO_EXCLUDE;
2090 type = MLD2_ALLOW_NEW_SOURCES;
2091 skb = add_grec(skb, pmc, type, 0, 0, 1);
2092 spin_unlock_bh(&pmc->mca_lock);
2094 read_unlock_bh(&idev->lock);
2099 void ipv6_mc_dad_complete(struct inet6_dev *idev)
2101 idev->mc_dad_count = idev->mc_qrv;
2102 if (idev->mc_dad_count) {
2103 mld_send_initial_cr(idev);
2104 idev->mc_dad_count--;
2105 if (idev->mc_dad_count)
2106 mld_dad_start_timer(idev,
2107 unsolicited_report_interval(idev));
2111 static void mld_dad_timer_expire(struct timer_list *t)
2113 struct inet6_dev *idev = from_timer(idev, t, mc_dad_timer);
2115 mld_send_initial_cr(idev);
2116 if (idev->mc_dad_count) {
2117 idev->mc_dad_count--;
2118 if (idev->mc_dad_count)
2119 mld_dad_start_timer(idev,
2120 unsolicited_report_interval(idev));
2125 static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
2126 const struct in6_addr *psfsrc)
2128 struct ip6_sf_list *psf, *psf_prev;
2132 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
2133 if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2137 if (!psf || psf->sf_count[sfmode] == 0) {
2138 /* source filter not found, or count wrong => bug */
2141 psf->sf_count[sfmode]--;
2142 if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
2143 struct inet6_dev *idev = pmc->idev;
2145 /* no more filters for this source */
2147 psf_prev->sf_next = psf->sf_next;
2149 pmc->mca_sources = psf->sf_next;
2150 if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
2151 !mld_in_v1_mode(idev)) {
2152 psf->sf_crcount = idev->mc_qrv;
2153 psf->sf_next = pmc->mca_tomb;
2154 pmc->mca_tomb = psf;
2162 static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2163 int sfmode, int sfcount, const struct in6_addr *psfsrc,
2166 struct ifmcaddr6 *pmc;
2172 read_lock_bh(&idev->lock);
2173 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
2174 if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2178 /* MCA not found?? bug */
2179 read_unlock_bh(&idev->lock);
2182 spin_lock_bh(&pmc->mca_lock);
2185 if (!pmc->mca_sfcount[sfmode]) {
2186 spin_unlock_bh(&pmc->mca_lock);
2187 read_unlock_bh(&idev->lock);
2190 pmc->mca_sfcount[sfmode]--;
2193 for (i = 0; i < sfcount; i++) {
2194 int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
2196 changerec |= rv > 0;
2200 if (pmc->mca_sfmode == MCAST_EXCLUDE &&
2201 pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
2202 pmc->mca_sfcount[MCAST_INCLUDE]) {
2203 struct ip6_sf_list *psf;
2205 /* filter mode change */
2206 pmc->mca_sfmode = MCAST_INCLUDE;
2207 pmc->mca_crcount = idev->mc_qrv;
2208 idev->mc_ifc_count = pmc->mca_crcount;
2209 for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
2210 psf->sf_crcount = 0;
2211 mld_ifc_event(pmc->idev);
2212 } else if (sf_setstate(pmc) || changerec)
2213 mld_ifc_event(pmc->idev);
2214 spin_unlock_bh(&pmc->mca_lock);
2215 read_unlock_bh(&idev->lock);
2220 * Add multicast single-source filter to the interface list
2222 static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
2223 const struct in6_addr *psfsrc)
2225 struct ip6_sf_list *psf, *psf_prev;
2228 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
2229 if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2234 psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
2238 psf->sf_addr = *psfsrc;
2240 psf_prev->sf_next = psf;
2242 pmc->mca_sources = psf;
2244 psf->sf_count[sfmode]++;
2248 static void sf_markstate(struct ifmcaddr6 *pmc)
2250 struct ip6_sf_list *psf;
2251 int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2253 for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
2254 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2255 psf->sf_oldin = mca_xcount ==
2256 psf->sf_count[MCAST_EXCLUDE] &&
2257 !psf->sf_count[MCAST_INCLUDE];
2259 psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
2262 static int sf_setstate(struct ifmcaddr6 *pmc)
2264 struct ip6_sf_list *psf, *dpsf;
2265 int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2266 int qrv = pmc->idev->mc_qrv;
2270 for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
2271 if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2272 new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
2273 !psf->sf_count[MCAST_INCLUDE];
2275 new_in = psf->sf_count[MCAST_INCLUDE] != 0;
2277 if (!psf->sf_oldin) {
2278 struct ip6_sf_list *prev = NULL;
2280 for (dpsf = pmc->mca_tomb; dpsf;
2281 dpsf = dpsf->sf_next) {
2282 if (ipv6_addr_equal(&dpsf->sf_addr,
2289 prev->sf_next = dpsf->sf_next;
2291 pmc->mca_tomb = dpsf->sf_next;
2294 psf->sf_crcount = qrv;
2297 } else if (psf->sf_oldin) {
2298 psf->sf_crcount = 0;
			/* add or update "delete" records if an active filter
			 * is now inactive
			 */
2303 for (dpsf = pmc->mca_tomb; dpsf; dpsf = dpsf->sf_next)
2304 if (ipv6_addr_equal(&dpsf->sf_addr,
2308 dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
2312 /* pmc->mca_lock held by callers */
2313 dpsf->sf_next = pmc->mca_tomb;
2314 pmc->mca_tomb = dpsf;
2316 dpsf->sf_crcount = qrv;
2324 * Add multicast source filter list to the interface list
2326 static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2327 int sfmode, int sfcount, const struct in6_addr *psfsrc,
2330 struct ifmcaddr6 *pmc;
2336 read_lock_bh(&idev->lock);
2337 for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
2338 if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2342 /* MCA not found?? bug */
2343 read_unlock_bh(&idev->lock);
2346 spin_lock_bh(&pmc->mca_lock);
2349 isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
2351 pmc->mca_sfcount[sfmode]++;
2353 for (i = 0; i < sfcount; i++) {
2354 err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
2362 pmc->mca_sfcount[sfmode]--;
2363 for (j = 0; j < i; j++)
2364 ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
2365 } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
2366 struct ip6_sf_list *psf;
2368 /* filter mode change */
2369 if (pmc->mca_sfcount[MCAST_EXCLUDE])
2370 pmc->mca_sfmode = MCAST_EXCLUDE;
2371 else if (pmc->mca_sfcount[MCAST_INCLUDE])
2372 pmc->mca_sfmode = MCAST_INCLUDE;
2373 /* else no filters; keep old mode for reports */
2375 pmc->mca_crcount = idev->mc_qrv;
2376 idev->mc_ifc_count = pmc->mca_crcount;
2377 for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
2378 psf->sf_crcount = 0;
2379 mld_ifc_event(idev);
2380 } else if (sf_setstate(pmc))
2381 mld_ifc_event(idev);
2382 spin_unlock_bh(&pmc->mca_lock);
2383 read_unlock_bh(&idev->lock);
2387 static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
2389 struct ip6_sf_list *psf, *nextpsf;
2391 for (psf = pmc->mca_tomb; psf; psf = nextpsf) {
2392 nextpsf = psf->sf_next;
2395 pmc->mca_tomb = NULL;
2396 for (psf = pmc->mca_sources; psf; psf = nextpsf) {
2397 nextpsf = psf->sf_next;
2400 pmc->mca_sources = NULL;
2401 pmc->mca_sfmode = MCAST_EXCLUDE;
2402 pmc->mca_sfcount[MCAST_INCLUDE] = 0;
2403 pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
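/* After clearing, the group is left as EXCLUDE with an empty source list,
 * i.e. EXCLUDE({}): "accept traffic from any source", the same state a
 * plain any-source join establishes in mca_alloc().
 */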
2407 static void igmp6_join_group(struct ifmcaddr6 *ma)
2409 unsigned long delay;
2411 if (ma->mca_flags & MAF_NOREPORT)
2414 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2416 delay = prandom_u32() % unsolicited_report_interval(ma->idev);
2418 spin_lock_bh(&ma->mca_lock);
2419 if (del_timer(&ma->mca_timer)) {
2420 refcount_dec(&ma->mca_refcnt);
2421 delay = ma->mca_timer.expires - jiffies;
2424 if (!mod_timer(&ma->mca_timer, jiffies + delay))
2425 refcount_inc(&ma->mca_refcnt);
2426 ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
2427 spin_unlock_bh(&ma->mca_lock);
2430 static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
2431 struct inet6_dev *idev)
2435 write_lock_bh(&iml->sflock);
2437 /* any-source empty exclude case */
2438 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
2440 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
2441 iml->sflist->sl_count, iml->sflist->sl_addr, 0);
2442 sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
2445 write_unlock_bh(&iml->sflock);
2449 static void igmp6_leave_group(struct ifmcaddr6 *ma)
2451 if (mld_in_v1_mode(ma->idev)) {
2452 if (ma->mca_flags & MAF_LAST_REPORTER)
2453 igmp6_send(&ma->mca_addr, ma->idev->dev,
2454 ICMPV6_MGM_REDUCTION);
2456 mld_add_delrec(ma->idev, ma);
2457 mld_ifc_event(ma->idev);
2461 static void mld_gq_timer_expire(struct timer_list *t)
2463 struct inet6_dev *idev = from_timer(idev, t, mc_gq_timer);
2465 idev->mc_gq_running = 0;
2466 mld_send_report(idev, NULL);
2470 static void mld_ifc_timer_expire(struct timer_list *t)
2472 struct inet6_dev *idev = from_timer(idev, t, mc_ifc_timer);
2475 if (idev->mc_ifc_count) {
2476 idev->mc_ifc_count--;
2477 if (idev->mc_ifc_count)
2478 mld_ifc_start_timer(idev,
2479 unsolicited_report_interval(idev));
2484 static void mld_ifc_event(struct inet6_dev *idev)
2486 if (mld_in_v1_mode(idev))
2488 idev->mc_ifc_count = idev->mc_qrv;
2489 mld_ifc_start_timer(idev, 1);
2492 static void igmp6_timer_handler(struct timer_list *t)
2494 struct ifmcaddr6 *ma = from_timer(ma, t, mca_timer);
2496 if (mld_in_v1_mode(ma->idev))
2497 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2499 mld_send_report(ma->idev, ma);
2501 spin_lock(&ma->mca_lock);
2502 ma->mca_flags |= MAF_LAST_REPORTER;
2503 ma->mca_flags &= ~MAF_TIMER_RUNNING;
2504 spin_unlock(&ma->mca_lock);
2508 /* Device changing type */
2510 void ipv6_mc_unmap(struct inet6_dev *idev)
2512 struct ifmcaddr6 *i;
	/* Withdraw the multicast list from the device; the groups themselves
	 * are kept so they can be reinstalled by ipv6_mc_remap().
	 */
2516 read_lock_bh(&idev->lock);
2517 for (i = idev->mc_list; i; i = i->next)
2518 igmp6_group_dropped(i);
2519 read_unlock_bh(&idev->lock);
2522 void ipv6_mc_remap(struct inet6_dev *idev)
2527 /* Device going down */
2529 void ipv6_mc_down(struct inet6_dev *idev)
2531 struct ifmcaddr6 *i;
2533 /* Withdraw multicast list */
2535 read_lock_bh(&idev->lock);
2537 for (i = idev->mc_list; i; i = i->next)
2538 igmp6_group_dropped(i);
	/* Stop the timers only after the groups have been dropped, or we
	 * would start a timer again from mld_ifc_event().
	 */
2543 mld_ifc_stop_timer(idev);
2544 mld_gq_stop_timer(idev);
2545 mld_dad_stop_timer(idev);
2546 read_unlock_bh(&idev->lock);
2549 static void ipv6_mc_reset(struct inet6_dev *idev)
2551 idev->mc_qrv = sysctl_mld_qrv;
2552 idev->mc_qi = MLD_QI_DEFAULT;
2553 idev->mc_qri = MLD_QRI_DEFAULT;
2554 idev->mc_v1_seen = 0;
2555 idev->mc_maxdelay = unsolicited_report_interval(idev);
2558 /* Device going up */
2560 void ipv6_mc_up(struct inet6_dev *idev)
2562 struct ifmcaddr6 *i;
2564 /* Install multicast list, except for all-nodes (already installed) */
2566 read_lock_bh(&idev->lock);
2567 ipv6_mc_reset(idev);
2568 for (i = idev->mc_list; i; i = i->next) {
2569 mld_del_delrec(idev, i);
2570 igmp6_group_added(i);
2572 read_unlock_bh(&idev->lock);
/* IPv6 device initialization. */

void ipv6_mc_init_dev(struct inet6_dev *idev)
{
	write_lock_bh(&idev->lock);
	spin_lock_init(&idev->mc_lock);
	idev->mc_gq_running = 0;
	timer_setup(&idev->mc_gq_timer, mld_gq_timer_expire, 0);
	idev->mc_tomb = NULL;
	idev->mc_ifc_count = 0;
	timer_setup(&idev->mc_ifc_timer, mld_ifc_timer_expire, 0);
	timer_setup(&idev->mc_dad_timer, mld_dad_timer_expire, 0);
	ipv6_mc_reset(idev);
	write_unlock_bh(&idev->lock);
}

/*
 *	Device is about to be destroyed: clean up.
 */

void ipv6_mc_destroy_dev(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Deactivate timers */
	ipv6_mc_down(idev);
	mld_clear_delrec(idev);

	/* Delete all-nodes address. */
	/* We cannot call ipv6_dev_mc_dec() directly, our caller in
	 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
	 * fail.
	 */
	__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);

	if (idev->cnf.forwarding)
		__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);

	write_lock_bh(&idev->lock);
	while ((i = idev->mc_list) != NULL) {
		idev->mc_list = i->next;

		write_unlock_bh(&idev->lock);
		ip6_mc_clear_src(i);
		ma_put(i);
		write_lock_bh(&idev->lock);
	}
	write_unlock_bh(&idev->lock);
}

static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc;

	ASSERT_RTNL();

	if (mld_in_v1_mode(idev)) {
		read_lock_bh(&idev->lock);
		for (pmc = idev->mc_list; pmc; pmc = pmc->next)
			igmp6_join_group(pmc);
		read_unlock_bh(&idev->lock);
	} else {
		mld_send_report(idev, NULL);
	}
}

static int ipv6_mc_netdev_event(struct notifier_block *this,
				unsigned long event,
				void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct inet6_dev *idev = __in6_dev_get(dev);

	switch (event) {
	case NETDEV_RESEND_IGMP:
		if (idev)
			ipv6_mc_rejoin_groups(idev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block igmp6_netdev_notifier = {
	.notifier_call = ipv6_mc_netdev_event,
};

#ifdef CONFIG_PROC_FS
struct igmp6_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
};

#define igmp6_mc_seq_private(seq)	((struct igmp6_mc_iter_state *)(seq)->private)

static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
{
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;
		idev = __in6_dev_get(state->dev);
		if (!idev)
			continue;
		read_lock_bh(&idev->lock);
		im = idev->mc_list;
		if (im) {
			state->idev = idev;
			break;
		}
		read_unlock_bh(&idev->lock);
	}
	return im;
}

static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
{
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	im = im->next;
	while (!im) {
		if (likely(state->idev))
			read_unlock_bh(&state->idev->lock);

		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			state->idev = NULL;
			break;
		}
		state->idev = __in6_dev_get(state->dev);
		if (!state->idev)
			continue;
		read_lock_bh(&state->idev->lock);
		im = state->idev->mc_list;
	}
	return im;
}

static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
	if (im)
		while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
			--pos;
	return pos ? NULL : im;
}

static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return igmp6_mc_get_idx(seq, *pos);
}

static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);

	++*pos;
	return im;
}

static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	if (likely(state->idev)) {
		read_unlock_bh(&state->idev->lock);
		state->idev = NULL;
	}
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
{
	struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	seq_printf(seq,
		   "%-4d %-15s %pi6 %5d %08X %ld\n",
		   state->dev->ifindex, state->dev->name,
		   &im->mca_addr,
		   im->mca_users, im->mca_flags,
		   (im->mca_flags & MAF_TIMER_RUNNING) ?
		   jiffies_to_clock_t(im->mca_timer.expires - jiffies) : 0);
	return 0;
}

static const struct seq_operations igmp6_mc_seq_ops = {
	.start	=	igmp6_mc_seq_start,
	.next	=	igmp6_mc_seq_next,
	.stop	=	igmp6_mc_seq_stop,
	.show	=	igmp6_mc_seq_show,
};

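/*
 * Editor's note: a sample of the /proc/net/igmp6 output produced by
 * igmp6_mc_seq_show() (interface names, groups and flag values below are
 * made up for illustration).  Columns are ifindex, device, group (%pi6,
 * i.e. 32 hex digits), user count, flags and remaining timer ticks:
 *
 *	$ cat /proc/net/igmp6
 *	1    lo              ff020000000000000000000000000001     1 0000000C 0
 *	2    eth0            ff020000000000000000000000000001     1 0000000C 0
 *	2    eth0            ff0200000000000000000001ff000001     1 00000004 0
 */
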
struct igmp6_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
	struct ifmcaddr6 *im;
};

#define igmp6_mcf_seq_private(seq)	((struct igmp6_mcf_iter_state *)(seq)->private)

static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
{
	struct ip6_sf_list *psf = NULL;
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;
		idev = __in6_dev_get(state->dev);
		if (unlikely(idev == NULL))
			continue;
		read_lock_bh(&idev->lock);
		im = idev->mc_list;
		if (likely(im)) {
			spin_lock_bh(&im->mca_lock);
			psf = im->mca_sources;
			if (likely(psf)) {
				state->im = im;
				state->idev = idev;
				break;
			}
			spin_unlock_bh(&im->mca_lock);
		}
		read_unlock_bh(&idev->lock);
	}
	return psf;
}

static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
{
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	psf = psf->sf_next;
	while (!psf) {
		spin_unlock_bh(&state->im->mca_lock);
		state->im = state->im->next;
		while (!state->im) {
			if (likely(state->idev))
				read_unlock_bh(&state->idev->lock);

			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				state->idev = NULL;
				goto out;
			}
			state->idev = __in6_dev_get(state->dev);
			if (!state->idev)
				continue;
			read_lock_bh(&state->idev->lock);
			state->im = state->idev->mc_list;
		}
		if (!state->im)
			break;
		spin_lock_bh(&state->im->mca_lock);
		psf = state->im->mca_sources;
	}
out:
	return psf;
}

static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
	if (psf)
		while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
			--pos;
	return pos ? NULL : psf;
}

static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_sf_list *psf;
	if (v == SEQ_START_TOKEN)
		psf = igmp6_mcf_get_first(seq);
	else
		psf = igmp6_mcf_get_next(seq, v);
	++*pos;
	return psf;
}

static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	if (likely(state->im)) {
		spin_unlock_bh(&state->im->mca_lock);
		state->im = NULL;
	}
	if (likely(state->idev)) {
		read_unlock_bh(&state->idev->lock);
		state->idev = NULL;
	}
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
{
	struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Idx Device                Multicast Address                   Source Address    INC    EXC\n");
	} else {
		seq_printf(seq,
			   "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   &state->im->mca_addr,
			   &psf->sf_addr,
			   psf->sf_count[MCAST_INCLUDE],
			   psf->sf_count[MCAST_EXCLUDE]);
	}
	return 0;
}

static const struct seq_operations igmp6_mcf_seq_ops = {
	.start	=	igmp6_mcf_seq_start,
	.next	=	igmp6_mcf_seq_next,
	.stop	=	igmp6_mcf_seq_stop,
	.show	=	igmp6_mcf_seq_show,
};

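/*
 * Editor's note: a sample of the /proc/net/mcfilter6 output produced by
 * igmp6_mcf_seq_show() (values are made up for illustration).  A row is
 * printed for every source-filter entry, giving the include and exclude
 * reference counts for that (group, source) pair:
 *
 *	$ cat /proc/net/mcfilter6
 *	Idx Device                Multicast Address                   Source Address    INC    EXC
 *	  2   eth0 ff1e0000000000000000000000000042 20010db8000000000000000000000001      1      0
 */
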
static int __net_init igmp6_proc_init(struct net *net)
{
	int err;

	err = -ENOMEM;
	if (!proc_create_net("igmp6", 0444, net->proc_net, &igmp6_mc_seq_ops,
			sizeof(struct igmp6_mc_iter_state)))
		goto out;
	if (!proc_create_net("mcfilter6", 0444, net->proc_net,
			&igmp6_mcf_seq_ops,
			sizeof(struct igmp6_mcf_iter_state)))
		goto out_proc_net_igmp6;

	err = 0;
out:
	return err;

out_proc_net_igmp6:
	remove_proc_entry("igmp6", net->proc_net);
	goto out;
}

static void __net_exit igmp6_proc_exit(struct net *net)
{
	remove_proc_entry("mcfilter6", net->proc_net);
	remove_proc_entry("igmp6", net->proc_net);
}
#else
static inline int igmp6_proc_init(struct net *net)
{
	return 0;
}
static inline void igmp6_proc_exit(struct net *net)
{
}
#endif

static int __net_init igmp6_net_init(struct net *net)
{
	int err;

	err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
				   SOCK_RAW, IPPROTO_ICMPV6, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
		       err);
		goto out;
	}

	inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;

	err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
				   SOCK_RAW, IPPROTO_ICMPV6, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n",
		       err);
		goto out_sock_create;
	}

	err = igmp6_proc_init(net);
	if (err)
		goto out_sock_create_autojoin;

	return 0;

out_sock_create_autojoin:
	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
out_sock_create:
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
out:
	return err;
}

static void __net_exit igmp6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
	igmp6_proc_exit(net);
}

static struct pernet_operations igmp6_net_ops = {
	.init = igmp6_net_init,
	.exit = igmp6_net_exit,
};

int __init igmp6_init(void)
{
	return register_pernet_subsys(&igmp6_net_ops);
}

int __init igmp6_late_init(void)
{
	return register_netdevice_notifier(&igmp6_netdev_notifier);
}

void igmp6_cleanup(void)
{
	unregister_pernet_subsys(&igmp6_net_ops);
}

void igmp6_late_cleanup(void)
{
	unregister_netdevice_notifier(&igmp6_netdev_notifier);
}