// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	LSIIT Laboratory, Strasbourg, France
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 */
14 #include <linux/uaccess.h>
15 #include <linux/types.h>
16 #include <linux/sched.h>
17 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/fcntl.h>
21 #include <linux/stat.h>
22 #include <linux/socket.h>
23 #include <linux/inet.h>
24 #include <linux/netdevice.h>
25 #include <linux/inetdevice.h>
26 #include <linux/proc_fs.h>
27 #include <linux/seq_file.h>
28 #include <linux/init.h>
29 #include <linux/compat.h>
30 #include <linux/rhashtable.h>
31 #include <net/protocol.h>
32 #include <linux/skbuff.h>
34 #include <linux/notifier.h>
35 #include <linux/if_arp.h>
36 #include <net/checksum.h>
37 #include <net/netlink.h>
38 #include <net/fib_rules.h>
41 #include <net/ip6_route.h>
42 #include <linux/mroute6.h>
43 #include <linux/pim.h>
44 #include <net/addrconf.h>
45 #include <linux/netfilter_ipv6.h>
46 #include <linux/export.h>
47 #include <net/ip6_checksum.h>
48 #include <linux/netconf.h>
49 #include <net/ip_tunnels.h>
51 #include <linux/nospec.h>
struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr_table	*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that changes are serialized under rtnl_lock.
 */
65 static DEFINE_RWLOCK(mrt_lock);
67 /* Multicast router control variables */
69 /* Special spinlock for queue of unresolved entries */
70 static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and protected by the
 * weak lock mrt_lock. The queue of unresolved entries is protected
 * by the strong spinlock mfc_unres_lock.
 *
 * As a result, the data path is entirely free of exclusive locks.
 */
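/* A minimal sketch of the resulting discipline (illustrative, not code
 * from this file): control-path writers hold rtnl plus the write side of
 * mrt_lock, while the unresolved queue is always handled under its own
 * spinlock:
 *
 *	write_lock_bh(&mrt_lock);	// e.g. mif6_add()/mif6_delete()
 *	...mutate vif_table / maxvif...
 *	write_unlock_bh(&mrt_lock);
 *
 *	spin_lock_bh(&mfc_unres_lock);	// e.g. ip6mr_cache_unresolved()
 *	...queue an skb on an unresolved entry...
 *	spin_unlock_bh(&mfc_unres_lock);
 */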
80 static struct kmem_cache *mrt_cachep __read_mostly;
82 static struct mr_table *ip6mr_new_table(struct net *net, u32 id);
83 static void ip6mr_free_table(struct mr_table *mrt);
85 static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
86 struct net_device *dev, struct sk_buff *skb,
87 struct mfc6_cache *cache);
88 static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
89 mifi_t mifi, int assert);
static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
92 static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
93 static int ip6mr_rtm_dumproute(struct sk_buff *skb,
94 struct netlink_callback *cb);
95 static void mroute_clean_tables(struct mr_table *mrt, int flags);
96 static void ipmr_expire_process(struct timer_list *t);
98 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
99 #define ip6mr_for_each_table(mrt, net) \
100 list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list, \
101 lockdep_rtnl_is_held() || \
102 list_empty(&net->ipv6.mr6_tables))
static struct mr_table *ip6mr_mr_table_iter(struct net *net,
					    struct mr_table *mrt)
{
	struct mr_table *ret;

	if (!mrt)
		ret = list_entry_rcu(net->ipv6.mr6_tables.next,
				     struct mr_table, list);
	else
		ret = list_entry_rcu(mrt->list.next,
				     struct mr_table, list);

	if (&ret->list == &net->ipv6.mr6_tables)
		return NULL;
	return ret;
}
121 static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
123 struct mr_table *mrt;
125 ip6mr_for_each_table(mrt, net) {
132 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
133 struct mr_table **mrt)
136 struct ip6mr_result res;
137 struct fib_lookup_arg arg = {
139 .flags = FIB_LOOKUP_NOREF,
142 /* update flow if oif or iif point to device enslaved to l3mdev */
143 l3mdev_update_flow(net, flowi6_to_flowi(flp6));
145 err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
146 flowi6_to_flowi(flp6), 0, &arg);
153 static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
154 int flags, struct fib_lookup_arg *arg)
156 struct ip6mr_result *res = arg->result;
157 struct mr_table *mrt;
159 switch (rule->action) {
162 case FR_ACT_UNREACHABLE:
164 case FR_ACT_PROHIBIT:
166 case FR_ACT_BLACKHOLE:
171 arg->table = fib_rule_get_table(rule, arg);
173 mrt = ip6mr_get_table(rule->fr_net, arg->table);
180 static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
185 static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
186 struct fib_rule_hdr *frh, struct nlattr **tb,
187 struct netlink_ext_ack *extack)
192 static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
198 static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
199 struct fib_rule_hdr *frh)
207 static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
208 .family = RTNL_FAMILY_IP6MR,
209 .rule_size = sizeof(struct ip6mr_rule),
210 .addr_size = sizeof(struct in6_addr),
211 .action = ip6mr_rule_action,
212 .match = ip6mr_rule_match,
213 .configure = ip6mr_rule_configure,
214 .compare = ip6mr_rule_compare,
215 .fill = ip6mr_rule_fill,
216 .nlgroup = RTNLGRP_IPV6_RULE,
	.owner = THIS_MODULE,
};
220 static int __net_init ip6mr_rules_init(struct net *net)
222 struct fib_rules_ops *ops;
223 struct mr_table *mrt;
226 ops = fib_rules_register(&ip6mr_rules_ops_template, net);
230 INIT_LIST_HEAD(&net->ipv6.mr6_tables);
232 mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
238 err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
242 net->ipv6.mr6_rules_ops = ops;
246 ip6mr_free_table(mrt);
248 fib_rules_unregister(ops);
252 static void __net_exit ip6mr_rules_exit(struct net *net)
254 struct mr_table *mrt, *next;
257 list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
258 list_del(&mrt->list);
259 ip6mr_free_table(mrt);
261 fib_rules_unregister(net->ipv6.mr6_rules_ops);
265 static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
266 struct netlink_ext_ack *extack)
268 return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR, extack);
271 static unsigned int ip6mr_rules_seq_read(struct net *net)
273 return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR);
276 bool ip6mr_rule_default(const struct fib_rule *rule)
278 return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL &&
279 rule->table == RT6_TABLE_DFLT && !rule->l3mdev;
281 EXPORT_SYMBOL(ip6mr_rule_default);
#else
#define ip6mr_for_each_table(mrt, net) \
284 for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
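/* Without CONFIG_IPV6_MROUTE_MULTIPLE_TABLES there is exactly one table,
 * net->ipv6.mrt6, so the helpers below degenerate to trivial single-table
 * stubs.
 */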
286 static struct mr_table *ip6mr_mr_table_iter(struct net *net,
287 struct mr_table *mrt)
290 return net->ipv6.mrt6;
294 static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
296 return net->ipv6.mrt6;
299 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
300 struct mr_table **mrt)
302 *mrt = net->ipv6.mrt6;
306 static int __net_init ip6mr_rules_init(struct net *net)
308 struct mr_table *mrt;
310 mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
313 net->ipv6.mrt6 = mrt;
317 static void __net_exit ip6mr_rules_exit(struct net *net)
320 ip6mr_free_table(net->ipv6.mrt6);
321 net->ipv6.mrt6 = NULL;
325 static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
326 struct netlink_ext_ack *extack)
331 static unsigned int ip6mr_rules_seq_read(struct net *net)
static int ip6mr_hash_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct mfc6_cache_cmp_arg *cmparg = arg->key;
	struct mfc6_cache *c = (struct mfc6_cache *)ptr;

	return !ipv6_addr_equal(&c->mf6c_mcastgrp, &cmparg->mf6c_mcastgrp) ||
	       !ipv6_addr_equal(&c->mf6c_origin, &cmparg->mf6c_origin);
}
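/* Note: rhashtable's obj_cmpfn must return 0 on a match and nonzero
 * otherwise, hence the negated address comparisons above.
 */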
347 static const struct rhashtable_params ip6mr_rht_params = {
348 .head_offset = offsetof(struct mr_mfc, mnode),
349 .key_offset = offsetof(struct mfc6_cache, cmparg),
350 .key_len = sizeof(struct mfc6_cache_cmp_arg),
	.nelem_hint = 3,
	.obj_cmpfn = ip6mr_hash_cmp,
	.automatic_shrinking = true,
};
356 static void ip6mr_new_table_set(struct mr_table *mrt,
359 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
360 list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
364 static struct mfc6_cache_cmp_arg ip6mr_mr_table_ops_cmparg_any = {
365 .mf6c_origin = IN6ADDR_ANY_INIT,
	.mf6c_mcastgrp = IN6ADDR_ANY_INIT,
};
369 static struct mr_table_ops ip6mr_mr_table_ops = {
370 .rht_params = &ip6mr_rht_params,
	.cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
};
374 static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
376 struct mr_table *mrt;
378 mrt = ip6mr_get_table(net, id);
382 return mr_table_alloc(net, id, &ip6mr_mr_table_ops,
383 ipmr_expire_process, ip6mr_new_table_set);
386 static void ip6mr_free_table(struct mr_table *mrt)
388 del_timer_sync(&mrt->ipmr_expire_timer);
389 mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC |
390 MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC);
391 rhltable_destroy(&mrt->mfc_hash);
395 #ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing
 * /proc/ip6_mr_cache /proc/ip6_mr_vif
 */
400 static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
403 struct mr_vif_iter *iter = seq->private;
404 struct net *net = seq_file_net(seq);
405 struct mr_table *mrt;
407 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
409 return ERR_PTR(-ENOENT);
413 read_lock(&mrt_lock);
414 return mr_vif_seq_start(seq, pos);
417 static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
420 read_unlock(&mrt_lock);
423 static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
425 struct mr_vif_iter *iter = seq->private;
426 struct mr_table *mrt = iter->mrt;
428 if (v == SEQ_START_TOKEN) {
430 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
432 const struct vif_device *vif = v;
433 const char *name = vif->dev ? vif->dev->name : "none";
436 "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
437 vif - mrt->vif_table,
438 name, vif->bytes_in, vif->pkt_in,
439 vif->bytes_out, vif->pkt_out,
445 static const struct seq_operations ip6mr_vif_seq_ops = {
446 .start = ip6mr_vif_seq_start,
447 .next = mr_vif_seq_next,
448 .stop = ip6mr_vif_seq_stop,
	.show = ip6mr_vif_seq_show,
};
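/* Illustrative /proc/net/ip6_mr_vif output (values made up for the example):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags
 *	 0 pim6reg           0       0         0       0 00001
 *	 1 eth0          10240      80      2048      16 00000
 */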
452 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
454 struct net *net = seq_file_net(seq);
455 struct mr_table *mrt;
457 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
459 return ERR_PTR(-ENOENT);
461 return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
464 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
468 if (v == SEQ_START_TOKEN) {
472 "Iif Pkts Bytes Wrong Oifs\n");
474 const struct mfc6_cache *mfc = v;
475 const struct mr_mfc_iter *it = seq->private;
476 struct mr_table *mrt = it->mrt;
478 seq_printf(seq, "%pI6 %pI6 %-3hd",
479 &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
482 if (it->cache != &mrt->mfc_unres_queue) {
483 seq_printf(seq, " %8lu %8lu %8lu",
484 mfc->_c.mfc_un.res.pkt,
485 mfc->_c.mfc_un.res.bytes,
486 mfc->_c.mfc_un.res.wrong_if);
487 for (n = mfc->_c.mfc_un.res.minvif;
488 n < mfc->_c.mfc_un.res.maxvif; n++) {
489 if (VIF_EXISTS(mrt, n) &&
490 mfc->_c.mfc_un.res.ttls[n] < 255)
493 mfc->_c.mfc_un.res.ttls[n]);
496 /* unresolved mfc_caches don't contain
497 * pkt, bytes and wrong_if values
499 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
506 static const struct seq_operations ipmr_mfc_seq_ops = {
507 .start = ipmr_mfc_seq_start,
508 .next = mr_mfc_seq_next,
509 .stop = mr_mfc_seq_stop,
	.show = ipmr_mfc_seq_show,
};
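/* Illustrative /proc/net/ip6_mr_cache line (values made up): resolved
 * entries carry real counters plus "oif:ttl" pairs, unresolved entries
 * show zeroed counters:
 *
 *	ff0e::1 2001:db8::1 1        8      512        0  2:1  3:1
 */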
514 #ifdef CONFIG_IPV6_PIMSM_V2
516 static int pim6_rcv(struct sk_buff *skb)
518 struct pimreghdr *pim;
519 struct ipv6hdr *encap;
520 struct net_device *reg_dev = NULL;
521 struct net *net = dev_net(skb->dev);
522 struct mr_table *mrt;
523 struct flowi6 fl6 = {
524 .flowi6_iif = skb->dev->ifindex,
		.flowi6_mark = skb->mark,
	};
529 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
532 pim = (struct pimreghdr *)skb_transport_header(skb);
533 if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
534 (pim->flags & PIM_NULL_REGISTER) ||
535 (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
536 sizeof(*pim), IPPROTO_PIM,
537 csum_partial((void *)pim, sizeof(*pim), 0)) &&
538 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
541 /* check if the inner packet is destined to mcast group */
542 encap = (struct ipv6hdr *)(skb_transport_header(skb) +
545 if (!ipv6_addr_is_multicast(&encap->daddr) ||
546 encap->payload_len == 0 ||
547 ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
550 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
552 reg_vif_num = mrt->mroute_reg_vif_num;
554 read_lock(&mrt_lock);
555 if (reg_vif_num >= 0)
556 reg_dev = mrt->vif_table[reg_vif_num].dev;
558 read_unlock(&mrt_lock);
563 skb->mac_header = skb->network_header;
564 skb_pull(skb, (u8 *)encap - skb->data);
565 skb_reset_network_header(skb);
566 skb->protocol = htons(ETH_P_IPV6);
567 skb->ip_summed = CHECKSUM_NONE;
569 skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
static const struct inet6_protocol pim6_protocol = {
	.handler = pim6_rcv,
};
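/* On-the-wire layout handled by pim6_rcv() (sketch):
 *
 *	| outer IPv6 (nexthdr 103) | struct pimreghdr | inner IPv6 mcast pkt |
 *
 * The outer header and the register header are stripped, and the inner
 * packet is re-injected on the pim6reg device via skb_tunnel_rx().
 */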
584 /* Service routines creating virtual interfaces: PIMREG */
586 static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
587 struct net_device *dev)
589 struct net *net = dev_net(dev);
590 struct mr_table *mrt;
591 struct flowi6 fl6 = {
592 .flowi6_oif = dev->ifindex,
593 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_mark = skb->mark,
	};
597 if (!pskb_inet_may_pull(skb))
600 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
603 read_lock(&mrt_lock);
604 dev->stats.tx_bytes += skb->len;
605 dev->stats.tx_packets++;
606 ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
607 read_unlock(&mrt_lock);
612 dev->stats.tx_errors++;
617 static int reg_vif_get_iflink(const struct net_device *dev)
622 static const struct net_device_ops reg_vif_netdev_ops = {
623 .ndo_start_xmit = reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};
627 static void reg_vif_setup(struct net_device *dev)
629 dev->type = ARPHRD_PIMREG;
630 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
631 dev->flags = IFF_NOARP;
	dev->netdev_ops = &reg_vif_netdev_ops;
633 dev->needs_free_netdev = true;
	dev->features |= NETIF_F_NETNS_LOCAL;
}
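/* The MTU above presumably reserves room for the register encapsulation
 * added on the way to the daemon: an outer IPv6 header plus the 8-byte
 * PIM Register header, hence 1500 - sizeof(struct ipv6hdr) - 8.
 */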
637 static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
639 struct net_device *dev;
642 if (mrt->id == RT6_TABLE_DFLT)
643 sprintf(name, "pim6reg");
645 sprintf(name, "pim6reg%u", mrt->id);
647 dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
651 dev_net_set(dev, net);
653 if (register_netdevice(dev)) {
658 if (dev_open(dev, NULL))
665 unregister_netdevice(dev);
670 static int call_ip6mr_vif_entry_notifiers(struct net *net,
671 enum fib_event_type event_type,
672 struct vif_device *vif,
673 mifi_t vif_index, u32 tb_id)
675 return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
676 vif, vif_index, tb_id,
677 &net->ipv6.ipmr_seq);
680 static int call_ip6mr_mfc_entry_notifiers(struct net *net,
681 enum fib_event_type event_type,
682 struct mfc6_cache *mfc, u32 tb_id)
684 return mr_call_mfc_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
685 &mfc->_c, tb_id, &net->ipv6.ipmr_seq);
688 /* Delete a VIF entry */
689 static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
690 struct list_head *head)
692 struct vif_device *v;
693 struct net_device *dev;
694 struct inet6_dev *in6_dev;
696 if (vifi < 0 || vifi >= mrt->maxvif)
697 return -EADDRNOTAVAIL;
699 v = &mrt->vif_table[vifi];
701 if (VIF_EXISTS(mrt, vifi))
702 call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
703 FIB_EVENT_VIF_DEL, v, vifi,
706 write_lock_bh(&mrt_lock);
711 write_unlock_bh(&mrt_lock);
712 return -EADDRNOTAVAIL;
715 #ifdef CONFIG_IPV6_PIMSM_V2
716 if (vifi == mrt->mroute_reg_vif_num)
717 mrt->mroute_reg_vif_num = -1;
720 if (vifi + 1 == mrt->maxvif) {
722 for (tmp = vifi - 1; tmp >= 0; tmp--) {
723 if (VIF_EXISTS(mrt, tmp))
726 mrt->maxvif = tmp + 1;
729 write_unlock_bh(&mrt_lock);
731 dev_set_allmulti(dev, -1);
733 in6_dev = __in6_dev_get(dev);
735 in6_dev->cnf.mc_forwarding--;
736 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
737 NETCONFA_MC_FORWARDING,
738 dev->ifindex, &in6_dev->cnf);
741 if ((v->flags & MIFF_REGISTER) && !notify)
742 unregister_netdevice_queue(dev, head);
744 dev_put_track(dev, &v->dev_tracker);
748 static inline void ip6mr_cache_free_rcu(struct rcu_head *head)
750 struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);
752 kmem_cache_free(mrt_cachep, (struct mfc6_cache *)c);
755 static inline void ip6mr_cache_free(struct mfc6_cache *c)
757 call_rcu(&c->_c.rcu, ip6mr_cache_free_rcu);
/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
764 static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c)
766 struct net *net = read_pnet(&mrt->net);
769 atomic_dec(&mrt->cache_resolve_queue_len);
771 while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) {
772 if (ipv6_hdr(skb)->version == 0) {
773 struct nlmsghdr *nlh = skb_pull(skb,
774 sizeof(struct ipv6hdr));
775 nlh->nlmsg_type = NLMSG_ERROR;
776 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
777 skb_trim(skb, nlh->nlmsg_len);
778 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
779 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
/* Timer process for the unresolved queue. */
790 static void ipmr_do_expire_process(struct mr_table *mrt)
792 unsigned long now = jiffies;
793 unsigned long expires = 10 * HZ;
794 struct mr_mfc *c, *next;
796 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
797 if (time_after(c->mfc_un.unres.expires, now)) {
799 unsigned long interval = c->mfc_un.unres.expires - now;
800 if (interval < expires)
806 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
807 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
810 if (!list_empty(&mrt->mfc_unres_queue))
811 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
814 static void ipmr_expire_process(struct timer_list *t)
816 struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
818 if (!spin_trylock(&mfc_unres_lock)) {
819 mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
823 if (!list_empty(&mrt->mfc_unres_queue))
824 ipmr_do_expire_process(mrt);
826 spin_unlock(&mfc_unres_lock);
/* Fill oifs list. Called under write-locked mrt_lock. */
831 static void ip6mr_update_thresholds(struct mr_table *mrt,
832 struct mr_mfc *cache,
837 cache->mfc_un.res.minvif = MAXMIFS;
838 cache->mfc_un.res.maxvif = 0;
839 memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
841 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
842 if (VIF_EXISTS(mrt, vifi) &&
843 ttls[vifi] && ttls[vifi] < 255) {
844 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
845 if (cache->mfc_un.res.minvif > vifi)
846 cache->mfc_un.res.minvif = vifi;
847 if (cache->mfc_un.res.maxvif <= vifi)
848 cache->mfc_un.res.maxvif = vifi + 1;
	cache->mfc_un.res.lastuse = jiffies;
}
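/* Worked example (illustrative): with maxvif = 4 and ttls = {0, 2, 0, 1},
 * only vifs 1 and 3 receive real thresholds, giving minvif = 1 and
 * maxvif = 4; forwarding then scans the half-open range [minvif, maxvif).
 */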
854 static int mif6_add(struct net *net, struct mr_table *mrt,
855 struct mif6ctl *vifc, int mrtsock)
857 int vifi = vifc->mif6c_mifi;
858 struct vif_device *v = &mrt->vif_table[vifi];
859 struct net_device *dev;
860 struct inet6_dev *in6_dev;
864 if (VIF_EXISTS(mrt, vifi))
867 switch (vifc->mif6c_flags) {
868 #ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
874 if (mrt->mroute_reg_vif_num >= 0)
876 dev = ip6mr_reg_vif(net, mrt);
879 err = dev_set_allmulti(dev, 1);
881 unregister_netdevice(dev);
888 dev = dev_get_by_index(net, vifc->mif6c_pifi);
890 return -EADDRNOTAVAIL;
891 err = dev_set_allmulti(dev, 1);
901 in6_dev = __in6_dev_get(dev);
903 in6_dev->cnf.mc_forwarding++;
904 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
905 NETCONFA_MC_FORWARDING,
906 dev->ifindex, &in6_dev->cnf);
909 /* Fill in the VIF structures */
910 vif_device_init(v, dev, vifc->vifc_rate_limit, vifc->vifc_threshold,
911 vifc->mif6c_flags | (!mrtsock ? VIFF_STATIC : 0),
	/* And finish the update by writing the critical data */
915 write_lock_bh(&mrt_lock);
917 netdev_tracker_alloc(dev, &v->dev_tracker, GFP_ATOMIC);
918 #ifdef CONFIG_IPV6_PIMSM_V2
919 if (v->flags & MIFF_REGISTER)
920 mrt->mroute_reg_vif_num = vifi;
922 if (vifi + 1 > mrt->maxvif)
923 mrt->maxvif = vifi + 1;
924 write_unlock_bh(&mrt_lock);
925 call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD,
930 static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
931 const struct in6_addr *origin,
932 const struct in6_addr *mcastgrp)
934 struct mfc6_cache_cmp_arg arg = {
935 .mf6c_origin = *origin,
		.mf6c_mcastgrp = *mcastgrp,
	};
939 return mr_mfc_find(mrt, &arg);
942 /* Look for a (*,G) entry */
943 static struct mfc6_cache *ip6mr_cache_find_any(struct mr_table *mrt,
944 struct in6_addr *mcastgrp,
947 struct mfc6_cache_cmp_arg arg = {
948 .mf6c_origin = in6addr_any,
		.mf6c_mcastgrp = *mcastgrp,
	};
952 if (ipv6_addr_any(mcastgrp))
953 return mr_mfc_find_any_parent(mrt, mifi);
954 return mr_mfc_find_any(mrt, mifi, &arg);
957 /* Look for a (S,G,iif) entry if parent != -1 */
958 static struct mfc6_cache *
959 ip6mr_cache_find_parent(struct mr_table *mrt,
960 const struct in6_addr *origin,
961 const struct in6_addr *mcastgrp,
964 struct mfc6_cache_cmp_arg arg = {
965 .mf6c_origin = *origin,
		.mf6c_mcastgrp = *mcastgrp,
	};
969 return mr_mfc_find_parent(mrt, &arg, parent);
972 /* Allocate a multicast cache entry */
973 static struct mfc6_cache *ip6mr_cache_alloc(void)
975 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
978 c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
979 c->_c.mfc_un.res.minvif = MAXMIFS;
980 c->_c.free = ip6mr_cache_free_rcu;
	refcount_set(&c->_c.mfc_un.res.refcount, 1);
	return c;
}
985 static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
987 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
990 skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
	c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}
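/* The 10 * HZ expiry window pairs with ipmr_do_expire_process(): an entry
 * not resolved by userspace within roughly ten seconds is torn down, and
 * its queued skbs are dropped (netlink requests are answered with
 * -ETIMEDOUT).
 */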
/*
 *	A cache entry has gone into a resolved state from queued
 */
999 static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
1000 struct mfc6_cache *uc, struct mfc6_cache *c)
1002 struct sk_buff *skb;
	/*
	 *	Play the pending entries through our router
	 */
1008 while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
1009 if (ipv6_hdr(skb)->version == 0) {
1010 struct nlmsghdr *nlh = skb_pull(skb,
1011 sizeof(struct ipv6hdr));
1013 if (mr_fill_mroute(mrt, skb, &c->_c,
1014 nlmsg_data(nlh)) > 0) {
1015 nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
1017 nlh->nlmsg_type = NLMSG_ERROR;
1018 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1019 skb_trim(skb, nlh->nlmsg_len);
1020 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
1022 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
1024 ip6_mr_forward(net, mrt, skb->dev, skb, c);
/*
 *	Bounce a cache query up to pim6sd and netlink.
 *
 *	Called under mrt_lock.
 */
1034 static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
1035 mifi_t mifi, int assert)
1037 struct sock *mroute6_sk;
1038 struct sk_buff *skb;
1039 struct mrt6msg *msg;
1042 #ifdef CONFIG_IPV6_PIMSM_V2
1043 if (assert == MRT6MSG_WHOLEPKT)
1044 skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
1048 skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
1053 /* I suppose that internal messages
1054 * do not require checksums */
1056 skb->ip_summed = CHECKSUM_UNNECESSARY;
1058 #ifdef CONFIG_IPV6_PIMSM_V2
1059 if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix length etc.
		 * And all this only to mangle msg->im6_msgtype and
		 * to set msg->im6_mbz to "mbz" :-)
		 */
1065 skb_push(skb, -skb_network_offset(pkt));
1067 skb_push(skb, sizeof(*msg));
1068 skb_reset_transport_header(skb);
1069 msg = (struct mrt6msg *)skb_transport_header(skb);
1071 msg->im6_msgtype = MRT6MSG_WHOLEPKT;
1072 msg->im6_mif = mrt->mroute_reg_vif_num;
1074 msg->im6_src = ipv6_hdr(pkt)->saddr;
1075 msg->im6_dst = ipv6_hdr(pkt)->daddr;
1077 skb->ip_summed = CHECKSUM_UNNECESSARY;
	/*
	 *	Copy the IP header
	 */
1085 skb_put(skb, sizeof(struct ipv6hdr));
1086 skb_reset_network_header(skb);
1087 skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1092 skb_put(skb, sizeof(*msg));
1093 skb_reset_transport_header(skb);
1094 msg = (struct mrt6msg *)skb_transport_header(skb);
1097 msg->im6_msgtype = assert;
1098 msg->im6_mif = mifi;
1100 msg->im6_src = ipv6_hdr(pkt)->saddr;
1101 msg->im6_dst = ipv6_hdr(pkt)->daddr;
1103 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
1104 skb->ip_summed = CHECKSUM_UNNECESSARY;
1108 mroute6_sk = rcu_dereference(mrt->mroute_sk);
1115 mrt6msg_netlink_event(mrt, skb);
1117 /* Deliver to user space multicast routing algorithms */
1118 ret = sock_queue_rcv_skb(mroute6_sk, skb);
1121 net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
/* Queue a packet for resolution. It gets a locked cache entry! */
1129 static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
1130 struct sk_buff *skb, struct net_device *dev)
1132 struct mfc6_cache *c;
1136 spin_lock_bh(&mfc_unres_lock);
1137 list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
1138 if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
1139 ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
	/*
	 *	Create a new entry if allowable
	 */
1150 c = ip6mr_cache_alloc_unres();
1152 spin_unlock_bh(&mfc_unres_lock);
1158 /* Fill in the new cache entry */
1159 c->_c.mfc_parent = -1;
1160 c->mf6c_origin = ipv6_hdr(skb)->saddr;
1161 c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
	/*
	 *	Reflect first query at pim6sd
	 */
1166 err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		/* If the report failed throw the cache entry
		 * out - Brad Parker
		 */
1171 spin_unlock_bh(&mfc_unres_lock);
1173 ip6mr_cache_free(c);
1178 atomic_inc(&mrt->cache_resolve_queue_len);
1179 list_add(&c->_c.list, &mrt->mfc_unres_queue);
1180 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1182 ipmr_do_expire_process(mrt);
1185 /* See if we can append the packet */
1186 if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
1192 skb->skb_iif = dev->ifindex;
1194 skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
1198 spin_unlock_bh(&mfc_unres_lock);
/*
 *	MFC6 cache manipulation by user space
 */
1206 static int ip6mr_mfc_delete(struct mr_table *mrt, struct mf6cctl *mfc,
1209 struct mfc6_cache *c;
1211 /* The entries are added/deleted only under RTNL */
1213 c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
1214 &mfc->mf6cc_mcastgrp.sin6_addr, parent);
1218 rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
1219 list_del_rcu(&c->_c.list);
1221 call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1222 FIB_EVENT_ENTRY_DEL, c, mrt->id);
1223 mr6_netlink_event(mrt, c, RTM_DELROUTE);
1224 mr_cache_put(&c->_c);
1228 static int ip6mr_device_event(struct notifier_block *this,
1229 unsigned long event, void *ptr)
1231 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1232 struct net *net = dev_net(dev);
1233 struct mr_table *mrt;
1234 struct vif_device *v;
1237 if (event != NETDEV_UNREGISTER)
1240 ip6mr_for_each_table(mrt, net) {
1241 v = &mrt->vif_table[0];
1242 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1244 mif6_delete(mrt, ct, 1, NULL);
1251 static unsigned int ip6mr_seq_read(struct net *net)
1255 return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net);
1258 static int ip6mr_dump(struct net *net, struct notifier_block *nb,
1259 struct netlink_ext_ack *extack)
1261 return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
1262 ip6mr_mr_table_iter, &mrt_lock, extack);
1265 static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};
1269 static const struct fib_notifier_ops ip6mr_notifier_ops_template = {
1270 .family = RTNL_FAMILY_IP6MR,
1271 .fib_seq_read = ip6mr_seq_read,
1272 .fib_dump = ip6mr_dump,
	.owner = THIS_MODULE,
};
1276 static int __net_init ip6mr_notifier_init(struct net *net)
1278 struct fib_notifier_ops *ops;
1280 net->ipv6.ipmr_seq = 0;
1282 ops = fib_notifier_ops_register(&ip6mr_notifier_ops_template, net);
1284 return PTR_ERR(ops);
1286 net->ipv6.ip6mr_notifier_ops = ops;
1291 static void __net_exit ip6mr_notifier_exit(struct net *net)
1293 fib_notifier_ops_unregister(net->ipv6.ip6mr_notifier_ops);
1294 net->ipv6.ip6mr_notifier_ops = NULL;
1297 /* Setup for IP multicast routing */
1298 static int __net_init ip6mr_net_init(struct net *net)
1302 err = ip6mr_notifier_init(net);
1306 err = ip6mr_rules_init(net);
1308 goto ip6mr_rules_fail;
1310 #ifdef CONFIG_PROC_FS
1312 if (!proc_create_net("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_seq_ops,
1313 sizeof(struct mr_vif_iter)))
1315 if (!proc_create_net("ip6_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
1316 sizeof(struct mr_mfc_iter)))
1317 goto proc_cache_fail;
1322 #ifdef CONFIG_PROC_FS
1324 remove_proc_entry("ip6_mr_vif", net->proc_net);
1326 ip6mr_rules_exit(net);
1329 ip6mr_notifier_exit(net);
1333 static void __net_exit ip6mr_net_exit(struct net *net)
1335 #ifdef CONFIG_PROC_FS
1336 remove_proc_entry("ip6_mr_cache", net->proc_net);
1337 remove_proc_entry("ip6_mr_vif", net->proc_net);
1339 ip6mr_rules_exit(net);
1340 ip6mr_notifier_exit(net);
1343 static struct pernet_operations ip6mr_net_ops = {
1344 .init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};
1348 int __init ip6_mr_init(void)
1352 mrt_cachep = kmem_cache_create("ip6_mrt_cache",
1353 sizeof(struct mfc6_cache),
1354 0, SLAB_HWCACHE_ALIGN,
1359 err = register_pernet_subsys(&ip6mr_net_ops);
1361 goto reg_pernet_fail;
1363 err = register_netdevice_notifier(&ip6_mr_notifier);
1365 goto reg_notif_fail;
1366 #ifdef CONFIG_IPV6_PIMSM_V2
1367 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1368 pr_err("%s: can't add PIM protocol\n", __func__);
1370 goto add_proto_fail;
1373 err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
1374 NULL, ip6mr_rtm_dumproute, 0);
1378 #ifdef CONFIG_IPV6_PIMSM_V2
1379 inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1381 unregister_netdevice_notifier(&ip6_mr_notifier);
1384 unregister_pernet_subsys(&ip6mr_net_ops);
1386 kmem_cache_destroy(mrt_cachep);
1390 void ip6_mr_cleanup(void)
1392 rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
1393 #ifdef CONFIG_IPV6_PIMSM_V2
1394 inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1396 unregister_netdevice_notifier(&ip6_mr_notifier);
1397 unregister_pernet_subsys(&ip6mr_net_ops);
1398 kmem_cache_destroy(mrt_cachep);
1401 static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
1402 struct mf6cctl *mfc, int mrtsock, int parent)
1404 unsigned char ttls[MAXMIFS];
1405 struct mfc6_cache *uc, *c;
1410 if (mfc->mf6cc_parent >= MAXMIFS)
1413 memset(ttls, 255, MAXMIFS);
1414 for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}
1419 /* The entries are added/deleted only under RTNL */
1421 c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
1422 &mfc->mf6cc_mcastgrp.sin6_addr, parent);
1425 write_lock_bh(&mrt_lock);
1426 c->_c.mfc_parent = mfc->mf6cc_parent;
1427 ip6mr_update_thresholds(mrt, &c->_c, ttls);
1429 c->_c.mfc_flags |= MFC_STATIC;
1430 write_unlock_bh(&mrt_lock);
1431 call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
1433 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1437 if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
1438 !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
1441 c = ip6mr_cache_alloc();
1445 c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1446 c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
1447 c->_c.mfc_parent = mfc->mf6cc_parent;
1448 ip6mr_update_thresholds(mrt, &c->_c, ttls);
1450 c->_c.mfc_flags |= MFC_STATIC;
1452 err = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
1455 pr_err("ip6mr: rhtable insert error %d\n", err);
1456 ip6mr_cache_free(c);
1459 list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);
	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
1465 spin_lock_bh(&mfc_unres_lock);
1466 list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
1467 uc = (struct mfc6_cache *)_uc;
1468 if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
1469 ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
1470 list_del(&_uc->list);
1471 atomic_dec(&mrt->cache_resolve_queue_len);
1476 if (list_empty(&mrt->mfc_unres_queue))
1477 del_timer(&mrt->ipmr_expire_timer);
1478 spin_unlock_bh(&mfc_unres_lock);
1481 ip6mr_cache_resolve(net, mrt, uc, c);
1482 ip6mr_cache_free(uc);
1484 call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD,
1486 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
/*
 *	Close the multicast socket, and clear the vif tables etc
 */
1494 static void mroute_clean_tables(struct mr_table *mrt, int flags)
1496 struct mr_mfc *c, *tmp;
1500 /* Shut down all active vif entries */
1501 if (flags & (MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC)) {
1502 for (i = 0; i < mrt->maxvif; i++) {
1503 if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
1504 !(flags & MRT6_FLUSH_MIFS_STATIC)) ||
1505 (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT6_FLUSH_MIFS)))
1507 mif6_delete(mrt, i, 0, &list);
1509 unregister_netdevice_many(&list);
1512 /* Wipe the cache */
1513 if (flags & (MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC)) {
1514 list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
1515 if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC_STATIC)) ||
1516 (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC)))
1518 rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
1519 list_del_rcu(&c->list);
1520 call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1521 FIB_EVENT_ENTRY_DEL,
1522 (struct mfc6_cache *)c, mrt->id);
1523 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
1528 if (flags & MRT6_FLUSH_MFC) {
1529 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1530 spin_lock_bh(&mfc_unres_lock);
1531 list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
1533 mr6_netlink_event(mrt, (struct mfc6_cache *)c,
1535 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
1537 spin_unlock_bh(&mfc_unres_lock);
1542 static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
1545 struct net *net = sock_net(sk);
1548 write_lock_bh(&mrt_lock);
1549 if (rtnl_dereference(mrt->mroute_sk)) {
1552 rcu_assign_pointer(mrt->mroute_sk, sk);
1553 sock_set_flag(sk, SOCK_RCU_FREE);
1554 net->ipv6.devconf_all->mc_forwarding++;
1556 write_unlock_bh(&mrt_lock);
1559 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1560 NETCONFA_MC_FORWARDING,
1561 NETCONFA_IFINDEX_ALL,
1562 net->ipv6.devconf_all);
1568 int ip6mr_sk_done(struct sock *sk)
1571 struct net *net = sock_net(sk);
1572 struct mr_table *mrt;
1574 if (sk->sk_type != SOCK_RAW ||
1575 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1579 ip6mr_for_each_table(mrt, net) {
1580 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1581 write_lock_bh(&mrt_lock);
1582 RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1583 /* Note that mroute_sk had SOCK_RCU_FREE set,
1584 * so the RCU grace period before sk freeing
1585 * is guaranteed by sk_destruct()
1587 net->ipv6.devconf_all->mc_forwarding--;
1588 write_unlock_bh(&mrt_lock);
1589 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1590 NETCONFA_MC_FORWARDING,
1591 NETCONFA_IFINDEX_ALL,
1592 net->ipv6.devconf_all);
1594 mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MFC);
1604 bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
1606 struct mr_table *mrt;
1607 struct flowi6 fl6 = {
1608 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
1609 .flowi6_oif = skb->dev->ifindex,
		.flowi6_mark = skb->mark,
	};
1613 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
1616 return rcu_access_pointer(mrt->mroute_sk);
1618 EXPORT_SYMBOL(mroute6_is_socket);
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
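/* Typical userspace sequence driving this interface (an illustrative
 * sketch of what a daemon such as pim6sd does; error handling omitted,
 * device name hypothetical):
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *
 *	struct mif6ctl mif = {
 *		.mif6c_mifi = 0,
 *		.mif6c_pifi = if_nametoindex("eth0"),
 *	};
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mif, sizeof(mif));
 *
 *	// ...install (S,G) routes with MRT6_ADD_MFC (struct mf6cctl),
 *	// and tear everything down with MRT6_DONE on exit.
 */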
1627 int ip6_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
1628 unsigned int optlen)
1630 int ret, parent = 0;
1634 struct net *net = sock_net(sk);
1635 struct mr_table *mrt;
1637 if (sk->sk_type != SOCK_RAW ||
1638 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1641 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1645 if (optname != MRT6_INIT) {
1646 if (sk != rcu_access_pointer(mrt->mroute_sk) &&
1647 !ns_capable(net->user_ns, CAP_NET_ADMIN))
1653 if (optlen < sizeof(int))
1656 return ip6mr_sk_init(mrt, sk);
1659 return ip6mr_sk_done(sk);
1662 if (optlen < sizeof(vif))
1664 if (copy_from_sockptr(&vif, optval, sizeof(vif)))
1666 if (vif.mif6c_mifi >= MAXMIFS)
1669 ret = mif6_add(net, mrt, &vif,
1670 sk == rtnl_dereference(mrt->mroute_sk));
1675 if (optlen < sizeof(mifi_t))
1677 if (copy_from_sockptr(&mifi, optval, sizeof(mifi_t)))
1680 ret = mif6_delete(mrt, mifi, 0, NULL);
	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
1692 case MRT6_ADD_MFC_PROXY:
1693 case MRT6_DEL_MFC_PROXY:
1694 if (optlen < sizeof(mfc))
1696 if (copy_from_sockptr(&mfc, optval, sizeof(mfc)))
1699 parent = mfc.mf6cc_parent;
1701 if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
1702 ret = ip6mr_mfc_delete(mrt, &mfc, parent);
1704 ret = ip6mr_mfc_add(net, mrt, &mfc,
1706 rtnl_dereference(mrt->mroute_sk),
1715 if (optlen != sizeof(flags))
1717 if (copy_from_sockptr(&flags, optval, sizeof(flags)))
1720 mroute_clean_tables(mrt, flags);
	/*
	 *	Control PIM assert (activating PIM also activates assert)
	 */
1732 if (optlen != sizeof(v))
1734 if (copy_from_sockptr(&v, optval, sizeof(v)))
1736 mrt->mroute_do_assert = v;
1740 #ifdef CONFIG_IPV6_PIMSM_V2
1745 if (optlen != sizeof(v))
1747 if (copy_from_sockptr(&v, optval, sizeof(v)))
1752 if (v != mrt->mroute_do_pim) {
1753 mrt->mroute_do_pim = v;
1754 mrt->mroute_do_assert = v;
1761 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1766 if (optlen != sizeof(u32))
1768 if (copy_from_sockptr(&v, optval, sizeof(v)))
1770 /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1771 if (v != RT_TABLE_DEFAULT && v >= 100000000)
1773 if (sk == rcu_access_pointer(mrt->mroute_sk))
1778 mrt = ip6mr_new_table(net, v);
1782 raw6_sk(sk)->ip6mr_table = v;
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot set.
	 */
	default:
1792 return -ENOPROTOOPT;
/*
 *	Getsockopt support for the multicast routing system.
 */
1800 int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1805 struct net *net = sock_net(sk);
1806 struct mr_table *mrt;
1808 if (sk->sk_type != SOCK_RAW ||
1809 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1812 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1820 #ifdef CONFIG_IPV6_PIMSM_V2
1822 val = mrt->mroute_do_pim;
1826 val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}
1832 if (get_user(olr, optlen))
1835 olr = min_t(int, olr, sizeof(int));
1839 if (put_user(olr, optlen))
1841 if (copy_to_user(optval, &val, olr))
/*
 *	The IP multicast ioctl support routines.
 */
1850 int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1852 struct sioc_sg_req6 sr;
1853 struct sioc_mif_req6 vr;
1854 struct vif_device *vif;
1855 struct mfc6_cache *c;
1856 struct net *net = sock_net(sk);
1857 struct mr_table *mrt;
1859 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1864 case SIOCGETMIFCNT_IN6:
1865 if (copy_from_user(&vr, arg, sizeof(vr)))
1867 if (vr.mifi >= mrt->maxvif)
1869 vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
1870 read_lock(&mrt_lock);
1871 vif = &mrt->vif_table[vr.mifi];
1872 if (VIF_EXISTS(mrt, vr.mifi)) {
1873 vr.icount = vif->pkt_in;
1874 vr.ocount = vif->pkt_out;
1875 vr.ibytes = vif->bytes_in;
1876 vr.obytes = vif->bytes_out;
1877 read_unlock(&mrt_lock);
1879 if (copy_to_user(arg, &vr, sizeof(vr)))
1883 read_unlock(&mrt_lock);
1884 return -EADDRNOTAVAIL;
1885 case SIOCGETSGCNT_IN6:
1886 if (copy_from_user(&sr, arg, sizeof(sr)))
1890 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1892 sr.pktcnt = c->_c.mfc_un.res.pkt;
1893 sr.bytecnt = c->_c.mfc_un.res.bytes;
1894 sr.wrong_if = c->_c.mfc_un.res.wrong_if;
1897 if (copy_to_user(arg, &sr, sizeof(sr)))
1902 return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
1908 #ifdef CONFIG_COMPAT
1909 struct compat_sioc_sg_req6 {
1910 struct sockaddr_in6 src;
1911 struct sockaddr_in6 grp;
1912 compat_ulong_t pktcnt;
1913 compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};
struct compat_sioc_mif_req6 {
	mifi_t	mifi;		/* Which mif */
1919 compat_ulong_t icount;
1920 compat_ulong_t ocount;
1921 compat_ulong_t ibytes;
	compat_ulong_t obytes;
};
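/* The compat variants exist because these ioctls embed unsigned long
 * counters, whose size differs between 32-bit userspace and a 64-bit
 * kernel; compat_ulong_t keeps the layout identical to what 32-bit
 * callers expect.
 */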
1925 int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1927 struct compat_sioc_sg_req6 sr;
1928 struct compat_sioc_mif_req6 vr;
1929 struct vif_device *vif;
1930 struct mfc6_cache *c;
1931 struct net *net = sock_net(sk);
1932 struct mr_table *mrt;
1934 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1939 case SIOCGETMIFCNT_IN6:
1940 if (copy_from_user(&vr, arg, sizeof(vr)))
1942 if (vr.mifi >= mrt->maxvif)
1944 vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
1945 read_lock(&mrt_lock);
1946 vif = &mrt->vif_table[vr.mifi];
1947 if (VIF_EXISTS(mrt, vr.mifi)) {
1948 vr.icount = vif->pkt_in;
1949 vr.ocount = vif->pkt_out;
1950 vr.ibytes = vif->bytes_in;
1951 vr.obytes = vif->bytes_out;
1952 read_unlock(&mrt_lock);
1954 if (copy_to_user(arg, &vr, sizeof(vr)))
1958 read_unlock(&mrt_lock);
1959 return -EADDRNOTAVAIL;
1960 case SIOCGETSGCNT_IN6:
1961 if (copy_from_user(&sr, arg, sizeof(sr)))
1965 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1967 sr.pktcnt = c->_c.mfc_un.res.pkt;
1968 sr.bytecnt = c->_c.mfc_un.res.bytes;
1969 sr.wrong_if = c->_c.mfc_un.res.wrong_if;
1972 if (copy_to_user(arg, &sr, sizeof(sr)))
1977 return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
1984 static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
1986 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
1987 IPSTATS_MIB_OUTFORWDATAGRAMS);
1988 IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
1989 IPSTATS_MIB_OUTOCTETS, skb->len);
1990 return dst_output(net, sk, skb);
/*
 *	Processing handlers for ip6mr_forward
 */
1997 static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
1998 struct sk_buff *skb, int vifi)
2000 struct ipv6hdr *ipv6h;
2001 struct vif_device *vif = &mrt->vif_table[vifi];
2002 struct net_device *dev;
2003 struct dst_entry *dst;
2009 #ifdef CONFIG_IPV6_PIMSM_V2
2010 if (vif->flags & MIFF_REGISTER) {
2012 vif->bytes_out += skb->len;
2013 vif->dev->stats.tx_bytes += skb->len;
2014 vif->dev->stats.tx_packets++;
2015 ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
2020 ipv6h = ipv6_hdr(skb);
2022 fl6 = (struct flowi6) {
2023 .flowi6_oif = vif->link,
2024 .daddr = ipv6h->daddr,
2027 dst = ip6_route_output(net, NULL, &fl6);
2034 skb_dst_set(skb, dst);
	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if an mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program joined on. If we did not do this, the program would
	 * have to join on all interfaces. On the other hand, a multihomed
	 * host (or router, but not mrouter) cannot join on more than one
	 * interface - it would result in receiving multiple packets.
	 */
2050 vif->bytes_out += skb->len;
2052 /* We are about to write */
2053 /* XXX: extension headers? */
2054 if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
2057 ipv6h = ipv6_hdr(skb);
2060 IP6CB(skb)->flags |= IP6SKB_FORWARDED;
2062 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
2063 net, NULL, skb, skb->dev, dev,
2064 ip6mr_forward2_finish);
2071 static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
2075 for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
2076 if (mrt->vif_table[ct].dev == dev)
2082 static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
2083 struct net_device *dev, struct sk_buff *skb,
2084 struct mfc6_cache *c)
2088 int true_vifi = ip6mr_find_vif(mrt, dev);
2090 vif = c->_c.mfc_parent;
2091 c->_c.mfc_un.res.pkt++;
2092 c->_c.mfc_un.res.bytes += skb->len;
2093 c->_c.mfc_un.res.lastuse = jiffies;
2095 if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
2096 struct mfc6_cache *cache_proxy;
		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
2102 cache_proxy = mr_mfc_find_any_parent(mrt, vif);
2104 cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255) {
	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
2114 if (mrt->vif_table[vif].dev != dev) {
2115 c->_c.mfc_un.res.wrong_if++;
2117 if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts when switching from RPT to SPT,
		     * so we cannot check that the packet arrived on an oif.
		     * That is bad, but otherwise we would need to move a
		     * pretty large chunk of pimd into the kernel. Ough... --ANK
		     */
2123 (mrt->mroute_do_pim ||
2124 c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
2126 c->_c.mfc_un.res.last_assert +
2127 MFC_ASSERT_THRESH)) {
2128 c->_c.mfc_un.res.last_assert = jiffies;
2129 ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
2135 mrt->vif_table[vif].pkt_in++;
2136 mrt->vif_table[vif].bytes_in += skb->len;
2141 if (ipv6_addr_any(&c->mf6c_origin) &&
2142 ipv6_addr_any(&c->mf6c_mcastgrp)) {
2143 if (true_vifi >= 0 &&
2144 true_vifi != c->_c.mfc_parent &&
2145 ipv6_hdr(skb)->hop_limit >
2146 c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
2151 psend = c->_c.mfc_parent;
2156 for (ct = c->_c.mfc_un.res.maxvif - 1;
2157 ct >= c->_c.mfc_un.res.minvif; ct--) {
2158 /* For (*,G) entry, don't forward to the incoming interface */
2159 if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) &&
2160 ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
2162 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2164 ip6mr_forward2(net, mrt, skb2, psend);
2171 ip6mr_forward2(net, mrt, skb, psend);
/*
 *	Multicast packets for forwarding arrive here
 */
2184 int ip6_mr_input(struct sk_buff *skb)
2186 struct mfc6_cache *cache;
2187 struct net *net = dev_net(skb->dev);
2188 struct mr_table *mrt;
2189 struct flowi6 fl6 = {
2190 .flowi6_iif = skb->dev->ifindex,
2191 .flowi6_mark = skb->mark,
2194 struct net_device *dev;
	/* skb->dev passed in is the master dev for vrfs.
	 * Get the proper interface that does have a vif associated with it.
	 */
2200 if (netif_is_l3_master(skb->dev)) {
2201 dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
2208 err = ip6mr_fib_lookup(net, &fl6, &mrt);
2214 read_lock(&mrt_lock);
2215 cache = ip6mr_cache_find(mrt,
2216 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2218 int vif = ip6mr_find_vif(mrt, dev);
2221 cache = ip6mr_cache_find_any(mrt,
2222 &ipv6_hdr(skb)->daddr,
		/*
		 *	No usable cache entry
		 */
2232 vif = ip6mr_find_vif(mrt, dev);
2234 int err = ip6mr_cache_unresolved(mrt, vif, skb, dev);
2235 read_unlock(&mrt_lock);
2239 read_unlock(&mrt_lock);
2244 ip6_mr_forward(net, mrt, dev, skb, cache);
2246 read_unlock(&mrt_lock);
2251 int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
2255 struct mr_table *mrt;
2256 struct mfc6_cache *cache;
2257 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
2259 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2263 read_lock(&mrt_lock);
2264 cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2265 if (!cache && skb->dev) {
2266 int vif = ip6mr_find_vif(mrt, skb->dev);
2269 cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
2274 struct sk_buff *skb2;
2275 struct ipv6hdr *iph;
2276 struct net_device *dev;
2280 if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2281 read_unlock(&mrt_lock);
2285 /* really correct? */
2286 skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2288 read_unlock(&mrt_lock);
2292 NETLINK_CB(skb2).portid = portid;
2293 skb_reset_transport_header(skb2);
2295 skb_put(skb2, sizeof(struct ipv6hdr));
2296 skb_reset_network_header(skb2);
2298 iph = ipv6_hdr(skb2);
2301 iph->flow_lbl[0] = 0;
2302 iph->flow_lbl[1] = 0;
2303 iph->flow_lbl[2] = 0;
2304 iph->payload_len = 0;
2305 iph->nexthdr = IPPROTO_NONE;
2307 iph->saddr = rt->rt6i_src.addr;
2308 iph->daddr = rt->rt6i_dst.addr;
2310 err = ip6mr_cache_unresolved(mrt, vif, skb2, dev);
2311 read_unlock(&mrt_lock);
2316 err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
2317 read_unlock(&mrt_lock);
2321 static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2322 u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2325 struct nlmsghdr *nlh;
2329 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2333 rtm = nlmsg_data(nlh);
2334 rtm->rtm_family = RTNL_FAMILY_IP6MR;
2335 rtm->rtm_dst_len = 128;
2336 rtm->rtm_src_len = 128;
2338 rtm->rtm_table = mrt->id;
2339 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2340 goto nla_put_failure;
2341 rtm->rtm_type = RTN_MULTICAST;
2342 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2343 if (c->_c.mfc_flags & MFC_STATIC)
2344 rtm->rtm_protocol = RTPROT_STATIC;
2346 rtm->rtm_protocol = RTPROT_MROUTED;
2349 if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
2350 nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
2351 goto nla_put_failure;
2352 err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
2353 /* do not break the dump if cache is unresolved */
2354 if (err < 0 && err != -ENOENT)
2355 goto nla_put_failure;
2357 nlmsg_end(skb, nlh);
2361 nlmsg_cancel(skb, nlh);
2365 static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2366 u32 portid, u32 seq, struct mr_mfc *c,
2369 return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c,
2373 static int mr6_msgsize(bool unresolved, int maxvif)
2376 NLMSG_ALIGN(sizeof(struct rtmsg))
2377 + nla_total_size(4) /* RTA_TABLE */
2378 + nla_total_size(sizeof(struct in6_addr)) /* RTA_SRC */
2379 + nla_total_size(sizeof(struct in6_addr)) /* RTA_DST */
2384 + nla_total_size(4) /* RTA_IIF */
2385 + nla_total_size(0) /* RTA_MULTIPATH */
2386 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2388 + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
2394 static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
2397 struct net *net = read_pnet(&mrt->net);
2398 struct sk_buff *skb;
2401 skb = nlmsg_new(mr6_msgsize(mfc->_c.mfc_parent >= MAXMIFS, mrt->maxvif),
2406 err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2410 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
2416 rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
2419 static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
2422 NLMSG_ALIGN(sizeof(struct rtgenmsg))
2423 + nla_total_size(1) /* IP6MRA_CREPORT_MSGTYPE */
2424 + nla_total_size(4) /* IP6MRA_CREPORT_MIF_ID */
2425 /* IP6MRA_CREPORT_SRC_ADDR */
2426 + nla_total_size(sizeof(struct in6_addr))
2427 /* IP6MRA_CREPORT_DST_ADDR */
2428 + nla_total_size(sizeof(struct in6_addr))
2429 /* IP6MRA_CREPORT_PKT */
2430 + nla_total_size(payloadlen)
2436 static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
2438 struct net *net = read_pnet(&mrt->net);
2439 struct nlmsghdr *nlh;
2440 struct rtgenmsg *rtgenm;
2441 struct mrt6msg *msg;
2442 struct sk_buff *skb;
2446 payloadlen = pkt->len - sizeof(struct mrt6msg);
2447 msg = (struct mrt6msg *)skb_transport_header(pkt);
2449 skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
2453 nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
2454 sizeof(struct rtgenmsg), 0);
2457 rtgenm = nlmsg_data(nlh);
2458 rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
2459 if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
2460 nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
2461 nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
2463 nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
2465 goto nla_put_failure;
2467 nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
2468 if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
2469 nla_data(nla), payloadlen))
2470 goto nla_put_failure;
2472 nlmsg_end(skb, nlh);
2474 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
2478 nlmsg_cancel(skb, nlh);
	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
}
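/* Together with the socket path in ip6mr_cache_report(), cache reports
 * are therefore delivered twice: queued on the mroute socket for the
 * routing daemon, and multicast as RTM_NEWCACHEREPORT to
 * RTNLGRP_IPV6_MROUTE_R listeners.
 */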
2484 static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2486 const struct nlmsghdr *nlh = cb->nlh;
2487 struct fib_dump_filter filter = {};
2490 if (cb->strict_check) {
2491 err = ip_valid_fib_dump_req(sock_net(skb->sk), nlh,
2497 if (filter.table_id) {
2498 struct mr_table *mrt;
2500 mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id);
2502 if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR)
2505 NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist");
2508 err = mr_table_dump(mrt, skb, cb, _ip6mr_fill_mroute,
2509 &mfc_unres_lock, &filter);
2510 return skb->len ? : err;
2513 return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
				_ip6mr_fill_mroute, &mfc_unres_lock, &filter);
}