// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IP multicast routing support for mrouted 3.6/3.8
 *
 * Linux Consultancy and Custom Driver Development
 *
 * Fixes:
 * Michael Chastain : Incorrect size of copying.
 * Alan Cox : Added the cache manager code
 * Alan Cox : Fixed the clone/copy bug and device race.
 * Mike McLagan : Routing by source
 * Malcolm Beattie : Buffer handling fixes.
 * Alexey Kuznetsov : Double buffer free and other fixes.
 * SVR Anand : Fixed several multicast bugs and problems.
 * Alexey Kuznetsov : Status, optimisations and more.
 * Brad Parker : Better behaviour on mrouted upcall
 * Carlos Picoto : PIMv1 Support
 * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header
 *                           Relax this requirement to work with older peers.
 */
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/rhashtable.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <linux/nospec.h>
struct ipmr_rule {
        struct fib_rule common;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */
static DEFINE_RWLOCK(mrt_lock);
/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We revert to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and protected
 * with the weak lock mrt_lock. The queue of unresolved entries is
 * protected with the strong spinlock mfc_unres_lock.
 *
 * In this case the data path is entirely free of exclusive locks.
 */

static struct kmem_cache *mrt_cachep __ro_after_init;
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
                          struct net_device *dev, struct sk_buff *skb,
                          struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
                                 int cmd);
static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
        list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)
static struct mr_table *ipmr_mr_table_iter(struct net *net,
                                           struct mr_table *mrt)
{
        struct mr_table *ret;

        if (!mrt)
                ret = list_entry_rcu(net->ipv4.mr_tables.next,
                                     struct mr_table, list);
        else
                ret = list_entry_rcu(mrt->list.next,
                                     struct mr_table, list);

        if (&ret->list == &net->ipv4.mr_tables)
                return NULL;
        return ret;
}
static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
        struct mr_table *mrt;

        ipmr_for_each_table(mrt, net) {
static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
                           struct mr_table **mrt)
{
        struct ipmr_result res;
        struct fib_lookup_arg arg = {
                .result = &res,
                .flags = FIB_LOOKUP_NOREF,
        };
        int err;

        /* update flow if oif or iif point to device enslaved to l3mdev */
        l3mdev_update_flow(net, flowi4_to_flowi(flp4));

        err = fib_rules_lookup(net->ipv4.mr_rules_ops,
                               flowi4_to_flowi(flp4), 0, &arg);
static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
                            int flags, struct fib_lookup_arg *arg)
{
        struct ipmr_result *res = arg->result;
        struct mr_table *mrt;

        switch (rule->action) {
        case FR_ACT_TO_TBL:
                break;
        case FR_ACT_UNREACHABLE:
                return -ENETUNREACH;
        case FR_ACT_PROHIBIT:
                return -EACCES;
        case FR_ACT_BLACKHOLE:
        default:
                return -EINVAL;
        }

        arg->table = fib_rule_get_table(rule, arg);

        mrt = ipmr_get_table(rule->fr_net, arg->table);
static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
        return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
                               struct fib_rule_hdr *frh, struct nlattr **tb,
                               struct netlink_ext_ack *extack)
{
        return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
                             struct nlattr **tb)
{
        return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
                          struct fib_rule_hdr *frh)
static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
        .family         = RTNL_FAMILY_IPMR,
        .rule_size      = sizeof(struct ipmr_rule),
        .addr_size      = sizeof(u32),
        .action         = ipmr_rule_action,
        .match          = ipmr_rule_match,
        .configure      = ipmr_rule_configure,
        .compare        = ipmr_rule_compare,
        .fill           = ipmr_rule_fill,
        .nlgroup        = RTNLGRP_IPV4_RULE,
        .policy         = ipmr_rule_policy,
        .owner          = THIS_MODULE,
};
static int __net_init ipmr_rules_init(struct net *net)
{
        struct fib_rules_ops *ops;
        struct mr_table *mrt;
        int err;

        ops = fib_rules_register(&ipmr_rules_ops_template, net);

        INIT_LIST_HEAD(&net->ipv4.mr_tables);

        mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);

        err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);

        net->ipv4.mr_rules_ops = ops;

        ipmr_free_table(mrt);

        fib_rules_unregister(ops);
static void __net_exit ipmr_rules_exit(struct net *net)
{
        struct mr_table *mrt, *next;

        list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
                list_del(&mrt->list);
                ipmr_free_table(mrt);
        }
        fib_rules_unregister(net->ipv4.mr_rules_ops);
}
static int ipmr_rules_dump(struct net *net, struct notifier_block *nb)
{
        return fib_rules_dump(net, nb, RTNL_FAMILY_IPMR);
}

static unsigned int ipmr_rules_seq_read(struct net *net)
{
        return fib_rules_seq_read(net, RTNL_FAMILY_IPMR);
}

bool ipmr_rule_default(const struct fib_rule *rule)
{
        return fib_rule_matchall(rule) && rule->table == RT_TABLE_DEFAULT;
}
EXPORT_SYMBOL(ipmr_rule_default);
#else
#define ipmr_for_each_table(mrt, net) \
        for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
static struct mr_table *ipmr_mr_table_iter(struct net *net,
                                           struct mr_table *mrt)
{
        if (!mrt)
                return net->ipv4.mrt;
        return NULL;
}

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
        return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
                           struct mr_table **mrt)
{
        *mrt = net->ipv4.mrt;
        return 0;
}
static int __net_init ipmr_rules_init(struct net *net)
{
        struct mr_table *mrt;

        mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
        net->ipv4.mrt = mrt;
        return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
        ipmr_free_table(net->ipv4.mrt);
        net->ipv4.mrt = NULL;
}
static int ipmr_rules_dump(struct net *net, struct notifier_block *nb)
{
        return 0;
}

static unsigned int ipmr_rules_seq_read(struct net *net)
{
        return 0;
}

bool ipmr_rule_default(const struct fib_rule *rule)
{
        return true;
}
EXPORT_SYMBOL(ipmr_rule_default);
#endif
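
/* Note: as an rhashtable obj_cmpfn, the comparator below must return 0 on
 * a match; its expression is nonzero exactly when the (S,G) pair differs.
 */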
static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
                                const void *ptr)
{
        const struct mfc_cache_cmp_arg *cmparg = arg->key;
        struct mfc_cache *c = (struct mfc_cache *)ptr;

        return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
               cmparg->mfc_origin != c->mfc_origin;
}
static const struct rhashtable_params ipmr_rht_params = {
        .head_offset = offsetof(struct mr_mfc, mnode),
        .key_offset = offsetof(struct mfc_cache, cmparg),
        .key_len = sizeof(struct mfc_cache_cmp_arg),
        .obj_cmpfn = ipmr_hash_cmp,
        .automatic_shrinking = true,
};
static void ipmr_new_table_set(struct mr_table *mrt,
                               struct net *net)
{
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
        list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
}
static struct mfc_cache_cmp_arg ipmr_mr_table_ops_cmparg_any = {
        .mfc_mcastgrp = htonl(INADDR_ANY),
        .mfc_origin = htonl(INADDR_ANY),
};

static struct mr_table_ops ipmr_mr_table_ops = {
        .rht_params = &ipmr_rht_params,
        .cmparg_any = &ipmr_mr_table_ops_cmparg_any,
};
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
        struct mr_table *mrt;

        /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
        if (id != RT_TABLE_DEFAULT && id >= 1000000000)
                return ERR_PTR(-EINVAL);

        mrt = ipmr_get_table(net, id);
        if (mrt)
                return mrt;

        return mr_table_alloc(net, id, &ipmr_mr_table_ops,
                              ipmr_expire_process, ipmr_new_table_set);
}
static void ipmr_free_table(struct mr_table *mrt)
{
        del_timer_sync(&mrt->ipmr_expire_timer);
        mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC |
                                 MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC);
        rhltable_destroy(&mrt->mfc_hash);
        kfree(mrt);
}
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
        struct net *net = dev_net(dev);

        dev = __dev_get_by_name(net, "tunl0");
        if (dev) {
                const struct net_device_ops *ops = dev->netdev_ops;
                struct ifreq ifr;
                struct ip_tunnel_parm p;

                memset(&p, 0, sizeof(p));
                p.iph.daddr = v->vifc_rmt_addr.s_addr;
                p.iph.saddr = v->vifc_lcl_addr.s_addr;
                p.iph.protocol = IPPROTO_IPIP;
                sprintf(p.name, "dvmrp%d", v->vifc_vifi);
                ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
                if (ops->ndo_do_ioctl) {
                        mm_segment_t oldfs = get_fs();
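
                        /* The get_fs()/set_fs() dance below temporarily lifts
                         * the user/kernel address-space check so the tunnel
                         * driver's ioctl accepts the kernel-resident parms.
                         */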
                        set_fs(KERNEL_DS);
                        ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
                        set_fs(oldfs);
                }
        }
}
/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
        struct in_device *in_dev;

        in_dev = __in_dev_get_rtnl(dev);
        if (!in_dev)
                return false;

        ipv4_devconf_setall(in_dev);
        neigh_parms_data_state_setall(in_dev->arp_parms);
        IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

        return true;
}
static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
        struct net_device *dev;

        dev = __dev_get_by_name(net, "tunl0");

        if (dev) {
                const struct net_device_ops *ops = dev->netdev_ops;
                int err;
                struct ifreq ifr;
                struct ip_tunnel_parm p;

                memset(&p, 0, sizeof(p));
                p.iph.daddr = v->vifc_rmt_addr.s_addr;
                p.iph.saddr = v->vifc_lcl_addr.s_addr;
                p.iph.protocol = IPPROTO_IPIP;
                sprintf(p.name, "dvmrp%d", v->vifc_vifi);
                ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
                if (ops->ndo_do_ioctl) {
                        mm_segment_t oldfs = get_fs();
                        set_fs(KERNEL_DS);
                        err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
                        set_fs(oldfs);
                }

                if (err == 0 &&
                    (dev = __dev_get_by_name(net, p.name)) != NULL) {
                        dev->flags |= IFF_MULTICAST;
                        if (!ipmr_init_vif_indev(dev))
                                goto failure;
                        if (dev_open(dev, NULL))
                                goto failure;
                }
        }

failure:
        unregister_netdevice(dev);
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
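/* The PIM register vif: packets the kernel routes to this device are not
 * transmitted but bounced to the user-space daemon as IGMPMSG_WHOLEPKT
 * reports (see reg_vif_xmit() below).
 */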
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct net *net = dev_net(dev);
        struct mr_table *mrt;
        struct flowi4 fl4 = {
                .flowi4_oif = dev->ifindex,
                .flowi4_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
                .flowi4_mark = skb->mark,
        };
        int err;

        err = ipmr_fib_lookup(net, &fl4, &mrt);
        if (err < 0) {
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        read_lock(&mrt_lock);
        dev->stats.tx_bytes += skb->len;
        dev->stats.tx_packets++;
        ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
        read_unlock(&mrt_lock);
        kfree_skb(skb);
        return NETDEV_TX_OK;
}
static int reg_vif_get_iflink(const struct net_device *dev)
{
        return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
        .ndo_start_xmit = reg_vif_xmit,
        .ndo_get_iflink = reg_vif_get_iflink,
};
static void reg_vif_setup(struct net_device *dev)
{
        dev->type = ARPHRD_PIMREG;
        dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
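        /* The 8 presumably accounts for the 8-byte PIM register header
         * that, together with the outer IP header, encapsulates packets
         * sent on this vif.
         */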
        dev->flags = IFF_NOARP;
        dev->netdev_ops = &reg_vif_netdev_ops;
        dev->needs_free_netdev = true;
        dev->features |= NETIF_F_NETNS_LOCAL;
}
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
        struct net_device *dev;
        char name[IFNAMSIZ];

        if (mrt->id == RT_TABLE_DEFAULT)
                sprintf(name, "pimreg");
        else
                sprintf(name, "pimreg%u", mrt->id);

        dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);

        dev_net_set(dev, net);

        if (register_netdevice(dev)) {
                free_netdev(dev);
                return NULL;
        }

        if (!ipmr_init_vif_indev(dev))
                goto failure;
        if (dev_open(dev, NULL))
                goto failure;

        return dev;

failure:
        unregister_netdevice(dev);
        return NULL;
}
/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
                     unsigned int pimlen)
{
        struct net_device *reg_dev = NULL;
        struct iphdr *encap;

        encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
        /* Check that:
         * a. packet is really sent to a multicast group
         * b. packet is not a NULL-REGISTER
         * c. packet is not truncated
         */
        if (!ipv4_is_multicast(encap->daddr) ||
            encap->tot_len == 0 ||
            ntohs(encap->tot_len) + pimlen > skb->len)
                return 1;
        read_lock(&mrt_lock);
        if (mrt->mroute_reg_vif_num >= 0)
                reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
        read_unlock(&mrt_lock);

        if (!reg_dev)
                return 1;
        skb->mac_header = skb->network_header;
        skb_pull(skb, (u8 *)encap - skb->data);
        skb_reset_network_header(skb);
        skb->protocol = htons(ETH_P_IP);
        skb->ip_summed = CHECKSUM_NONE;

        skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
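        /* skb_tunnel_rx() re-injects the decapsulated packet as if it
         * had been received on the register vif, so it is accounted to
         * that device.
         */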
        netif_rx(skb);
        return NET_RX_SUCCESS;
}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
        return NULL;
}
#endif
static int call_ipmr_vif_entry_notifiers(struct net *net,
                                         enum fib_event_type event_type,
                                         struct vif_device *vif,
                                         vifi_t vif_index, u32 tb_id)
{
        return mr_call_vif_notifiers(net, RTNL_FAMILY_IPMR, event_type,
                                     vif, vif_index, tb_id,
                                     &net->ipv4.ipmr_seq);
}

static int call_ipmr_mfc_entry_notifiers(struct net *net,
                                         enum fib_event_type event_type,
                                         struct mfc_cache *mfc, u32 tb_id)
{
        return mr_call_mfc_notifiers(net, RTNL_FAMILY_IPMR, event_type,
                                     &mfc->_c, tb_id, &net->ipv4.ipmr_seq);
}
/**
 * vif_delete - Delete a VIF entry
 * @notify: Set to 1 if the caller is a notifier_call
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
                      struct list_head *head)
{
        struct net *net = read_pnet(&mrt->net);
        struct vif_device *v;
        struct net_device *dev;
        struct in_device *in_dev;

        if (vifi < 0 || vifi >= mrt->maxvif)
                return -EADDRNOTAVAIL;

        v = &mrt->vif_table[vifi];
        if (VIF_EXISTS(mrt, vifi))
                call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_DEL, v, vifi,
                                              mrt->id);

        write_lock_bh(&mrt_lock);
        dev = v->dev;
        v->dev = NULL;

        if (!dev) {
                write_unlock_bh(&mrt_lock);
                return -EADDRNOTAVAIL;
        }
        if (vifi == mrt->mroute_reg_vif_num)
                mrt->mroute_reg_vif_num = -1;

        if (vifi + 1 == mrt->maxvif) {
                int tmp;

                for (tmp = vifi - 1; tmp >= 0; tmp--) {
                        if (VIF_EXISTS(mrt, tmp))
                                break;
                }
                mrt->maxvif = tmp + 1;
        }

        write_unlock_bh(&mrt_lock);
        dev_set_allmulti(dev, -1);

        in_dev = __in_dev_get_rtnl(dev);
        if (in_dev) {
                IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
                inet_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
                                            NETCONFA_MC_FORWARDING,
                                            dev->ifindex, &in_dev->cnf);
                ip_rt_multicast_event(in_dev);
        }
        if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
                unregister_netdevice_queue(dev, head);

        dev_put(dev);
        return 0;
}
static void ipmr_cache_free_rcu(struct rcu_head *head)
{
        struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);

        kmem_cache_free(mrt_cachep, (struct mfc_cache *)c);
}

static void ipmr_cache_free(struct mfc_cache *c)
{
        call_rcu(&c->_c.rcu, ipmr_cache_free_rcu);
}
/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
        struct net *net = read_pnet(&mrt->net);
        struct sk_buff *skb;

        atomic_dec(&mrt->cache_resolve_queue_len);

        while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = skb_pull(skb,
                                                        sizeof(struct iphdr));
                        struct nlmsgerr *e = nlmsg_data(nlh);

                        nlh->nlmsg_type = NLMSG_ERROR;
                        nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
                        skb_trim(skb, nlh->nlmsg_len);
                        e->error = -ETIMEDOUT;
                        memset(&e->msg, 0, sizeof(e->msg));

                        rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else {
                        kfree_skb(skb);
                }
        }

        ipmr_cache_free(c);
}
/* Timer process for the unresolved queue. */
static void ipmr_expire_process(struct timer_list *t)
{
        struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
        struct mr_mfc *c, *next;
        unsigned long expires;
        unsigned long now;

        if (!spin_trylock(&mfc_unres_lock)) {
                mod_timer(&mrt->ipmr_expire_timer, jiffies + HZ / 10);
                return;
        }

        if (list_empty(&mrt->mfc_unres_queue))
                goto out;
        now = jiffies;
        expires = 10 * HZ;

        list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
                if (time_after(c->mfc_un.unres.expires, now)) {
                        unsigned long interval = c->mfc_un.unres.expires - now;

                        if (interval < expires)
                                expires = interval;
                        continue;
                }

                list_del(&c->list);
                mroute_netlink_event(mrt, (struct mfc_cache *)c, RTM_DELROUTE);
                ipmr_destroy_unres(mrt, (struct mfc_cache *)c);
        }

        if (!list_empty(&mrt->mfc_unres_queue))
                mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
        spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache,
                                   unsigned char *ttls)
{
        int vifi;

        cache->mfc_un.res.minvif = MAXVIFS;
        cache->mfc_un.res.maxvif = 0;
        memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
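        /* A TTL of 255 marks a vif as "never forward"; thresholds the
         * daemon installs are in the 1..254 range (see the check below).
         */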
        for (vifi = 0; vifi < mrt->maxvif; vifi++) {
                if (VIF_EXISTS(mrt, vifi) &&
                    ttls[vifi] && ttls[vifi] < 255) {
                        cache->mfc_un.res.ttls[vifi] = ttls[vifi];
                        if (cache->mfc_un.res.minvif > vifi)
                                cache->mfc_un.res.minvif = vifi;
                        if (cache->mfc_un.res.maxvif <= vifi)
                                cache->mfc_un.res.maxvif = vifi + 1;
                }
        }
        cache->mfc_un.res.lastuse = jiffies;
}
static int vif_add(struct net *net, struct mr_table *mrt,
                   struct vifctl *vifc, int mrtsock)
{
        struct netdev_phys_item_id ppid = { };
        int vifi = vifc->vifc_vifi;
        struct vif_device *v = &mrt->vif_table[vifi];
        struct net_device *dev;
        struct in_device *in_dev;
        int err;

        /* Is vif busy ? */
        if (VIF_EXISTS(mrt, vifi))
                return -EADDRINUSE;
        switch (vifc->vifc_flags) {
        case VIFF_REGISTER:
                if (!ipmr_pimsm_enabled())
                        return -EINVAL;
                /* Special Purpose VIF in PIM
                 * All the packets will be sent to the daemon
                 */
                if (mrt->mroute_reg_vif_num >= 0)
                        return -EADDRINUSE;
                dev = ipmr_reg_vif(net, mrt);
                if (!dev)
                        return -ENOBUFS;
                err = dev_set_allmulti(dev, 1);
                if (err) {
                        unregister_netdevice(dev);
                        return err;
                }
                break;
        case VIFF_TUNNEL:
                dev = ipmr_new_tunnel(net, vifc);
                err = dev_set_allmulti(dev, 1);
                if (err) {
                        ipmr_del_tunnel(dev, vifc);
                        return err;
                }
                break;
        case VIFF_USE_IFINDEX:
        case 0:
                if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
                        dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
                        if (dev && !__in_dev_get_rtnl(dev)) {
                                dev_put(dev);
                                return -EADDRNOTAVAIL;
                        }
                } else {
                        dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
                }
                if (!dev)
                        return -EADDRNOTAVAIL;
                err = dev_set_allmulti(dev, 1);
                break;
        default:
                return -EINVAL;
        }
        in_dev = __in_dev_get_rtnl(dev);
        if (!in_dev) {
                dev_put(dev);
                return -EADDRNOTAVAIL;
        }
        IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
        inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING,
                                    dev->ifindex, &in_dev->cnf);
        ip_rt_multicast_event(in_dev);
        /* Fill in the VIF structures */
        vif_device_init(v, dev, vifc->vifc_rate_limit,
                        vifc->vifc_threshold,
                        vifc->vifc_flags | (!mrtsock ? VIFF_STATIC : 0),
                        (VIFF_TUNNEL | VIFF_REGISTER));

        err = dev_get_port_parent_id(dev, &ppid, true);
        if (err == 0) {
                memcpy(v->dev_parent_id.id, ppid.id, ppid.id_len);
                v->dev_parent_id.id_len = ppid.id_len;
        } else {
                v->dev_parent_id.id_len = 0;
        }
        v->local = vifc->vifc_lcl_addr.s_addr;
        v->remote = vifc->vifc_rmt_addr.s_addr;

        /* And finish update writing critical data */
        write_lock_bh(&mrt_lock);
        v->dev = dev;
        if (v->flags & VIFF_REGISTER)
                mrt->mroute_reg_vif_num = vifi;
        if (vifi + 1 > mrt->maxvif)
                mrt->maxvif = vifi + 1;
        write_unlock_bh(&mrt_lock);
        call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD, v, vifi, mrt->id);
        return 0;
}
/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
                                         __be32 origin,
                                         __be32 mcastgrp)
{
        struct mfc_cache_cmp_arg arg = {
                .mfc_mcastgrp = mcastgrp,
                .mfc_origin = origin
        };

        return mr_mfc_find(mrt, &arg);
}
/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
                                             __be32 mcastgrp, int vifi)
{
        struct mfc_cache_cmp_arg arg = {
                .mfc_mcastgrp = mcastgrp,
                .mfc_origin = htonl(INADDR_ANY)
        };

        if (mcastgrp == htonl(INADDR_ANY))
                return mr_mfc_find_any_parent(mrt, vifi);
        return mr_mfc_find_any(mrt, vifi, &arg);
}
/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
                                                __be32 origin, __be32 mcastgrp,
                                                int parent)
{
        struct mfc_cache_cmp_arg arg = {
                .mfc_mcastgrp = mcastgrp,
                .mfc_origin = origin,
        };

        return mr_mfc_find_parent(mrt, &arg, parent);
}
/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

        if (c) {
                c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
                c->_c.mfc_un.res.minvif = MAXVIFS;
                c->_c.free = ipmr_cache_free_rcu;
                refcount_set(&c->_c.mfc_un.res.refcount, 1);
        }
        return c;
}
static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

        if (c) {
                skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
                c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
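                /* The daemon gets 10 seconds to resolve the entry before
                 * ipmr_expire_process() reaps it.
                 */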
/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
                               struct mfc_cache *uc, struct mfc_cache *c)
{
        struct sk_buff *skb;
        struct nlmsgerr *e;

        /* Play the pending entries through our router */
        while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = skb_pull(skb,
                                                        sizeof(struct iphdr));

                        if (mr_fill_mroute(mrt, skb, &c->_c,
                                           nlmsg_data(nlh)) > 0) {
                                nlh->nlmsg_len = skb_tail_pointer(skb) -
                                                 (u8 *)nlh;
                        } else {
                                nlh->nlmsg_type = NLMSG_ERROR;
                                nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
                                skb_trim(skb, nlh->nlmsg_len);
                                e = nlmsg_data(nlh);
                                e->error = -EMSGSIZE;
                                memset(&e->msg, 0, sizeof(e->msg));
                        }

                        rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else {
                        ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
                }
        }
}
/* Bounce a cache query up to mrouted and netlink.
 *
 * Called under mrt_lock.
 */
static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert)
{
        const int ihl = ip_hdrlen(pkt);
        struct sock *mroute_sk;
        struct igmphdr *igmp;
        struct igmpmsg *msg;
        struct sk_buff *skb;
        int ret;
        if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE)
                skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
        else
                skb = alloc_skb(128, GFP_ATOMIC);
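        /* For plain cache-miss reports only the IP header and a small
         * igmpmsg/igmphdr are copied, so 128 bytes is plenty.
         */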
        if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE) {
                /* Ugly, but we have no choice with this interface.
                 * Duplicate old header, fix ihl, length etc.
                 * And all this only to mangle msg->im_msgtype and
                 * to set msg->im_mbz to "mbz" :-)
                 */
                skb_push(skb, sizeof(struct iphdr));
                skb_reset_network_header(skb);
                skb_reset_transport_header(skb);
                msg = (struct igmpmsg *)skb_network_header(skb);
                memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
                msg->im_msgtype = assert;
                msg->im_mbz = 0;
                if (assert == IGMPMSG_WRVIFWHOLE)
                        msg->im_vif = vifi;
                else
                        msg->im_vif = mrt->mroute_reg_vif_num;
                ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
                ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
                                             sizeof(struct iphdr));
        } else {
                /* Copy the IP header */
                skb_set_network_header(skb, skb->len);
                skb_put(skb, ihl);
                skb_copy_to_linear_data(skb, pkt->data, ihl);
                /* Flag to the kernel this is a route add */
                ip_hdr(skb)->protocol = 0;
                msg = (struct igmpmsg *)skb_network_header(skb);
                msg->im_vif = vifi;
                skb_dst_set(skb, dst_clone(skb_dst(pkt)));
                /* Add our header */
                igmp = skb_put(skb, sizeof(struct igmphdr));
                igmp->type = assert;
                msg->im_msgtype = assert;
                igmp->code = 0;
                ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
                skb->transport_header = skb->network_header;
        }
        mroute_sk = rcu_dereference(mrt->mroute_sk);
        if (!mroute_sk) {
                kfree_skb(skb);
                return -EINVAL;
        }

        igmpmsg_netlink_event(mrt, skb);

        /* Deliver to mrouted */
        ret = sock_queue_rcv_skb(mroute_sk, skb);
        if (ret < 0) {
                net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
                kfree_skb(skb);
        }
        return ret;
}
/* Queue a packet for resolution. It gets a locked cache entry! */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
                                 struct sk_buff *skb, struct net_device *dev)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct mfc_cache *c;
        bool found = false;
        int err;

        spin_lock_bh(&mfc_unres_lock);
        list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
                if (c->mfc_mcastgrp == iph->daddr &&
                    c->mfc_origin == iph->saddr) {
                        found = true;
                        break;
                }
        }
        if (!found) {
                /* Create a new entry if allowable */
                if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
                    (c = ipmr_cache_alloc_unres()) == NULL) {
                        spin_unlock_bh(&mfc_unres_lock);

                        kfree_skb(skb);
                        return -ENOBUFS;
                }
                /* Fill in the new cache entry */
                c->_c.mfc_parent = -1;
                c->mfc_origin = iph->saddr;
                c->mfc_mcastgrp = iph->daddr;

                /* Reflect first query at mrouted. */
                err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
                if (err < 0) {
                        /* If the report failed throw the cache entry
                         * out - Brad Parker
                         */
                        spin_unlock_bh(&mfc_unres_lock);
                        ipmr_cache_free(c);
                        kfree_skb(skb);
                        return err;
                }

                atomic_inc(&mrt->cache_resolve_queue_len);
                list_add(&c->_c.list, &mrt->mfc_unres_queue);
                mroute_netlink_event(mrt, c, RTM_NEWROUTE);
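                /* Arm the expire timer only on the transition from an
                 * empty queue; otherwise an earlier expiry is already
                 * scheduled.
                 */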
                if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
                        mod_timer(&mrt->ipmr_expire_timer,
                                  c->_c.mfc_un.unres.expires);
        }
        /* See if we can append the packet */
        if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
                kfree_skb(skb);
                err = -ENOBUFS;
        } else {
                if (dev) {
                        skb->dev = dev;
                        skb->skb_iif = dev->ifindex;
                }
                skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
                err = 0;
        }

        spin_unlock_bh(&mfc_unres_lock);
        return err;
}
/* MFC cache manipulation by user space mroute daemon */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
        struct net *net = read_pnet(&mrt->net);
        struct mfc_cache *c;

        /* The entries are added/deleted only under RTNL */
        rcu_read_lock();
        c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
                                   mfc->mfcc_mcastgrp.s_addr, parent);
        rcu_read_unlock();
        if (!c)
                return -ENOENT;
        rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ipmr_rht_params);
        list_del_rcu(&c->_c.list);
        call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c, mrt->id);
        mroute_netlink_event(mrt, c, RTM_DELROUTE);
        mr_cache_put(&c->_c);

        return 0;
}
static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
                        struct mfcctl *mfc, int mrtsock, int parent)
{
        struct mfc_cache *uc, *c;
        struct mr_mfc *_uc;
        bool found;
        int ret;

        if (mfc->mfcc_parent >= MAXVIFS)
                return -ENFILE;

        /* The entries are added/deleted only under RTNL */
        rcu_read_lock();
        c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
                                   mfc->mfcc_mcastgrp.s_addr, parent);
        rcu_read_unlock();
        if (c) {
                write_lock_bh(&mrt_lock);
                c->_c.mfc_parent = mfc->mfcc_parent;
                ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
                if (!mrtsock)
                        c->_c.mfc_flags |= MFC_STATIC;
                write_unlock_bh(&mrt_lock);
                call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c,
                                              mrt->id);
                mroute_netlink_event(mrt, c, RTM_NEWROUTE);
                return 0;
        }
        if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
            !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
                return -EINVAL;

        c = ipmr_cache_alloc();
        if (!c)
                return -ENOMEM;

        c->mfc_origin = mfc->mfcc_origin.s_addr;
        c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
        c->_c.mfc_parent = mfc->mfcc_parent;
        ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
        if (!mrtsock)
                c->_c.mfc_flags |= MFC_STATIC;

        ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
                                  ipmr_rht_params);
        if (ret) {
                pr_err("ipmr: rhtable insert error %d\n", ret);
                ipmr_cache_free(c);
                return ret;
        }
        list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);
        /* Check to see if we resolved a queued list. If so we
         * need to send on the frames and tidy up.
         */
        found = false;
        spin_lock_bh(&mfc_unres_lock);
        list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
                uc = (struct mfc_cache *)_uc;
                if (uc->mfc_origin == c->mfc_origin &&
                    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
                        list_del(&_uc->list);
                        atomic_dec(&mrt->cache_resolve_queue_len);
                        found = true;
                        break;
                }
        }
        if (list_empty(&mrt->mfc_unres_queue))
                del_timer(&mrt->ipmr_expire_timer);
        spin_unlock_bh(&mfc_unres_lock);
        if (found) {
                ipmr_cache_resolve(net, mrt, uc, c);
                ipmr_cache_free(uc);
        }
        call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, c, mrt->id);
        mroute_netlink_event(mrt, c, RTM_NEWROUTE);
        return 0;
}
/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, int flags)
{
        struct net *net = read_pnet(&mrt->net);
        struct mr_mfc *c, *tmp;
        struct mfc_cache *cache;
        LIST_HEAD(list);
        int i;

        /* Shut down all active vif entries */
        if (flags & (MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC)) {
                for (i = 0; i < mrt->maxvif; i++) {
                        if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
                             !(flags & MRT_FLUSH_VIFS_STATIC)) ||
                            (!(mrt->vif_table[i].flags & VIFF_STATIC) &&
                             !(flags & MRT_FLUSH_VIFS)))
                                continue;
                        vif_delete(mrt, i, 0, &list);
                }
                unregister_netdevice_many(&list);
        }
        /* Wipe the cache */
        if (flags & (MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC)) {
                list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
                        if (((c->mfc_flags & MFC_STATIC) &&
                             !(flags & MRT_FLUSH_MFC_STATIC)) ||
                            (!(c->mfc_flags & MFC_STATIC) &&
                             !(flags & MRT_FLUSH_MFC)))
                                continue;
                        rhltable_remove(&mrt->mfc_hash, &c->mnode,
                                        ipmr_rht_params);
                        list_del_rcu(&c->list);
                        cache = (struct mfc_cache *)c;
                        call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL,
                                                      cache, mrt->id);
                        mroute_netlink_event(mrt, cache, RTM_DELROUTE);
                        mr_cache_put(c);
                }
        }
        if (flags & MRT_FLUSH_MFC) {
                if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
                        spin_lock_bh(&mfc_unres_lock);
                        list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
                                list_del(&c->list);
                                cache = (struct mfc_cache *)c;
                                mroute_netlink_event(mrt, cache, RTM_DELROUTE);
                                ipmr_destroy_unres(mrt, cache);
                        }
                        spin_unlock_bh(&mfc_unres_lock);
                }
        }
}
/* called from ip_ra_control(), before an RCU grace period,
 * we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        rtnl_lock();
        ipmr_for_each_table(mrt, net) {
                if (sk == rtnl_dereference(mrt->mroute_sk)) {
                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
                        inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
                                                    NETCONFA_MC_FORWARDING,
                                                    NETCONFA_IFINDEX_ALL,
                                                    net->ipv4.devconf_all);
                        RCU_INIT_POINTER(mrt->mroute_sk, NULL);
                        mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_MFC);
                }
        }
        rtnl_unlock();
}
/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
                         unsigned int optlen)
{
        struct net *net = sock_net(sk);
        int val, ret = 0, parent = 0;
        struct mr_table *mrt;
        struct vifctl vif;
        struct mfcctl mfc;
        bool do_wrvifwhole;
        u32 uval;
        /* There's one exception to the lock - MRT_DONE which needs to unlock */
        rtnl_lock();
        if (sk->sk_type != SOCK_RAW ||
            inet_sk(sk)->inet_num != IPPROTO_IGMP) {
                ret = -EOPNOTSUPP;
                goto out_unlock;
        }

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (optname != MRT_INIT) {
                if (sk != rcu_access_pointer(mrt->mroute_sk) &&
                    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
                        ret = -EACCES;
                        goto out_unlock;
                }
        }
        switch (optname) {
        case MRT_INIT:
                if (optlen != sizeof(int)) {
                        ret = -EINVAL;
                        break;
                }
                if (rtnl_dereference(mrt->mroute_sk)) {
                        ret = -EADDRINUSE;
                        break;
                }

                ret = ip_ra_control(sk, 1, mrtsock_destruct);
                if (ret == 0) {
                        rcu_assign_pointer(mrt->mroute_sk, sk);
                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
                        inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
                                                    NETCONFA_MC_FORWARDING,
                                                    NETCONFA_IFINDEX_ALL,
                                                    net->ipv4.devconf_all);
                }
                break;
        case MRT_DONE:
                if (sk != rcu_access_pointer(mrt->mroute_sk)) {
                        ret = -EACCES;
                } else {
                        /* We need to unlock here because mrtsock_destruct takes
                         * care of rtnl itself and we can't change that due to
                         * the IP_ROUTER_ALERT setsockopt which runs without it.
                         */
                        rtnl_unlock();
                        ret = ip_ra_control(sk, 0, NULL);
                        goto out;
                }
                break;
        case MRT_ADD_VIF:
        case MRT_DEL_VIF:
                if (optlen != sizeof(vif)) {
                        ret = -EINVAL;
                        break;
                }
                if (copy_from_user(&vif, optval, sizeof(vif))) {
                        ret = -EFAULT;
                        break;
                }
                if (vif.vifc_vifi >= MAXVIFS) {
                        ret = -EINVAL;
                        break;
                }
                if (optname == MRT_ADD_VIF)
                        ret = vif_add(net, mrt, &vif,
                                      sk == rtnl_dereference(mrt->mroute_sk));
                else
                        ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
                break;
        /* Manipulate the forwarding caches. These live
         * in a sort of kernel/user symbiosis.
         */
        case MRT_ADD_MFC:
        case MRT_DEL_MFC:
                parent = -1;
                /* fall through */
        case MRT_ADD_MFC_PROXY:
        case MRT_DEL_MFC_PROXY:
                if (optlen != sizeof(mfc)) {
                        ret = -EINVAL;
                        break;
                }
                if (copy_from_user(&mfc, optval, sizeof(mfc))) {
                        ret = -EFAULT;
                        break;
                }
                if (parent == 0)
                        parent = mfc.mfcc_parent;
                if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
                        ret = ipmr_mfc_delete(mrt, &mfc, parent);
                else
                        ret = ipmr_mfc_add(net, mrt, &mfc,
                                           sk == rtnl_dereference(mrt->mroute_sk),
                                           parent);
                break;
        case MRT_FLUSH:
                if (optlen != sizeof(val)) {
                        ret = -EINVAL;
                        break;
                }
                if (get_user(val, (int __user *)optval)) {
                        ret = -EFAULT;
                        break;
                }
                mroute_clean_tables(mrt, val);
                break;
        /* Control PIM assert. */
        case MRT_ASSERT:
                if (optlen != sizeof(val)) {
                        ret = -EINVAL;
                        break;
                }
                if (get_user(val, (int __user *)optval)) {
                        ret = -EFAULT;
                        break;
                }
                mrt->mroute_do_assert = val;
                break;
        case MRT_PIM:
                if (!ipmr_pimsm_enabled()) {
                        ret = -ENOPROTOOPT;
                        break;
                }
                if (optlen != sizeof(val)) {
                        ret = -EINVAL;
                        break;
                }
                if (get_user(val, (int __user *)optval)) {
                        ret = -EFAULT;
                        break;
                }

                do_wrvifwhole = (val == IGMPMSG_WRVIFWHOLE);
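                /* Note: enabling PIM also enables asserts below, and
                 * IGMPMSG_WRVIFWHOLE additionally requests whole-packet
                 * wrong-vif reports.
                 */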
                val = !!val;
                if (val != mrt->mroute_do_pim) {
                        mrt->mroute_do_pim = val;
                        mrt->mroute_do_assert = val;
                        mrt->mroute_do_wrvifwhole = do_wrvifwhole;
                }
                break;
        case MRT_TABLE:
                if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
                        ret = -ENOPROTOOPT;
                        break;
                }
                if (optlen != sizeof(uval)) {
                        ret = -EINVAL;
                        break;
                }
                if (get_user(uval, (u32 __user *)optval)) {
                        ret = -EFAULT;
                        break;
                }
                if (sk == rtnl_dereference(mrt->mroute_sk)) {
                        ret = -EBUSY;
                } else {
                        mrt = ipmr_new_table(net, uval);
                        if (IS_ERR(mrt))
                                ret = PTR_ERR(mrt);
                        else
                                raw_sk(sk)->ipmr_table = uval;
                }
                break;
        /* Spurious command, or MRT_VERSION which you cannot set. */
        default:
                ret = -ENOPROTOOPT;
        }
out_unlock:
        rtnl_unlock();
out:
        return ret;
}

/* Getsockopt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
                         int __user *optlen)
{
        int olr;
        int val;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        if (sk->sk_type != SOCK_RAW ||
            inet_sk(sk)->inet_num != IPPROTO_IGMP)
                return -EOPNOTSUPP;

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);

        switch (optname) {
        case MRT_VERSION:
                val = 0x0305;
                break;
        case MRT_PIM:
                if (!ipmr_pimsm_enabled())
                        return -ENOPROTOOPT;
                val = mrt->mroute_do_pim;
                break;
        case MRT_ASSERT:
                val = mrt->mroute_do_assert;
                break;
        default:
                return -ENOPROTOOPT;
        }
        if (get_user(olr, optlen))
                return -EFAULT;
        olr = min_t(unsigned int, olr, sizeof(int));
        if (put_user(olr, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, olr))
                return -EFAULT;
        return 0;
}
/* The IP multicast ioctl support routines. */
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
        struct sioc_sg_req sr;
        struct sioc_vif_req vr;
        struct vif_device *vif;
        struct mfc_cache *c;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (!mrt)
                return -ENOENT;

        switch (cmd) {
        case SIOCGETVIFCNT:
                if (copy_from_user(&vr, arg, sizeof(vr)))
                        return -EFAULT;
                if (vr.vifi >= mrt->maxvif)
                        return -EINVAL;
                vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
                read_lock(&mrt_lock);
                vif = &mrt->vif_table[vr.vifi];
                if (VIF_EXISTS(mrt, vr.vifi)) {
                        vr.icount = vif->pkt_in;
                        vr.ocount = vif->pkt_out;
                        vr.ibytes = vif->bytes_in;
                        vr.obytes = vif->bytes_out;
                        read_unlock(&mrt_lock);

                        if (copy_to_user(arg, &vr, sizeof(vr)))
                                return -EFAULT;
                        return 0;
                }
                read_unlock(&mrt_lock);
                return -EADDRNOTAVAIL;
        case SIOCGETSGCNT:
                if (copy_from_user(&sr, arg, sizeof(sr)))
                        return -EFAULT;
                rcu_read_lock();
                c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
                if (c) {
                        sr.pktcnt = c->_c.mfc_un.res.pkt;
                        sr.bytecnt = c->_c.mfc_un.res.bytes;
                        sr.wrong_if = c->_c.mfc_un.res.wrong_if;
                        rcu_read_unlock();

                        if (copy_to_user(arg, &sr, sizeof(sr)))
                                return -EFAULT;
                        return 0;
                }
                rcu_read_unlock();
                return -EADDRNOTAVAIL;
        default:
                return -ENOIOCTLCMD;
        }
}
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
        struct in_addr src;
        struct in_addr grp;
        compat_ulong_t pktcnt;
        compat_ulong_t bytecnt;
        compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
        vifi_t vifi;            /* Which iface */
        compat_ulong_t icount;
        compat_ulong_t ocount;
        compat_ulong_t ibytes;
        compat_ulong_t obytes;
};
int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
        struct compat_sioc_sg_req sr;
        struct compat_sioc_vif_req vr;
        struct vif_device *vif;
        struct mfc_cache *c;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (!mrt)
                return -ENOENT;

        switch (cmd) {
        case SIOCGETVIFCNT:
                if (copy_from_user(&vr, arg, sizeof(vr)))
                        return -EFAULT;
                if (vr.vifi >= mrt->maxvif)
                        return -EINVAL;
                vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
                read_lock(&mrt_lock);
                vif = &mrt->vif_table[vr.vifi];
                if (VIF_EXISTS(mrt, vr.vifi)) {
                        vr.icount = vif->pkt_in;
                        vr.ocount = vif->pkt_out;
                        vr.ibytes = vif->bytes_in;
                        vr.obytes = vif->bytes_out;
                        read_unlock(&mrt_lock);

                        if (copy_to_user(arg, &vr, sizeof(vr)))
                                return -EFAULT;
                        return 0;
                }
                read_unlock(&mrt_lock);
                return -EADDRNOTAVAIL;
        case SIOCGETSGCNT:
                if (copy_from_user(&sr, arg, sizeof(sr)))
                        return -EFAULT;
                rcu_read_lock();
                c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
                if (c) {
                        sr.pktcnt = c->_c.mfc_un.res.pkt;
                        sr.bytecnt = c->_c.mfc_un.res.bytes;
                        sr.wrong_if = c->_c.mfc_un.res.wrong_if;
                        rcu_read_unlock();

                        if (copy_to_user(arg, &sr, sizeof(sr)))
                                return -EFAULT;
                        return 0;
                }
                rcu_read_unlock();
                return -EADDRNOTAVAIL;
        default:
                return -ENOIOCTLCMD;
        }
}
#endif
static int ipmr_device_event(struct notifier_block *this, unsigned long event,
                             void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net *net = dev_net(dev);
        struct mr_table *mrt;
        struct vif_device *v;
        int ct;

        if (event != NETDEV_UNREGISTER)
                return NOTIFY_DONE;

        ipmr_for_each_table(mrt, net) {
                v = &mrt->vif_table[0];
                for (ct = 0; ct < mrt->maxvif; ct++, v++) {
                        if (v->dev == dev)
                                vif_delete(mrt, ct, 1, NULL);
                }
        }
        return NOTIFY_DONE;
}
static struct notifier_block ip_mr_notifier = {
        .notifier_call = ipmr_device_event,
};
/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
                     __be32 saddr, __be32 daddr)
{
        struct iphdr *iph;
        const struct iphdr *old_iph = ip_hdr(skb);

        skb_push(skb, sizeof(struct iphdr));
        skb->transport_header = skb->network_header;
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);

        iph->version = 4;
        iph->tos = old_iph->tos;
        iph->ttl = old_iph->ttl;
        iph->frag_off = 0;
        iph->daddr = daddr;
        iph->saddr = saddr;
        iph->protocol = IPPROTO_IPIP;
        iph->ihl = 5;
        iph->tot_len = htons(skb->len);
        ip_select_ident(net, skb, NULL);
        ip_send_check(iph);

        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
}
static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
                                      struct sk_buff *skb)
{
        struct ip_options *opt = &(IPCB(skb)->opt);

        IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
        IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

        if (unlikely(opt->optlen))
                ip_forward_options(skb);

        return dst_output(net, sk, skb);
}
#ifdef CONFIG_NET_SWITCHDEV
static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
                                   int in_vifi, int out_vifi)
{
        struct vif_device *out_vif = &mrt->vif_table[out_vifi];
        struct vif_device *in_vif = &mrt->vif_table[in_vifi];

        if (!skb->offload_l3_fwd_mark)
                return false;
        if (!out_vif->dev_parent_id.id_len || !in_vif->dev_parent_id.id_len)
                return false;
        return netdev_phys_item_id_same(&out_vif->dev_parent_id,
                                        &in_vif->dev_parent_id);
}
#else
static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
                                   int in_vifi, int out_vifi)
{
        return false;
}
#endif
/* Processing handlers for ipmr_forward */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
                            int in_vifi, struct sk_buff *skb, int vifi)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct vif_device *vif = &mrt->vif_table[vifi];
        struct net_device *dev;
        struct rtable *rt;
        struct flowi4 fl4;
        int encap = 0;

        if (!vif->dev)
                goto out_free;

        if (vif->flags & VIFF_REGISTER) {
                vif->pkt_out++;
                vif->bytes_out += skb->len;
                vif->dev->stats.tx_bytes += skb->len;
                vif->dev->stats.tx_packets++;
                ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
                goto out_free;
        }
        if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi))
                goto out_free;
        if (vif->flags & VIFF_TUNNEL) {
                rt = ip_route_output_ports(net, &fl4, NULL,
                                           vif->remote, vif->local,
                                           0, 0,
                                           IPPROTO_IPIP,
                                           RT_TOS(iph->tos), vif->link);
                if (IS_ERR(rt))
                        goto out_free;
                encap = sizeof(struct iphdr);
        } else {
                rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
                                           0, 0,
                                           IPPROTO_IPIP,
                                           RT_TOS(iph->tos), vif->link);
                if (IS_ERR(rt))
                        goto out_free;
        }
        dev = rt->dst.dev;

        if (skb->len + encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
                /* Do not fragment multicasts. Alas, IPv4 does not
                 * allow sending ICMP, so such packets will disappear
                 * into a blackhole.
                 */
                IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
                ip_rt_put(rt);
                goto out_free;
        }

        encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

        if (skb_cow(skb, encap)) {
                ip_rt_put(rt);
                goto out_free;
        }
        vif->pkt_out++;
        vif->bytes_out += skb->len;

        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->dst);
        ip_decrease_ttl(ip_hdr(skb));
        /* FIXME: forward and output firewalls used to be called here.
         * What do we do with netfilter? -- RR
         */
        if (vif->flags & VIFF_TUNNEL) {
                ip_encap(net, skb, vif->local, vif->remote);
                /* FIXME: extra output firewall step used to be here. --RR */
                vif->dev->stats.tx_packets++;
                vif->dev->stats.tx_bytes += skb->len;
        }

        IPCB(skb)->flags |= IPSKB_FORWARDED;
        /* RFC1584 teaches that DVMRP/PIM routers must deliver packets locally
         * not only before forwarding, but after forwarding on all output
         * interfaces. It is clear: if an mrouter runs a multicasting
         * program, it should receive packets regardless of which interface
         * the program is joined on.
         * If we did not do this, the program would have to join on all
         * interfaces. On the other hand, a multihoming host (or router, but
         * not mrouter) cannot join on more than one interface: it would
         * result in receiving multiple packets.
         */
        NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
                net, NULL, skb, skb->dev, dev,
                ipmr_forward_finish);
        return;

out_free:
        kfree_skb(skb);
}
static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
        int ct;

        for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
                if (mrt->vif_table[ct].dev == dev)
                        return ct;
        }
        return -1;
}
1949 /* "local" means that we should preserve one skb (for local delivery) */
1950 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
1951 struct net_device *dev, struct sk_buff *skb,
1952 struct mfc_cache *c, int local)
1954 int true_vifi = ipmr_find_vif(mrt, dev);
1958 vif = c->_c.mfc_parent;
1959 c->_c.mfc_un.res.pkt++;
1960 c->_c.mfc_un.res.bytes += skb->len;
1961 c->_c.mfc_un.res.lastuse = jiffies;
        if (c->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
                struct mfc_cache *cache_proxy;

                /* For an (*,G) entry, we only check that the incoming
                 * interface is part of the static tree.
                 */
                cache_proxy = mr_mfc_find_any_parent(mrt, vif);
                if (cache_proxy &&
                    cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255)
                        goto forward;
        }
        /* Wrong interface: drop packet and (maybe) send PIM assert. */
        if (mrt->vif_table[vif].dev != dev) {
                if (rt_is_output_route(skb_rtable(skb))) {
                        /* It is our own packet, looped back.
                         * Very complicated situation...
                         *
                         * The best workaround until routing daemons will be
                         * fixed is not to redistribute a packet if it was
                         * sent through the wrong interface. It means that
                         * multicast applications WILL NOT work for
                         * (S,G), which have a default multicast route pointing
                         * to the wrong oif. In any case, it is not a good
                         * idea to use multicasting applications on a router.
                         */
                        goto dont_forward;
                }

                c->_c.mfc_un.res.wrong_if++;
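                /* Asserts are rate-limited: at most one report per
                 * MFC_ASSERT_THRESH per entry, and only when asserts
                 * are enabled for this table.
                 */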
                if (true_vifi >= 0 && mrt->mroute_do_assert &&
                    /* pimsm uses asserts, when switching from RPT to SPT,
                     * so that we cannot check that packet arrived on an oif.
                     * It is bad, but otherwise we would need to move pretty
                     * large chunk of pimd to kernel. Ough... --ANK
                     */
                    (mrt->mroute_do_pim ||
                     c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
                    time_after(jiffies,
                               c->_c.mfc_un.res.last_assert +
                               MFC_ASSERT_THRESH)) {
                        c->_c.mfc_un.res.last_assert = jiffies;
                        ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
                        if (mrt->mroute_do_wrvifwhole)
                                ipmr_cache_report(mrt, skb, true_vifi,
                                                  IGMPMSG_WRVIFWHOLE);
                }
                goto dont_forward;
        }
forward:
        mrt->vif_table[vif].pkt_in++;
        mrt->vif_table[vif].bytes_in += skb->len;

        /* Forward the frame */
        if (c->mfc_origin == htonl(INADDR_ANY) &&
            c->mfc_mcastgrp == htonl(INADDR_ANY)) {
                if (true_vifi >= 0 &&
                    true_vifi != c->_c.mfc_parent &&
                    ip_hdr(skb)->ttl >
                        c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
                        /* It's an (*,*) entry and the packet is not coming from
                         * the upstream: forward the packet to the upstream
                         * only.
                         */
                        psend = c->_c.mfc_parent;
                        goto last_forward;
                }
                goto dont_forward;
        }
        for (ct = c->_c.mfc_un.res.maxvif - 1;
             ct >= c->_c.mfc_un.res.minvif; ct--) {
                /* For (*,G) entry, don't forward to the incoming interface */
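                /* All but the final transmission work on clones; the
                 * original skb is consumed at last_forward (or preserved
                 * when a local copy is needed).
                 */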
                if ((c->mfc_origin != htonl(INADDR_ANY) ||
                     ct != true_vifi) &&
                    ip_hdr(skb)->ttl > c->_c.mfc_un.res.ttls[ct]) {
                        if (psend != -1) {
                                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

                                if (skb2)
                                        ipmr_queue_xmit(net, mrt, true_vifi,
                                                        skb2, psend);
                        }
                        psend = ct;
                }
        }
last_forward:
        if (psend != -1) {
                if (local) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

                        if (skb2)
                                ipmr_queue_xmit(net, mrt, true_vifi, skb2,
                                                psend);
                } else {
                        ipmr_queue_xmit(net, mrt, true_vifi, skb, psend);
                        return;
                }
        }

dont_forward:
        if (local)
                return ip_local_deliver(skb);
        kfree_skb(skb);
}
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
        struct rtable *rt = skb_rtable(skb);
        struct iphdr *iph = ip_hdr(skb);
        struct flowi4 fl4 = {
                .daddr = iph->daddr,
                .saddr = iph->saddr,
                .flowi4_tos = RT_TOS(iph->tos),
                .flowi4_oif = (rt_is_output_route(rt) ?
                               skb->dev->ifindex : 0),
                .flowi4_iif = (rt_is_output_route(rt) ?
                               LOOPBACK_IFINDEX :
                               skb->dev->ifindex),
                .flowi4_mark = skb->mark,
        };
        struct mr_table *mrt;
        int err;

        err = ipmr_fib_lookup(net, &fl4, &mrt);
        if (err)
                return ERR_PTR(err);
        return mrt;
}
/* Multicast packets for forwarding arrive here
 * Called with rcu_read_lock();
 */
int ip_mr_input(struct sk_buff *skb)
{
        struct mfc_cache *cache;
        struct net *net = dev_net(skb->dev);
        int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
        struct mr_table *mrt;
        struct net_device *dev;

        /* skb->dev passed in is the loX master dev for vrfs.
         * As there are no vifs associated with loopback devices,
         * get the proper interface that does have a vif associated with it.
         */
        dev = skb->dev;
        if (netif_is_l3_master(skb->dev)) {
                dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
                if (!dev) {
                        kfree_skb(skb);
                        return -ENODEV;
                }
        }
        /* Packet is looped back after forward, it should not be
         * forwarded a second time, but it still can be delivered locally.
         */
        if (IPCB(skb)->flags & IPSKB_FORWARDED)
                goto dont_forward;

        mrt = ipmr_rt_fib_lookup(net, skb);
        if (IS_ERR(mrt)) {
                kfree_skb(skb);
                return PTR_ERR(mrt);
        }
        if (!local) {
                if (IPCB(skb)->opt.router_alert) {
                        if (ip_call_ra_chain(skb))
                                return 0;
                } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
                        /* IGMPv1 (and broken IGMPv2 implementations such as
                         * Cisco IOS <= 11.2(8)) do not put the router alert
                         * option into IGMP packets destined to routable
                         * groups. It is very bad, because it means
                         * that we can forward NO IGMP messages.
                         */
                        struct sock *mroute_sk;

                        mroute_sk = rcu_dereference(mrt->mroute_sk);
                        if (mroute_sk) {
                                raw_rcv(mroute_sk, skb);
                                return 0;
                        }
                }
        }
        /* already under rcu_read_lock() */
        cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
        if (!cache) {
                int vif = ipmr_find_vif(mrt, dev);

                if (vif >= 0)
                        cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
                                                    vif);
        }

        /* No usable cache entry */
        if (!cache) {
                int vif;

                if (local) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

                        ip_local_deliver(skb);
                        if (!skb2)
                                return -ENOBUFS;
                        skb = skb2;
                }

                read_lock(&mrt_lock);
                vif = ipmr_find_vif(mrt, dev);
                if (vif >= 0) {
                        int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
                        read_unlock(&mrt_lock);

                        return err2;
                }
                read_unlock(&mrt_lock);
                kfree_skb(skb);
                return -ENODEV;
        }
        read_lock(&mrt_lock);
        ip_mr_forward(net, mrt, dev, skb, cache, local);
        read_unlock(&mrt_lock);

        if (local)
                return ip_local_deliver(skb);

        return 0;

dont_forward:
        if (local)
                return ip_local_deliver(skb);
        kfree_skb(skb);
        return 0;
}
#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
{
        struct igmphdr *pim;
        struct net *net = dev_net(skb->dev);
        struct mr_table *mrt;

        if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
                goto drop;

        pim = igmp_hdr(skb);

        mrt = ipmr_rt_fib_lookup(net, skb);
        if (IS_ERR(mrt))
                goto drop;
        if (!mrt->mroute_do_pim ||
            pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
                goto drop;

        if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
                kfree_skb(skb);
        }
        return 0;
}
#endif
#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
        struct pimreghdr *pim;
        struct net *net = dev_net(skb->dev);
        struct mr_table *mrt;

        if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
                goto drop;

        pim = (struct pimreghdr *)skb_transport_header(skb);
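        /* Accept a checksum computed over either the PIM header alone
         * or the whole packet; see the PIMv2 register note in the file
         * header.
         */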
        if (pim->type != ((PIM_VERSION << 4) | (PIM_TYPE_REGISTER)) ||
            (pim->flags & PIM_NULL_REGISTER) ||
            (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
             csum_fold(skb_checksum(skb, 0, skb->len, 0))))
                goto drop;

        mrt = ipmr_rt_fib_lookup(net, skb);
        if (IS_ERR(mrt))
                goto drop;
        if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
                kfree_skb(skb);
        }
        return 0;
}
#endif
int ipmr_get_route(struct net *net, struct sk_buff *skb,
                   __be32 saddr, __be32 daddr,
                   struct rtmsg *rtm, u32 portid)
{
        struct mfc_cache *cache;
        struct mr_table *mrt;
        int err;

        mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
        if (!mrt)
                return -ENOENT;

        rcu_read_lock();
        cache = ipmr_cache_find(mrt, saddr, daddr);
        if (!cache && skb->dev) {
                int vif = ipmr_find_vif(mrt, skb->dev);

                if (vif >= 0)
                        cache = ipmr_cache_find_any(mrt, daddr, vif);
        }
        if (!cache) {
                struct sk_buff *skb2;
                struct iphdr *iph;
                struct net_device *dev;
                int vif = -1;

                read_lock(&mrt_lock);
                dev = skb->dev;
                vif = ipmr_find_vif(mrt, dev);
                if (vif < 0) {
                        read_unlock(&mrt_lock);
                        rcu_read_unlock();
                        return -ENODEV;
                }
                skb2 = skb_clone(skb, GFP_ATOMIC);
                if (!skb2) {
                        read_unlock(&mrt_lock);
                        rcu_read_unlock();
                        return -ENOMEM;
                }

                NETLINK_CB(skb2).portid = portid;
                skb_push(skb2, sizeof(struct iphdr));
                skb_reset_network_header(skb2);
                iph = ip_hdr(skb2);
                iph->ihl = sizeof(struct iphdr) >> 2;
                iph->saddr = saddr;
                iph->daddr = daddr;
                iph->version = 0;
                err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
                read_unlock(&mrt_lock);
                rcu_read_unlock();
                return err;
        }
        read_lock(&mrt_lock);
        err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
        read_unlock(&mrt_lock);
        rcu_read_unlock();
        return err;
}
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                            u32 portid, u32 seq, struct mfc_cache *c, int cmd,
                            int flags)
{
        struct nlmsghdr *nlh;
        struct rtmsg *rtm;
        int err;

        nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
        if (!nlh)
                return -EMSGSIZE;

        rtm = nlmsg_data(nlh);
        rtm->rtm_family = RTNL_FAMILY_IPMR;
        rtm->rtm_dst_len = 32;
        rtm->rtm_src_len = 32;
        rtm->rtm_tos = 0;
        rtm->rtm_table = mrt->id;
        if (nla_put_u32(skb, RTA_TABLE, mrt->id))
                goto nla_put_failure;
        rtm->rtm_type = RTN_MULTICAST;
        rtm->rtm_scope = RT_SCOPE_UNIVERSE;
        if (c->_c.mfc_flags & MFC_STATIC)
                rtm->rtm_protocol = RTPROT_STATIC;
        else
                rtm->rtm_protocol = RTPROT_MROUTED;
        rtm->rtm_flags = 0;
        if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
            nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
                goto nla_put_failure;
        err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
        /* do not break the dump if cache is unresolved */
        if (err < 0 && err != -ENOENT)
                goto nla_put_failure;

        nlmsg_end(skb, nlh);
        return 0;

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}
static int _ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                             u32 portid, u32 seq, struct mr_mfc *c, int cmd,
                             int flags)
{
        return ipmr_fill_mroute(mrt, skb, portid, seq, (struct mfc_cache *)c,
                                cmd, flags);
}
static size_t mroute_msgsize(bool unresolved, int maxvif)
{
        size_t len =
                NLMSG_ALIGN(sizeof(struct rtmsg))
                + nla_total_size(4)     /* RTA_TABLE */
                + nla_total_size(4)     /* RTA_SRC */
                + nla_total_size(4)     /* RTA_DST */
                ;

        if (!unresolved)
                len = len
                      + nla_total_size(4)       /* RTA_IIF */
                      + nla_total_size(0)       /* RTA_MULTIPATH */
                      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
                                                /* RTA_MFC_STATS */
                      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
                      ;

        return len;
}
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
                                 int cmd)
{
        struct net *net = read_pnet(&mrt->net);
        struct sk_buff *skb;
        int err = -ENOBUFS;

        skb = nlmsg_new(mroute_msgsize(mfc->_c.mfc_parent >= MAXVIFS,
                                       mrt->maxvif), GFP_ATOMIC);
        if (!skb)
                goto errout;

        err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
        if (err < 0)
                goto errout;

        rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
        return;

errout:
        kfree_skb(skb);
        rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}
static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
{
        size_t len =
                NLMSG_ALIGN(sizeof(struct rtgenmsg))
                + nla_total_size(1)     /* IPMRA_CREPORT_MSGTYPE */
                + nla_total_size(4)     /* IPMRA_CREPORT_VIF_ID */
                + nla_total_size(4)     /* IPMRA_CREPORT_SRC_ADDR */
                + nla_total_size(4)     /* IPMRA_CREPORT_DST_ADDR */
                                        /* IPMRA_CREPORT_PKT */
                + nla_total_size(payloadlen)
                ;

        return len;
}
static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
{
        struct net *net = read_pnet(&mrt->net);
        struct nlmsghdr *nlh;
        struct rtgenmsg *rtgenm;
        struct igmpmsg *msg;
        struct sk_buff *skb;
        struct nlattr *nla;
        int payloadlen;

        payloadlen = pkt->len - sizeof(struct igmpmsg);
        msg = (struct igmpmsg *)skb_network_header(pkt);

        skb = nlmsg_new(igmpmsg_netlink_msgsize(payloadlen), GFP_ATOMIC);
        if (!skb)
                goto errout;
        nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
                        sizeof(struct rtgenmsg), 0);
        if (!nlh)
                goto errout;
        rtgenm = nlmsg_data(nlh);
        rtgenm->rtgen_family = RTNL_FAMILY_IPMR;
        if (nla_put_u8(skb, IPMRA_CREPORT_MSGTYPE, msg->im_msgtype) ||
            nla_put_u32(skb, IPMRA_CREPORT_VIF_ID, msg->im_vif) ||
            nla_put_in_addr(skb, IPMRA_CREPORT_SRC_ADDR,
                            msg->im_src.s_addr) ||
            nla_put_in_addr(skb, IPMRA_CREPORT_DST_ADDR,
                            msg->im_dst.s_addr))
                goto nla_put_failure;
        nla = nla_reserve(skb, IPMRA_CREPORT_PKT, payloadlen);
        if (!nla || skb_copy_bits(pkt, sizeof(struct igmpmsg),
                                  nla_data(nla), payloadlen))
                goto nla_put_failure;

        nlmsg_end(skb, nlh);

        rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE_R, NULL, GFP_ATOMIC);
        return;

nla_put_failure:
        nlmsg_cancel(skb, nlh);
errout:
        kfree_skb(skb);
        rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE_R, -ENOBUFS);
}
static int ipmr_rtm_valid_getroute_req(struct sk_buff *skb,
                                       const struct nlmsghdr *nlh,
                                       struct nlattr **tb,
                                       struct netlink_ext_ack *extack)
{
        struct rtmsg *rtm;
        int i, err;

        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
                NL_SET_ERR_MSG(extack, "ipv4: Invalid header for multicast route get request");
                return -EINVAL;
        }

        if (!netlink_strict_get_check(skb))
                return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
                                              rtm_ipv4_policy, extack);
        rtm = nlmsg_data(nlh);
        if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
            (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
            rtm->rtm_tos || rtm->rtm_table || rtm->rtm_protocol ||
            rtm->rtm_scope || rtm->rtm_type || rtm->rtm_flags) {
                NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for multicast route get request");
                return -EINVAL;
        }

        err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
                                            rtm_ipv4_policy, extack);
        if (err)
                return err;

        if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
            (tb[RTA_DST] && !rtm->rtm_dst_len)) {
                NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
                return -EINVAL;
        }
        for (i = 0; i <= RTA_MAX; i++) {
                if (!tb[i])
                        continue;
                switch (i) {
                case RTA_SRC:
                case RTA_DST:
                case RTA_TABLE:
                        break;
                default:
                        NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in multicast route get request");
                        return -EINVAL;
                }
        }
        return 0;
}
static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                             struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(in_skb->sk);
        struct nlattr *tb[RTA_MAX + 1];
        struct sk_buff *skb = NULL;
        struct mfc_cache *cache;
        struct mr_table *mrt;
        __be32 src, grp;
        u32 tableid;
        int err;

        err = ipmr_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
        if (err < 0)
                goto errout;

        src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
        grp = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
        tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;

        mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);
        if (!mrt) {
                err = -ENOENT;
                goto errout_free;
        }
        /* entries are added/deleted only under RTNL */
        rcu_read_lock();
        cache = ipmr_cache_find(mrt, src, grp);
        rcu_read_unlock();
        if (!cache) {
                err = -ENOENT;
                goto errout_free;
        }

        skb = nlmsg_new(mroute_msgsize(false, mrt->maxvif), GFP_KERNEL);
        if (!skb) {
                err = -ENOBUFS;
                goto errout_free;
        }

        err = ipmr_fill_mroute(mrt, skb, NETLINK_CB(in_skb).portid,
                               nlh->nlmsg_seq, cache,
                               RTM_NEWROUTE, 0);
        if (err < 0)
                goto errout_free;

        err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout:
        return err;

errout_free:
        kfree_skb(skb);
        goto errout;
}
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct fib_dump_filter filter = {};
        int err;

        if (cb->strict_check) {
                err = ip_valid_fib_dump_req(sock_net(skb->sk), cb->nlh,
                                            &filter, cb);
                if (err < 0)
                        return err;
        }

        if (filter.table_id) {
                struct mr_table *mrt;

                mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id);
                if (!mrt) {
                        if (filter.dump_all_families)
                                return skb->len;

                        NL_SET_ERR_MSG(cb->extack, "ipv4: MR table does not exist");
                        return -ENOENT;
                }
                err = mr_table_dump(mrt, skb, cb, _ipmr_fill_mroute,
                                    &mfc_unres_lock, &filter);
                return skb->len ? : err;
        }

        return mr_rtm_dumproute(skb, cb, ipmr_mr_table_iter,
                                _ipmr_fill_mroute, &mfc_unres_lock, &filter);
}
static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
        [RTA_SRC]       = { .type = NLA_U32 },
        [RTA_DST]       = { .type = NLA_U32 },
        [RTA_IIF]       = { .type = NLA_U32 },
        [RTA_TABLE]     = { .type = NLA_U32 },
        [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
};
static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
{
        switch (rtm_protocol) {
        case RTPROT_STATIC:
        case RTPROT_MROUTED:
                return true;
        }
        return false;
}
static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
{
        struct rtnexthop *rtnh = nla_data(nla);
        int remaining = nla_len(nla), vifi = 0;

        while (rtnh_ok(rtnh, remaining)) {
                mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
                if (++vifi == MAXVIFS)
                        break;
                rtnh = rtnh_next(rtnh, &remaining);
        }

        return remaining > 0 ? -EINVAL : vifi;
}
2659 /* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
2660 static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
2661 struct mfcctl *mfcc, int *mrtsock,
2662 struct mr_table **mrtret,
2663 struct netlink_ext_ack *extack)
2665 struct net_device *dev = NULL;
2666 u32 tblid = RT_TABLE_DEFAULT;
2667 struct mr_table *mrt;
2668 struct nlattr *attr;
2672 ret = nlmsg_validate_deprecated(nlh, sizeof(*rtm), RTA_MAX,
2673 rtm_ipmr_policy, extack);
2676 rtm = nlmsg_data(nlh);

	ret = -EINVAL;
	if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
	    rtm->rtm_type != RTN_MULTICAST ||
	    rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
	    !ipmr_rtm_validate_proto(rtm->rtm_protocol))
		goto out;

	memset(mfcc, 0, sizeof(*mfcc));
	mfcc->mfcc_parent = -1;
	ret = 0;
	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
		switch (nla_type(attr)) {
		case RTA_SRC:
			mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
			break;
		case RTA_DST:
			mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
			break;
		case RTA_IIF:
			dev = __dev_get_by_index(net, nla_get_u32(attr));
			if (!dev) {
				ret = -ENODEV;
				goto out;
			}
			break;
		case RTA_MULTIPATH:
			if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
				ret = -EINVAL;
				goto out;
			}
			break;
		case RTA_PREFSRC:
			ret = 1;
			break;
		case RTA_TABLE:
			tblid = nla_get_u32(attr);
			break;
		}
	}
	mrt = ipmr_get_table(net, tblid);
	if (!mrt) {
		ret = -ENOENT;
		goto out;
	}

	*mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
	if (dev)
		mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);

out:
	*mrtret = mrt;
	return ret;
}
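
/* Illustrative only: a well-formed RTM_NEWROUTE request for this path
 * carries rtm_family = RTNL_FAMILY_IPMR, rtm_dst_len = 32,
 * rtm_type = RTN_MULTICAST, rtm_scope = RT_SCOPE_UNIVERSE and a
 * protocol accepted by ipmr_rtm_validate_proto(), plus RTA_SRC/RTA_DST
 * addresses, an RTA_IIF ifindex and an RTA_MULTIPATH TTL vector.
 */
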
/* takes care of both newroute and delroute */
static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	int ret, mrtsock, parent;
	struct mr_table *tbl;
	struct mfcctl mfcc;

	mrtsock = 0;
	tbl = NULL;
	ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl, extack);
	if (ret < 0)
		return ret;

	parent = ret ? mfcc.mfcc_parent : -1;
	if (nlh->nlmsg_type == RTM_NEWROUTE)
		return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
	else
		return ipmr_mfc_delete(tbl, &mfcc, parent);
}
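
/* Dump helpers for RTM_GETLINK diagnostics: each table is reported as a
 * nest of IPMRA_TABLE_* attributes, followed by an IPMRA_TABLE_VIFS
 * nest holding one IPMRA_VIF entry per configured interface.
 */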
static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
{
	u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len);

	if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
	    nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) ||
	    nla_put_s32(skb, IPMRA_TABLE_MROUTE_REG_VIF_NUM,
			mrt->mroute_reg_vif_num) ||
	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT,
		       mrt->mroute_do_assert) ||
	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim) ||
	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE,
		       mrt->mroute_do_wrvifwhole))
		return false;

	return true;
}

static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
{
	struct nlattr *vif_nest;
	struct vif_device *vif;

	/* if the VIF doesn't exist just continue */
	if (!VIF_EXISTS(mrt, vifid))
		return true;

	vif = &mrt->vif_table[vifid];
	vif_nest = nla_nest_start_noflag(skb, IPMRA_VIF);
	if (!vif_nest)
		return false;
	if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif->dev->ifindex) ||
	    nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) ||
	    nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, vif->bytes_in,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_OUT, vif->bytes_out,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_IN, vif->pkt_in,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_OUT, vif->pkt_out,
			      IPMRA_VIFA_PAD) ||
	    nla_put_be32(skb, IPMRA_VIFA_LOCAL_ADDR, vif->local) ||
	    nla_put_be32(skb, IPMRA_VIFA_REMOTE_ADDR, vif->remote)) {
		nla_nest_cancel(skb, vif_nest);
		return false;
	}
	nla_nest_end(skb, vif_nest);

	return true;
}

static int ipmr_valid_dumplink(const struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct ifinfomsg *ifm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for ipmr link dump");
		return -EINVAL;
	}

	if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in ipmr link dump");
		return -EINVAL;
	}

	ifm = nlmsg_data(nlh);
	if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
	    ifm->ifi_change || ifm->ifi_index) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for ipmr link dump request");
		return -EINVAL;
	}

	return 0;
}
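
/* Walk all multicast routing tables and emit one RTM_NEWLINK message
 * per table, resuming from the table/VIF indices saved in cb->args[]
 * when a previous pass filled the skb.
 */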
static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	unsigned int t = 0, s_t;
	unsigned int e = 0, s_e;
	struct mr_table *mrt;

	if (cb->strict_check) {
		int err = ipmr_valid_dumplink(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_t = cb->args[0];
	s_e = cb->args[1];

	ipmr_for_each_table(mrt, net) {
		struct nlattr *vifs, *af;
		struct ifinfomsg *hdr;
		u32 i;

		if (t < s_t)
			goto skip_table;
		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, RTM_NEWLINK,
				sizeof(*hdr), NLM_F_MULTI);
		if (!nlh)
			break;

		hdr = nlmsg_data(nlh);
		memset(hdr, 0, sizeof(*hdr));
		hdr->ifi_family = RTNL_FAMILY_IPMR;

		af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
		if (!af) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}

		if (!ipmr_fill_table(mrt, skb)) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}

		vifs = nla_nest_start_noflag(skb, IPMRA_TABLE_VIFS);
		if (!vifs) {
			nla_nest_end(skb, af);
			nlmsg_end(skb, nlh);
			goto out;
		}
		for (i = 0; i < mrt->maxvif; i++) {
			if (e < s_e)
				goto skip_entry;
			if (!ipmr_fill_vif(mrt, i, skb)) {
				nla_nest_end(skb, vifs);
				nla_nest_end(skb, af);
				nlmsg_end(skb, nlh);
				goto out;
			}
skip_entry:
			e++;
		}
		s_e = 0;
		e = 0;
		nla_nest_end(skb, vifs);
		nla_nest_end(skb, af);
		nlmsg_end(skb, nlh);
skip_table:
		t++;
	}

out:
	cb->args[1] = e;
	cb->args[0] = t;

	return skb->len;
}

#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing:
 *	/proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
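
/* Illustrative /proc/net/ip_mr_vif output (made-up numbers), matching
 * the format strings in ipmr_vif_seq_show() below:
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *	 0 eth0           1500      10      1500      10 00000 0A000001 00000000
 */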
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return mr_vif_seq_start(seq, pos);
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct mr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ?
				   vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}

static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = mr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
}
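
/* One line per cache entry; unresolved entries (still waiting on the
 * mrouted/pimd upcall) report zeroed counters and no oif list.
 */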
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct mr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->_c.mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->_c.mfc_un.res.pkt,
				   mfc->_c.mfc_un.res.bytes,
				   mfc->_c.mfc_un.res.wrong_if);
			for (n = mfc->_c.mfc_un.res.minvif;
			     n < mfc->_c.mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->_c.mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->_c.mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = mr_mfc_seq_next,
	.stop  = mr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};
#endif
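
/* IPPROTO_PIM packets are handed to pim_rcv() so that PIMv2 Register
 * messages can be decapsulated onto the register VIF.
 */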
#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler	= pim_rcv,
	.netns_ok	= 1,
};
#endif

static unsigned int ipmr_seq_read(struct net *net)
{
	ASSERT_RTNL();

	return net->ipv4.ipmr_seq + ipmr_rules_seq_read(net);
}

static int ipmr_dump(struct net *net, struct notifier_block *nb)
{
	return mr_dump(net, nb, RTNL_FAMILY_IPMR, ipmr_rules_dump,
		       ipmr_mr_table_iter, &mrt_lock);
}

static const struct fib_notifier_ops ipmr_notifier_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.fib_seq_read	= ipmr_seq_read,
	.fib_dump	= ipmr_dump,
	.owner		= THIS_MODULE,
};

static int __net_init ipmr_notifier_init(struct net *net)
{
	struct fib_notifier_ops *ops;

	net->ipv4.ipmr_seq = 0;

	ops = fib_notifier_ops_register(&ipmr_notifier_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	net->ipv4.ipmr_notifier_ops = ops;

	return 0;
}

static void __net_exit ipmr_notifier_exit(struct net *net)
{
	fib_notifier_ops_unregister(net->ipv4.ipmr_notifier_ops);
	net->ipv4.ipmr_notifier_ops = NULL;
}

/* Setup for IP multicast routing */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_notifier_init(net);
	if (err)
		goto ipmr_notifier_fail;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto ipmr_rules_fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create_net("ip_mr_vif", 0, net->proc_net, &ipmr_vif_seq_ops,
			sizeof(struct mr_vif_iter)))
		goto proc_vif_fail;
	if (!proc_create_net("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
			sizeof(struct mr_mfc_iter)))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip_mr_vif", net->proc_net);
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
ipmr_rules_fail:
	ipmr_notifier_exit(net);
ipmr_notifier_fail:
	return err;
}

static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip_mr_cache", net->proc_net);
	remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
	ipmr_notifier_exit(net);
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};
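
/* Entry point, called from inet_init() when CONFIG_IP_MROUTE is set:
 * create the cache allocator and pernet state, then hook up the device
 * notifier, the PIM protocol handler and the rtnetlink operations.
 */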
int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      ipmr_rtm_getroute, ipmr_rtm_dumproute, 0);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
		      ipmr_rtm_route, NULL, 0);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
		      ipmr_rtm_route, NULL, 0);

	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETLINK,
		      NULL, ipmr_rtm_dumplink, 0);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}