 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code.
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source.
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support.
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header.
 *					Relax this requirement to work with older peers.
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM	1

	struct list_head	list;

	struct sock __rcu	*mroute_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc_unres_queue;
	struct list_head	mfc_cache_array[MFC_LINES];
	struct vif_device	vif_table[MAXVIFS];

	atomic_t		cache_resolve_queue_len;

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
	int			mroute_reg_vif_num;

	struct fib_rule		common;
/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and is protected by the
 * weak lock mrt_lock. The queue of unresolved entries is protected by
 * the strong spinlock mfc_unres_lock.
 *
 * This way the data path is entirely free of exclusive locks.
 */
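/* A minimal sketch (illustrative only, not part of the original file) of
 * how the three mechanisms above are combined: RCU on the read-mostly
 * data path, mrt_lock for vif/MFC updates, and mfc_unres_lock for the
 * unresolved queue. The helper name lookup_example() is hypothetical.
 *
 *	static struct mfc_cache *lookup_example(struct mr_table *mrt,
 *						__be32 origin, __be32 mcastgrp)
 *	{
 *		struct mfc_cache *c;
 *
 *		rcu_read_lock();	/+ data path: no exclusive locks +/
 *		c = ipmr_cache_find(mrt, origin, mcastgrp);
 *		rcu_read_unlock();
 *		return c;		/+ only valid under RCU in real code +/
 *	}
 *
 * Writers, by contrast, take write_lock_bh(&mrt_lock) (see vif_add() and
 * ipmr_mfc_add() below), and the unresolved queue is always walked under
 * spin_lock_bh(&mfc_unres_lock).
 */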
static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static int ip_mr_forward(struct net *net, struct mr_table *mrt,
			 struct sk_buff *skb, struct mfc_cache *cache,
			 int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void ipmr_expire_process(unsigned long arg);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)
static struct mr_table *ipmr_get_table(struct net *net, u32 id)
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
	struct ipmr_result res;
	struct fib_lookup_arg arg = { .result = &res, };

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_UNREACHABLE:
	case FR_ACT_PROHIBIT:
	case FR_ACT_BLACKHOLE:

	mrt = ipmr_get_table(rule->fr_net, rule->table);

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)

static const struct fib_rules_ops __net_initdata ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.default_pref	= fib_default_rule_pref,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};
static int __net_init ipmr_rules_init(struct net *net)
	struct fib_rules_ops *ops;
	struct mr_table *mrt;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);

	net->ipv4.mr_rules_ops = ops;

	fib_rules_unregister(ops);

static void __net_exit ipmr_rules_exit(struct net *net)
	struct mr_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);

	fib_rules_unregister(net->ipv4.mr_rules_ops);
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
	return net->ipv4.mrt;

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
	*mrt = net->ipv4.mrt;

static int __net_init ipmr_rules_init(struct net *net)
	net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	return net->ipv4.mrt ? 0 : -ENOMEM;

static void __net_exit ipmr_rules_exit(struct net *net)
	kfree(net->ipv4.mrt);
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, id);

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);

	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IP_PIMSM
	mrt->mroute_reg_vif_num = -1;

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
	struct net *net = dev_net(dev);

	dev = __dev_get_by_name(net, "tunl0");
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ip_tunnel_parm p;
		struct in_device *in_dev;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);

		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);

			ipv4_devconf_setall(in_dev);
			IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

	/* allow the register to be completed before unregistering. */

	unregister_netdevice(dev);
#ifdef CONFIG_IP_PIMSM

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif,
		.flowi4_mark	= skb->mark,
	};

	err = ipmr_fib_lookup(net, &fl4, &mrt);

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
	struct net_device *dev;
	struct in_device *in_dev;

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, reg_vif_setup);

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {

	in_dev = __in_dev_get_rcu(dev);

	ipv4_devconf_setall(in_dev);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

	/* allow the register to be completed before unregistering. */

	unregister_netdevice(dev);
 *	@notify: Set to 1 if the caller is a notifier_call

static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	write_lock_bh(&mrt_lock);
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;

#ifdef CONFIG_IP_PIMSM
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;

	if (vifi + 1 == mrt->maxvif) {
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		ip_rt_multicast_event(in_dev);

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);
static void ipmr_cache_free_rcu(struct rcu_head *head)
	struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

	kmem_cache_free(mrt_cachep, c);

static inline void ipmr_cache_free(struct mfc_cache *c)
	call_rcu(&c->rcu, ipmr_cache_free_rcu);

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
	struct net *net = read_pnet(&mrt->net);

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);

			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
/* Timer process for the unresolved queue. */

static void ipmr_expire_process(unsigned long arg)
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + HZ/10);

	if (list_empty(&mrt->mfc_unres_queue))

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;

			if (interval < expires)

		ipmr_destroy_unres(mrt, c);

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

	spin_unlock(&mfc_unres_lock);
/* Fill the oifs list. It is called under write-locked mrt_lock. */

static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
				   unsigned char *ttls)
	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
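/* Worked example (illustrative, not from the original file): with
 * ttls[] = {0, 3, 0, 5, 0, ...}, only vif 1 (threshold 3) and vif 3
 * (threshold 5) become forwarding targets, so minvif ends up as 1 and
 * maxvif as 4 (one past the last valid vif). Values of 0 or 255 mean
 * "do not forward on this vif"; res.ttls[] keeps 255 there, so the
 * "ttl > threshold" test in ip_mr_forward() can never pass for them.
 */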
static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;

	if (VIF_EXISTS(mrt, vifi))

	switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)

		dev = ipmr_reg_vif(net, mrt);

		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);

		dev = ipmr_new_tunnel(net, vifc);

		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);

	case VIFF_USE_IFINDEX:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && __in_dev_get_rtnl(dev) == NULL) {
				return -EADDRNOTAVAIL;
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;

		err = dev_set_allmulti(dev, 1);

	in_dev = __in_dev_get_rtnl(dev);
		return -EADDRNOTAVAIL;

	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */

	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;

	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);

#ifdef CONFIG_IP_PIMSM
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;

	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
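/* A hedged userspace sketch of the MRT_ADD_VIF call that lands here
 * (not part of this file; the socket s and the ifindex value are
 * assumed to come from the caller, see ip_mroute_setsockopt() below):
 *
 *	struct vifctl vc;
 *
 *	memset(&vc, 0, sizeof(vc));
 *	vc.vifc_vifi = 0;			/+ slot in vif_table[] +/
 *	vc.vifc_flags = VIFF_USE_IFINDEX;	/+ pick device by ifindex +/
 *	vc.vifc_threshold = 1;		/+ TTL a packet must exceed +/
 *	vc.vifc_rate_limit = 0;
 *	vc.vifc_lcl_ifindex = ifindex;
 *	setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 */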
/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin, __be32 mcastgrp)
	int line = MFC_HASH(mcastgrp, origin);

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)

/*
 *	Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	c->mfc_un.res.minvif = MAXVIFS;

static struct mfc_cache *ipmr_cache_alloc_unres(void)
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10*HZ;

/*
 *	A cache entry has gone from the queued to the resolved state.
 */

static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
	/* Play the pending entries through our router */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);

				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);

			ip_mr_forward(net, mrt, skb, c, 0);

/*
 *	Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
	const int ihl = ip_hdrlen(pkt);
	struct igmphdr *igmp;
	struct sock *mroute_sk;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));

		skb = alloc_skb(128, GFP_ATOMIC);

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));

		/* Copy the IP header */

		skb->network_header = skb->tail;
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		ip_hdr(skb)->protocol = 0;	/* Flag to the kernel this is a route add */
		msg = (struct igmpmsg *)skb_network_header(skb);

		skb_dst_set(skb, dst_clone(skb_dst(pkt)));

		igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
		igmp->type = msg->im_msgtype = assert;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;

	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (mroute_sk == NULL) {

	/* Deliver to mrouted */

	ret = sock_queue_rcv_skb(mroute_sk, skb);
		printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
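/* What mrouted sees for the report queued above - a hedged sketch of the
 * userspace side (the daemon loop and buffer size are assumptions). The
 * message arrives on the raw IGMP socket that issued MRT_INIT; because
 * struct igmpmsg overlays the IP header, the im_mbz byte sits where the
 * IP protocol field (zeroed above) would be, which is how the daemon
 * tells upcalls apart from real IGMP traffic:
 *
 *	char buf[1500];
 *	struct igmpmsg *im;
 *
 *	if (recv(mrouter_fd, buf, sizeof(buf), 0) > 0) {
 *		im = (struct igmpmsg *)buf;
 *		if (im->im_mbz == 0) {		/+ upcall, not IGMP +/
 *			switch (im->im_msgtype) {
 *			case IGMPMSG_NOCACHE:	/+ install an MFC entry +/
 *			case IGMPMSG_WRONGVIF:	/+ PIM assert handling +/
 *			case IGMPMSG_WHOLEPKT:	/+ PIM register vif +/
 *				break;
 *			}
 *		}
 *	}
 */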
/*
 *	Queue a packet for resolution. It gets a locked cache entry!
 */

static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {

	/* Create a new entry if allowable */

	if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
	    (c = ipmr_cache_alloc_unres()) == NULL) {
		spin_unlock_bh(&mfc_unres_lock);

	/* Fill in the new cache entry */

	c->mfc_origin = iph->saddr;
	c->mfc_mcastgrp = iph->daddr;

	/* Reflect first query at mrouted. */

	err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
	if (err < 0) {
		/* If the report failed, throw the cache entry out. */
		spin_unlock_bh(&mfc_unres_lock);

	atomic_inc(&mrt->cache_resolve_queue_len);
	list_add(&c->list, &mrt->mfc_unres_queue);

	if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
		mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);

	/* See if we can append the packet */

	if (c->mfc_un.unres.unresolved.qlen > 3) {

		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);

	spin_unlock_bh(&mfc_unres_lock);
/*
 *	MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
	struct mfc_cache *c, *next;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			list_del_rcu(&c->list);

static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock)
	struct mfc_cache *uc, *c;

	if (mfc->mfcc_parent >= MAXVIFS)

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {

		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);

	if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))

	c = ipmr_cache_alloc();

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		c->mfc_flags |= MFC_STATIC;

	list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);

	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
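/* A hedged userspace sketch of the matching MRT_ADD_MFC call (not part
 * of this file; the source, group and vif numbers are assumptions):
 *
 *	struct mfcctl mc;
 *
 *	memset(&mc, 0, sizeof(mc));
 *	mc.mfcc_origin.s_addr = inet_addr("10.0.0.1");	  /+ S +/
 *	mc.mfcc_mcastgrp.s_addr = inet_addr("239.1.1.1"); /+ G +/
 *	mc.mfcc_parent = 0;		/+ expected input vif +/
 *	mc.mfcc_ttls[1] = 1;		/+ forward on vif 1 ... +/
 *	mc.mfcc_ttls[2] = 16;		/+ ... and vif 2 (TTL scoping) +/
 *	setsockopt(s, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 *
 * Adding an entry for an (S,G) that is sitting in mfc_unres_queue
 * releases the packets queued by ipmr_cache_unresolved() through
 * ipmr_cache_resolve() above.
 */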
/*
 *	Close the multicast socket, and clear the vif tables etc.
 */

static void mroute_clean_tables(struct mr_table *mrt)
	struct mfc_cache *c, *next;

	/* Shut down all active vif entries */

	for (i = 0; i < mrt->maxvif; i++) {
		if (!(mrt->vif_table[i].flags & VIFF_STATIC))
			vif_delete(mrt, i, 0, &list);

	unregister_netdevice_many(&list);

	/* Wipe the cache */

	for (i = 0; i < MFC_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
			if (c->mfc_flags & MFC_STATIC)
				continue;
			list_del_rcu(&c->list);

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
			ipmr_destroy_unres(mrt, c);
		spin_unlock_bh(&mfc_unres_lock);

/* called from ip_ra_control(), before an RCU grace period,
 * we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			rcu_assign_pointer(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt);

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);

	if (optname != MRT_INIT) {
		if (sk != rcu_dereference_raw(mrt->mroute_sk) &&
		    !capable(CAP_NET_ADMIN))

		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		if (optlen != sizeof(int))
			return -ENOPROTOOPT;

		if (rtnl_dereference(mrt->mroute_sk)) {

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;

		if (sk != rcu_dereference_raw(mrt->mroute_sk))
		return ip_ra_control(sk, 0, NULL);

		if (optlen != sizeof(vif))
		if (copy_from_user(&vif, optval, sizeof(vif)))
		if (vif.vifc_vifi >= MAXVIFS)

		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);

		/*
		 *	Manipulate the forwarding caches. These live
		 *	in a sort of kernel/user symbiosis.
		 */
		if (optlen != sizeof(mfc))
		if (copy_from_user(&mfc, optval, sizeof(mfc)))

		if (optname == MRT_DEL_MFC)
			ret = ipmr_mfc_delete(mrt, &mfc);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk));

		/*
		 *	Control PIM assert.
		 */
		if (get_user(v, (int __user *)optval))
		mrt->mroute_do_assert = (v) ? 1 : 0;

#ifdef CONFIG_IP_PIMSM
		if (get_user(v, (int __user *)optval))

		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
		if (optlen != sizeof(u32))
		if (get_user(v, (u32 __user *)optval))

		if (sk == rtnl_dereference(mrt->mroute_sk)) {

			if (!ipmr_new_table(net, v))

			raw_sk(sk)->ipmr_table = v;

	/*
	 *	Spurious command, or MRT_VERSION, which you cannot set.
	 */
	return -ENOPROTOOPT;
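/* A hedged sketch of the daemon's socket lifecycle around the options
 * handled above (userspace, not part of this file; error handling
 * omitted). Only a raw IGMP socket may become the mroute socket:
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int on = 1;
 *
 *	setsockopt(s, IPPROTO_IP, MRT_INIT, &on, sizeof(on));
 *	/+ ... MRT_ADD_VIF / MRT_ADD_MFC calls, recv() upcalls ... +/
 *	setsockopt(s, IPPROTO_IP, MRT_DONE, NULL, 0);
 *	close(s);
 *
 * Closing the socket without MRT_DONE cleans up as well, via
 * mrtsock_destruct() above.
 */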
/*
 *	Getsockopt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);

	if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	    optname != MRT_PIM &&
	    optname != MRT_ASSERT)
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))

	olr = min_t(unsigned int, olr, sizeof(int));

	if (put_user(olr, optlen))

	if (optname == MRT_VERSION)
#ifdef CONFIG_IP_PIMSM
	else if (optname == MRT_PIM)
		val = mrt->mroute_do_pim;

		val = mrt->mroute_do_assert;
	if (copy_to_user(optval, &val, olr))
/*
 *	The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);

		if (copy_from_user(&vr, arg, sizeof(vr)))
		if (vr.vifi >= mrt->maxvif)
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))

		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;

		if (copy_from_user(&sr, arg, sizeof(sr)))

		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;

			if (copy_to_user(arg, &sr, sizeof(sr)))

		return -EADDRNOTAVAIL;

		return -ENOIOCTLCMD;
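/* A hedged userspace sketch of the SIOCGETVIFCNT counter query handled
 * above (not part of this file; s is assumed to be the mroute socket):
 *
 *	struct sioc_vif_req vreq;
 *
 *	memset(&vreq, 0, sizeof(vreq));
 *	vreq.vifi = 0;				/+ vif to query +/
 *	if (ioctl(s, SIOCGETVIFCNT, &vreq) == 0)
 *		printf("vif0: in %lu pkts / out %lu pkts\n",
 *		       vreq.icount, vreq.ocount);
 *
 * SIOCGETSGCNT works the same way with struct sioc_sg_req, keyed by
 * (src, grp) instead of a vif index.
 */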
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);

		if (copy_from_user(&vr, arg, sizeof(vr)))
		if (vr.vifi >= mrt->maxvif)
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))

		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;

		if (copy_from_user(&sr, arg, sizeof(sr)))

		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;

			if (copy_to_user(arg, &sr, sizeof(sr)))

		return -EADDRNOTAVAIL;

		return -ENOIOCTLCMD;
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;

	if (event != NETDEV_UNREGISTER)

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
				vif_delete(mrt, ct, 1, &list);

	unregister_netdevice_many(&list);

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};

/*
 *	Encapsulate a packet by attaching a valid IPIP header to it.
 *	This avoids tunnel drivers and other mess and gives us the speed so
 *	important for multicast video.
 */

static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);

	iph->tos = old_iph->tos;
	iph->ttl = old_iph->ttl;
	iph->protocol = IPPROTO_IPIP;
	iph->tot_len = htons(skb->len);
	ip_select_ident(iph, skb_dst(skb), NULL);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
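/* Resulting layout after ip_encap() - the original datagram is carried
 * unchanged behind a fresh outer header (sketch, fields abridged):
 *
 *	+---------------------+----------------------+---------+
 *	| outer iphdr         | inner (old) iphdr    | payload |
 *	| saddr, daddr,       | untouched apart from |         |
 *	| proto=IPPROTO_IPIP  | the earlier TTL drop |         |
 *	+---------------------+----------------------+---------+
 *
 * The outer saddr/daddr come from vif->local and vif->remote at the
 * call site in ipmr_queue_xmit() below.
 */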
static inline int ipmr_forward_finish(struct sk_buff *skb)
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);

/*
 *	Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;

	if (vif->dev == NULL)

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0, IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0, IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);

	if (skb->len + encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow us to send ICMP here, so such packets will
		 * disappear into a black hole.
		 */
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);

	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {

	vif->bytes_out += skb->len;

	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/* RFC1584 teaches that a DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but also after forwarding on all output
	 * interfaces. Clearly, if the mrouter runs a multicasting program, it
	 * should receive packets regardless of which interface the program
	 * joined on. Otherwise the program would have to join on all
	 * interfaces. On the other hand, a multihomed host (or router, but
	 * not an mrouter) cannot join on more than one interface - doing so
	 * would result in receiving multiple packets.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)

/* "local" means that we should preserve one skb (for local delivery) */

static int ip_mr_forward(struct net *net, struct mr_table *mrt,
			 struct sk_buff *skb, struct mfc_cache *cache,
			 int local)
	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 *	Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif_table[vif].dev != skb->dev) {

		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until routing daemons are
			 * fixed is not to redistribute a packet if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for (S,G)
			 * entries whose default multicast route points to
			 * the wrong oif. In any case, it is not a good idea
			 * to run multicasting applications on a router.
			 */

		cache->mfc_un.res.wrong_if++;
		true_vifi = ipmr_find_vif(mrt, skb->dev);

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* PIM-SM uses asserts when switching from RPT to SPT,
		     * so we cannot check that the packet arrived on an oif.
		     * That is bad, but otherwise we would need to move a
		     * pretty large chunk of pimd into the kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);

	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	for (ct = cache->mfc_un.res.maxvif - 1;
	     ct >= cache->mfc_un.res.minvif; ct--) {
		if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				ipmr_queue_xmit(net, mrt, skb2, cache,
						psend);

		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			ipmr_queue_xmit(net, mrt, skb2, cache, psend);

		ipmr_queue_xmit(net, mrt, skb, cache, psend);
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr		= iph->daddr,
		.saddr		= iph->saddr,
		.flowi4_tos	= iph->tos,
		.flowi4_oif	= rt->rt_oif,
		.flowi4_iif	= rt->rt_iif,
		.flowi4_mark	= rt->rt_mark,
	};
	struct mr_table *mrt;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
		return ERR_PTR(err);

/*
 *	Multicast packets for forwarding arrive here.
 *	Called with rcu_read_lock();
 */

int ip_mr_input(struct sk_buff *skb)
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;

	/* Packet is looped back after forwarding; it should not be
	 * forwarded a second time, but it can still be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)

	mrt = ipmr_rt_fib_lookup(net, skb);
		return PTR_ERR(mrt);

	if (IPCB(skb)->opt.router_alert) {
		if (ip_call_ra_chain(skb))
	} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
		/* IGMPv1 (and broken IGMPv2 implementations such as
		 * Cisco IOS <= 11.2(8)) do not put the router alert
		 * option into IGMP packets destined to routable
		 * groups. That is very bad, because it means we can
		 * forward NO IGMP messages.
		 */
		struct sock *mroute_sk;

		mroute_sk = rcu_dereference(mrt->mroute_sk);
			raw_rcv(mroute_sk, skb);

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			ip_local_deliver(skb);

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, skb->dev);
			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

		read_unlock(&mrt_lock);

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, skb, cache, local);
	read_unlock(&mrt_lock);

		return ip_local_deliver(skb);

		return ip_local_deliver(skb);
#ifdef CONFIG_IP_PIMSM
/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);

	/* Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;
	skb->pkt_type = PACKET_HOST;

	skb_tunnel_rx(skb, reg_dev);

	return NET_RX_SUCCESS;
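/* On-the-wire shape of the PIM Register handled above (sketch):
 *
 *	+------------+---------------------+--------------------------+
 *	| outer IP   | PIM register header | inner multicast datagram |
 *	| proto=PIM  | type/flags/checksum | (what encap points at)   |
 *	+------------+---------------------+--------------------------+
 *
 * Per the PIM-SM specification, the register checksum covers only the
 * PIM header, not the encapsulated datagram - hence the relaxed two-way
 * check in pim_rcv() below, which also accepts older peers that
 * checksummed the whole packet (see the Fixes list at the top).
 */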
#ifdef CONFIG_IP_PIMSM_V1
/*
 * Handle IGMP messages of PIMv1
 */

int pim_rcv_v1(struct sk_buff *skb)
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);

	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {

#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))

	mrt = ipmr_rt_fib_lookup(net, skb);

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm)
	struct rtnexthop *nhp;
	u8 *b = skb_tail_pointer(skb);
	struct rtattr *mp_head;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS)

	if (VIF_EXISTS(mrt, c->mfc_parent))
		RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex);

	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);

	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, int nowait)
	struct mfc_cache *cache;
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);

	cache = ipmr_cache_find(mrt, saddr, daddr);

	if (cache == NULL) {
		struct sk_buff *skb2;
		struct net_device *dev;

		read_lock(&mrt_lock);

		vif = ipmr_find_vif(mrt, dev);
			read_unlock(&mrt_lock);

		skb2 = skb_clone(skb, GFP_ATOMIC);
			read_unlock(&mrt_lock);

		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;

		err = ipmr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);

	read_lock(&mrt_lock);
	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 pid, u32 seq, struct mfc_cache *c)
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len  = 32;
	rtm->rtm_src_len  = 32;
	rtm->rtm_table    = mrt->id;
	NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
	rtm->rtm_type     = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = RTPROT_UNSPEC;

	NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin);
	NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp);

	if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	ipmr_for_each_table(mrt, net) {

		for (h = s_h; h < MFC_LINES; h++) {
			list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {

				if (ipmr_fill_mroute(mrt, skb,
						     NETLINK_CB(cb->skb).pid,
						     cb->nlh->nlmsg_seq,
						     mfc) < 0)

#ifdef CONFIG_PROC_FS
/*
 *	The /proc interfaces to multicast routing:
 *	/proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
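/* Hedged sample of what these files might look like on a router with one
 * active vif and one resolved (S,G) entry (column layout follows the
 * seq_printf() formats below; all numbers are made up):
 *
 *	# cat /proc/net/ip_mr_vif
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *	 0 eth0          123456     789    123456     789 00000 0100000A 00000000
 *
 *	# cat /proc/net/ip_mr_cache
 *	Group    Origin   Iif     Pkts    Bytes    Wrong Oifs
 *	010101EF 0100000A  0          789   123456        0  1:1
 */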
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;

static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))

		return &mrt->vif_table[iter->ct];

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
		return ERR_PTR(-ENOENT);

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))

		return &mrt->vif_table[iter->ct];

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
	read_unlock(&mrt_lock);

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));

static const struct file_operations ipmr_vif_fops = {
	.owner   = THIS_MODULE,
	.open    = ipmr_vif_open,
	.llseek  = seq_lseek,
	.release = seq_release_net,

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;

static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		list_for_each_entry_rcu(mfc, it->cache, list)

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)

	spin_unlock_bh(&mfc_unres_lock);
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
		return ERR_PTR(-ENOENT);

	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)

	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);

	while (++it->ct < MFC_LINES) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc_cache, list);

	/* exhausted cache_array, show unresolved */

	it->cache = &mrt->mfc_unres_queue;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

	spin_unlock_bh(&mfc_unres_lock);
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_array[it->ct])

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq, " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);

		seq_putc(seq, '\n');
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));

static const struct file_operations ipmr_mfc_fops = {
	.owner   = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.llseek  = seq_lseek,
	.release = seq_release_net,

#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {

/*
 *	Setup for IP multicast routing
 */
static int __net_init ipmr_net_init(struct net *net)
	err = ipmr_rules_init(net);

#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
	if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
		goto proc_cache_fail;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(net, "ip_mr_vif");

	ipmr_rules_exit(net);

static void __net_exit ipmr_net_exit(struct net *net)
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "ip_mr_cache");
	proc_net_remove(net, "ip_mr_vif");

	ipmr_rules_exit(net);

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};
int __init ip_mr_init(void)
	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		printk(KERN_ERR "ip_mr_init: can't add PIM protocol\n");
		goto add_proto_fail;

	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      NULL, ipmr_rtm_dumproute, NULL);

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);