#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

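/* Dump the bridge's multicast router port list as a nested MDBA_ROUTER
 * attribute, one MDBA_ROUTER_PORT (ifindex) per port.
 */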
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex))
			goto fail;
	}

	nla_nest_end(skb, nest);
	return 0;

fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

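/* Walk the MDB hash table under RCU and emit one MDBA_MDB_ENTRY per group,
 * resuming from cb->args[1] when the dump is continued.
 */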
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_htable *mdb;
	struct nlattr *nest, *nest2;
	int i, err = 0;
	int idx = 0, s_idx = cb->args[1];

	if (br->multicast_disabled)
		return 0;

	mdb = rcu_dereference(br->mdb);
	if (!mdb)
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	for (i = 0; i < mdb->max; i++) {
		struct net_bridge_mdb_entry *mp;
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;
		struct net_bridge_port *port;

		hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
			if (idx < s_idx)
				goto skip;

			nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
			if (nest2 == NULL) {
				err = -EMSGSIZE;
				goto out;
			}

			for (pp = &mp->ports;
			     (p = rcu_dereference(*pp)) != NULL;
			     pp = &p->next) {
				port = p->port;
				if (port) {
					struct br_mdb_entry e;
					memset(&e, 0, sizeof(e));
					e.ifindex = port->dev->ifindex;
					e.state = p->state;
					e.vid = p->addr.vid;
					if (p->addr.proto == htons(ETH_P_IP))
						e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
					if (p->addr.proto == htons(ETH_P_IPV6))
						e.addr.u.ip6 = p->addr.u.ip6;
#endif
					e.addr.proto = p->addr.proto;
					if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(e), &e)) {
						nla_nest_cancel(skb, nest2);
						err = -EMSGSIZE;
						goto out;
					}
				}
			}
			nla_nest_end(skb, nest2);
skip:
			idx++;
		}
	}

out:
	cb->args[1] = idx;
	nla_nest_end(skb, nest);
	return err;
}

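/* RTM_GETMDB dump handler: emits one RTM_GETMDB message per bridge device,
 * carrying that bridge's MDB entries and router ports.
 */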
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	s_idx = cb->args[0];

	rcu_read_lock();

	/* In theory this could be wrapped to 0... */
	cb->seq = net->dev_base_seq + br_mdb_rehash_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

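/* Build a notification message carrying a single MDB entry. */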
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct br_mdb_entry *entry, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_mdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(struct br_mdb_entry));
}

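/* Allocate, fill and send an MDB notification to RTNLGRP_MDB listeners. */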
static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
			    int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

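/* Translate a bridge multicast group into a br_mdb_entry and notify
 * userspace about its addition or removal.
 */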
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
		   struct br_ip *group, int type, u8 state)
{
	struct br_mdb_entry entry;

	memset(&entry, 0, sizeof(entry));
	entry.ifindex = port->dev->ifindex;
	entry.addr.proto = group->proto;
	entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	entry.addr.u.ip6 = group->u.ip6;
#endif
	entry.state = state;
	entry.vid = group->vid;
	__br_mdb_notify(dev, &entry, type);
}

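/* Build a notification message carrying a single router port. */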
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}

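/* Notify userspace that a port was added to or removed from the bridge's
 * multicast router port list; a NULL port means "no router port".
 */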
void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

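/* Sanity-check a user-supplied MDB entry before acting on it. */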
static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
	if (entry->ifindex == 0)
		return false;

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4))
			return false;
		if (ipv4_is_local_multicast(entry->addr.u.ip4))
			return false;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
			return false;
#endif
	} else
		return false;
	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
		return false;
	if (entry->vid >= VLAN_VID_MASK)
		return false;

	return true;
}

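/* Validate an RTM_NEWMDB/RTM_DELMDB request and extract the target bridge
 * device and the entry to act on.
 */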
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY] ||
	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
		return -EINVAL;
	}

	*pentry = entry;
	return 0;
}

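/* Insert a new port group into the MDB, keeping the per-group port list
 * sorted; the caller holds br->multicast_lock (see mlock_dereference()).
 */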
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, unsigned char state)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_htable *mdb;
	unsigned long now = jiffies;
	int err;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp) {
		mp = br_multicast_new_group(br, port, group);
		err = PTR_ERR(mp);
		if (IS_ERR(mp))
			return err;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, state);
	if (unlikely(!p))
		return -ENOMEM;
	rcu_assign_pointer(*pp, p);
	if (state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

	return 0;
}

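/* Resolve the target port, convert the entry to a br_ip and add the group
 * under the multicast lock.
 */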
static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct br_mdb_entry *entry)
{
	struct br_ip ip;
	struct net_device *dev;
	struct net_bridge_port *p;
	int ret;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	dev = __dev_get_by_index(net, entry->ifindex);
	if (!dev)
		return -ENODEV;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	memset(&ip, 0, sizeof(ip));
	ip.vid = entry->vid;
	ip.proto = entry->addr.proto;
	if (ip.proto == htons(ETH_P_IP))
		ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip.u.ip6 = entry->addr.u.ip6;
#endif

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, &ip, entry->state);
	spin_unlock_bh(&br->multicast_lock);
	return ret;
}

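/* RTM_NEWMDB handler: adds the entry on one VLAN, or on every VLAN
 * configured on the port when VLAN filtering is on and no VID was given.
 */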
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	unsigned short vid = VLAN_N_VID;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_port_vlans *pv;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	pv = nbp_get_vlan_info(p);
	if (br_vlan_enabled(br) && pv && entry->vid == 0) {
		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
			entry->vid = vid;
			err = __br_mdb_add(net, br, entry);
			if (err)
				break;
			__br_mdb_notify(dev, entry, RTM_NEWMDB);
		}
	} else {
		err = __br_mdb_add(net, br, entry);
		if (!err)
			__br_mdb_notify(dev, entry, RTM_NEWMDB);
	}

	return err;
}

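/* Remove the port group matching the entry, if any, and arm the group
 * timer so that a group left without ports or listeners expires.
 */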
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	memset(&ip, 0, sizeof(ip));
	ip.vid = entry->vid;
	ip.proto = entry->addr.proto;
	if (ip.proto == htons(ETH_P_IP))
		ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip.u.ip6 = entry->addr.u.ip6;
#endif

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &ip);
	if (!mp)
		goto unlock;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		entry->state = p->state;
		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);
		err = 0;

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

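/* RTM_DELMDB handler: mirrors br_mdb_add(), deleting the entry on one VLAN
 * or on every VLAN configured on the port.
 */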
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	unsigned short vid = VLAN_N_VID;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_port_vlans *pv;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	pv = nbp_get_vlan_info(p);
	if (br_vlan_enabled(br) && pv && entry->vid == 0) {
		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
			entry->vid = vid;
			err = __br_mdb_del(br, entry);
			if (!err)
				__br_mdb_notify(dev, entry, RTM_DELMDB);
		}
	} else {
		err = __br_mdb_del(br, entry);
		if (!err)
			__br_mdb_notify(dev, entry, RTM_DELMDB);
	}

	return err;
}

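/* Register/unregister the MDB rtnetlink handlers. */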
void br_mdb_init(void)
{
	rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, NULL);
	rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL);
	rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}