/*
 * Ethernet-type device handling.
 *
 * VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <net/p8022.h>
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <asm/uaccess.h>

#include <linux/if_vlan.h>

#define DRV_VERSION "1.8"

/* Global VLAN variables */

int vlan_net_id __read_mostly;

const char vlan_fullname[] = "802.1Q VLAN Support";
const char vlan_version[] = DRV_VERSION;

/* End of global variables definitions. */

static void vlan_group_free(struct vlan_group *grp)
        for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
                kfree(grp->vlan_devices_arrays[i]);

static struct vlan_group *vlan_group_alloc(struct net_device *real_dev)
        struct vlan_group *grp;

        grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
        grp->real_dev = real_dev;

static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
        struct net_device **array;

        array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
        size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
        array = kzalloc(size, GFP_KERNEL);
        vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN] = array;
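
/*
 * The 4096 possible VLAN IDs are split into VLAN_GROUP_ARRAY_SPLIT_PARTS
 * chunks of VLAN_GROUP_ARRAY_PART_LEN device pointers each, and a chunk is
 * only allocated once the first VLAN ID falling into it is registered.
 * For illustration, the lookup counterpart (vlan_group_get_device() in
 * include/linux/if_vlan.h) indexes the array the same way, roughly:
 *
 *      array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
 *      dev   = array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
 */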

static void vlan_rcu_free(struct rcu_head *rcu)
        vlan_group_free(container_of(rcu, struct vlan_group, rcu));

void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
        struct vlan_dev_info *vlan = vlan_dev_info(dev);
        struct net_device *real_dev = vlan->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        struct vlan_group *grp;
        u16 vlan_id = vlan->vlan_id;

        grp = rtnl_dereference(real_dev->vlgrp);

        /* Take it out of our own structures, but be sure to interlock with
         * HW accelerating devices or SW vlan input packet processing if
         * VLAN is not 0 (leave it there for 802.1p).
         */
        if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
                ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);

        if (vlan->flags & VLAN_FLAG_GVRP)
                vlan_gvrp_request_leave(dev);

        vlan_group_set_device(grp, vlan_id, NULL);
        /* Because unregister_netdevice_queue() makes sure at least one rcu
         * grace period is respected before device freeing,
         * we don't need to call synchronize_net() here.
         */
        unregister_netdevice_queue(dev, head);

        /* If the group is now empty, kill off the group. */
        if (grp->nr_vlans == 0) {
                vlan_gvrp_uninit_applicant(real_dev);

                rcu_assign_pointer(real_dev->vlgrp, NULL);

                /* Free the group, after all CPUs are done. */
                call_rcu(&grp->rcu, vlan_rcu_free);
        }

        /* Get rid of the vlan's reference to real_dev */
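
/*
 * A note on the RCU handling above: receive-path readers look up
 * real_dev->vlgrp under rcu_read_lock() while all updaters run under the
 * RTNL lock, so the group memory may only be reclaimed via
 * call_rcu()/vlan_rcu_free() after a grace period; vlan_cleanup_module()
 * below calls rcu_barrier() to wait for any such callbacks still in flight.
 */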

int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
        const char *name = real_dev->name;
        const struct net_device_ops *ops = real_dev->netdev_ops;

        if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
                pr_info("VLANs not supported on %s\n", name);

        if ((real_dev->features & NETIF_F_HW_VLAN_FILTER) &&
            (!ops->ndo_vlan_rx_add_vid || !ops->ndo_vlan_rx_kill_vid)) {
                pr_info("Device %s has buggy VLAN hw accel\n", name);

        if (vlan_find_dev(real_dev, vlan_id) != NULL)

int register_vlan_dev(struct net_device *dev)
        struct vlan_dev_info *vlan = vlan_dev_info(dev);
        struct net_device *real_dev = vlan->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        u16 vlan_id = vlan->vlan_id;
        struct vlan_group *grp, *ngrp = NULL;

        grp = rtnl_dereference(real_dev->vlgrp);
                ngrp = grp = vlan_group_alloc(real_dev);
                err = vlan_gvrp_init_applicant(real_dev);

        err = vlan_group_prealloc_vid(grp, vlan_id);
                goto out_uninit_applicant;

        err = register_netdevice(dev);
                goto out_uninit_applicant;

        /* Account for reference in struct vlan_dev_info */

        netif_stacked_transfer_operstate(real_dev, dev);
        linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */

        /* So, got the sucker initialized, now let's place
         * it into our local structure.
         */
        vlan_group_set_device(grp, vlan_id, dev);
                rcu_assign_pointer(real_dev->vlgrp, ngrp);

        if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
                ops->ndo_vlan_rx_add_vid(real_dev, vlan_id);

out_uninit_applicant:
                vlan_gvrp_uninit_applicant(real_dev);

                /* Free the group, after all CPUs are done. */
                call_rcu(&ngrp->rcu, vlan_rcu_free);

/* Attach a VLAN device to a MAC address (i.e. an Ethernet card).
 * Returns 0 if the device was created or a negative error code otherwise.
 */
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
        struct net_device *new_dev;
        struct net *net = dev_net(real_dev);
        struct vlan_net *vn = net_generic(net, vlan_net_id);

        if (vlan_id >= VLAN_VID_MASK)

        err = vlan_check_real_dev(real_dev, vlan_id);

        /* Gotta set up the fields for the device. */
        switch (vn->name_type) {
        case VLAN_NAME_TYPE_RAW_PLUS_VID:
                /* name will look like: eth1.0005 */
                snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
                break;
        case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
                /* Put our vlan.VID in the name.
                 * Name will look like: vlan5
                 */
                snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
                break;
        case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
                /* Put our vlan.VID in the name.
                 * Name will look like: eth0.5
                 */
                snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
                break;
        case VLAN_NAME_TYPE_PLUS_VID:
                /* Put our vlan.VID in the name.
                 * Name will look like: vlan0005
                 */
                snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
        }

        new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, vlan_setup);

        dev_net_set(new_dev, net);
        /* need 4 bytes for extra VLAN header info,
         * hope the underlying device can handle it.
         */
        new_dev->mtu = real_dev->mtu;

        vlan_dev_info(new_dev)->vlan_id = vlan_id;
        vlan_dev_info(new_dev)->real_dev = real_dev;
        vlan_dev_info(new_dev)->dent = NULL;
        vlan_dev_info(new_dev)->flags = VLAN_FLAG_REORDER_HDR;

        new_dev->rtnl_link_ops = &vlan_link_ops;
        err = register_vlan_dev(new_dev);
                goto out_free_newdev;

out_free_newdev:
        free_netdev(new_dev);
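
/*
 * This ioctl-driven path is the legacy (vconfig-style) way of creating a
 * VLAN device.  Since new_dev->rtnl_link_ops is set to &vlan_link_ops, the
 * same devices can also be created over rtnetlink, e.g. with iproute2
 * (interface name and VID below are only an example):
 *
 *      ip link add link eth0 name eth0.100 type vlan id 100
 */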

static void vlan_sync_address(struct net_device *dev,
                              struct net_device *vlandev)
        struct vlan_dev_info *vlan = vlan_dev_info(vlandev);

        /* May be called without an actual change */
        if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr))
                return;

        /* vlan address was different from the old address and is equal to
         * the new address.
         */
        if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
            !compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
                dev_uc_del(dev, vlandev->dev_addr);

        /* vlan address was equal to the old address and is different from
         * the new address.
         */
        if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
            compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
                dev_uc_add(dev, vlandev->dev_addr);

        memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
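
/*
 * Reminder for the tests above: compare_ether_addr() returns 0 when the two
 * addresses are equal, so the '!' forms test for a match and the plain forms
 * test for a mismatch.
 */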

static void vlan_transfer_features(struct net_device *dev,
                                   struct net_device *vlandev)
        vlandev->gso_max_size = dev->gso_max_size;

        if (dev->features & NETIF_F_HW_VLAN_TX)
                vlandev->hard_header_len = dev->hard_header_len;
        else
                vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
        vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif

        netdev_update_features(vlandev);
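
/*
 * If the lower device inserts the tag itself (NETIF_F_HW_VLAN_TX), the VLAN
 * device needs no extra headroom; otherwise room for the 4-byte 802.1Q
 * header (VLAN_HLEN) is reserved in hard_header_len.
 */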

static void __vlan_device_event(struct net_device *dev, unsigned long event)
        case NETDEV_CHANGENAME:
                vlan_proc_rem_dev(dev);
                if (vlan_proc_add_dev(dev) < 0)
                        pr_warn("failed to change proc name for %s\n",
                                dev->name);
        case NETDEV_REGISTER:
                if (vlan_proc_add_dev(dev) < 0)
                        pr_warn("failed to add proc entry for %s\n", dev->name);
        case NETDEV_UNREGISTER:
                vlan_proc_rem_dev(dev);

static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                             void *ptr)
        struct net_device *dev = ptr;
        struct vlan_group *grp;
        struct net_device *vlandev;
        struct vlan_dev_info *vlan;

        if (is_vlan_dev(dev))
                __vlan_device_event(dev, event);

        if ((event == NETDEV_UP) &&
            (dev->features & NETIF_F_HW_VLAN_FILTER) &&
            dev->netdev_ops->ndo_vlan_rx_add_vid) {
                pr_info("adding VLAN 0 to HW filter on device %s\n",
                        dev->name);
                dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
        }

        grp = rtnl_dereference(dev->vlgrp);

        /* It is OK that we do not hold the group lock right now,
         * as we run under the RTNL lock.
         */

        case NETDEV_CHANGE:
                /* Propagate real device state to vlan devices */
                for (i = 0; i < VLAN_N_VID; i++) {
                        vlandev = vlan_group_get_device(grp, i);
                        netif_stacked_transfer_operstate(dev, vlandev);

        case NETDEV_CHANGEADDR:
                /* Adjust unicast filters on underlying device */
                for (i = 0; i < VLAN_N_VID; i++) {
                        vlandev = vlan_group_get_device(grp, i);
                        flgs = vlandev->flags;
                        if (!(flgs & IFF_UP))
                                continue;

                        vlan_sync_address(dev, vlandev);

        case NETDEV_CHANGEMTU:
                for (i = 0; i < VLAN_N_VID; i++) {
                        vlandev = vlan_group_get_device(grp, i);
                        if (vlandev->mtu <= dev->mtu)
                                continue;

                        dev_set_mtu(vlandev, dev->mtu);

        case NETDEV_FEAT_CHANGE:
                /* Propagate device features to underlying device */
                for (i = 0; i < VLAN_N_VID; i++) {
                        vlandev = vlan_group_get_device(grp, i);
                        vlan_transfer_features(dev, vlandev);

        case NETDEV_DOWN:
                /* Put all VLANs for this dev in the down state too. */
                for (i = 0; i < VLAN_N_VID; i++) {
                        vlandev = vlan_group_get_device(grp, i);
                        flgs = vlandev->flags;
                        if (!(flgs & IFF_UP))
                                continue;

                        vlan = vlan_dev_info(vlandev);
                        if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                                dev_change_flags(vlandev, flgs & ~IFF_UP);
                        netif_stacked_transfer_operstate(dev, vlandev);

        case NETDEV_UP:
                /* Put all VLANs for this dev in the up state too. */
                for (i = 0; i < VLAN_N_VID; i++) {
                        vlandev = vlan_group_get_device(grp, i);
                        flgs = vlandev->flags;
                        vlan = vlan_dev_info(vlandev);
                        if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                                dev_change_flags(vlandev, flgs | IFF_UP);
                        netif_stacked_transfer_operstate(dev, vlandev);

        case NETDEV_UNREGISTER:
                /* twiddle thumbs on netns device moves */
                if (dev->reg_state != NETREG_UNREGISTERING)
                        break;

                for (i = 0; i < VLAN_N_VID; i++) {
                        vlandev = vlan_group_get_device(grp, i);
                        /* unregistration of last vlan destroys group, abort
                         * afterwards
                         */
                        if (grp->nr_vlans == 1)

                        unregister_vlan_dev(vlandev, &list);
                unregister_netdevice_many(&list);

        case NETDEV_PRE_TYPE_CHANGE:
                /* Forbid the underlying device to change its type. */

        case NETDEV_NOTIFY_PEERS:
        case NETDEV_BONDING_FAILOVER:
                /* Propagate to vlan devices */
                for (i = 0; i < VLAN_N_VID; i++) {
                        vlandev = vlan_group_get_device(grp, i);
                        call_netdevice_notifiers(event, vlandev);

static struct notifier_block vlan_notifier_block __read_mostly = {
        .notifier_call = vlan_device_event,
};

/*
 * VLAN IOCTL handler.
 * o execute requested action or pass command to the device driver
 * arg is really a struct vlan_ioctl_args __user *.
 */
static int vlan_ioctl_handler(struct net *net, void __user *arg)
        struct vlan_ioctl_args args;
        struct net_device *dev = NULL;

        if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))

        /* Null terminate this sucker, just in case. */
        args.device1[23] = 0;
        args.u.device2[23] = 0;

        case SET_VLAN_INGRESS_PRIORITY_CMD:
        case SET_VLAN_EGRESS_PRIORITY_CMD:
        case SET_VLAN_FLAG_CMD:
        case GET_VLAN_REALDEV_NAME_CMD:
        case GET_VLAN_VID_CMD:
                dev = __dev_get_by_name(net, args.device1);

                if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))

        case SET_VLAN_INGRESS_PRIORITY_CMD:
                if (!capable(CAP_NET_ADMIN))
                vlan_dev_set_ingress_priority(dev,

        case SET_VLAN_EGRESS_PRIORITY_CMD:
                if (!capable(CAP_NET_ADMIN))
                err = vlan_dev_set_egress_priority(dev,

        case SET_VLAN_FLAG_CMD:
                if (!capable(CAP_NET_ADMIN))
                err = vlan_dev_change_flags(dev,
                                            args.vlan_qos ? args.u.flag : 0,

        case SET_VLAN_NAME_TYPE_CMD:
                if (!capable(CAP_NET_ADMIN))
                if ((args.u.name_type >= 0) &&
                    (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
                        vn = net_generic(net, vlan_net_id);
                        vn->name_type = args.u.name_type;

        case ADD_VLAN_CMD:
                if (!capable(CAP_NET_ADMIN))
                err = register_vlan_device(dev, args.u.VID);

        case DEL_VLAN_CMD:
                if (!capable(CAP_NET_ADMIN))
                unregister_vlan_dev(dev, NULL);

        case GET_VLAN_REALDEV_NAME_CMD:
                vlan_dev_get_realdev_name(dev, args.u.device2);
                if (copy_to_user(arg, &args,
                                 sizeof(struct vlan_ioctl_args)))

        case GET_VLAN_VID_CMD:
                args.u.VID = vlan_dev_vlan_id(dev);
                if (copy_to_user(arg, &args,
                                 sizeof(struct vlan_ioctl_args)))
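
/*
 * For reference, a minimal userspace sketch of driving the handler above
 * (this is the vconfig-style interface; error handling is omitted, and the
 * device name and VID are only examples):
 *
 *      #include <string.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/socket.h>
 *      #include <linux/sockios.h>
 *      #include <linux/if_vlan.h>
 *
 *      struct vlan_ioctl_args args;
 *      int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *      memset(&args, 0, sizeof(args));
 *      args.cmd = ADD_VLAN_CMD;
 *      strncpy(args.device1, "eth0", sizeof(args.device1) - 1);
 *      args.u.VID = 100;
 *      ioctl(fd, SIOCSIFVLAN, &args);
 */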

static int __net_init vlan_init_net(struct net *net)
        struct vlan_net *vn = net_generic(net, vlan_net_id);

        vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;

        err = vlan_proc_init(net);

static void __net_exit vlan_exit_net(struct net *net)
        vlan_proc_cleanup(net);

static struct pernet_operations vlan_net_ops = {
        .init = vlan_init_net,
        .exit = vlan_exit_net,
        .size = sizeof(struct vlan_net),
};

static int __init vlan_proto_init(void)
        pr_info("%s v%s\n", vlan_fullname, vlan_version);

        err = register_pernet_subsys(&vlan_net_ops);

        err = register_netdevice_notifier(&vlan_notifier_block);

        err = vlan_gvrp_init();

        err = vlan_netlink_init();

        vlan_ioctl_set(vlan_ioctl_handler);

        unregister_netdevice_notifier(&vlan_notifier_block);

        unregister_pernet_subsys(&vlan_net_ops);

static void __exit vlan_cleanup_module(void)
        vlan_ioctl_set(NULL);

        unregister_netdevice_notifier(&vlan_notifier_block);

        unregister_pernet_subsys(&vlan_net_ops);
        rcu_barrier(); /* Wait for completion of call_rcu()'s */

module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);