/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
static const struct fib_kuid_range fib_kuid_range_unset = {
	KUIDT_INIT(0),
	KUIDT_INIT(~0),
};
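/* Returns true if the rule carries no match conditions beyond the
 * defaults, i.e. it matches every packet.
 */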
bool fib_rule_matchall(const struct fib_rule *rule)
{
	if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id ||
	    rule->flags)
		return false;
	if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
		return false;
	if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) ||
	    !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end))
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(fib_rule_matchall);
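/* Allocate and append one of the default rules installed at init time
 * (e.g. local/main/default), bypassing the netlink configure path.
 */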
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	refcount_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->fr_net = ops->fro_net;
	r->uid_range = fib_kuid_range_unset;

	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);
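/* Choose a preference for a new rule when FRA_PRIORITY is not given:
 * one less than the preference of the second rule in the list, so the
 * new rule is inserted right after the first (highest priority) one.
 */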
static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);
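/* Look up the per-family ops in this namespace under RCU and take a
 * module reference; callers release it with rules_ops_put().
 */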
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}
static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}
static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}
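/* Validate an ops template and link it into the namespace list; fails
 * with -EEXIST if the family is already registered and -EINVAL if
 * mandatory callbacks are missing.
 */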
static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}
struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);
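/* Unlink and release every rule still on the list when an ops
 * structure is torn down.
 */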
static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}
void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	spin_unlock(&net->rules_mod_lock);

	fib_rules_cleanup_ops(ops);
	kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);
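/* Helpers for the FRA_UID_RANGE attribute: convert between the uid_t
 * pair carried on the wire and the kernel-internal kuid range.
 */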
static int uid_range_set(struct fib_kuid_range *range)
{
	return uid_valid(range->start) && uid_valid(range->end);
}
static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
{
	struct fib_rule_uid_range *in;
	struct fib_kuid_range out;

	in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);

	out.start = make_kuid(current_user_ns(), in->start);
	out.end = make_kuid(current_user_ns(), in->end);

	return out;
}
static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
{
	struct fib_rule_uid_range out = {
		from_kuid_munged(current_user_ns(), range->start),
		from_kuid_munged(current_user_ns(), range->end)
	};

	return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
}
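/* Evaluate the generic selectors (interfaces, fwmark, tunnel id,
 * l3mdev, uid range), then the family-specific ->match() callback;
 * FIB_RULE_INVERT flips the final result.
 */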
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags,
			  struct fib_lookup_arg *arg)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
		goto out;

	if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
		goto out;

	if (uid_lt(fl->flowi_uid, rule->uid_range.start) ||
	    uid_gt(fl->flowi_uid, rule->uid_range.end))
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}
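/* Walk the rule list under RCU, following resolved goto targets, until
 * a rule matches and its action returns something other than -EAGAIN;
 * the matching rule is handed back in arg->rule.
 */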
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags, arg))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (!err && ops->suppress && ops->suppress(rule, arg))
			continue;

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(refcount_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);
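/* Check the src/dst prefix lengths and attribute sizes in a rule
 * request against the family's address size.
 */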
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}
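/* Return 1 if an identical rule (same action, table, interfaces, mark,
 * tunnel id, l3mdev, uid range and family-specific selectors) is
 * already installed; used to honour NLM_F_EXCL.
 */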
static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
		       struct nlattr **tb, struct fib_rule *rule)
{
	struct fib_rule *r;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->action != rule->action)
			continue;

		if (r->table != rule->table)
			continue;

		if (r->pref != rule->pref)
			continue;

		if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
			continue;

		if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
			continue;

		if (r->mark != rule->mark)
			continue;

		if (r->mark_mask != rule->mark_mask)
			continue;

		if (r->tun_id != rule->tun_id)
			continue;

		if (r->fr_net != rule->fr_net)
			continue;

		if (r->l3mdev != rule->l3mdev)
			continue;

		if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
		    !uid_eq(r->uid_range.end, rule->uid_range.end))
			continue;

		if (!ops->compare(r, frh, tb))
			continue;
		return 1;
	}
	return 0;
}
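/* RTM_NEWRULE handler: parse and validate the request, build the rule,
 * resolve goto targets, insert the rule in priority order and notify
 * listeners.
 */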
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
		   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = net;

	rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
				      : fib_default_rule_pref(ops);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	if (tb[FRA_TUN_ID])
		rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);

	err = -EINVAL;
	if (tb[FRA_L3MDEV]) {
#ifdef CONFIG_NET_L3_MASTER_DEV
		rule->l3mdev = nla_get_u8(tb[FRA_L3MDEV]);
		if (rule->l3mdev != 1)
#endif
			goto errout_free;
	}

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		rule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		rule->suppress_ifgroup = -1;

	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	if (rule->l3mdev && rule->table)
		goto errout_free;

	if (tb[FRA_UID_RANGE]) {
		if (current_user_ns() != net->user_ns) {
			err = -EPERM;
			goto errout_free;
		}

		rule->uid_range = nla_get_kuid_range(tb);

		if (!uid_range_set(&rule->uid_range) ||
		    !uid_lte(rule->uid_range.start, rule->uid_range.end))
			goto errout_free;
	} else {
		rule->uid_range = fib_kuid_range_unset;
	}

	if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
	    rule_exists(ops, frh, tb, rule)) {
		err = -EEXIST;
		goto errout_free;
	}

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	refcount_set(&rule->refcnt, 1);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list; check
		 * whether any of them point to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (rule->tun_id)
		ip_tunnel_need_metadata();

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_newrule);
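/* RTM_DELRULE handler: find the first rule matching every attribute
 * present in the request, unlink it, repair any goto rules pointing at
 * it and notify listeners.
 */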
int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
		   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r;
	struct nlattr *tb[FRA_MAX+1];
	struct fib_kuid_range range;
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	if (tb[FRA_UID_RANGE]) {
		range = nla_get_kuid_range(tb);
		if (!uid_range_set(&range)) {
			err = -EINVAL;
			goto errout;
		}
	} else {
		range = fib_kuid_range_unset;
	}

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh_get_table(frh, tb) &&
		    (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (tb[FRA_TUN_ID] &&
		    (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
			continue;

		if (tb[FRA_L3MDEV] &&
		    (rule->l3mdev != nla_get_u8(tb[FRA_L3MDEV])))
			continue;

		if (uid_range_set(&range) &&
		    (!uid_eq(rule->uid_range.start, range.start) ||
		     !uid_eq(rule->uid_range.end, range.end)))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		if (ops->delete) {
			err = ops->delete(rule);
			if (err)
				goto errout;
		}

		if (rule->tun_id)
			ip_tunnel_unneed_metadata();

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO) {
			ops->nr_goto_rules--;
			if (rtnl_dereference(rule->ctarget) == NULL)
				ops->unresolved_rules--;
		}

		/*
		 * Check if this rule is a target of any goto rules. If so,
		 * adjust them to the next rule with the same preference, or
		 * mark them unresolved. As this operation can be expensive,
		 * it is only performed if goto rules (other than the one
		 * being deleted) have actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			struct fib_rule *n;

			n = list_next_entry(rule, list);
			if (&n->list == &ops->rules_list || n->pref != rule->pref)
				n = NULL;
			list_for_each_entry(r, &ops->rules_list, list) {
				if (rtnl_dereference(r->ctarget) != rule)
					continue;
				rcu_assign_pointer(r->ctarget, n);
				if (!n)
					ops->unresolved_rules++;
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).portid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_delrule);
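/* Upper bound for the size of a rule notification; kept in sync with
 * the attributes emitted by fib_nl_fill_rule().
 */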
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4) /* FRA_FWMASK */
			 + nla_total_size_64bit(8) /* FRA_TUN_ID */
			 + nla_total_size(sizeof(struct fib_kuid_range));

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}
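/* Fill one rule into a netlink message: fixed header fields first,
 * then one attribute per configured selector, then the family-specific
 * ->fill() callback.
 */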
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
	    (rule->tun_id &&
	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
	    (rule->l3mdev &&
	     nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) ||
	    (uid_range_set(&rule->uid_range) &&
	     nla_put_uid_range(skb, &rule->uid_range)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
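/* RTM_GETRULE dump path: dump_rules() walks one family's list,
 * fib_nl_dumprule() handles a single requested family or iterates over
 * all registered families, resuming from cb->args[].
 */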
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;
	int err = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
				       NLM_F_MULTI, ops);
		if (err)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return err;
}
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		dump_rules(skb, cb, ops);

		return skb->len;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}
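/* Build and broadcast a RTM_NEWRULE/RTM_DELRULE notification to the
 * ops' netlink group.
 */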
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}
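/* Keep rule->iifindex/oifindex in sync with device lifetime: attach
 * resolves interface names to ifindexes when a device appears, detach
 * resets them to -1 when it goes away.
 */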
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}
static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}
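/* Netdevice notifier: replay attach/detach for every registered family
 * on register, unregister and name changes.
 */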
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}
static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};
static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}
static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};
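/* Register the rtnetlink handlers, pernet ops and netdevice notifier,
 * unwinding in reverse order on failure.
 */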
static int __init fib_rules_init(void)
{
	int err;

	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);