// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>

#define NH_RES_DEFAULT_IDLE_TIMER	(120 * HZ)
#define NH_RES_DEFAULT_UNBALANCED_TIMER	0	/* No forced rebalancing. */

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo);

#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)

static const struct nla_policy rtm_nh_policy_new[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_GROUP]		= { .type = NLA_BINARY },
	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GATEWAY]		= { .type = NLA_BINARY },
	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[NHA_ENCAP]		= { .type = NLA_NESTED },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_RES_GROUP]		= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_policy_get[] = {
	[NHA_ID]		= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump[] = {
	[NHA_OIF]	= { .type = NLA_U32 },
	[NHA_GROUPS]	= { .type = NLA_FLAG },
	[NHA_MASTER]	= { .type = NLA_U32 },
	[NHA_FDB]	= { .type = NLA_FLAG },
};

static const struct nla_policy rtm_nh_res_policy_new[] = {
	[NHA_RES_GROUP_BUCKETS]			= { .type = NLA_U16 },
	[NHA_RES_GROUP_IDLE_TIMER]		= { .type = NLA_U32 },
	[NHA_RES_GROUP_UNBALANCED_TIMER]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_dump[] = {
	[NHA_RES_BUCKET_NH_ID]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_get_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_get[] = {
	[NHA_RES_BUCKET_INDEX]	= { .type = NLA_U16 },
};

static bool nexthop_notifiers_is_empty(struct net *net)
{
	return !net->nexthop.notifier_chain.head;
}

static void
__nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
			       const struct nh_info *nhi)
{
	nh_info->dev = nhi->fib_nhc.nhc_dev;
	nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
	if (nh_info->gw_family == AF_INET)
		nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
	else if (nh_info->gw_family == AF_INET6)
		nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;

	nh_info->is_reject = nhi->reject_nh;
	nh_info->is_fdb = nhi->fdb_nh;
	nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
}

static int nh_notifier_single_info_init(struct nh_notifier_info *info,
					const struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);

	info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
	info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
	if (!info->nh)
		return -ENOMEM;

	__nh_notifier_single_info_init(info->nh, nhi);

	return 0;
}

static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh);
}

static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
				       struct nh_group *nhg)
{
	u16 num_nh = nhg->num_nh;
	int i;

	info->type = NH_NOTIFIER_INFO_TYPE_GRP;
	info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
			       GFP_KERNEL);
	if (!info->nh_grp)
		return -ENOMEM;

	info->nh_grp->num_nh = num_nh;
	info->nh_grp->is_fdb = nhg->fdb_nh;

	for (i = 0; i < num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhge->nh->nh_info);
		info->nh_grp->nh_entries[i].id = nhge->nh->id;
		info->nh_grp->nh_entries[i].weight = nhge->weight;
		__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
					       nhi);
	}

	return 0;
}

static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
					   struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	u16 num_nh_buckets = res_table->num_nh_buckets;
	unsigned long size;
	u16 i;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
	size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
	info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
				       __GFP_NOWARN);
	if (!info->nh_res_table)
		return -ENOMEM;

	info->nh_res_table->num_nh_buckets = num_nh_buckets;

	for (i = 0; i < num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;
		struct nh_info *nhi;

		nhge = rtnl_dereference(bucket->nh_entry);
		nhi = rtnl_dereference(nhge->nh->nh_info);
		__nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
					       nhi);
	}

	return 0;
}

static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
				     const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		return nh_notifier_mpath_info_init(info, nhg);
	else if (nhg->resilient)
		return nh_notifier_res_table_info_init(info, nhg);
	return -EINVAL;
}

static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
				      const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		kfree(info->nh_grp);
	else if (nhg->resilient)
		vfree(info->nh_res_table);
}

static int nh_notifier_info_init(struct nh_notifier_info *info,
				 const struct nexthop *nh)
{
	info->id = nh->id;

	if (nh->is_group)
		return nh_notifier_grp_info_init(info, nh);
	else
		return nh_notifier_single_info_init(info, nh);
}

static void nh_notifier_info_fini(struct nh_notifier_info *info,
				  const struct nexthop *nh)
{
	if (nh->is_group)
		nh_notifier_grp_info_fini(info, nh);
	else
		nh_notifier_single_info_fini(info);
}

static int call_nexthop_notifiers(struct net *net,
				  enum nexthop_event_type event_type,
				  struct nexthop *nh,
				  struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_info_init(&info, nh);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static int
nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
				      bool force, unsigned int *p_idle_timer_ms)
{
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	struct nexthop *nh;
	int err = 0;

	/* When 'force' is false, nexthop bucket replacement is performed
	 * because the bucket was deemed to be idle. In this case, capable
	 * listeners can choose to perform an atomic replacement: The bucket is
	 * only replaced if it is inactive. However, if the idle timer interval
	 * is smaller than the interval in which a listener is querying
	 * buckets' activity from the device, then atomic replacement should
	 * not be tried. Pass the idle timer value to listeners, so that they
	 * could determine which type of replacement to perform.
	 */
	if (force) {
		*p_idle_timer_ms = 0;
		return 0;
	}

	rcu_read_lock();

	nh = nexthop_find_by_id(info->net, info->id);
	if (!nh) {
		err = -EINVAL;
		goto out;
	}

	nhg = rcu_dereference(nh->nh_grp);
	res_table = rcu_dereference(nhg->res_table);
	*p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);

out:
	rcu_read_unlock();

	return err;
}
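
/* Editorial note (not in the original file): listeners receiving a
 * non-forced bucket replacement compare the reported idle_timer_ms with
 * their own activity-polling interval. An atomic "replace only if still
 * inactive" operation is only meaningful when the poll interval is shorter
 * than the idle timer; a forced replacement reports 0 and must be honoured
 * unconditionally.
 */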

static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
					    u16 bucket_index, bool force,
					    struct nh_info *oldi,
					    struct nh_info *newi)
{
	unsigned int idle_timer_ms;
	int err;

	err = nh_notifier_res_bucket_idle_timer_get(info, force,
						    &idle_timer_ms);
	if (err)
		return err;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
	info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket),
				      GFP_KERNEL);
	if (!info->nh_res_bucket)
		return -ENOMEM;

	info->nh_res_bucket->bucket_index = bucket_index;
	info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
	info->nh_res_bucket->force = force;
	__nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
	__nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
	return 0;
}

static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_res_bucket);
}

static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					       u16 bucket_index, bool force,
					       struct nh_info *oldi,
					       struct nh_info *newi,
					       struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nhg_id,
	};
	int err;

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
					       oldi, newi);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_BUCKET_REPLACE, &info);
	nh_notifier_res_bucket_info_fini(&info);

	return notifier_to_errno(err);
}

/* There are three users of RES_TABLE, and NHs etc. referenced from there:
 *
 * 1) a collection of callbacks for NH maintenance. This operates under
 *    RTNL,
 * 2) the delayed work that gradually balances the resilient table,
 * 3) and nexthop_select_path(), operating under RCU.
 *
 * Both the delayed work and the RTNL block are writers, and need to
 * maintain mutual exclusion. Since there are only two and well-known
 * writers for each table, the RTNL code can make sure it has exclusive
 * access thus:
 *
 * - Have the DW operate without locking;
 * - synchronously cancel the DW;
 * - do the writing;
 * - if the write was not actually a delete, call upkeep, which schedules
 *   DW again if necessary.
 *
 * The functions that are always called from the RTNL context use
 * rtnl_dereference(). The functions that can also be called from the DW do
 * a raw dereference and rely on the above mutual exclusion scheme.
 */
#define nh_res_dereference(p) (rcu_dereference_raw(p))
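
/* Editorial sketch (not in the original file): the RTNL-side write
 * sequence described above, in code form:
 *
 *	nh_res_table_cancel_upkeep(res_table);	// cancel_delayed_work_sync()
 *	// ... modify the table under RTNL ...
 *	nh_res_table_upkeep(res_table, true, true); // reschedules if needed
 *
 * Cancelling the delayed work synchronously first is what makes the RTNL
 * writer exclusive; readers remain protected by RCU throughout.
 */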

static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					     u16 bucket_index, bool force,
					     struct nexthop *old_nh,
					     struct nexthop *new_nh,
					     struct netlink_ext_ack *extack)
{
	struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
	struct nh_info *newi = nh_res_dereference(new_nh->nh_info);

	return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
						   force, oldi, newi, extack);
}

static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
					    struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	struct nh_group *nhg;
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	/* At this point, the nexthop buckets are still not populated. Only
	 * emit a notification with the logical nexthops, so that a listener
	 * could potentially veto it in case of unsupported configuration.
	 */
	nhg = rtnl_dereference(nh->nh_grp);
	err = nh_notifier_mpath_info_init(&info, nhg);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
					   &info);
	kfree(info.nh_grp);

	return notifier_to_errno(err);
}

static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
				 enum nexthop_event_type event_type,
				 struct nexthop *nh,
				 struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	err = nh_notifier_info_init(&info, nh);
	if (err)
		return err;

	err = nb->notifier_call(nb, event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static unsigned int nh_dev_hashfn(unsigned int val)
{
	unsigned int mask = NH_DEV_HASHSIZE - 1;

	return (val ^
		(val >> NH_DEV_HASHBITS) ^
		(val >> (NH_DEV_HASHBITS * 2))) & mask;
}
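
/* Editorial example (not in the original file): with NH_DEV_HASHBITS == 8
 * the three low bytes of the ifindex are folded together, e.g.
 * nh_dev_hashfn(0x00030201) == (0x01 ^ 0x02 ^ 0x03) & 0xff == 0x00,
 * spreading devices across the NH_DEV_HASHSIZE (256) buckets.
 */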

static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
	struct net_device *dev = nhi->fib_nhc.nhc_dev;
	struct hlist_head *head;
	unsigned int hash;

	WARN_ON(!dev);

	hash = nh_dev_hashfn(dev->ifindex);
	head = &net->nexthop.devhash[hash];
	hlist_add_head(&nhi->dev_hash, head);
}

static void nexthop_free_group(struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	nhg = rcu_dereference_raw(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		WARN_ON(!list_empty(&nhge->nh_list));
		nexthop_put(nhge->nh);
	}

	WARN_ON(nhg->spare == nhg);

	if (nhg->resilient)
		vfree(rcu_dereference_raw(nhg->res_table));

	kfree(nhg->spare);
	kfree(nhg);
}

static void nexthop_free_single(struct nexthop *nh)
{
	struct nh_info *nhi;

	nhi = rcu_dereference_raw(nh->nh_info);
	switch (nhi->family) {
	case AF_INET:
		fib_nh_release(nh->net, &nhi->fib_nh);
		break;
	case AF_INET6:
		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
		break;
	}
	kfree(nhi);
}

void nexthop_free_rcu(struct rcu_head *head)
{
	struct nexthop *nh = container_of(head, struct nexthop, rcu);

	if (nh->is_group)
		nexthop_free_group(nh);
	else
		nexthop_free_single(nh);

	kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);

static struct nexthop *nexthop_alloc(void)
{
	struct nexthop *nh;

	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
	if (nh) {
		INIT_LIST_HEAD(&nh->fi_list);
		INIT_LIST_HEAD(&nh->f6i_list);
		INIT_LIST_HEAD(&nh->grp_list);
		INIT_LIST_HEAD(&nh->fdb_list);
	}
	return nh;
}

static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
	struct nh_group *nhg;

	nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
	if (nhg)
		nhg->num_nh = num_nh;

	return nhg;
}

static void nh_res_table_upkeep_dw(struct work_struct *work);

static struct nh_res_table *
nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
{
	const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
	struct nh_res_table *res_table;
	unsigned long size;

	size = struct_size(res_table, nh_buckets, num_nh_buckets);
	res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
	if (!res_table)
		return NULL;

	res_table->net = net;
	res_table->nhg_id = nhg_id;
	INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
	INIT_LIST_HEAD(&res_table->uw_nh_entries);
	res_table->idle_timer = cfg->nh_grp_res_idle_timer;
	res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
	res_table->num_nh_buckets = num_nh_buckets;
	return res_table;
}

static void nh_base_seq_inc(struct net *net)
{
	while (++net->nexthop.seq == 0)
		;
}

/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
	struct rb_node **pp, *parent = NULL, *next;

	pp = &net->nexthop.rb_root.rb_node;
	while (1) {
		struct nexthop *nh;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (id < nh->id)
			pp = &next->rb_left;
		else if (id > nh->id)
			pp = &next->rb_right;
		else
			return nh;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);

/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
	u32 id_start = net->nexthop.last_id_allocated;

	while (1) {
		net->nexthop.last_id_allocated++;
		if (net->nexthop.last_id_allocated == id_start)
			break;

		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
			return net->nexthop.last_id_allocated;
	}
	return 0;
}

static void nh_res_time_set_deadline(unsigned long next_time,
				     unsigned long *deadline)
{
	if (time_before(next_time, *deadline))
		*deadline = next_time;
}

static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table)
{
	if (list_empty(&res_table->uw_nh_entries))
		return 0;
	return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since);
}

static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	struct nlattr *nest;

	nest = nla_nest_start(skb, NHA_RES_GROUP);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS,
			res_table->num_nh_buckets) ||
	    nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER,
			jiffies_to_clock_t(res_table->idle_timer)) ||
	    nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER,
			jiffies_to_clock_t(res_table->unbalanced_timer)) ||
	    nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME,
			      nh_res_table_unbalanced_time(res_table),
			      NHA_RES_GROUP_PAD))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nexthop_grp *p;
	size_t len = nhg->num_nh * sizeof(*p);
	struct nlattr *nla;
	u16 group_type = 0;
	int i;

	if (nhg->hash_threshold)
		group_type = NEXTHOP_GRP_TYPE_MPATH;
	else if (nhg->resilient)
		group_type = NEXTHOP_GRP_TYPE_RES;

	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
		goto nla_put_failure;

	nla = nla_reserve(skb, NHA_GROUP, len);
	if (!nla)
		goto nla_put_failure;

	p = nla_data(nla);
	for (i = 0; i < nhg->num_nh; ++i) {
		p->id = nhg->nh_entries[i].nh->id;
		p->weight = nhg->nh_entries[i].weight - 1;
		p += 1;
	}

	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
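
/* Editorial note (not in the original file): struct nexthop_grp carries
 * the weight off by one on the wire: a stored weight of 1..255 is dumped
 * as 0..254 here, and nexthop_create_group() adds 1 back when reading
 * user input (nh_check_attr_group() rejects wire weights above 254).
 */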

static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
			int event, u32 portid, u32 seq, unsigned int nlflags)
{
	struct fib6_nh *fib6_nh;
	struct fib_nh *fib_nh;
	struct nlmsghdr *nlh;
	struct nh_info *nhi;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = nh->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
		if (nla_put_nh_group(skb, nhg))
			goto nla_put_failure;
		goto out;
	}

	nhi = rtnl_dereference(nh->nh_info);
	nhm->nh_family = nhi->family;
	if (nhi->reject_nh) {
		if (nla_put_flag(skb, NHA_BLACKHOLE))
			goto nla_put_failure;
		goto out;
	} else if (nhi->fdb_nh) {
		if (nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
	} else {
		const struct net_device *dev;

		dev = nhi->fib_nhc.nhc_dev;
		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
			goto nla_put_failure;
	}

	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
	switch (nhi->family) {
	case AF_INET:
		fib_nh = &nhi->fib_nh;
		if (fib_nh->fib_nh_gw_family &&
		    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		break;

	case AF_INET6:
		fib6_nh = &nhi->fib6_nh;
		if (fib6_nh->fib_nh_gw_family &&
		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
			goto nla_put_failure;
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate &&
	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg)
{
	return nla_total_size(0) +	/* NHA_RES_GROUP */
		nla_total_size(2) +	/* NHA_RES_GROUP_BUCKETS */
		nla_total_size(4) +	/* NHA_RES_GROUP_IDLE_TIMER */
		nla_total_size(4) +	/* NHA_RES_GROUP_UNBALANCED_TIMER */
		nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */
}

static size_t nh_nlmsg_size_grp(struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
	size_t tot = nla_total_size(sz) +
		nla_total_size(2); /* NHA_GROUP_TYPE */

	if (nhg->resilient)
		tot += nh_nlmsg_size_grp_res(nhg);

	return tot;
}

static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
	size_t sz;

	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
	 * are mutually exclusive
	 */
	sz = nla_total_size(4);  /* NHA_OIF */

	switch (nhi->family) {
	case AF_INET:
		if (nhi->fib_nh.fib_nh_gw_family)
			sz += nla_total_size(4);  /* NHA_GATEWAY */
		break;

	case AF_INET6:
		/* NHA_GATEWAY */
		if (nhi->fib6_nh.fib_nh_gw_family)
			sz += nla_total_size(sizeof(const struct in6_addr));
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate) {
		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
	}

	return sz;
}

static size_t nh_nlmsg_size(struct nexthop *nh)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));

	sz += nla_total_size(4); /* NHA_ID */

	if (nh->is_group)
		sz += nh_nlmsg_size_grp(nh);
	else
		sz += nh_nlmsg_size_single(nh);

	return sz;
}

static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
	if (!skb)
		goto errout;

	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}

static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
{
	return (unsigned long)atomic_long_read(&bucket->used_time);
}

static unsigned long
nh_res_bucket_idle_point(const struct nh_res_table *res_table,
			 const struct nh_res_bucket *bucket,
			 unsigned long now)
{
	unsigned long time = nh_res_bucket_used_time(bucket);

	/* Bucket was not used since it was migrated. The idle time is now. */
	if (time == bucket->migrated_time)
		return now;

	return time + res_table->idle_timer;
}

static unsigned long
nh_res_table_unb_point(const struct nh_res_table *res_table)
{
	return res_table->unbalanced_since + res_table->unbalanced_timer;
}

static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
				   struct nh_res_bucket *bucket)
{
	unsigned long now = jiffies;

	atomic_long_set(&bucket->used_time, (long)now);
	bucket->migrated_time = now;
}

static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
{
	atomic_long_set(&bucket->used_time, (long)jiffies);
}

static clock_t nh_res_bucket_idle_time(const struct nh_res_bucket *bucket)
{
	unsigned long used_time = nh_res_bucket_used_time(bucket);

	return jiffies_delta_to_clock_t(jiffies - used_time);
}
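
/* Editorial note (not in the original file): used_time is refreshed on
 * every lookup via nh_res_bucket_set_busy(), while migrated_time is only
 * written on migration. A bucket whose used_time still equals its
 * migrated_time has therefore seen no traffic since the last migration,
 * which nh_res_bucket_idle_point() treats as "idle right now".
 */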

static int nh_fill_res_bucket(struct sk_buff *skb, struct nexthop *nh,
			      struct nh_res_bucket *bucket, u16 bucket_index,
			      int event, u32 portid, u32 seq,
			      unsigned int nlflags,
			      struct netlink_ext_ack *extack)
{
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nlmsghdr *nlh;
	struct nlattr *nest;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = bucket->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	nest = nla_nest_start(skb, NHA_RES_BUCKET);
	if (!nest)
		goto nla_put_failure;

	if (nla_put_u16(skb, NHA_RES_BUCKET_INDEX, bucket_index) ||
	    nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) ||
	    nla_put_u64_64bit(skb, NHA_RES_BUCKET_IDLE_TIME,
			      nh_res_bucket_idle_time(bucket),
			      NHA_RES_BUCKET_PAD))
		goto nla_put_failure_nest;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_nest:
	nla_nest_cancel(skb, nest);
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void nexthop_bucket_notify(struct nh_res_table *res_table,
				  u16 bucket_index)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nexthop *nh = nhge->nh_parent;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto errout;

	err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
				 RTM_NEWNEXTHOPBUCKET, 0, 0, NLM_F_REPLACE,
				 NULL);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
}

static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
			   bool *is_fdb, struct netlink_ext_ack *extack)
{
	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		/* Nesting groups within groups is not supported. */
		if (nhg->hash_threshold) {
			NL_SET_ERR_MSG(extack,
				       "Hash-threshold group can not be a nexthop within a group");
			return false;
		}
		if (nhg->resilient) {
			NL_SET_ERR_MSG(extack,
				       "Resilient group can not be a nexthop within a group");
			return false;
		}
		*is_fdb = nhg->fdb_nh;
	} else {
		struct nh_info *nhi = rtnl_dereference(nh->nh_info);

		if (nhi->reject_nh && npaths > 1) {
			NL_SET_ERR_MSG(extack,
				       "Blackhole nexthop can not be used in a group with more than 1 path");
			return false;
		}
		*is_fdb = nhi->fdb_nh;
	}

	return true;
}

static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
				   struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	nhi = rtnl_dereference(nh->nh_info);

	if (!nhi->fdb_nh) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
		return -EINVAL;
	}

	if (*nh_family == AF_UNSPEC) {
		*nh_family = nhi->family;
	} else if (*nh_family != nhi->family) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
		return -EINVAL;
	}

	return 0;
}

static int nh_check_attr_group(struct net *net,
			       struct nlattr *tb[], size_t tb_size,
			       u16 nh_grp_type, struct netlink_ext_ack *extack)
{
	unsigned int len = nla_len(tb[NHA_GROUP]);
	u8 nh_family = AF_UNSPEC;
	struct nexthop_grp *nhg;
	unsigned int i, j;
	u8 nhg_fdb = 0;

	if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid length for nexthop group attribute");
		return -EINVAL;
	}

	/* convert len to number of nexthop ids */
	len /= sizeof(*nhg);

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		if (nhg[i].resvd1 || nhg[i].resvd2) {
			NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0");
			return -EINVAL;
		}
		if (nhg[i].weight > 254) {
			NL_SET_ERR_MSG(extack, "Invalid value for weight");
			return -EINVAL;
		}
		for (j = i + 1; j < len; ++j) {
			if (nhg[i].id == nhg[j].id) {
				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
				return -EINVAL;
			}
		}
	}

	if (tb[NHA_FDB])
		nhg_fdb = 1;
	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		struct nexthop *nh;
		bool is_fdb_nh;

		nh = nexthop_find_by_id(net, nhg[i].id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
			return -EINVAL;

		if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
			return -EINVAL;

		if (!nhg_fdb && is_fdb_nh) {
			NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
			return -EINVAL;
		}
	}
	for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) {
		if (!tb[i])
			continue;
		switch (i) {
		case NHA_FDB:
			continue;
		case NHA_RES_GROUP:
			if (nh_grp_type == NEXTHOP_GRP_TYPE_RES)
				continue;
			break;
		}
		NL_SET_ERR_MSG(extack,
			       "No other attributes can be set in nexthop groups");
		return -EINVAL;
	}

	return 0;
}

static bool ipv6_good_nh(const struct fib6_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool ipv4_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
				      (__force u32)nh->fib_nh_gw4);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool nexthop_is_good_nh(const struct nexthop *nh)
{
	struct nh_info *nhi = rcu_dereference(nh->nh_info);

	switch (nhi->family) {
	case AF_INET:
		return ipv4_good_nh(&nhi->fib_nh);
	case AF_INET6:
		return ipv6_good_nh(&nhi->fib6_nh);
	}

	return false;
}
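
/* Editorial note (not in the original file): "good" means the neighbour is
 * in a NUD_VALID state. The default when no neighbour entry exists is
 * NUD_REACHABLE, i.e. a nexthop with no cached neighbour is still
 * considered good; only an existing entry in a non-valid state (e.g.
 * NUD_FAILED, NUD_INCOMPLETE) disqualifies it.
 */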

static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
{
	int i;

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		return nhge->nh;
	}

	WARN_ON_ONCE(1);
	return NULL;
}

static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
{
	struct nexthop *rc = NULL;
	int i;

	if (nhg->fdb_nh)
		return nexthop_select_path_fdb(nhg, hash);

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		/* nexthops are always checked for being good; this does
		 * not rely on a sysctl for the behavior
		 */
		if (!nexthop_is_good_nh(nhge->nh))
			continue;

		if (!rc)
			rc = nhge->nh;

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		return nhge->nh;
	}

	return rc ? : nhg->nh_entries[0].nh;
}
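
/* Editorial example (not in the original file): hash-threshold (in the
 * style of RFC 2992) compares the 31-bit flow hash against cumulative
 * per-entry upper bounds. For two equal-weight entries the bounds are
 * 0x3fffffff and 0x7fffffff: hashes at or below the first bound pick
 * entry 0, the rest pick entry 1. "Bad" (unreachable) nexthops are
 * skipped, with the first good entry kept as a fallback.
 */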

static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
{
	struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
	u16 bucket_index = hash % res_table->num_nh_buckets;
	struct nh_res_bucket *bucket;
	struct nh_grp_entry *nhge;

	/* nexthop_select_path() is expected to return a non-NULL value, so
	 * skip protocol validation and just hand out whatever there is.
	 */
	bucket = &res_table->nh_buckets[bucket_index];
	nh_res_bucket_set_busy(bucket);
	nhge = rcu_dereference(bucket->nh_entry);
	return nhge->nh;
}
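
/* Editorial example (not in the original file): with a 64-bucket table a
 * flow hash of 70 maps to bucket 70 % 64 == 6, and the flow keeps using
 * that bucket's nexthop until upkeep migrates the bucket. This is what
 * makes resilient groups resilient: membership changes only move buckets,
 * never the hash-to-bucket mapping.
 */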

struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
{
	struct nh_group *nhg;

	if (!nh->is_group)
		return nh;

	nhg = rcu_dereference(nh->nh_grp);
	if (nhg->hash_threshold)
		return nexthop_select_path_hthr(nhg, hash);
	else if (nhg->resilient)
		return nexthop_select_path_res(nhg, hash);

	/* Unreachable. */
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_select_path);

int nexthop_for_each_fib6_nh(struct nexthop *nh,
			     int (*cb)(struct fib6_nh *nh, void *arg),
			     void *arg)
{
	struct nh_info *nhi;
	int err;

	if (nh->is_group) {
		struct nh_group *nhg;
		int i;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		for (i = 0; i < nhg->num_nh; i++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[i];

			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
			err = cb(&nhi->fib6_nh, arg);
			if (err)
				return err;
		}
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		err = cb(&nhi->fib6_nh, arg);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);

static int check_src_addr(const struct in6_addr *saddr,
			  struct netlink_ext_ack *extack)
{
	if (!ipv6_addr_any(saddr)) {
		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
		return -EINVAL;
	}
	return 0;
}

int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
		       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	bool is_fdb_nh;

	/* fib6_src is unique to a fib6_info and limits the ability to cache
	 * routes in fib6_nh within a nexthop that is potentially shared
	 * across multiple fib entries. If the config wants to use source
	 * routing it can not use nexthop objects. mlxsw also does not allow
	 * fib6_src on routes.
	 */
	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
		return -EINVAL;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->has_v4)
			goto no_v4_nh;
		is_fdb_nh = nhg->fdb_nh;
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->family == AF_INET)
			goto no_v4_nh;
		is_fdb_nh = nhi->fdb_nh;
	}

	if (is_fdb_nh) {
		NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
		return -EINVAL;
	}

	return 0;
no_v4_nh:
	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(fib6_check_nexthop);

/* if existing nexthop has ipv6 routes linked to it, need
 * to verify this new spec works with ipv6
 */
static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
			      struct netlink_ext_ack *extack)
{
	struct fib6_info *f6i;

	if (list_empty(&old->f6i_list))
		return 0;

	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
			return -EINVAL;
	}

	return fib6_check_nexthop(new, NULL, extack);
}

static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
			       struct netlink_ext_ack *extack)
{
	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
		NL_SET_ERR_MSG(extack,
			       "Route with host scope can not have a gateway");
		return -EINVAL;
	}

	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
		return -EINVAL;
	}

	return 0;
}

/* Invoked by fib add code to verify nexthop by id is ok with
 * config for prefix; parts of fib_check_nh not done when nexthop
 * object is used.
 */
int fib_check_nexthop(struct nexthop *nh, u8 scope,
		      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	int err = 0;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}

		if (scope == RT_SCOPE_HOST) {
			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
			err = -EINVAL;
			goto out;
		}

		/* all nexthops in a group have the same scope */
		nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
		err = nexthop_check_scope(nhi, scope, extack);
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}
		err = nexthop_check_scope(nhi, scope, extack);
	}

out:
	return err;
}

static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
			     struct netlink_ext_ack *extack)
{
	struct fib_info *fi;

	list_for_each_entry(fi, &old->fi_list, nh_list) {
		int err;

		err = fib_check_nexthop(new, fi->fib_scope, extack);
		if (err)
			return err;
	}
	return 0;
}

static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets == nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets > nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets < nhge->res.wants_buckets;
}

static bool nh_res_table_is_balanced(const struct nh_res_table *res_table)
{
	return list_empty(&res_table->uw_nh_entries);
}

static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket)
{
	struct nh_grp_entry *nhge;

	if (bucket->occupied) {
		nhge = nh_res_dereference(bucket->nh_entry);
		nhge->res.count_buckets--;
		bucket->occupied = false;
	}
}

static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket,
				 struct nh_grp_entry *nhge)
{
	nh_res_bucket_unset_nh(bucket);

	bucket->occupied = true;
	rcu_assign_pointer(bucket->nh_entry, nhge);
	nhge->res.count_buckets++;
}

static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table,
					 struct nh_res_bucket *bucket,
					 unsigned long *deadline, bool *force)
{
	unsigned long now = jiffies;
	struct nh_grp_entry *nhge;
	unsigned long idle_point;

	if (!bucket->occupied) {
		/* The bucket is not occupied, its NHGE pointer is either
		 * NULL or obsolete. We _have to_ migrate: set force.
		 */
		*force = true;
		return true;
	}

	nhge = nh_res_dereference(bucket->nh_entry);

	/* If the bucket is populated by an underweight or balanced
	 * nexthop, do not migrate.
	 */
	if (!nh_res_nhge_is_ow(nhge))
		return false;

	/* At this point we know that the bucket is populated with an
	 * overweight nexthop. It needs to be migrated to a new nexthop if
	 * the idle timer or unbalanced timer expired.
	 */

	idle_point = nh_res_bucket_idle_point(res_table, bucket, now);
	if (time_after_eq(now, idle_point)) {
		/* The bucket is idle. We _can_ migrate: unset force. */
		*force = false;
		return true;
	}

	/* Unbalanced timer of 0 means "never force". */
	if (res_table->unbalanced_timer) {
		unsigned long unb_point;

		unb_point = nh_res_table_unb_point(res_table);
		if (time_after(now, unb_point)) {
			/* The bucket is not idle, but the unbalanced timer
			 * expired. We _can_ migrate, but set force anyway,
			 * so that drivers know to ignore activity reports
			 * from the HW.
			 */
			*force = true;
			return true;
		}

		nh_res_time_set_deadline(unb_point, deadline);
	}

	nh_res_time_set_deadline(idle_point, deadline);

	return false;
}
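
/* Editorial summary (not in the original file) of the decision above:
 *
 *	bucket unoccupied			-> migrate, force
 *	owner underweight or balanced		-> keep
 *	owner overweight, bucket idle		-> migrate, don't force
 *	owner overweight, unbalanced timer up	-> migrate, force
 *	otherwise				-> keep, refine deadline
 */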

static bool nh_res_bucket_migrate(struct nh_res_table *res_table,
				  u16 bucket_index, bool notify,
				  bool notify_nl, bool force)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *new_nhge;
	struct netlink_ext_ack extack;
	int err;

	new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries,
					    struct nh_grp_entry,
					    res.uw_nh_entry);
	if (WARN_ON_ONCE(!new_nhge))
		/* If this function is called, "bucket" is either not
		 * occupied, or it belongs to a next hop that is
		 * overweight. In either case, there ought to be a
		 * corresponding underweight next hop.
		 */
		return false;

	if (notify) {
		struct nh_grp_entry *old_nhge;

		old_nhge = nh_res_dereference(bucket->nh_entry);
		err = call_nexthop_res_bucket_notifiers(res_table->net,
							res_table->nhg_id,
							bucket_index, force,
							old_nhge->nh,
							new_nhge->nh, &extack);
		if (err) {
			pr_err_ratelimited("%s\n", extack._msg);
			if (!force)
				return false;
			/* It is not possible to veto a forced replacement, so
			 * just clear the hardware flags from the nexthop
			 * bucket to indicate to user space that this bucket is
			 * not correctly populated in hardware.
			 */
			bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
		}
	}

	nh_res_bucket_set_nh(bucket, new_nhge);
	nh_res_bucket_set_idle(res_table, bucket);

	if (notify_nl)
		nexthop_bucket_notify(res_table, bucket_index);

	if (nh_res_nhge_is_balanced(new_nhge))
		list_del(&new_nhge->res.uw_nh_entry);
	return true;
}

#define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL (HZ / 2)

static void nh_res_table_upkeep(struct nh_res_table *res_table,
				bool notify, bool notify_nl)
{
	unsigned long now = jiffies;
	unsigned long deadline;
	u16 i;

	/* Deadline is the next time that upkeep should be run. It is the
	 * earliest time at which one of the buckets might be migrated.
	 * Start at the most pessimistic estimate: either unbalanced_timer
	 * from now, or if there is none, idle_timer from now. For each
	 * encountered time point, call nh_res_time_set_deadline() to
	 * refine the estimate.
	 */
	if (res_table->unbalanced_timer)
		deadline = now + res_table->unbalanced_timer;
	else
		deadline = now + res_table->idle_timer;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		bool force;

		if (nh_res_bucket_should_migrate(res_table, bucket,
						 &deadline, &force)) {
			if (!nh_res_bucket_migrate(res_table, i, notify,
						   notify_nl, force)) {
				unsigned long idle_point;

				/* A driver can override the migration
				 * decision if the HW reports that the
				 * bucket is actually not idle. Therefore
				 * remark the bucket as busy again and
				 * update the deadline.
				 */
				nh_res_bucket_set_busy(bucket);
				idle_point = nh_res_bucket_idle_point(res_table,
								      bucket,
								      now);
				nh_res_time_set_deadline(idle_point, &deadline);
			}
		}
	}

	/* If the group is still unbalanced, schedule the next upkeep to
	 * either the deadline computed above, or the minimum deadline,
	 * whichever comes later.
	 */
	if (!nh_res_table_is_balanced(res_table)) {
		unsigned long now = jiffies;
		unsigned long min_deadline;

		min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL;
		if (time_before(deadline, min_deadline))
			deadline = min_deadline;

		queue_delayed_work(system_power_efficient_wq,
				   &res_table->upkeep_dw, deadline - now);
	}
}
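
/* Editorial example (not in the original file): with the default
 * idle_timer of 120 * HZ and no unbalanced_timer, the first pass starts
 * from a deadline 120s out and pulls it in to the earliest idle point of
 * any bucket that still has to migrate; while the group stays unbalanced,
 * reruns are spaced at least NH_RES_UPKEEP_DW_MINIMUM_INTERVAL (HZ / 2)
 * apart.
 */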

static void nh_res_table_upkeep_dw(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct nh_res_table *res_table;

	res_table = container_of(dw, struct nh_res_table, upkeep_dw);
	nh_res_table_upkeep(res_table, true, true);
}

static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
{
	cancel_delayed_work_sync(&res_table->upkeep_dw);
}

static void nh_res_group_rebalance(struct nh_group *nhg,
				   struct nh_res_table *res_table)
{
	int prev_upper_bound = 0;
	int total = 0;
	int w = 0;
	int i;

	INIT_LIST_HEAD(&res_table->uw_nh_entries);

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		int upper_bound;

		w += nhge->weight;
		upper_bound = DIV_ROUND_CLOSEST(res_table->num_nh_buckets * w,
						total);
		nhge->res.wants_buckets = upper_bound - prev_upper_bound;
		prev_upper_bound = upper_bound;

		if (nh_res_nhge_is_uw(nhge)) {
			if (list_empty(&res_table->uw_nh_entries))
				res_table->unbalanced_since = jiffies;
			list_add(&nhge->res.uw_nh_entry,
				 &res_table->uw_nh_entries);
		}
	}
}
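
/* Editorial worked example (not in the original file): weights {1, 3} on
 * an 8-bucket table give total = 4 and cumulative upper bounds
 * DIV_ROUND_CLOSEST(8 * 1, 4) = 2 and DIV_ROUND_CLOSEST(8 * 4, 4) = 8,
 * so wants_buckets becomes 2 and 6. Entries holding fewer buckets than
 * they want are queued on uw_nh_entries for upkeep to fill.
 */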

/* Migrate buckets in res_table so that they reference NHGE's from NHG with
 * the right NH ID. Set those buckets that do not have a corresponding NHGE
 * entry in NHG as not occupied.
 */
static void nh_res_table_migrate_buckets(struct nh_res_table *res_table,
					 struct nh_group *nhg)
{
	u16 i;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		u32 id = rtnl_dereference(bucket->nh_entry)->nh->id;
		bool found = false;
		int j;

		for (j = 0; j < nhg->num_nh; j++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[j];

			if (nhge->nh->id == id) {
				nh_res_bucket_set_nh(bucket, nhge);
				found = true;
				break;
			}
		}

		if (!found)
			nh_res_bucket_unset_nh(bucket);
	}
}

static void replace_nexthop_grp_res(struct nh_group *oldg,
				    struct nh_group *newg)
{
	/* For NH group replacement, the new NHG might only have a stub
	 * hash table with 0 buckets, because the number of buckets was not
	 * specified. For NH removal, oldg and newg both reference the same
	 * res_table. So in any case, in the following, we want to work
	 * with oldg->res_table.
	 */
	struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table);
	unsigned long prev_unbalanced_since = old_res_table->unbalanced_since;
	bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries);

	nh_res_table_cancel_upkeep(old_res_table);
	nh_res_table_migrate_buckets(old_res_table, newg);
	nh_res_group_rebalance(newg, old_res_table);
	if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries))
		old_res_table->unbalanced_since = prev_unbalanced_since;
	nh_res_table_upkeep(old_res_table, true, false);
}

static void nh_hthr_group_rebalance(struct nh_group *nhg)
{
	u64 total = 0;
	u64 w = 0;
	int i;

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		int upper_bound;

		w += nhge->weight;
		upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
		atomic_set(&nhge->hthr.upper_bound, upper_bound);
	}
}
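
/* Editorial worked example (not in the original file): weights {1, 2}
 * give total = 3 and cumulative bounds of roughly 1/3 and 3/3 of the
 * 31-bit space: DIV_ROUND_CLOSEST_ULL(1ULL << 31, 3) - 1 ~= 0x2aaaaaaa
 * and DIV_ROUND_CLOSEST_ULL(3ULL << 31, 3) - 1 == 0x7fffffff.
 */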

static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
				struct nl_info *nlinfo)
{
	struct nh_grp_entry *nhges, *new_nhges;
	struct nexthop *nhp = nhge->nh_parent;
	struct netlink_ext_ack extack;
	struct nexthop *nh = nhge->nh;
	struct nh_group *nhg, *newg;
	int i, j, err;

	WARN_ON(!nh);

	nhg = rtnl_dereference(nhp->nh_grp);
	newg = nhg->spare;

	/* last entry, keep it visible and remove the parent */
	if (nhg->num_nh == 1) {
		remove_nexthop(net, nhp, nlinfo);
		return;
	}

	newg->has_v4 = false;
	newg->is_multipath = nhg->is_multipath;
	newg->hash_threshold = nhg->hash_threshold;
	newg->resilient = nhg->resilient;
	newg->fdb_nh = nhg->fdb_nh;
	newg->num_nh = nhg->num_nh;

	/* copy old entries to new except the one getting removed */
	nhges = nhg->nh_entries;
	new_nhges = newg->nh_entries;
	for (i = 0, j = 0; i < nhg->num_nh; ++i) {
		struct nh_info *nhi;

		/* current nexthop getting removed */
		if (nhg->nh_entries[i].nh == nh) {
			newg->num_nh--;
			continue;
		}

		nhi = rtnl_dereference(nhges[i].nh->nh_info);
		if (nhi->family == AF_INET)
			newg->has_v4 = true;

		list_del(&nhges[i].nh_list);
		new_nhges[j].nh_parent = nhges[i].nh_parent;
		new_nhges[j].nh = nhges[i].nh;
		new_nhges[j].weight = nhges[i].weight;
		list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
		j++;
	}

	if (newg->hash_threshold)
		nh_hthr_group_rebalance(newg);
	else if (newg->resilient)
		replace_nexthop_grp_res(nhg, newg);

	rcu_assign_pointer(nhp->nh_grp, newg);

	list_del(&nhge->nh_list);
	nexthop_put(nhge->nh);

	/* Removal of a NH from a resilient group is notified through
	 * bucket notifications.
	 */
	if (newg->hash_threshold) {
		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
					     &extack);
		if (err)
			pr_err("%s\n", extack._msg);
	}

	if (nlinfo)
		nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
}

static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
				       struct nl_info *nlinfo)
{
	struct nh_grp_entry *nhge, *tmp;

	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
		remove_nh_grp_entry(net, nhge, nlinfo);

	/* make sure all see the newly published array before releasing rtnl */
	synchronize_net();
}

static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
{
	struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
	struct nh_res_table *res_table;
	int i, num_nh = nhg->num_nh;

	for (i = 0; i < num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (WARN_ON(!nhge->nh))
			continue;

		list_del_init(&nhge->nh_list);
	}

	if (nhg->resilient) {
		res_table = rtnl_dereference(nhg->res_table);
		nh_res_table_cancel_upkeep(res_table);
	}
}

/* not called for nexthop replace */
static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
{
	struct fib6_info *f6i, *tmp;
	bool do_flush = false;
	struct fib_info *fi;

	list_for_each_entry(fi, &nh->fi_list, nh_list) {
		fi->fib_flags |= RTNH_F_DEAD;
		do_flush = true;
	}
	if (do_flush)
		fib_flush(net);

	/* ip6_del_rt removes the entry from this list hence the _safe */
	list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
		/* __ip6_del_rt does a release, so do a hold here */
		fib6_info_hold(f6i);
		ipv6_stub->ip6_del_rt(net, f6i,
				      !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode));
	}
}

static void __remove_nexthop(struct net *net, struct nexthop *nh,
			     struct nl_info *nlinfo)
{
	__remove_nexthop_fib(net, nh);

	if (nh->is_group) {
		remove_nexthop_group(nh, nlinfo);
	} else {
		struct nh_info *nhi;

		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fib_nhc.nhc_dev)
			hlist_del(&nhi->dev_hash);

		remove_nexthop_from_groups(net, nh, nlinfo);
	}
}

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo)
{
	call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);

	/* remove from the tree */
	rb_erase(&nh->rb_node, &net->nexthop.rb_root);

	if (nlinfo)
		nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);

	__remove_nexthop(net, nh, nlinfo);
	nh_base_seq_inc(net);

	nexthop_put(nh);
}

/* if any FIB entries reference this nexthop, any dst entries
 * need to be regenerated
 */
static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
			      struct nexthop *replaced_nh)
{
	struct fib6_info *f6i;
	struct nh_group *nhg;
	int i;

	if (!list_empty(&nh->fi_list))
		rt_cache_flush(net);

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_update_sernum(net, f6i);

	/* if an IPv6 group was replaced, we have to release all old
	 * dsts to make sure all refcounts are released
	 */
	if (!replaced_nh->is_group)
		return;

	nhg = rtnl_dereference(replaced_nh->nh_grp);
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);

		if (nhi->family == AF_INET6)
			ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
	}
}

static int replace_nexthop_grp(struct net *net, struct nexthop *old,
			       struct nexthop *new, const struct nh_config *cfg,
			       struct netlink_ext_ack *extack)
{
	struct nh_res_table *tmp_table = NULL;
	struct nh_res_table *new_res_table;
	struct nh_res_table *old_res_table;
	struct nh_group *oldg, *newg;
	int i, err;

	if (!new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
		return -EINVAL;
	}

	oldg = rtnl_dereference(old->nh_grp);
	newg = rtnl_dereference(new->nh_grp);

	if (newg->hash_threshold != oldg->hash_threshold) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
		return -EINVAL;
	}

	if (newg->hash_threshold) {
		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
					     extack);
		if (err)
			return err;
	} else if (newg->resilient) {
		new_res_table = rtnl_dereference(newg->res_table);
		old_res_table = rtnl_dereference(oldg->res_table);

		/* Accept if num_nh_buckets was not given, but if it was
		 * given, demand that the value be correct.
		 */
		if (cfg->nh_grp_res_has_num_buckets &&
		    cfg->nh_grp_res_num_buckets !=
		    old_res_table->num_nh_buckets) {
			NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
			return -EINVAL;
		}

		/* Emit a pre-replace notification so that listeners could veto
		 * a potentially unsupported configuration. Otherwise,
		 * individual bucket replacement notifications would need to be
		 * vetoed, which is something that should only happen if the
		 * bucket is currently active.
		 */
		err = call_nexthop_res_table_notifiers(net, new, extack);
		if (err)
			return err;

		if (cfg->nh_grp_res_has_idle_timer)
			old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
		if (cfg->nh_grp_res_has_unbalanced_timer)
			old_res_table->unbalanced_timer =
				cfg->nh_grp_res_unbalanced_timer;

		replace_nexthop_grp_res(oldg, newg);

		tmp_table = new_res_table;
		rcu_assign_pointer(newg->res_table, old_res_table);
		rcu_assign_pointer(newg->spare->res_table, old_res_table);
	}

	/* update parents - used by nexthop code for cleanup */
	for (i = 0; i < newg->num_nh; i++)
		newg->nh_entries[i].nh_parent = old;

	rcu_assign_pointer(old->nh_grp, newg);

	/* Make sure concurrent readers are not using 'oldg' anymore. */
	synchronize_net();

	if (newg->resilient) {
		rcu_assign_pointer(oldg->res_table, tmp_table);
		rcu_assign_pointer(oldg->spare->res_table, tmp_table);
	}

	for (i = 0; i < oldg->num_nh; i++)
		oldg->nh_entries[i].nh_parent = new;

	rcu_assign_pointer(new->nh_grp, oldg);

	return 0;
}

static void nh_group_v4_update(struct nh_group *nhg)
{
	struct nh_grp_entry *nhges;
	bool has_v4 = false;
	int i;

	nhges = nhg->nh_entries;
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhges[i].nh->nh_info);
		if (nhi->family == AF_INET)
			has_v4 = true;
	}
	nhg->has_v4 = has_v4;
}

static int replace_nexthop_single_notify_res(struct net *net,
					     struct nh_res_table *res_table,
					     struct nexthop *old,
					     struct nh_info *oldi,
					     struct nh_info *newi,
					     struct netlink_ext_ack *extack)
{
	u32 nhg_id = res_table->nhg_id;
	int err;
	u16 i;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;

		nhge = rtnl_dereference(bucket->nh_entry);
		if (nhge->nh == old) {
			err = __call_nexthop_res_bucket_notifiers(net, nhg_id,
								  i, true,
								  oldi, newi,
								  extack);
			if (err)
				goto err_notify;
		}
	}

	return 0;

err_notify:
	while (i-- > 0) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;

		nhge = rtnl_dereference(bucket->nh_entry);
		if (nhge->nh == old)
			__call_nexthop_res_bucket_notifiers(net, nhg_id, i,
							    true, newi, oldi,
							    extack);
	}
	return err;
}

static int replace_nexthop_single_notify(struct net *net,
					 struct nexthop *group_nh,
					 struct nexthop *old,
					 struct nh_info *oldi,
					 struct nh_info *newi,
					 struct netlink_ext_ack *extack)
{
	struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp);
	struct nh_res_table *res_table;

	if (nhg->hash_threshold) {
		return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE,
					      group_nh, extack);
	} else if (nhg->resilient) {
		res_table = rtnl_dereference(nhg->res_table);
		return replace_nexthop_single_notify_res(net, res_table,
							 old, oldi, newi,
							 extack);
	}

	return -EINVAL;
}

static int replace_nexthop_single(struct net *net, struct nexthop *old,
				  struct nexthop *new,
				  struct netlink_ext_ack *extack)
{
	u8 old_protocol, old_nh_flags;
	struct nh_info *oldi, *newi;
	struct nh_grp_entry *nhge;
	int err;

	if (new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
		return -EINVAL;
	}

	err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
	if (err)
		return err;

	/* Hardware flags were set on 'old' as 'new' is not in the red-black
	 * tree. Therefore, inherit the flags from 'old' to 'new'.
	 */
	new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);

	oldi = rtnl_dereference(old->nh_info);
	newi = rtnl_dereference(new->nh_info);

	newi->nh_parent = old;
	oldi->nh_parent = new;

	old_protocol = old->protocol;
	old_nh_flags = old->nh_flags;

	old->protocol = new->protocol;
	old->nh_flags = new->nh_flags;

	rcu_assign_pointer(old->nh_info, newi);
	rcu_assign_pointer(new->nh_info, oldi);

	/* Send a replace notification for all the groups using the nexthop. */
	list_for_each_entry(nhge, &old->grp_list, nh_list) {
		struct nexthop *nhp = nhge->nh_parent;

		err = replace_nexthop_single_notify(net, nhp, old, oldi, newi,
						    extack);
		if (err)
			goto err_notify;
	}

	/* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
	 * update IPv4 indication in all the groups using the nexthop.
	 */
	if (oldi->family == AF_INET && newi->family == AF_INET6) {
		list_for_each_entry(nhge, &old->grp_list, nh_list) {
			struct nexthop *nhp = nhge->nh_parent;
			struct nh_group *nhg;

			nhg = rtnl_dereference(nhp->nh_grp);
			nh_group_v4_update(nhg);
		}
	}

	return 0;

err_notify:
	rcu_assign_pointer(new->nh_info, newi);
	rcu_assign_pointer(old->nh_info, oldi);
	old->nh_flags = old_nh_flags;
	old->protocol = old_protocol;
	oldi->nh_parent = old;
	newi->nh_parent = new;
	list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
		struct nexthop *nhp = nhge->nh_parent;

		replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL);
	}
	call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
	return err;
}

static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
				     struct nl_info *info)
{
	struct fib6_info *f6i;

	if (!list_empty(&nh->fi_list)) {
		struct fib_info *fi;

		/* expectation is a few fib_info per nexthop and then
		 * a lot of routes per fib_info. So mark the fib_info
		 * and then walk the fib tables once
		 */
		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = true;

		fib_info_notify_update(net, info);

		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = false;
	}

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_rt_update(net, f6i, info);
}

/* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
 * linked to this nexthop and for all groups that the nexthop
 * is a member of
 */
static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
				   struct nl_info *info)
{
	struct nh_grp_entry *nhge;

	__nexthop_replace_notify(net, nh, info);

	list_for_each_entry(nhge, &nh->grp_list, nh_list)
		__nexthop_replace_notify(net, nhge->nh_parent, info);
}

static int replace_nexthop(struct net *net, struct nexthop *old,
			   struct nexthop *new, const struct nh_config *cfg,
			   struct netlink_ext_ack *extack)
{
	bool new_is_reject = false;
	struct nh_grp_entry *nhge;
	int err;

	/* check that existing FIB entries are ok with the
	 * new nexthop definition
	 */
	err = fib_check_nh_list(old, new, extack);
	if (err)
		return err;

	err = fib6_check_nh_list(old, new, extack);
	if (err)
		return err;

	if (!new->is_group) {
		struct nh_info *nhi = rtnl_dereference(new->nh_info);

		new_is_reject = nhi->reject_nh;
	}

	list_for_each_entry(nhge, &old->grp_list, nh_list) {
		/* if new nexthop is a blackhole, any groups using this
		 * nexthop cannot have more than 1 path
		 */
		if (new_is_reject &&
		    nexthop_num_path(nhge->nh_parent) > 1) {
			NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
			return -EINVAL;
		}

		err = fib_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;

		err = fib6_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;
	}

	if (old->is_group)
		err = replace_nexthop_grp(net, old, new, cfg, extack);
	else
		err = replace_nexthop_single(net, old, new, extack);

	if (!err) {
		nh_rt_cache_flush(net, old, new);

		__remove_nexthop(net, new, NULL);
		nexthop_put(new);
	}

	return err;
}

/* called with rtnl_lock held */
static int insert_nexthop(struct net *net, struct nexthop *new_nh,
			  struct nh_config *cfg, struct netlink_ext_ack *extack)
{
	struct rb_node **pp, *parent = NULL, *next;
	struct rb_root *root = &net->nexthop.rb_root;
	bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
	bool create = !!(cfg->nlflags & NLM_F_CREATE);
	u32 new_id = new_nh->id;
	int replace_notify = 0;
	int rc = -EEXIST;

	pp = &root->rb_node;
	while (1) {
		struct nexthop *nh;

		next = *pp;
		if (!next)
			break;

		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (new_id < nh->id) {
			pp = &next->rb_left;
		} else if (new_id > nh->id) {
			pp = &next->rb_right;
		} else if (replace) {
			rc = replace_nexthop(net, nh, new_nh, cfg, extack);
			if (!rc) {
				new_nh = nh; /* send notification with old nh */
				replace_notify = 1;
			}
			goto out;
		} else {
			/* id already exists and not a replace */
			goto out;
		}
	}

	if (replace && !create) {
		NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
		rc = -ENOENT;
		goto out;
	}

	if (new_nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp);
		struct nh_res_table *res_table;

		if (nhg->resilient) {
			res_table = rtnl_dereference(nhg->res_table);

			/* Not passing the number of buckets is OK when
			 * replacing, but not when creating a new group.
			 */
			if (!cfg->nh_grp_res_has_num_buckets) {
				NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion");
				rc = -EINVAL;
				goto out;
			}

			nh_res_group_rebalance(nhg, res_table);

			/* Do not send bucket notifications, we do full
			 * notification below.
			 */
			nh_res_table_upkeep(res_table, false, false);
		}
	}

	rb_link_node_rcu(&new_nh->rb_node, parent, pp);
	rb_insert_color(&new_nh->rb_node, root);

	/* The initial insertion is a full notification for hash-threshold as
	 * well as resilient groups.
	 */
	rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
	if (rc) {
		rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);
		goto out;
	}

	nh_base_seq_inc(net);
	nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
	if (replace_notify &&
	    READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode))
		nexthop_replace_notify(net, new_nh, &cfg->nlinfo);

out:
	return rc;
}
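
/* Editorial note (not in the original file): insert_nexthop() implements
 * the netlink create/replace matrix: an existing ID with NLM_F_REPLACE
 * goes through replace_nexthop(); an existing ID without it fails with
 * the default -EEXIST; NLM_F_REPLACE without NLM_F_CREATE on a missing
 * ID fails with -ENOENT.
 */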

/* remove all nexthops tied to a device being deleted */
static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev != dev)
			continue;

		if (nhi->reject_nh &&
		    (event == NETDEV_DOWN || event == NETDEV_CHANGE))
			continue;

		remove_nexthop(net, nhi->nh_parent, NULL);
	}
}

/* rtnl; called when net namespace is deleted */
static void flush_all_nexthops(struct net *net)
{
	struct rb_root *root = &net->nexthop.rb_root;
	struct rb_node *node;
	struct nexthop *nh;

	while ((node = rb_first(root))) {
		nh = rb_entry(node, struct nexthop, rb_node);
		remove_nexthop(net, nh, NULL);
		cond_resched();
	}
}
static struct nexthop *nexthop_create_group(struct net *net,
					    struct nh_config *cfg)
{
	struct nlattr *grps_attr = cfg->nh_grp;
	struct nexthop_grp *entry = nla_data(grps_attr);
	u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
	struct nh_group *nhg;
	struct nexthop *nh;
	int err;
	int i;

	if (WARN_ON(!num_nh))
		return ERR_PTR(-EINVAL);

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nh->is_group = 1;

	nhg = nexthop_grp_alloc(num_nh);
	if (!nhg) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	/* spare group used for removals */
	nhg->spare = nexthop_grp_alloc(num_nh);
	if (!nhg->spare) {
		kfree(nhg);
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}
	nhg->spare->spare = nhg;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nexthop *nhe;
		struct nh_info *nhi;

		nhe = nexthop_find_by_id(net, entry[i].id);
		if (!nexthop_get(nhe)) {
			err = -ENOENT;
			goto out_no_nh;
		}

		nhi = rtnl_dereference(nhe->nh_info);
		if (nhi->family == AF_INET)
			nhg->has_v4 = true;

		nhg->nh_entries[i].nh = nhe;
		nhg->nh_entries[i].weight = entry[i].weight + 1;
		list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
		nhg->nh_entries[i].nh_parent = nh;
	}

	if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
		nhg->hash_threshold = 1;
		nhg->is_multipath = true;
	} else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) {
		struct nh_res_table *res_table;

		res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg);
		if (!res_table) {
			err = -ENOMEM;
			goto out_no_nh;
		}

		rcu_assign_pointer(nhg->spare->res_table, res_table);
		rcu_assign_pointer(nhg->res_table, res_table);
		nhg->resilient = true;
		nhg->is_multipath = true;
	}

	WARN_ON_ONCE(nhg->hash_threshold + nhg->resilient != 1);

	if (nhg->hash_threshold)
		nh_hthr_group_rebalance(nhg);

	if (cfg->nh_fdb)
		nhg->fdb_nh = 1;

	rcu_assign_pointer(nh->nh_grp, nhg);

	return nh;

out_no_nh:
	for (i--; i >= 0; --i) {
		list_del(&nhg->nh_entries[i].nh_list);
		nexthop_put(nhg->nh_entries[i].nh);
	}

	kfree(nhg->spare);
	kfree(nhg);
	kfree(nh);

	return ERR_PTR(err);
}

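/* Initialize an IPv4 nexthop from the parsed netlink config, reusing
 * the fib_nh machinery for gateway and device validation.
 */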
static int nh_create_ipv4(struct net *net, struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib_nh *fib_nh = &nhi->fib_nh;
	struct fib_config fib_cfg = {
		.fc_oif = cfg->nh_ifindex,
		.fc_gw4 = cfg->gw.ipv4,
		.fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
		.fc_flags = cfg->nh_flags,
		.fc_nlinfo = cfg->nlinfo,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
	};
	u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
	int err;

	err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
	if (err) {
		fib_nh_release(net, fib_nh);
		goto out;
	}

	if (nhi->fdb_nh)
		goto out;

	/* sets nh_dev if successful */
	err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
	if (!err) {
		nh->nh_flags = fib_nh->fib_nh_flags;
		fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
					  !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1);
	} else {
		fib_nh_release(net, fib_nh);
	}
out:
	return err;
}

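/* Initialize an IPv6 nexthop via the ipv6 stub so this code also works
 * when IPv6 is built as a module or disabled.
 */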
static int nh_create_ipv6(struct net *net, struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib6_nh *fib6_nh = &nhi->fib6_nh;
	struct fib6_config fib6_cfg = {
		.fc_table = l3mdev_fib_table(cfg->dev),
		.fc_ifindex = cfg->nh_ifindex,
		.fc_gateway = cfg->gw.ipv6,
		.fc_flags = cfg->nh_flags,
		.fc_nlinfo = cfg->nlinfo,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
		.fc_is_fdb = cfg->nh_fdb,
	};
	int err;

	if (!ipv6_addr_any(&cfg->gw.ipv6))
		fib6_cfg.fc_flags |= RTF_GATEWAY;

	/* sets nh_dev if successful */
	err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
				      extack);
	if (err) {
		/* IPv6 is not enabled, don't call fib6_nh_release */
		if (err == -EAFNOSUPPORT)
			goto out;
		ipv6_stub->fib6_nh_release(fib6_nh);
	} else {
		nh->nh_flags = fib6_nh->fib_nh_flags;
	}
out:
	return err;
}

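/* Create a single (non-group) nexthop from the parsed netlink config. */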
static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
				      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	struct nexthop *nh;
	int err = 0;

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
	if (!nhi) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	nh->nh_flags = cfg->nh_flags;
	nh->net = net;

	nhi->nh_parent = nh;
	nhi->family = cfg->nh_family;
	nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;

	if (cfg->nh_fdb)
		nhi->fdb_nh = 1;

	if (cfg->nh_blackhole) {
		nhi->reject_nh = 1;
		cfg->nh_ifindex = net->loopback_dev->ifindex;
	}

	switch (cfg->nh_family) {
	case AF_INET:
		err = nh_create_ipv4(net, nh, nhi, cfg, extack);
		break;
	case AF_INET6:
		err = nh_create_ipv6(net, nh, nhi, cfg, extack);
		break;
	}

	if (err) {
		kfree(nhi);
		kfree(nh);
		return ERR_PTR(err);
	}

	/* add the entry to the device based hash */
	if (!nhi->fdb_nh)
		nexthop_devhash_add(net, nhi);

	rcu_assign_pointer(nh->nh_info, nhi);

	return nh;
}

/* called with rtnl lock held */
static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct nexthop *nh;
	int err;

	if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
		NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
		return ERR_PTR(-EINVAL);
	}

	if (!cfg->nh_id) {
		cfg->nh_id = nh_find_unused_id(net);
		if (!cfg->nh_id) {
			NL_SET_ERR_MSG(extack, "No unused id");
			return ERR_PTR(-EINVAL);
		}
	}

	if (cfg->nh_grp)
		nh = nexthop_create_group(net, cfg);
	else
		nh = nexthop_create(net, cfg, extack);

	if (IS_ERR(nh))
		return nh;

	refcount_set(&nh->refcnt, 1);
	nh->id = cfg->nh_id;
	nh->protocol = cfg->nh_protocol;
	nh->net = net;

	err = insert_nexthop(net, nh, cfg, extack);
	if (err) {
		__remove_nexthop(net, nh, NULL);
		nexthop_put(nh);
		nh = ERR_PTR(err);
	}

	return nh;
}

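/* Parse a timer attribute given in USER_HZ clock ticks; fall back to
 * @fallback when the attribute is absent.
 */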
static int rtm_nh_get_timer(struct nlattr *attr, unsigned long fallback,
			    unsigned long *timer_p, bool *has_p,
			    struct netlink_ext_ack *extack)
{
	unsigned long timer;
	u32 value;

	if (!attr) {
		*timer_p = fallback;
		*has_p = false;
		return 0;
	}

	value = nla_get_u32(attr);
	timer = clock_t_to_jiffies(value);
	if (timer == ~0UL) {
		NL_SET_ERR_MSG(extack, "Timer value too large");
		return -EINVAL;
	}

	*timer_p = timer;
	*has_p = true;
	return 0;
}

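/* Parse the NHA_RES_GROUP nest: the bucket count and the idle and
 * unbalanced timers of a resilient group.
 */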
static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {};
	int err;

	if (res) {
		err = nla_parse_nested(tb,
				       ARRAY_SIZE(rtm_nh_res_policy_new) - 1,
				       res, rtm_nh_res_policy_new, extack);
		if (err < 0)
			return err;
	}

	if (tb[NHA_RES_GROUP_BUCKETS]) {
		cfg->nh_grp_res_num_buckets =
			nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]);
		cfg->nh_grp_res_has_num_buckets = true;
		if (!cfg->nh_grp_res_num_buckets) {
			NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0");
			return -EINVAL;
		}
	}

	err = rtm_nh_get_timer(tb[NHA_RES_GROUP_IDLE_TIMER],
			       NH_RES_DEFAULT_IDLE_TIMER,
			       &cfg->nh_grp_res_idle_timer,
			       &cfg->nh_grp_res_has_idle_timer,
			       extack);
	if (err)
		return err;

	return rtm_nh_get_timer(tb[NHA_RES_GROUP_UNBALANCED_TIMER],
				NH_RES_DEFAULT_UNBALANCED_TIMER,
				&cfg->nh_grp_res_unbalanced_timer,
				&cfg->nh_grp_res_has_unbalanced_timer,
				extack);
}

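/* Translate an RTM_NEWNEXTHOP request into a nh_config, enforcing the
 * mutual exclusions between group, blackhole, fdb and device/gateway
 * attributes.
 */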
static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
			    struct nlmsghdr *nlh, struct nh_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)];
	int err;

	err = nlmsg_parse(nlh, sizeof(*nhm), tb,
			  ARRAY_SIZE(rtm_nh_policy_new) - 1,
			  rtm_nh_policy_new, extack);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (nhm->resvd || nhm->nh_scope) {
		NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
		goto out;
	}
	if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
		NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
		goto out;
	}

	switch (nhm->nh_family) {
	case AF_INET:
	case AF_INET6:
		break;
	case AF_UNSPEC:
		if (tb[NHA_GROUP])
			break;
		fallthrough;
	default:
		NL_SET_ERR_MSG(extack, "Invalid address family");
		goto out;
	}

	memset(cfg, 0, sizeof(*cfg));
	cfg->nlflags = nlh->nlmsg_flags;
	cfg->nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->nlinfo.nlh = nlh;
	cfg->nlinfo.nl_net = net;

	cfg->nh_family = nhm->nh_family;
	cfg->nh_protocol = nhm->nh_protocol;
	cfg->nh_flags = nhm->nh_flags;

	if (tb[NHA_ID])
		cfg->nh_id = nla_get_u32(tb[NHA_ID]);

	if (tb[NHA_FDB]) {
		if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
		    tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
			goto out;
		}
		if (nhm->nh_flags) {
			NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
			goto out;
		}
		cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
	}

	if (tb[NHA_GROUP]) {
		if (nhm->nh_family != AF_UNSPEC) {
			NL_SET_ERR_MSG(extack, "Invalid family for group");
			goto out;
		}
		cfg->nh_grp = tb[NHA_GROUP];

		cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
		if (tb[NHA_GROUP_TYPE])
			cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);

		if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid group type");
			goto out;
		}
		err = nh_check_attr_group(net, tb, ARRAY_SIZE(tb),
					  cfg->nh_grp_type, extack);
		if (err)
			goto out;

		if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES)
			err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
						       cfg, extack);

		/* no other attributes should be set */
		goto out;
	}

	if (tb[NHA_BLACKHOLE]) {
		if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
		    tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
			NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
			goto out;
		}

		cfg->nh_blackhole = 1;
		err = 0;
		goto out;
	}

	if (!cfg->nh_fdb && !tb[NHA_OIF]) {
		NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
		goto out;
	}

	if (!cfg->nh_fdb && tb[NHA_OIF]) {
		cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
		if (cfg->nh_ifindex)
			cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);

		if (!cfg->dev) {
			NL_SET_ERR_MSG(extack, "Invalid device index");
			goto out;
		} else if (!(cfg->dev->flags & IFF_UP)) {
			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
			err = -ENETDOWN;
			goto out;
		} else if (!netif_carrier_ok(cfg->dev)) {
			NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
			err = -ENETDOWN;
			goto out;
		}
	}

	err = -EINVAL;
	if (tb[NHA_GATEWAY]) {
		struct nlattr *gwa = tb[NHA_GATEWAY];

		switch (cfg->nh_family) {
		case AF_INET:
			if (nla_len(gwa) != sizeof(u32)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv4 = nla_get_be32(gwa);
			break;
		case AF_INET6:
			if (nla_len(gwa) != sizeof(struct in6_addr)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv6 = nla_get_in6_addr(gwa);
			break;
		default:
			NL_SET_ERR_MSG(extack,
				       "Unknown address family for gateway");
			goto out;
		}
	} else {
		/* device only nexthop (no gateway) */
		if (cfg->nh_flags & RTNH_F_ONLINK) {
			NL_SET_ERR_MSG(extack,
				       "ONLINK flag can not be set for nexthop without a gateway");
			goto out;
		}
	}

	if (tb[NHA_ENCAP]) {
		cfg->nh_encap = tb[NHA_ENCAP];

		if (!tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
			goto out;
		}

		cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
		err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
		if (err < 0)
			goto out;

	} else if (tb[NHA_ENCAP_TYPE]) {
		NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
		goto out;
	}

	err = 0;
out:
	return err;
}

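/* Handler for RTM_NEWNEXTHOP. For reference, iproute2 reaches this path
 * with commands along the lines of (illustrative only, not part of this
 * file):
 *
 *	ip nexthop add id 1 via 192.0.2.1 dev eth0
 *	ip nexthop add id 10 group 1/2 type resilient buckets 64
 */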
static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nh_config cfg;
	struct nexthop *nh;
	int err;

	err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
	if (!err) {
		nh = nexthop_add(net, &cfg, extack);
		if (IS_ERR(nh))
			err = PTR_ERR(nh);
	}

	return err;
}

static int __nh_valid_get_del_req(const struct nlmsghdr *nlh,
				  struct nlattr **tb, u32 *id,
				  struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);

	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header");
		return -EINVAL;
	}

	if (!tb[NHA_ID]) {
		NL_SET_ERR_MSG(extack, "Nexthop id is missing");
		return -EINVAL;
	}

	*id = nla_get_u32(tb[NHA_ID]);
	if (!(*id)) {
		NL_SET_ERR_MSG(extack, "Invalid nexthop id");
		return -EINVAL;
	}

	return 0;
}

static int nh_valid_get_del_req(const struct nlmsghdr *nlh, u32 *id,
				struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_get) - 1,
			  rtm_nh_policy_get, extack);
	if (err < 0)
		return err;

	return __nh_valid_get_del_req(nlh, tb, id, extack);
}

static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nl_info nlinfo = {
		.nlh = nlh,
		.nl_net = net,
		.portid = NETLINK_CB(skb).portid,
	};
	struct nexthop *nh;
	int err;
	u32 id;

	err = nh_valid_get_del_req(nlh, &id, extack);
	if (err)
		return err;

	nh = nexthop_find_by_id(net, id);
	if (!nh)
		return -ENOENT;

	remove_nexthop(net, nh, &nlinfo);

	return 0;
}

static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *skb = NULL;
	struct nexthop *nh;
	int err;
	u32 id;

	err = nh_valid_get_del_req(nlh, &id, extack);
	if (err)
		return err;

	err = -ENOBUFS;
	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto out;

	err = -ENOENT;
	nh = nexthop_find_by_id(net, id);
	if (!nh)
		goto errout_free;

	err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto errout_free;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
out:
	return err;
errout_free:
	kfree_skb(skb);
	goto out;
}

struct nh_dump_filter {
	u32 nh_id;
	int dev_idx;
	int master_idx;
	bool group_filter;
	bool fdb_filter;
	u32 res_bucket_nh_id;
};

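/* Return true when @nh must be skipped for this dump based on the
 * group/device/master/family filters in @filter.
 */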
static bool nh_dump_filtered(struct nexthop *nh,
			     struct nh_dump_filter *filter, u8 family)
{
	const struct net_device *dev;
	const struct nh_info *nhi;

	if (filter->group_filter && !nh->is_group)
		return true;

	if (!filter->dev_idx && !filter->master_idx && !family)
		return false;

	if (nh->is_group)
		return true;

	nhi = rtnl_dereference(nh->nh_info);
	if (family && nhi->family != family)
		return true;

	dev = nhi->fib_nhc.nhc_dev;
	if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx))
		return true;

	if (filter->master_idx) {
		struct net_device *master;

		if (!dev)
			return true;

		master = netdev_master_upper_dev_get((struct net_device *)dev);
		if (!master || master->ifindex != filter->master_idx)
			return true;
	}

	return false;
}

static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
			       struct nh_dump_filter *filter,
			       struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm;
	u32 idx;

	if (tb[NHA_OIF]) {
		idx = nla_get_u32(tb[NHA_OIF]);
		if (idx > INT_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid device index");
			return -EINVAL;
		}
		filter->dev_idx = idx;
	}
	if (tb[NHA_MASTER]) {
		idx = nla_get_u32(tb[NHA_MASTER]);
		if (idx > INT_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid master device index");
			return -EINVAL;
		}
		filter->master_idx = idx;
	}
	filter->group_filter = nla_get_flag(tb[NHA_GROUPS]);
	filter->fdb_filter = nla_get_flag(tb[NHA_FDB]);

	nhm = nlmsg_data(nlh);
	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
		return -EINVAL;
	}

	return 0;
}

static int nh_valid_dump_req(const struct nlmsghdr *nlh,
			     struct nh_dump_filter *filter,
			     struct netlink_callback *cb)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_dump) - 1,
			  rtm_nh_policy_dump, cb->extack);
	if (err < 0)
		return err;

	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
}

struct rtm_dump_nh_ctx {
	u32 idx;
};

static struct rtm_dump_nh_ctx *
rtm_dump_nh_ctx(struct netlink_callback *cb)
{
	struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
	return ctx;
}

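/* Walk the nexthop tree in id order and invoke @nh_cb on each entry,
 * resuming from the id stored in @ctx when a previous dump was
 * interrupted mid-way.
 */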
static int rtm_dump_walk_nexthops(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct rb_root *root,
				  struct rtm_dump_nh_ctx *ctx,
				  int (*nh_cb)(struct sk_buff *skb,
					       struct netlink_callback *cb,
					       struct nexthop *nh, void *data),
				  void *data)
{
	struct rb_node *node;
	int s_idx;
	int err;

	s_idx = ctx->idx;
	for (node = rb_first(root); node; node = rb_next(node)) {
		struct nexthop *nh;

		nh = rb_entry(node, struct nexthop, rb_node);
		if (nh->id < s_idx)
			continue;

		ctx->idx = nh->id;
		err = nh_cb(skb, cb, nh, data);
		if (err)
			return err;
	}

	ctx->idx++;
	return 0;
}

static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
			       struct nexthop *nh, void *data)
{
	struct nhmsg *nhm = nlmsg_data(cb->nlh);
	struct nh_dump_filter *filter = data;

	if (nh_dump_filtered(nh, filter, nhm->nh_family))
		return 0;

	return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
			    NETLINK_CB(cb->skb).portid,
			    cb->nlh->nlmsg_seq, NLM_F_MULTI);
}

/* rtnl */
static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
	struct net *net = sock_net(skb->sk);
	struct rb_root *root = &net->nexthop.rb_root;
	struct nh_dump_filter filter = {};
	int err;

	err = nh_valid_dump_req(cb->nlh, &filter, cb);
	if (err < 0)
		return err;

	err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
				     &rtm_dump_nexthop_cb, &filter);
	if (err < 0) {
		if (likely(skb->len))
			err = skb->len;
	}

	cb->seq = net->nexthop.seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
	return err;
}

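/* Look up @id and verify it refers to a resilient nexthop group. */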
static struct nexthop *
nexthop_find_group_resilient(struct net *net, u32 id,
			     struct netlink_ext_ack *extack)
{
	struct nh_group *nhg;
	struct nexthop *nh;

	nh = nexthop_find_by_id(net, id);
	if (!nh)
		return ERR_PTR(-ENOENT);

	if (!nh->is_group) {
		NL_SET_ERR_MSG(extack, "Not a nexthop group");
		return ERR_PTR(-EINVAL);
	}

	nhg = rtnl_dereference(nh->nh_grp);
	if (!nhg->resilient) {
		NL_SET_ERR_MSG(extack, "Nexthop group not of type resilient");
		return ERR_PTR(-EINVAL);
	}

	return nh;
}

static int nh_valid_dump_nhid(struct nlattr *attr, u32 *nh_id_p,
			      struct netlink_ext_ack *extack)
{
	u32 idx;

	if (attr) {
		idx = nla_get_u32(attr);
		if (!idx) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		*nh_id_p = idx;
	} else {
		*nh_id_p = 0;
	}

	return 0;
}

static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh,
				    struct nh_dump_filter *filter,
				    struct netlink_callback *cb)
{
	struct nlattr *res_tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_dump)];
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump_bucket)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_dump_bucket) - 1,
			  rtm_nh_policy_dump_bucket, NULL);
	if (err < 0)
		return err;

	err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack);
	if (err)
		return err;

	if (tb[NHA_RES_BUCKET]) {
		size_t max = ARRAY_SIZE(rtm_nh_res_bucket_policy_dump) - 1;

		err = nla_parse_nested(res_tb, max,
				       tb[NHA_RES_BUCKET],
				       rtm_nh_res_bucket_policy_dump,
				       cb->extack);
		if (err < 0)
			return err;

		err = nh_valid_dump_nhid(res_tb[NHA_RES_BUCKET_NH_ID],
					 &filter->res_bucket_nh_id,
					 cb->extack);
		if (err)
			return err;
	}

	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
}

struct rtm_dump_res_bucket_ctx {
	struct rtm_dump_nh_ctx nh;
	u16 bucket_index;
};

static struct rtm_dump_res_bucket_ctx *
rtm_dump_res_bucket_ctx(struct netlink_callback *cb)
{
	struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
	return ctx;
}

struct rtm_dump_nexthop_bucket_data {
	struct rtm_dump_res_bucket_ctx *ctx;
	struct nh_dump_filter filter;
};

static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
				      struct netlink_callback *cb,
				      struct nexthop *nh,
				      struct rtm_dump_nexthop_bucket_data *dd)
{
	u32 portid = NETLINK_CB(cb->skb).portid;
	struct nhmsg *nhm = nlmsg_data(cb->nlh);
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	u16 bucket_index;
	int err;

	nhg = rtnl_dereference(nh->nh_grp);
	res_table = rtnl_dereference(nhg->res_table);
	for (bucket_index = dd->ctx->bucket_index;
	     bucket_index < res_table->num_nh_buckets;
	     bucket_index++) {
		struct nh_res_bucket *bucket;
		struct nh_grp_entry *nhge;

		bucket = &res_table->nh_buckets[bucket_index];
		nhge = rtnl_dereference(bucket->nh_entry);
		if (nh_dump_filtered(nhge->nh, &dd->filter, nhm->nh_family))
			continue;

		if (dd->filter.res_bucket_nh_id &&
		    dd->filter.res_bucket_nh_id != nhge->nh->id)
			continue;

		dd->ctx->bucket_index = bucket_index;
		err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
					 RTM_NEWNEXTHOPBUCKET, portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 cb->extack);
		if (err)
			return err;
	}

	dd->ctx->bucket_index = 0;

	return 0;
}

static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb,
				      struct netlink_callback *cb,
				      struct nexthop *nh, void *data)
{
	struct rtm_dump_nexthop_bucket_data *dd = data;
	struct nh_group *nhg;

	if (!nh->is_group)
		return 0;

	nhg = rtnl_dereference(nh->nh_grp);
	if (!nhg->resilient)
		return 0;

	return rtm_dump_nexthop_bucket_nh(skb, cb, nh, dd);
}

/* rtnl */
static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct rtm_dump_res_bucket_ctx *ctx = rtm_dump_res_bucket_ctx(cb);
	struct rtm_dump_nexthop_bucket_data dd = { .ctx = ctx };
	struct net *net = sock_net(skb->sk);
	struct nexthop *nh;
	int err;

	err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb);
	if (err)
		return err;

	if (dd.filter.nh_id) {
		nh = nexthop_find_group_resilient(net, dd.filter.nh_id,
						  cb->extack);
		if (IS_ERR(nh))
			return PTR_ERR(nh);
		err = rtm_dump_nexthop_bucket_nh(skb, cb, nh, &dd);
	} else {
		struct rb_root *root = &net->nexthop.rb_root;

		err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh,
					     &rtm_dump_nexthop_bucket_cb, &dd);
	}

	if (err < 0) {
		if (likely(skb->len))
			err = skb->len;
	}

	cb->seq = net->nexthop.seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
	return err;
}

static int nh_valid_get_bucket_req_res_bucket(struct nlattr *res,
					      u16 *bucket_index,
					      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_get)];
	int err;

	err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_bucket_policy_get) - 1,
			       res, rtm_nh_res_bucket_policy_get, extack);
	if (err < 0)
		return err;

	if (!tb[NHA_RES_BUCKET_INDEX]) {
		NL_SET_ERR_MSG(extack, "Bucket index is missing");
		return -EINVAL;
	}

	*bucket_index = nla_get_u16(tb[NHA_RES_BUCKET_INDEX]);
	return 0;
}

static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
				   u32 *id, u16 *bucket_index,
				   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get_bucket)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_get_bucket) - 1,
			  rtm_nh_policy_get_bucket, extack);
	if (err < 0)
		return err;

	err = __nh_valid_get_del_req(nlh, tb, id, extack);
	if (err)
		return err;

	if (!tb[NHA_RES_BUCKET]) {
		NL_SET_ERR_MSG(extack, "Bucket information is missing");
		return -EINVAL;
	}

	err = nh_valid_get_bucket_req_res_bucket(tb[NHA_RES_BUCKET],
						 bucket_index, extack);
	if (err)
		return err;

	return 0;
}

/* rtnl */
static int rtm_get_nexthop_bucket(struct sk_buff *in_skb, struct nlmsghdr *nlh,
				  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nh_res_table *res_table;
	struct sk_buff *skb = NULL;
	struct nh_group *nhg;
	struct nexthop *nh;
	u16 bucket_index;
	int err;
	u32 id;

	err = nh_valid_get_bucket_req(nlh, &id, &bucket_index, extack);
	if (err)
		return err;

	nh = nexthop_find_group_resilient(net, id, extack);
	if (IS_ERR(nh))
		return PTR_ERR(nh);

	nhg = rtnl_dereference(nh->nh_grp);
	res_table = rtnl_dereference(nhg->res_table);
	if (bucket_index >= res_table->num_nh_buckets) {
		NL_SET_ERR_MSG(extack, "Bucket index out of bounds");
		return -ENOENT;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	err = nh_fill_res_bucket(skb, nh, &res_table->nh_buckets[bucket_index],
				 bucket_index, RTM_NEWNEXTHOPBUCKET,
				 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
				 0, extack);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto errout_free;
	}

	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout_free:
	kfree_skb(skb);
	return err;
}

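/* On a device MTU change, propagate the new MTU to the route exceptions
 * cached on that device's IPv4 nexthops.
 */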
static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev == dev) {
			if (nhi->family == AF_INET)
				fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
						   orig_mtu);
		}
	}
}

/* rtnl */
static int nh_netdev_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_info_ext *info_ext;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		nexthop_flush_dev(dev, event);
		break;
	case NETDEV_CHANGE:
		if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
			nexthop_flush_dev(dev, event);
		break;
	case NETDEV_CHANGEMTU:
		info_ext = ptr;
		nexthop_sync_mtu(dev, info_ext->ext.mtu);
		rt_cache_flush(dev_net(dev));
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block nh_netdev_notifier = {
	.notifier_call = nh_netdev_event,
};

static int nexthops_dump(struct net *net, struct notifier_block *nb,
			 enum nexthop_event_type event_type,
			 struct netlink_ext_ack *extack)
{
	struct rb_root *root = &net->nexthop.rb_root;
	struct rb_node *node;
	int err = 0;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct nexthop *nh;

		nh = rb_entry(node, struct nexthop, rb_node);
		err = call_nexthop_notifier(nb, net, event_type, nh, extack);
		if (err)
			break;
	}

	return err;
}

int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
			      struct netlink_ext_ack *extack)
{
	int err;

	rtnl_lock();
	err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
	if (err)
		goto unlock;
	err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
					       nb);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_nexthop_notifier);

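/* A minimal sketch of a consumer (hypothetical driver code, not part of
 * this file): the callback receives a struct nh_notifier_info
 * describing the nexthop being replaced or deleted.
 *
 *	static int my_nh_event(struct notifier_block *nb,
 *			       unsigned long event, void *ptr)
 *	{
 *		struct nh_notifier_info *info = ptr;
 *		int err = 0;
 *
 *		if (event == NEXTHOP_EVENT_REPLACE)
 *			err = my_program_nexthop(info);
 *		return notifier_from_errno(err);
 *	}
 *
 * where my_program_nexthop() stands in for driver-specific offload
 * logic keyed off info->id and info->type.
 */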
int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
						 nb);
	if (err)
		goto unlock;
	nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_nexthop_notifier);

void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
{
	struct nexthop *nexthop;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop)
		goto out;

	nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
	if (offload)
		nexthop->nh_flags |= RTNH_F_OFFLOAD;
	if (trap)
		nexthop->nh_flags |= RTNH_F_TRAP;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_set_hw_flags);

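/* Like nexthop_set_hw_flags(), but for one bucket of a resilient group;
 * used by drivers to report per-bucket offload state.
 */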
void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
				 bool offload, bool trap)
{
	struct nh_res_table *res_table;
	struct nh_res_bucket *bucket;
	struct nexthop *nexthop;
	struct nh_group *nhg;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	/* Fetch the table through rcu_dereference() before touching it,
	 * so the bounds check applies to the same table instance that is
	 * indexed below.
	 */
	res_table = rcu_dereference(nhg->res_table);
	if (bucket_index >= res_table->num_nh_buckets)
		goto out;

	bucket = &res_table->nh_buckets[bucket_index];
	bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
	if (offload)
		bucket->nh_flags |= RTNH_F_OFFLOAD;
	if (trap)
		bucket->nh_flags |= RTNH_F_TRAP;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);

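/* Driver-reported bucket activity: mark every bucket whose bit is set
 * in @activity as busy, so the idle timer does not consider it for
 * migration.
 */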
void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
				     unsigned long *activity)
{
	struct nh_res_table *res_table;
	struct nexthop *nexthop;
	struct nh_group *nhg;
	u16 i;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	/* Instead of silently ignoring some buckets, demand that the sizes
	 * be the same.
	 */
	res_table = rcu_dereference(nhg->res_table);
	if (num_buckets != res_table->num_nh_buckets)
		goto out;

	for (i = 0; i < num_buckets; i++) {
		if (test_bit(i, activity))
			nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
	}

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_res_grp_activity_update);

static void __net_exit nexthop_net_exit_batch(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		flush_all_nexthops(net);
		kfree(net->nexthop.devhash);
	}
	rtnl_unlock();
}

static int __net_init nexthop_net_init(struct net *net)
{
	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;

	net->nexthop.rb_root = RB_ROOT;
	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
	if (!net->nexthop.devhash)
		return -ENOMEM;
	BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);

	return 0;
}

static struct pernet_operations nexthop_net_ops = {
	.init = nexthop_net_init,
	.exit_batch = nexthop_net_exit_batch,
};

static int __init nexthop_init(void)
{
	register_pernet_subsys(&nexthop_net_ops);

	register_netdevice_notifier(&nh_netdev_notifier);

	rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
		      rtm_dump_nexthop, 0);

	rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOPBUCKET, rtm_get_nexthop_bucket,
		      rtm_dump_nexthop_bucket, 0);

	return 0;
}
subsys_initcall(nexthop_init);