// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <net/lwtunnel.h>
#include <net/xfrm.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_out);

const struct dst_metrics dst_default_metrics = {
	/* This initializer is needed to force the linker to place this
	 * variable into the const section. Otherwise it might end up in
	 * the bss section. We really want to avoid false sharing on this
	 * variable, and catch any writes on it.
	 */
	.refcnt = REFCOUNT_INIT(1),
};
EXPORT_SYMBOL(dst_default_metrics);

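/* Consequence sketch (illustrative, not part of the original file):
 * because the default table is read-only, a buggy write through a
 * metrics pointer that still aliases it faults instead of silently
 * corrupting shared state:
 *
 *	u32 *m = dst_metrics_ptr(dst);	// may alias dst_default_metrics
 *	m[RTAX_MTU - 1] = 1500;		// faults on the read-only default
 *
 * Writers must therefore go through dst_metrics_write_ptr(), which
 * triggers the copy-on-write path (dst_cow_metrics_generic() below).
 */
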
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags)
{
	dst->dev = dev;
	netdev_hold(dev, &dst->dev_tracker, GFP_ATOMIC);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics.metrics, true);
	dst->expires = 0UL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	dst->lwtstate = NULL;
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
}
EXPORT_SYMBOL(dst_init);

void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc &&
	    !(flags & DST_NOCOUNT) &&
	    dst_entries_get_fast(ops) > ops->gc_thresh)
		ops->gc(ops);

	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;

	dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);

	return dst;
}
EXPORT_SYMBOL(dst_alloc);

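/* Usage sketch (illustrative; "my_dst_ops" and "struct my_route" are
 * hypothetical): protocols wrap dst_alloc() with their own dst_ops,
 * whose kmem_cachep sizes the dst-embedding object:
 *
 *	struct my_route *rt;
 *
 *	rt = dst_alloc(&my_dst_ops, dev, 1, DST_OBSOLETE_NONE, 0);
 *	if (!rt)
 *		return NULL;
 *
 * With initial_ref == 1 the caller owns one reference and must drop
 * it with dst_release() when done.
 */
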
struct dst_entry *dst_destroy(struct dst_entry * dst)
{
	struct dst_entry *child = NULL;

	smp_rmb();

#ifdef CONFIG_XFRM
	if (dst->xfrm) {
		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;

		child = xdst->child;
	}
#endif
	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	netdev_put(dst->dev, &dst->dev_tracker);

	lwtstate_put(dst->lwtstate);

	if (dst->flags & DST_METADATA)
		metadata_dst_free((struct metadata_dst *)dst);
	else
		kmem_cache_free(dst->ops->kmem_cachep, dst);

	/* Release the xfrm bundle's child, if any, unwinding the chain. */
	dst = child;
	if (dst)
		dst_release_immediate(dst);
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);

static void dst_destroy_rcu(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst = dst_destroy(dst);
}

/* Operations to mark dst as DEAD and clean up the net device referenced
 * by dst:
 * 1. put the dst under blackhole interface and discard all tx/rx packets
 *    on this route.
 * 2. release the net_device
 * This function should be called when removing routes from the fib tree
 * in preparation for a NETDEV_DOWN/NETDEV_UNREGISTER event and also to
 * make the next dst_ops->check() fail.
 */
void dst_dev_put(struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;

	dst->obsolete = DST_OBSOLETE_DEAD;
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, true);
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->dev = blackhole_netdev;
	netdev_ref_replace(dev, blackhole_netdev, &dst->dev_tracker,
			   GFP_ATOMIC);
}
EXPORT_SYMBOL(dst_dev_put);

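/* Call-pattern sketch (illustrative; "rt" is a hypothetical route):
 * when a route is unlinked in response to NETDEV_DOWN or
 * NETDEV_UNREGISTER, the owner typically does
 *
 *	dst_dev_put(&rt->dst);	// reparent onto blackhole_netdev
 *	dst_release(&rt->dst);	// drop the table's own reference
 *
 * so that in-flight holders still see a valid, but discarding, dst
 * until their references drain.
 */
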
void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		if (WARN_ONCE(newrefcnt < 0, "dst_release underflow"))
			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
					     __func__, dst, newrefcnt);
		if (!newrefcnt)
			call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu);
	}
}
EXPORT_SYMBOL(dst_release);

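/* Reference pairing sketch (illustrative): every dst_hold() must be
 * balanced by a dst_release():
 *
 *	dst_hold(dst);
 *	// ... use dst ...
 *	dst_release(dst);	// frees via RCU once refcnt reaches 0
 *
 * dst_release_immediate() below skips the RCU grace period and is
 * only safe for dsts that no RCU reader can still observe.
 */
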
void dst_release_immediate(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		if (WARN_ONCE(newrefcnt < 0, "dst_release_immediate underflow"))
			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
					     __func__, dst, newrefcnt);
		if (!newrefcnt)
			dst_destroy(dst);
	}
}
EXPORT_SYMBOL(dst_release_immediate);

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);

	if (p) {
		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
		unsigned long prev, new;

		refcount_set(&p->refcnt, 1);
		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			/* Lost the race: drop our copy, use the winner's. */
			kfree(p);
			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else if (prev & DST_METRICS_REFCOUNTED) {
			if (refcount_dec_and_test(&old_p->refcnt))
				kfree(old_p);
		}
	}
	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
	return (u32 *)p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);

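/* How this COW path is reached (sketch, simplified from net/dst.h):
 * writers call dst_metrics_write_ptr(), which invokes
 * ops->cow_metrics() when the metrics pointer carries
 * DST_METRICS_READ_ONLY:
 *
 *	u32 *p = dst_metrics_write_ptr(dst);
 *	if (p)
 *		p[RTAX_MTU - 1] = new_mtu;
 *
 * Racing writers converge via the cmpxchg() above: the loser frees
 * its copy and writes through the winner's table, or returns NULL if
 * the winner's table is itself read-only.
 */
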
/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	return NULL;
}

struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
					     struct sk_buff *skb,
					     const void *daddr)
{
	return NULL;
}

void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu);

void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_redirect);

unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	/* GNU "?:" shorthand: use the metric if set, else the device MTU. */
	return mtu ? : dst->dev->mtu;
}
EXPORT_SYMBOL_GPL(dst_blackhole_mtu);

static struct dst_ops dst_blackhole_ops = {
	.family		= AF_UNSPEC,
	.neigh_lookup	= dst_blackhole_neigh_lookup,
	.check		= dst_blackhole_check,
	.cow_metrics	= dst_blackhole_cow_metrics,
	.update_pmtu	= dst_blackhole_update_pmtu,
	.redirect	= dst_blackhole_redirect,
	.mtu		= dst_blackhole_mtu,
};

static void __metadata_dst_init(struct metadata_dst *md_dst,
				enum metadata_type type, u8 optslen)
{
	struct dst_entry *dst;

	dst = &md_dst->dst;
	dst_init(dst, &dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE,
		 DST_METADATA | DST_NOCOUNT);
	/* Zero everything behind the embedded dst, including the options. */
	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
	md_dst->type = type;
}

struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
					gfp_t flags)
{
	struct metadata_dst *md_dst;

	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
	if (!md_dst)
		return NULL;

	__metadata_dst_init(md_dst, type, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc);

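/* Usage sketch (illustrative): collect_md-style tunnels attach a
 * metadata dst so a later hop can read the tunnel key off the skb:
 *
 *	struct metadata_dst *md;
 *
 *	md = metadata_dst_alloc(0, METADATA_IP_TUNNEL, GFP_ATOMIC);
 *	if (!md)
 *		return -ENOMEM;
 *	skb_dst_set(skb, &md->dst);
 *
 * optslen reserves space for tunnel options immediately behind the
 * metadata_dst (see the memset in __metadata_dst_init()).
 */
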
void metadata_dst_free(struct metadata_dst *md_dst)
{
#ifdef CONFIG_DST_CACHE
	if (md_dst->type == METADATA_IP_TUNNEL)
		dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
#endif
	if (md_dst->type == METADATA_XFRM)
		dst_release(md_dst->u.xfrm_info.dst_orig);
	kfree(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free);

struct metadata_dst __percpu *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags)
{
	int cpu;
	struct metadata_dst __percpu *md_dst;

	md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
				    __alignof__(struct metadata_dst), flags);
	if (!md_dst)
		return NULL;

	for_each_possible_cpu(cpu)
		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);

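/* Usage sketch (illustrative; "priv" is a hypothetical driver struct):
 * allocate one scratch metadata dst per CPU at init time,
 *
 *	priv->md_dst = metadata_dst_alloc_percpu(0, METADATA_IP_TUNNEL,
 *						 GFP_KERNEL);
 *	if (!priv->md_dst)
 *		return -ENOMEM;
 *
 * use this_cpu_ptr(priv->md_dst) on the hot path, and free with
 * metadata_dst_free_percpu() on teardown.
 */
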
void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu);

#ifdef CONFIG_DST_CACHE
		if (one_md_dst->type == METADATA_IP_TUNNEL)
			dst_cache_destroy(&one_md_dst->u.tun_info.dst_cache);
#endif
		if (one_md_dst->type == METADATA_XFRM)
			dst_release(one_md_dst->u.xfrm_info.dst_orig);
	}
	free_percpu(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free_percpu);