/*
 *	net/core/dst.c	Protocol independent destination cache.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <net/lwtunnel.h>
#include <net/xfrm.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        kfree_skb(skb);
        return 0;
}
EXPORT_SYMBOL(dst_discard_out);

const struct dst_metrics dst_default_metrics = {
        /* This initializer is needed to force linker to place this variable
         * into const section. Otherwise it might end into bss section.
         * We really want to avoid false sharing on this variable, and catch
         * any writes on it.
         */
        .refcnt = REFCOUNT_INIT(1),
};
EXPORT_SYMBOL(dst_default_metrics);

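/*
 * dst_init() is the common initializer for a dst_entry that has already been
 * allocated, either by dst_alloc() below or embedded in a protocol-specific
 * structure.  It takes a reference on @dev, installs the discard handlers as
 * placeholder input/output hooks, and accounts the entry against @ops unless
 * DST_NOCOUNT is set.
 */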
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
              struct net_device *dev, int initial_ref, int initial_obsolete,
              unsigned short flags)
{
        dst->dev = dev;
        if (dev)
                dev_hold(dev);
        dst->ops = ops;
        dst_init_metrics(dst, dst_default_metrics.metrics, true);
        dst->expires = 0UL;
#ifdef CONFIG_XFRM
        dst->xfrm = NULL;
#endif
        dst->input = dst_discard;
        dst->output = dst_discard_out;
        dst->error = 0;
        dst->obsolete = initial_obsolete;
        dst->header_len = 0;
        dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
        dst->tclassid = 0;
#endif
        dst->lwtstate = NULL;
        atomic_set(&dst->__refcnt, initial_ref);
        dst->__use = 0;
        dst->lastuse = jiffies;
        dst->flags = flags;
        if (!(flags & DST_NOCOUNT))
                dst_entries_add(ops, 1);
}
EXPORT_SYMBOL(dst_init);

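/*
 * dst_alloc() carves a new entry out of the per-protocol kmem_cache and hands
 * it to dst_init().  If the protocol provides a garbage collector and its fast
 * entry count exceeds gc_thresh, the allocation is refused when the collector
 * cannot reclaim anything; this is what produces the "Route cache is full"
 * message below.
 */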
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
                int initial_ref, int initial_obsolete, unsigned short flags)
{
        struct dst_entry *dst;

        if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
                if (ops->gc(ops)) {
                        printk_ratelimited(KERN_NOTICE "Route cache is full: "
                                           "consider increasing sysctl "
                                           "net.ipv[4|6].route.max_size.\n");
                        return NULL;
                }
        }

        dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
        if (!dst)
                return NULL;

        dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);

        return dst;
}
EXPORT_SYMBOL(dst_alloc);

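/*
 * Final teardown of a dst: undo the accounting done in dst_init(), run the
 * protocol's ->destroy() hook, drop the device and lwtunnel state references,
 * then return the memory to its kmem_cache (or to metadata_dst_free() for
 * metadata entries).  For XFRM bundles the child dst is released as well.
 */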
struct dst_entry *dst_destroy(struct dst_entry * dst)
{
        struct dst_entry *child = NULL;

        smp_rmb();

#ifdef CONFIG_XFRM
        if (dst->xfrm) {
                struct xfrm_dst *xdst = (struct xfrm_dst *) dst;

                child = xdst->child;
        }
#endif
        if (!(dst->flags & DST_NOCOUNT))
                dst_entries_add(dst->ops, -1);

        if (dst->ops->destroy)
                dst->ops->destroy(dst);
        if (dst->dev)
                dev_put(dst->dev);

        lwtstate_put(dst->lwtstate);

        if (dst->flags & DST_METADATA)
                metadata_dst_free((struct metadata_dst *)dst);
        else
                kmem_cache_free(dst->ops->kmem_cachep, dst);

        dst = child;
        if (dst)
                dst_release_immediate(dst);
        return NULL;
}
EXPORT_SYMBOL(dst_destroy);

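/* RCU callback used by dst_release(): destroys the dst after a grace period. */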
static void dst_destroy_rcu(struct rcu_head *head)
{
        struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

        dst = dst_destroy(dst);
}

/* Operations to mark dst as DEAD and clean up the net device referenced
 * by dst:
 * 1. put the dst under loopback interface and discard all tx/rx packets
 *    on this route.
 * 2. release the net_device
 * This function should be called when removing routes from the fib tree
 * in preparation for a NETDEV_DOWN/NETDEV_UNREGISTER event and also to
 * make the next dst_ops->check() fail.
 */
void dst_dev_put(struct dst_entry *dst)
{
        struct net_device *dev = dst->dev;

        dst->obsolete = DST_OBSOLETE_DEAD;
        if (dst->ops->ifdown)
                dst->ops->ifdown(dst, dev, true);
        dst->input = dst_discard;
        dst->output = dst_discard_out;
        dst->dev = dev_net(dst->dev)->loopback_dev;
        dev_hold(dst->dev);
        dev_put(dev);
}
EXPORT_SYMBOL(dst_dev_put);

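/*
 * dst_release() drops one reference; once the count reaches zero the entry is
 * destroyed after an RCU grace period, so lockless readers that looked the dst
 * up under rcu_read_lock() remain safe.  dst_release_immediate() below skips
 * the grace period and must only be used when no such readers can exist.
 */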
void dst_release(struct dst_entry *dst)
{
        if (dst) {
                int newrefcnt;

                newrefcnt = atomic_dec_return(&dst->__refcnt);
                if (unlikely(newrefcnt < 0))
                        net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
                                             __func__, dst, newrefcnt);
                if (!newrefcnt)
                        call_rcu(&dst->rcu_head, dst_destroy_rcu);
        }
}
EXPORT_SYMBOL(dst_release);

void dst_release_immediate(struct dst_entry *dst)
{
        if (dst) {
                int newrefcnt;

                newrefcnt = atomic_dec_return(&dst->__refcnt);
                if (unlikely(newrefcnt < 0))
                        net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
                                             __func__, dst, newrefcnt);
                if (!newrefcnt)
                        dst_destroy(dst);
        }
}
EXPORT_SYMBOL(dst_release_immediate);

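/*
 * Copy-on-write handling for dst metrics.  dst->_metrics holds a pointer with
 * two low flag bits (DST_METRICS_READ_ONLY and DST_METRICS_REFCOUNTED).  When
 * a writer needs to change read-only metrics, dst_cow_metrics_generic() makes
 * a private copy and installs it with cmpxchg(); if another writer won the
 * race, the fresh copy is discarded and the winner's pointer is returned if it
 * is writable, otherwise NULL.
 */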
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
        struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);

        if (p) {
                struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
                unsigned long prev, new;

                refcount_set(&p->refcnt, 1);
                memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));

                new = (unsigned long) p;
                prev = cmpxchg(&dst->_metrics, old, new);

                if (prev != old) {
                        kfree(p);
                        p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
                        if (prev & DST_METRICS_READ_ONLY)
                                p = NULL;
                } else if (prev & DST_METRICS_REFCOUNTED) {
                        if (refcount_dec_and_test(&old_p->refcnt))
                                kfree(old_p);
                }
        }
        BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
        return (u32 *)p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);

/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
        unsigned long prev, new;

        new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
        prev = cmpxchg(&dst->_metrics, old, new);
        if (prev == old)
                kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

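/*
 * Metadata dst entries do not describe a real route: they only carry tunnel
 * ("collect metadata") state such as struct ip_tunnel_info attached to an skb.
 * Their input/output handlers therefore warn and drop, and the entries are not
 * counted against any dst_ops (DST_NOCOUNT).
 */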
static struct dst_ops md_dst_ops = {
        .family =               AF_UNSPEC,
};

static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        WARN_ONCE(1, "Attempting to call output on metadata dst\n");
        kfree_skb(skb);
        return 0;
}

static int dst_md_discard(struct sk_buff *skb)
{
        WARN_ONCE(1, "Attempting to call input on metadata dst\n");
        kfree_skb(skb);
        return 0;
}

static void __metadata_dst_init(struct metadata_dst *md_dst,
                                enum metadata_type type, u8 optslen)
{
        struct dst_entry *dst;

        dst = &md_dst->dst;
        dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
                 DST_METADATA | DST_NOCOUNT);

        dst->input = dst_md_discard;
        dst->output = dst_md_discard_out;

        memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
        md_dst->type = type;
}

struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
                                        gfp_t flags)
{
        struct metadata_dst *md_dst;

        md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
        if (!md_dst)
                return NULL;

        __metadata_dst_init(md_dst, type, optslen);

        return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc);

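/*
 * Illustrative use only (not taken from this file): a collect_md tunnel
 * receive path typically allocates a metadata dst, fills in the tunnel key
 * and attaches it to the skb, e.g.
 *
 *	md = metadata_dst_alloc(0, METADATA_IP_TUNNEL, GFP_ATOMIC);
 *	if (md) {
 *		md->u.tun_info.key.tun_id = tun_id;
 *		skb_dst_set(skb, &md->dst);
 *	}
 */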
void metadata_dst_free(struct metadata_dst *md_dst)
{
#ifdef CONFIG_DST_CACHE
        if (md_dst->type == METADATA_IP_TUNNEL)
                dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
#endif
        kfree(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free);

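/*
 * Per-CPU variant: allocates one metadata dst per possible CPU so that
 * datapaths needing a scratch metadata dst (for example in softirq context)
 * can use the local copy instead of allocating per packet.  Pair with
 * metadata_dst_free_percpu().
 */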
struct metadata_dst __percpu *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags)
{
        int cpu;
        struct metadata_dst __percpu *md_dst;

        md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
                                    __alignof__(struct metadata_dst), flags);
        if (!md_dst)
                return NULL;

        for_each_possible_cpu(cpu)
                __metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen);

        return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);

void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst)
{
#ifdef CONFIG_DST_CACHE
        int cpu;

        for_each_possible_cpu(cpu) {
                struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu);

                if (one_md_dst->type == METADATA_IP_TUNNEL)
                        dst_cache_destroy(&one_md_dst->u.tun_info.dst_cache);
        }
#endif
        free_percpu(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free_percpu);