/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <[email protected]>
 *
 */
7 | ||
8 | #include <linux/bitops.h> | |
9 | #include <linux/errno.h> | |
10 | #include <linux/init.h> | |
11 | #include <linux/kernel.h> | |
86bba269 | 12 | #include <linux/workqueue.h> |
1da177e4 LT |
13 | #include <linux/mm.h> |
14 | #include <linux/module.h> | |
5a0e3ad6 | 15 | #include <linux/slab.h> |
1da177e4 | 16 | #include <linux/netdevice.h> |
1da177e4 LT |
17 | #include <linux/skbuff.h> |
18 | #include <linux/string.h> | |
19 | #include <linux/types.h> | |
e9dc8653 | 20 | #include <net/net_namespace.h> |
2fc1b5dd | 21 | #include <linux/sched.h> |
268bb0ce | 22 | #include <linux/prefetch.h> |
1da177e4 LT |
23 | |
24 | #include <net/dst.h> | |
25 | ||
/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) In order to keep the spinlock held for only a short time,
 *    we use a second list where long-lived entries are stored,
 *    handled by a garbage collect task fired by a workqueue.
 * 3) This second list is guarded by a mutex,
 *    so that the gc_task and dst_dev_event() can be synchronized.
 */

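/*
 * Illustrative lifecycle (added commentary, not from the original file):
 * a protocol hands an entry to __dst_free(), which queues it on
 * dst_garbage.list under dst_garbage.lock and, if the gc timer had
 * backed off, reschedules dst_gc_work sooner.  dst_gc_task() later
 * splices that list onto the private dst_busy_list (under dst_gc_mutex)
 * and destroys the entries whose refcount has dropped to zero, keeping
 * still-referenced ones around for a later pass.
 */
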
/*
 * We want to keep lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
	spinlock_t		lock;
	struct dst_entry	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * long lived entries are maintained in this list, guarded by dst_gc_mutex
 */
static struct dst_entry *dst_busy_list;

static void dst_gc_task(struct work_struct *work)
{
	int delayed = 0;
	int work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on gc list, invalidate it and add to gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when parent is obsoleted.
			 * But we do not have state "obsoleted, but
			 * referenced by parent", so it is right.
			 */
			if (dst->obsolete > 1)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * if we freed less than 1/10 of delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * if the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
}

int dst_discard(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard);

const u32 dst_default_metrics[RTAX_MAX];

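/*
 * All entries start out pointing at the zero-filled, read-only
 * dst_default_metrics table above; a writable copy is only made on the
 * first metric write, by dst_cow_metrics_generic() below.
 *
 * Illustrative call only (the ops name is hypothetical): a protocol
 * providing its own struct dst_ops would allocate with something like
 *
 *	dst = dst_alloc(&my_dst_ops, dev, 1, 0, 0);
 *
 * where initial_ref seeds ->__refcnt and initial_obsolete seeds
 * ->obsolete.
 */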
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, int flags)
{
	struct dst_entry *dst;

	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}
	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;
	dst->child = NULL;
	dst->dev = dev;
	if (dev)
		dev_hold(dev);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics, true);
	dst->expires = 0UL;
	dst->path = dst;
	RCU_INIT_POINTER(dst->_neighbour, NULL);
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	dst->next = NULL;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
	return dst;
}
EXPORT_SYMBOL(dst_alloc);

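/*
 * ->obsolete == 2 marks the entry as dead; dst_gc_task() relies on
 * (dst->obsolete > 1) to recognize entries that already went through here.
 */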
static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is required when
	 * a protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags & IFF_UP))
		dst->input = dst->output = dst_discard;
	dst->obsolete = 2;
}

void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		cancel_delayed_work(&dst_gc_work);
		schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);

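/*
 * Tear down one entry: drop its neighbour, metrics and device references,
 * then descend into the ->child chain.  A NOHASH child that is still
 * referenced is handed back to the caller (the gc task re-queues it);
 * otherwise NULL is returned.
 */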
struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;
	struct neighbour *neigh;

	smp_rmb();

again:
	neigh = rcu_dereference_protected(dst->_neighbour, 1);
	child = dst->child;

	if (neigh) {
		RCU_INIT_POINTER(dst->_neighbour, NULL);
		neigh_release(neigh);
	}

	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
	kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were real parent of this dst, so kill child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);

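/*
 * Drop one reference.  DST_NOCACHE entries are not owned by any table or
 * gc list, so the last dst_release() destroys them directly instead of
 * leaving that to the garbage collector.
 */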
void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		WARN_ON(newrefcnt < 0);
		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
			dst = dst_destroy(dst);
			if (dst)
				__dst_free(dst);
		}
	}
}
EXPORT_SYMBOL(dst_release);

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);

	if (p) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		}
	}
	return p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);

/* Caller asserts that dst_metrics_read_only(dst) is false. */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

/**
 * skb_dst_set_noref - sets skb dst, without a reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * skb_dst_drop() should not dst_release() this dst.
 */
void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	/* If dst is not in cache, we must take a reference, because
	 * dst_release() will destroy dst as soon as its refcount becomes zero.
	 */
	if (unlikely(dst->flags & DST_NOCACHE)) {
		dst_hold(dst);
		skb_dst_set(skb, dst);
	} else {
		skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
	}
}
EXPORT_SYMBOL(skb_dst_set_noref);

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
		       int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst->output = dst_discard;
	} else {
		struct neighbour *neigh;

		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
		rcu_read_lock();
		neigh = dst_get_neighbour_noref(dst);
		if (neigh && neigh->dev == dev) {
			neigh->dev = dst->dev;
			dev_hold(dst->dev);
			dev_put(dev);
		}
		rcu_read_unlock();
	}
}

static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = ptr;
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next)
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call = dst_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};

void __init dst_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}