1 | /* |
2 | * Generic address resolution entity | |
3 | * | |
4 | * Authors: | |
5 | * Pedro Roque <[email protected]> | |
6 | * Alexey Kuznetsov <[email protected]> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU General Public License | |
10 | * as published by the Free Software Foundation; either version | |
11 | * 2 of the License, or (at your option) any later version. | |
12 | * | |
13 | * Fixes: | |
14 | * Vitaly E. Lavrov releasing NULL neighbor in neigh_add. | |
15 | * Harald Welte Add neighbour cache statistics like rtstat | |
16 | */ | |
17 | ||
18 | #include <linux/types.h> |
19 | #include <linux/kernel.h> | |
20 | #include <linux/module.h> | |
21 | #include <linux/socket.h> | |
22 | #include <linux/sched.h> | |
23 | #include <linux/netdevice.h> | |
24 | #include <linux/proc_fs.h> | |
25 | #ifdef CONFIG_SYSCTL | |
26 | #include <linux/sysctl.h> | |
27 | #endif | |
28 | #include <linux/times.h> | |
29 | #include <net/neighbour.h> | |
30 | #include <net/dst.h> | |
31 | #include <net/sock.h> | |
32 | #include <net/netevent.h>
33 | #include <net/netlink.h>
34 | #include <linux/rtnetlink.h>
35 | #include <linux/random.h>
36 | #include <linux/string.h>
37 | |
38 | #define NEIGH_DEBUG 1 | |
39 | ||
40 | #define NEIGH_PRINTK(x...) printk(x) | |
41 | #define NEIGH_NOPRINTK(x...) do { ; } while(0) | |
42 | #define NEIGH_PRINTK0 NEIGH_PRINTK | |
43 | #define NEIGH_PRINTK1 NEIGH_NOPRINTK | |
44 | #define NEIGH_PRINTK2 NEIGH_NOPRINTK | |
45 | ||
46 | #if NEIGH_DEBUG >= 1 | |
47 | #undef NEIGH_PRINTK1 | |
48 | #define NEIGH_PRINTK1 NEIGH_PRINTK | |
49 | #endif | |
50 | #if NEIGH_DEBUG >= 2 | |
51 | #undef NEIGH_PRINTK2 | |
52 | #define NEIGH_PRINTK2 NEIGH_PRINTK | |
53 | #endif | |
54 | ||
55 | #define PNEIGH_HASHMASK 0xF | |
56 | ||
57 | static void neigh_timer_handler(unsigned long arg); | |
58 | #ifdef CONFIG_ARPD | |
59 | static void neigh_app_notify(struct neighbour *n); | |
60 | #endif | |
61 | static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev); | |
62 | void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); | |
63 | ||
64 | static struct neigh_table *neigh_tables; | |
65 | #ifdef CONFIG_PROC_FS
66 | static struct file_operations neigh_stat_seq_fops;
67 | #endif
68 |
69 | /* | |
70 | Neighbour hash table buckets are protected with rwlock tbl->lock. | |
71 | ||
72 | - All the scans/updates to hash buckets MUST be made under this lock. | |
73 | - NOTHING clever should be made under this lock: no callbacks | |
74 | to protocol backends, no attempts to send something to network. | |
75 | It will result in deadlocks, if backend/driver wants to use neighbour | |
76 | cache. | |
77 | - If the entry requires some non-trivial actions, increase | |
78 | its reference count and release table lock. | |
79 | ||
80 | Neighbour entries are protected: | |
81 | - with reference count. | |
82 | - with rwlock neigh->lock | |
83 | ||
84 | Reference count prevents destruction. | |
85 | ||
86 | neigh->lock mainly serializes ll address data and its validity state. | |
87 | However, the same lock is used to protect another entry fields: | |
88 | - timer | |
89 | - resolution queue | |
90 | ||
91 | Again, nothing clever shall be made under neigh->lock, | |
92 | the most complicated procedure we allow is dev->hard_header.
93 | It is assumed that dev->hard_header is simple and does
94 | not make callbacks to neighbour tables.
95 |
96 | The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
97 | the list of neighbour tables. This list is used only in process context.
98 | */ | |
99 | ||
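/*
 * Illustrative sketch (not part of the original source): the rule above --
 * take a reference under tbl->lock, then drop the lock before doing
 * anything non-trivial -- typically looks like this in a caller.  The
 * work_on() helper is hypothetical.
 *
 *	struct neighbour *n;
 *
 *	read_lock_bh(&tbl->lock);
 *	n = tbl->hash_buckets[hash_val];	-- scan only under tbl->lock
 *	if (n)
 *		neigh_hold(n);			-- the refcount pins the entry
 *	read_unlock_bh(&tbl->lock);
 *
 *	if (n) {
 *		work_on(n);			-- may send packets, call drivers, ...
 *		neigh_release(n);
 *	}
 */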
100 | static DEFINE_RWLOCK(neigh_tbl_lock); | |
101 | ||
102 | static int neigh_blackhole(struct sk_buff *skb) | |
103 | { | |
104 | kfree_skb(skb); | |
105 | return -ENETDOWN; | |
106 | } | |
107 | ||
108 | /* | |
109 | * Returns a value randomly distributed in the interval (1/2)*base...(3/2)*base.
110 | * It corresponds to the default IPv6 settings and is not overridable,
111 | * because it is a really reasonable choice.
112 | */ | |
113 | ||
114 | unsigned long neigh_rand_reach_time(unsigned long base) | |
115 | { | |
116 | return (base ? (net_random() % base) + (base >> 1) : 0); | |
117 | } | |
118 | ||
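/*
 * Worked example (not part of the original source; assumes the common
 * base_reachable_time default of 30 * HZ): the result is
 * (net_random() % (30 * HZ)) + 15 * HZ, i.e. uniform over
 * [15 * HZ, 45 * HZ), which is exactly the (1/2)*base ... (3/2)*base
 * interval described above.  A base of 0 yields 0.
 */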
119 | ||
120 | static int neigh_forced_gc(struct neigh_table *tbl) | |
121 | { | |
122 | int shrunk = 0; | |
123 | int i; | |
124 | ||
125 | NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs); | |
126 | ||
127 | write_lock_bh(&tbl->lock); | |
128 | for (i = 0; i <= tbl->hash_mask; i++) { | |
129 | struct neighbour *n, **np; | |
130 | ||
131 | np = &tbl->hash_buckets[i]; | |
132 | while ((n = *np) != NULL) { | |
133 | /* Neighbour record may be discarded if: | |
134 | * - nobody refers to it. | |
135 | * - it is not permanent | |
136 | */ | |
137 | write_lock(&n->lock); | |
138 | if (atomic_read(&n->refcnt) == 1 && | |
139 | !(n->nud_state & NUD_PERMANENT)) { | |
140 | *np = n->next; | |
141 | n->dead = 1; | |
142 | shrunk = 1; | |
143 | write_unlock(&n->lock); | |
144 | neigh_release(n); | |
145 | continue; | |
146 | } | |
147 | write_unlock(&n->lock); | |
148 | np = &n->next; | |
149 | } | |
150 | } | |
151 | ||
152 | tbl->last_flush = jiffies; | |
153 | ||
154 | write_unlock_bh(&tbl->lock); | |
155 | ||
156 | return shrunk; | |
157 | } | |
158 | ||
159 | static int neigh_del_timer(struct neighbour *n) | |
160 | { | |
161 | if ((n->nud_state & NUD_IN_TIMER) && | |
162 | del_timer(&n->timer)) { | |
163 | neigh_release(n); | |
164 | return 1; | |
165 | } | |
166 | return 0; | |
167 | } | |
168 | ||
169 | static void pneigh_queue_purge(struct sk_buff_head *list) | |
170 | { | |
171 | struct sk_buff *skb; | |
172 | ||
173 | while ((skb = skb_dequeue(list)) != NULL) { | |
174 | dev_put(skb->dev); | |
175 | kfree_skb(skb); | |
176 | } | |
177 | } | |
178 | ||
179 | static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
180 | {
181 | int i;
182 |
183 | for (i = 0; i <= tbl->hash_mask; i++) { |
184 | struct neighbour *n, **np = &tbl->hash_buckets[i]; | |
185 | ||
186 | while ((n = *np) != NULL) { | |
187 | if (dev && n->dev != dev) { | |
188 | np = &n->next; | |
189 | continue; | |
190 | } | |
191 | *np = n->next; | |
192 | write_lock(&n->lock); | |
193 | neigh_del_timer(n); | |
194 | n->dead = 1; | |
195 | ||
196 | if (atomic_read(&n->refcnt) != 1) { | |
197 | /* The most unpleasant situation. | |
198 | We must destroy neighbour entry, | |
199 | but someone still uses it. | |
200 | ||
201 | The destroy will be delayed until | |
202 | the last user releases us, but | |
203 | we must kill timers etc. and move | |
204 | it to safe state. | |
205 | */ | |
206 | skb_queue_purge(&n->arp_queue); | |
207 | n->output = neigh_blackhole; | |
208 | if (n->nud_state & NUD_VALID) | |
209 | n->nud_state = NUD_NOARP; | |
210 | else | |
211 | n->nud_state = NUD_NONE; | |
212 | NEIGH_PRINTK2("neigh %p is stray.\n", n); | |
213 | } | |
214 | write_unlock(&n->lock); | |
215 | neigh_release(n); | |
216 | } | |
217 | } | |
218 | }
219 |
220 | void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev) |
221 | { | |
222 | write_lock_bh(&tbl->lock); | |
223 | neigh_flush_dev(tbl, dev); | |
224 | write_unlock_bh(&tbl->lock); | |
225 | } | |
226 | ||
227 | int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) | |
228 | { | |
229 | write_lock_bh(&tbl->lock); | |
230 | neigh_flush_dev(tbl, dev); | |
231 | pneigh_ifdown(tbl, dev); |
232 | write_unlock_bh(&tbl->lock); | |
233 | ||
234 | del_timer_sync(&tbl->proxy_timer); | |
235 | pneigh_queue_purge(&tbl->proxy_queue); | |
236 | return 0; | |
237 | } | |
238 | ||
239 | static struct neighbour *neigh_alloc(struct neigh_table *tbl) | |
240 | { | |
241 | struct neighbour *n = NULL; | |
242 | unsigned long now = jiffies; | |
243 | int entries; | |
244 | ||
245 | entries = atomic_inc_return(&tbl->entries) - 1; | |
246 | if (entries >= tbl->gc_thresh3 || | |
247 | (entries >= tbl->gc_thresh2 && | |
248 | time_after(now, tbl->last_flush + 5 * HZ))) { | |
249 | if (!neigh_forced_gc(tbl) && | |
250 | entries >= tbl->gc_thresh3) | |
251 | goto out_entries; | |
252 | } | |
253 | ||
254 | n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC); | |
255 | if (!n) | |
256 | goto out_entries; | |
257 | ||
258 | memset(n, 0, tbl->entry_size); | |
259 | ||
260 | skb_queue_head_init(&n->arp_queue); | |
261 | rwlock_init(&n->lock); | |
262 | n->updated = n->used = now; | |
263 | n->nud_state = NUD_NONE; | |
264 | n->output = neigh_blackhole; | |
265 | n->parms = neigh_parms_clone(&tbl->parms); | |
266 | init_timer(&n->timer); | |
267 | n->timer.function = neigh_timer_handler; | |
268 | n->timer.data = (unsigned long)n; | |
269 | ||
270 | NEIGH_CACHE_STAT_INC(tbl, allocs); | |
271 | n->tbl = tbl; | |
272 | atomic_set(&n->refcnt, 1); | |
273 | n->dead = 1; | |
274 | out: | |
275 | return n; | |
276 | ||
277 | out_entries: | |
278 | atomic_dec(&tbl->entries); | |
279 | goto out; | |
280 | } | |
281 | ||
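/*
 * Worked example (not part of the original source; assumes the usual ARP
 * defaults gc_thresh2 = 512, gc_thresh3 = 1024): with ~600 cached entries
 * neigh_alloc() above only runs neigh_forced_gc() when the last flush is
 * more than 5 seconds old, and the allocation still proceeds even if
 * nothing was freed; at 1024 or more entries it always tries forced GC and
 * fails (returns NULL) when the GC could not shrink the table.
 */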
282 | static struct neighbour **neigh_hash_alloc(unsigned int entries) | |
283 | { | |
284 | unsigned long size = entries * sizeof(struct neighbour *); | |
285 | struct neighbour **ret; | |
286 | ||
287 | if (size <= PAGE_SIZE) { | |
288 | ret = kzalloc(size, GFP_ATOMIC);
289 | } else {
290 | ret = (struct neighbour **)
291 | __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
292 | }
293 | return ret; |
294 | } | |
295 | ||
296 | static void neigh_hash_free(struct neighbour **hash, unsigned int entries) | |
297 | { | |
298 | unsigned long size = entries * sizeof(struct neighbour *); | |
299 | ||
300 | if (size <= PAGE_SIZE) | |
301 | kfree(hash); | |
302 | else | |
303 | free_pages((unsigned long)hash, get_order(size)); | |
304 | } | |
305 | ||
306 | static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries) | |
307 | { | |
308 | struct neighbour **new_hash, **old_hash; | |
309 | unsigned int i, new_hash_mask, old_entries; | |
310 | ||
311 | NEIGH_CACHE_STAT_INC(tbl, hash_grows); | |
312 | ||
313 | BUG_ON(new_entries & (new_entries - 1)); | |
314 | new_hash = neigh_hash_alloc(new_entries); | |
315 | if (!new_hash) | |
316 | return; | |
317 | ||
318 | old_entries = tbl->hash_mask + 1; | |
319 | new_hash_mask = new_entries - 1; | |
320 | old_hash = tbl->hash_buckets; | |
321 | ||
322 | get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd)); | |
323 | for (i = 0; i < old_entries; i++) { | |
324 | struct neighbour *n, *next; | |
325 | ||
326 | for (n = old_hash[i]; n; n = next) { | |
327 | unsigned int hash_val = tbl->hash(n->primary_key, n->dev); | |
328 | ||
329 | hash_val &= new_hash_mask; | |
330 | next = n->next; | |
331 | ||
332 | n->next = new_hash[hash_val]; | |
333 | new_hash[hash_val] = n; | |
334 | } | |
335 | } | |
336 | tbl->hash_buckets = new_hash; | |
337 | tbl->hash_mask = new_hash_mask; | |
338 | ||
339 | neigh_hash_free(old_hash, old_entries); | |
340 | } | |
341 | ||
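/*
 * Illustrative note (not part of the original source): the bucket count is
 * kept a power of two, so a bucket index is simply "hash & mask".  Growing
 * from 16 to 32 buckets means new_hash_mask = 31, and every entry is
 * rehashed with the fresh hash_rnd exactly as in the loop above:
 *
 *	unsigned int hash_val = tbl->hash(n->primary_key, n->dev) & 31;
 */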
342 | struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, | |
343 | struct net_device *dev) | |
344 | { | |
345 | struct neighbour *n; | |
346 | int key_len = tbl->key_len; | |
347 | u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask; | |
348 | ||
349 | NEIGH_CACHE_STAT_INC(tbl, lookups); | |
350 | ||
351 | read_lock_bh(&tbl->lock); | |
352 | for (n = tbl->hash_buckets[hash_val]; n; n = n->next) { | |
353 | if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) { | |
354 | neigh_hold(n); | |
355 | NEIGH_CACHE_STAT_INC(tbl, hits); | |
356 | break; | |
357 | } | |
358 | } | |
359 | read_unlock_bh(&tbl->lock); | |
360 | return n; | |
361 | } | |
362 | ||
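/*
 * Illustrative usage sketch (not part of the original source; 'my_tbl' and
 * 'daddr' are hypothetical):
 *
 *	struct neighbour *n = neigh_lookup(&my_tbl, &daddr, dev);
 *	if (n) {
 *		... inspect n->nud_state / n->ha under n->lock as needed ...
 *		neigh_release(n);	-- drop the reference neigh_lookup() took
 *	}
 */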
363 | struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey) | |
364 | { | |
365 | struct neighbour *n; | |
366 | int key_len = tbl->key_len; | |
367 | u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask; | |
368 | ||
369 | NEIGH_CACHE_STAT_INC(tbl, lookups); | |
370 | ||
371 | read_lock_bh(&tbl->lock); | |
372 | for (n = tbl->hash_buckets[hash_val]; n; n = n->next) { | |
373 | if (!memcmp(n->primary_key, pkey, key_len)) { | |
374 | neigh_hold(n); | |
375 | NEIGH_CACHE_STAT_INC(tbl, hits); | |
376 | break; | |
377 | } | |
378 | } | |
379 | read_unlock_bh(&tbl->lock); | |
380 | return n; | |
381 | } | |
382 | ||
383 | struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey, | |
384 | struct net_device *dev) | |
385 | { | |
386 | u32 hash_val; | |
387 | int key_len = tbl->key_len; | |
388 | int error; | |
389 | struct neighbour *n1, *rc, *n = neigh_alloc(tbl); | |
390 | ||
391 | if (!n) { | |
392 | rc = ERR_PTR(-ENOBUFS); | |
393 | goto out; | |
394 | } | |
395 | ||
396 | memcpy(n->primary_key, pkey, key_len); | |
397 | n->dev = dev; | |
398 | dev_hold(dev); | |
399 | ||
400 | /* Protocol specific setup. */ | |
401 | if (tbl->constructor && (error = tbl->constructor(n)) < 0) { | |
402 | rc = ERR_PTR(error); | |
403 | goto out_neigh_release; | |
404 | } | |
405 | ||
406 | /* Device specific setup. */ | |
407 | if (n->parms->neigh_setup && | |
408 | (error = n->parms->neigh_setup(n)) < 0) { | |
409 | rc = ERR_PTR(error); | |
410 | goto out_neigh_release; | |
411 | } | |
412 | ||
413 | n->confirmed = jiffies - (n->parms->base_reachable_time << 1); | |
414 | ||
415 | write_lock_bh(&tbl->lock); | |
416 | ||
417 | if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1)) | |
418 | neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1); | |
419 | ||
420 | hash_val = tbl->hash(pkey, dev) & tbl->hash_mask; | |
421 | ||
422 | if (n->parms->dead) { | |
423 | rc = ERR_PTR(-EINVAL); | |
424 | goto out_tbl_unlock; | |
425 | } | |
426 | ||
427 | for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) { | |
428 | if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) { | |
429 | neigh_hold(n1); | |
430 | rc = n1; | |
431 | goto out_tbl_unlock; | |
432 | } | |
433 | } | |
434 | ||
435 | n->next = tbl->hash_buckets[hash_val]; | |
436 | tbl->hash_buckets[hash_val] = n; | |
437 | n->dead = 0; | |
438 | neigh_hold(n); | |
439 | write_unlock_bh(&tbl->lock); | |
440 | NEIGH_PRINTK2("neigh %p is created.\n", n); | |
441 | rc = n; | |
442 | out: | |
443 | return rc; | |
444 | out_tbl_unlock: | |
445 | write_unlock_bh(&tbl->lock); | |
446 | out_neigh_release: | |
447 | neigh_release(n); | |
448 | goto out; | |
449 | } | |
450 | ||
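/*
 * Illustrative sketch (not part of the original source): callers usually go
 * through the __neigh_lookup_errno() wrapper, which falls back to
 * neigh_create() on a miss and reports failure via ERR_PTR ('my_tbl' and
 * 'daddr' are hypothetical):
 *
 *	struct neighbour *n = __neigh_lookup_errno(&my_tbl, &daddr, dev);
 *	if (IS_ERR(n))
 *		return PTR_ERR(n);	-- e.g. -ENOBUFS from neigh_alloc()
 *	...
 *	neigh_release(n);
 */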
451 | struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey, | |
452 | struct net_device *dev, int creat) | |
453 | { | |
454 | struct pneigh_entry *n; | |
455 | int key_len = tbl->key_len; | |
456 | u32 hash_val = *(u32 *)(pkey + key_len - 4); | |
457 | ||
458 | hash_val ^= (hash_val >> 16); | |
459 | hash_val ^= hash_val >> 8; | |
460 | hash_val ^= hash_val >> 4; | |
461 | hash_val &= PNEIGH_HASHMASK; | |
462 | ||
463 | read_lock_bh(&tbl->lock); | |
464 | ||
465 | for (n = tbl->phash_buckets[hash_val]; n; n = n->next) { | |
466 | if (!memcmp(n->key, pkey, key_len) && | |
467 | (n->dev == dev || !n->dev)) { | |
468 | read_unlock_bh(&tbl->lock); | |
469 | goto out; | |
470 | } | |
471 | } | |
472 | read_unlock_bh(&tbl->lock); | |
473 | n = NULL; | |
474 | if (!creat) | |
475 | goto out; | |
476 | ||
477 | n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL); | |
478 | if (!n) | |
479 | goto out; | |
480 | ||
481 | memcpy(n->key, pkey, key_len); | |
482 | n->dev = dev; | |
483 | if (dev) | |
484 | dev_hold(dev); | |
485 | ||
486 | if (tbl->pconstructor && tbl->pconstructor(n)) { | |
487 | if (dev) | |
488 | dev_put(dev); | |
489 | kfree(n); | |
490 | n = NULL; | |
491 | goto out; | |
492 | } | |
493 | ||
494 | write_lock_bh(&tbl->lock); | |
495 | n->next = tbl->phash_buckets[hash_val]; | |
496 | tbl->phash_buckets[hash_val] = n; | |
497 | write_unlock_bh(&tbl->lock); | |
498 | out: | |
499 | return n; | |
500 | } | |
501 | ||
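/*
 * Illustrative sketch (not part of the original source): pneigh entries back
 * proxy ARP/NDP.  A protocol adds one roughly like this when a proxy
 * neighbour is configured (compare the NTF_PROXY handling in neigh_add()
 * further down):
 *
 *	struct pneigh_entry *pn = pneigh_lookup(tbl, &addr, dev, 1);
 *	if (!pn)
 *		return -ENOBUFS;	-- creat == 1: allocate when not found
 */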
502 | ||
503 | int pneigh_delete(struct neigh_table *tbl, const void *pkey, | |
504 | struct net_device *dev) | |
505 | { | |
506 | struct pneigh_entry *n, **np; | |
507 | int key_len = tbl->key_len; | |
508 | u32 hash_val = *(u32 *)(pkey + key_len - 4); | |
509 | ||
510 | hash_val ^= (hash_val >> 16); | |
511 | hash_val ^= hash_val >> 8; | |
512 | hash_val ^= hash_val >> 4; | |
513 | hash_val &= PNEIGH_HASHMASK; | |
514 | ||
515 | write_lock_bh(&tbl->lock); | |
516 | for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL; | |
517 | np = &n->next) { | |
518 | if (!memcmp(n->key, pkey, key_len) && n->dev == dev) { | |
519 | *np = n->next; | |
520 | write_unlock_bh(&tbl->lock); | |
521 | if (tbl->pdestructor) | |
522 | tbl->pdestructor(n); | |
523 | if (n->dev) | |
524 | dev_put(n->dev); | |
525 | kfree(n); | |
526 | return 0; | |
527 | } | |
528 | } | |
529 | write_unlock_bh(&tbl->lock); | |
530 | return -ENOENT; | |
531 | } | |
532 | ||
533 | static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev) | |
534 | { | |
535 | struct pneigh_entry *n, **np; | |
536 | u32 h; | |
537 | ||
538 | for (h = 0; h <= PNEIGH_HASHMASK; h++) { | |
539 | np = &tbl->phash_buckets[h]; | |
540 | while ((n = *np) != NULL) { | |
541 | if (!dev || n->dev == dev) { | |
542 | *np = n->next; | |
543 | if (tbl->pdestructor) | |
544 | tbl->pdestructor(n); | |
545 | if (n->dev) | |
546 | dev_put(n->dev); | |
547 | kfree(n); | |
548 | continue; | |
549 | } | |
550 | np = &n->next; | |
551 | } | |
552 | } | |
553 | return -ENOENT; | |
554 | } | |
555 | ||
556 | ||
557 | /* | |
558 | * neighbour must already be out of the table; | |
559 | * | |
560 | */ | |
561 | void neigh_destroy(struct neighbour *neigh) | |
562 | { | |
563 | struct hh_cache *hh; | |
564 | ||
565 | NEIGH_CACHE_STAT_INC(neigh->tbl, destroys); | |
566 | ||
567 | if (!neigh->dead) { | |
568 | printk(KERN_WARNING | |
569 | "Destroying alive neighbour %p\n", neigh); | |
570 | dump_stack(); | |
571 | return; | |
572 | } | |
573 | ||
574 | if (neigh_del_timer(neigh)) | |
575 | printk(KERN_WARNING "Impossible event.\n"); | |
576 | ||
577 | while ((hh = neigh->hh) != NULL) { | |
578 | neigh->hh = hh->hh_next; | |
579 | hh->hh_next = NULL; | |
580 | write_lock_bh(&hh->hh_lock); | |
581 | hh->hh_output = neigh_blackhole; | |
582 | write_unlock_bh(&hh->hh_lock); | |
583 | if (atomic_dec_and_test(&hh->hh_refcnt)) | |
584 | kfree(hh); | |
585 | } | |
586 | ||
587 | if (neigh->parms->neigh_destructor)
588 | (neigh->parms->neigh_destructor)(neigh);
589 | |
590 | skb_queue_purge(&neigh->arp_queue); | |
591 | ||
592 | dev_put(neigh->dev); | |
593 | neigh_parms_put(neigh->parms); | |
594 | ||
595 | NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh); | |
596 | ||
597 | atomic_dec(&neigh->tbl->entries); | |
598 | kmem_cache_free(neigh->tbl->kmem_cachep, neigh); | |
599 | } | |
600 | ||
601 | /* Neighbour state is suspicious; | |
602 | disable fast path. | |
603 | ||
604 | Called with write_locked neigh. | |
605 | */ | |
606 | static void neigh_suspect(struct neighbour *neigh) | |
607 | { | |
608 | struct hh_cache *hh; | |
609 | ||
610 | NEIGH_PRINTK2("neigh %p is suspected.\n", neigh); | |
611 | ||
612 | neigh->output = neigh->ops->output; | |
613 | ||
614 | for (hh = neigh->hh; hh; hh = hh->hh_next) | |
615 | hh->hh_output = neigh->ops->output; | |
616 | } | |
617 | ||
618 | /* Neighbour state is OK; | |
619 | enable fast path. | |
620 | ||
621 | Called with write_locked neigh. | |
622 | */ | |
623 | static void neigh_connect(struct neighbour *neigh) | |
624 | { | |
625 | struct hh_cache *hh; | |
626 | ||
627 | NEIGH_PRINTK2("neigh %p is connected.\n", neigh); | |
628 | ||
629 | neigh->output = neigh->ops->connected_output; | |
630 | ||
631 | for (hh = neigh->hh; hh; hh = hh->hh_next) | |
632 | hh->hh_output = neigh->ops->hh_output; | |
633 | } | |
634 | ||
635 | static void neigh_periodic_timer(unsigned long arg) | |
636 | { | |
637 | struct neigh_table *tbl = (struct neigh_table *)arg; | |
638 | struct neighbour *n, **np; | |
639 | unsigned long expire, now = jiffies; | |
640 | ||
641 | NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs); | |
642 | ||
643 | write_lock(&tbl->lock); | |
644 | ||
645 | /* | |
646 | * periodically recompute ReachableTime from random function | |
647 | */ | |
648 | ||
649 | if (time_after(now, tbl->last_rand + 300 * HZ)) { | |
650 | struct neigh_parms *p; | |
651 | tbl->last_rand = now; | |
652 | for (p = &tbl->parms; p; p = p->next) | |
653 | p->reachable_time = | |
654 | neigh_rand_reach_time(p->base_reachable_time); | |
655 | } | |
656 | ||
657 | np = &tbl->hash_buckets[tbl->hash_chain_gc]; | |
658 | tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask); | |
659 | ||
660 | while ((n = *np) != NULL) { | |
661 | unsigned int state; | |
662 | ||
663 | write_lock(&n->lock); | |
664 | ||
665 | state = n->nud_state; | |
666 | if (state & (NUD_PERMANENT | NUD_IN_TIMER)) { | |
667 | write_unlock(&n->lock); | |
668 | goto next_elt; | |
669 | } | |
670 | ||
671 | if (time_before(n->used, n->confirmed)) | |
672 | n->used = n->confirmed; | |
673 | ||
674 | if (atomic_read(&n->refcnt) == 1 && | |
675 | (state == NUD_FAILED || | |
676 | time_after(now, n->used + n->parms->gc_staletime))) { | |
677 | *np = n->next; | |
678 | n->dead = 1; | |
679 | write_unlock(&n->lock); | |
680 | neigh_release(n); | |
681 | continue; | |
682 | } | |
683 | write_unlock(&n->lock); | |
684 | ||
685 | next_elt: | |
686 | np = &n->next; | |
687 | } | |
688 | ||
689 | /* Cycle through all hash buckets every base_reachable_time/2 ticks. | |
690 | * ARP entry timeouts range from 1/2 base_reachable_time to 3/2 | |
691 | * base_reachable_time. | |
692 | */ | |
693 | expire = tbl->parms.base_reachable_time >> 1; | |
694 | expire /= (tbl->hash_mask + 1); | |
695 | if (!expire) | |
696 | expire = 1; | |
697 | ||
698 | mod_timer(&tbl->gc_timer, now + expire); | |
699 | ||
700 | write_unlock(&tbl->lock); | |
701 | } | |
702 | ||
703 | static __inline__ int neigh_max_probes(struct neighbour *n) | |
704 | { | |
705 | struct neigh_parms *p = n->parms; | |
706 | return (n->nud_state & NUD_PROBE ? | |
707 | p->ucast_probes : | |
708 | p->ucast_probes + p->app_probes + p->mcast_probes); | |
709 | } | |
710 | ||
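/*
 * Worked example (not part of the original source; assumes the common ARP
 * defaults ucast_probes = 3, app_probes = 0, mcast_probes = 3): an entry in
 * NUD_PROBE gives up after 3 unicast probes, while one still in
 * NUD_INCOMPLETE may send up to 3 + 0 + 3 = 6 probes before
 * neigh_timer_handler() below marks it NUD_FAILED.
 */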
711 | static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
712 | {
713 | if (unlikely(mod_timer(&n->timer, when))) {
714 | printk("NEIGH: BUG, double timer add, state is %x\n",
715 | n->nud_state);
716 | dump_stack();
717 | }
718 | }
719 | |
720 | /* Called when a timer expires for a neighbour entry. */ | |
721 | ||
722 | static void neigh_timer_handler(unsigned long arg) | |
723 | { | |
724 | unsigned long now, next; | |
725 | struct neighbour *neigh = (struct neighbour *)arg; | |
726 | unsigned state; | |
727 | int notify = 0; | |
728 | ||
729 | write_lock(&neigh->lock); | |
730 | ||
731 | state = neigh->nud_state; | |
732 | now = jiffies; | |
733 | next = now + HZ; | |
734 | ||
735 | if (!(state & NUD_IN_TIMER)) { | |
736 | #ifndef CONFIG_SMP | |
737 | printk(KERN_WARNING "neigh: timer & !nud_in_timer\n"); | |
738 | #endif | |
739 | goto out; | |
740 | } | |
741 | ||
742 | if (state & NUD_REACHABLE) { | |
743 | if (time_before_eq(now, | |
744 | neigh->confirmed + neigh->parms->reachable_time)) { | |
745 | NEIGH_PRINTK2("neigh %p is still alive.\n", neigh); | |
746 | next = neigh->confirmed + neigh->parms->reachable_time; | |
747 | } else if (time_before_eq(now, | |
748 | neigh->used + neigh->parms->delay_probe_time)) { | |
749 | NEIGH_PRINTK2("neigh %p is delayed.\n", neigh); | |
750 | neigh->nud_state = NUD_DELAY; | |
751 | neigh->updated = jiffies;
752 | neigh_suspect(neigh); |
753 | next = now + neigh->parms->delay_probe_time; | |
754 | } else { | |
755 | NEIGH_PRINTK2("neigh %p is suspected.\n", neigh); | |
756 | neigh->nud_state = NUD_STALE; | |
757 | neigh->updated = jiffies;
758 | neigh_suspect(neigh);
759 | notify = 1;
760 | } |
761 | } else if (state & NUD_DELAY) { | |
762 | if (time_before_eq(now, | |
763 | neigh->confirmed + neigh->parms->delay_probe_time)) { | |
764 | NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh); | |
765 | neigh->nud_state = NUD_REACHABLE; | |
766 | neigh->updated = jiffies;
767 | neigh_connect(neigh);
768 | notify = 1;
769 | next = neigh->confirmed + neigh->parms->reachable_time; |
770 | } else { | |
771 | NEIGH_PRINTK2("neigh %p is probed.\n", neigh); | |
772 | neigh->nud_state = NUD_PROBE; | |
773 | neigh->updated = jiffies;
774 | atomic_set(&neigh->probes, 0); |
775 | next = now + neigh->parms->retrans_time; | |
776 | } | |
777 | } else { | |
778 | /* NUD_PROBE|NUD_INCOMPLETE */ | |
779 | next = now + neigh->parms->retrans_time; | |
780 | } | |
781 | ||
782 | if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) && | |
783 | atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) { | |
784 | struct sk_buff *skb; | |
785 | ||
786 | neigh->nud_state = NUD_FAILED; | |
787 | neigh->updated = jiffies;
788 | notify = 1; |
789 | NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed); | |
790 | NEIGH_PRINTK2("neigh %p is failed.\n", neigh); | |
791 | ||
792 | /* This is a very delicate place. report_unreachable is a very
793 | complicated routine. Particularly, it can hit the same neighbour entry!
794 |
795 | So we try to be accurate here and avoid dead loops. --ANK
796 | */ | |
797 | while (neigh->nud_state == NUD_FAILED && | |
798 | (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) { | |
799 | write_unlock(&neigh->lock); | |
800 | neigh->ops->error_report(neigh, skb); | |
801 | write_lock(&neigh->lock); | |
802 | } | |
803 | skb_queue_purge(&neigh->arp_queue); | |
804 | } | |
805 | ||
806 | if (neigh->nud_state & NUD_IN_TIMER) { | |
807 | if (time_before(next, jiffies + HZ/2))
808 | next = jiffies + HZ/2;
809 | if (!mod_timer(&neigh->timer, next))
810 | neigh_hold(neigh);
811 | } |
812 | if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) { | |
813 | struct sk_buff *skb = skb_peek(&neigh->arp_queue); | |
814 | /* keep skb alive even if arp_queue overflows */ | |
815 | if (skb) | |
816 | skb_get(skb); | |
817 | write_unlock(&neigh->lock); | |
818 | neigh->ops->solicit(neigh, skb); | |
819 | atomic_inc(&neigh->probes); | |
820 | if (skb) | |
821 | kfree_skb(skb); | |
822 | } else { | |
823 | out: | |
824 | write_unlock(&neigh->lock); | |
825 | } | |
826 | if (notify)
827 | call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
828 | |
829 | #ifdef CONFIG_ARPD | |
830 | if (notify && neigh->parms->app_probes) | |
831 | neigh_app_notify(neigh); | |
832 | #endif | |
833 | neigh_release(neigh); | |
834 | } | |
835 | ||
836 | int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) | |
837 | { | |
838 | int rc; | |
839 | unsigned long now; | |
840 | ||
841 | write_lock_bh(&neigh->lock); | |
842 | ||
843 | rc = 0; | |
844 | if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)) | |
845 | goto out_unlock_bh; | |
846 | ||
847 | now = jiffies; | |
848 | ||
849 | if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) { | |
850 | if (neigh->parms->mcast_probes + neigh->parms->app_probes) { | |
851 | atomic_set(&neigh->probes, neigh->parms->ucast_probes); | |
852 | neigh->nud_state = NUD_INCOMPLETE; | |
853 | neigh->updated = jiffies;
854 | neigh_hold(neigh);
855 | neigh_add_timer(neigh, now + 1);
856 | } else { |
857 | neigh->nud_state = NUD_FAILED; | |
858 | neigh->updated = jiffies;
859 | write_unlock_bh(&neigh->lock); |
860 | ||
861 | if (skb) | |
862 | kfree_skb(skb); | |
863 | return 1; | |
864 | } | |
865 | } else if (neigh->nud_state & NUD_STALE) { | |
866 | NEIGH_PRINTK2("neigh %p is delayed.\n", neigh); | |
867 | neigh_hold(neigh); | |
868 | neigh->nud_state = NUD_DELAY; | |
869 | neigh->updated = jiffies;
870 | neigh_add_timer(neigh,
871 | jiffies + neigh->parms->delay_probe_time);
872 | } |
873 | ||
874 | if (neigh->nud_state == NUD_INCOMPLETE) { | |
875 | if (skb) { | |
876 | if (skb_queue_len(&neigh->arp_queue) >= | |
877 | neigh->parms->queue_len) { | |
878 | struct sk_buff *buff; | |
879 | buff = neigh->arp_queue.next; | |
880 | __skb_unlink(buff, &neigh->arp_queue); | |
881 | kfree_skb(buff); | |
882 | } | |
883 | __skb_queue_tail(&neigh->arp_queue, skb); | |
884 | } | |
885 | rc = 1; | |
886 | } | |
887 | out_unlock_bh: | |
888 | write_unlock_bh(&neigh->lock); | |
889 | return rc; | |
890 | } | |
891 | ||
892 | static void neigh_update_hhs(struct neighbour *neigh)
893 | { |
894 | struct hh_cache *hh; | |
895 | void (*update)(struct hh_cache*, struct net_device*, unsigned char *) = | |
896 | neigh->dev->header_cache_update; | |
897 | ||
898 | if (update) { | |
899 | for (hh = neigh->hh; hh; hh = hh->hh_next) { | |
900 | write_lock_bh(&hh->hh_lock); | |
901 | update(hh, neigh->dev, neigh->ha); | |
902 | write_unlock_bh(&hh->hh_lock); | |
903 | } | |
904 | } | |
905 | } | |
906 | ||
907 | ||
908 | ||
909 | /* Generic update routine. | |
910 | -- lladdr is the new lladdr, or NULL if it is not supplied.
911 | -- new is the new state.
912 | -- flags
913 | NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
914 | if it is different.
915 | NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
916 | lladdr instead of overriding it
917 | if it is different.
918 | It also allows retaining the current state
919 | if lladdr is unchanged.
920 | NEIGH_UPDATE_F_ADMIN means that the change is administrative.
921 |
922 | NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
923 | NTF_ROUTER flag.
924 | NEIGH_UPDATE_F_ISROUTER indicates if the neighbour is known as | |
925 | a router. | |
926 | ||
927 | Caller MUST hold reference count on the entry. | |
928 | */ | |
929 | ||
930 | int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, | |
931 | u32 flags) | |
932 | { | |
933 | u8 old; | |
934 | int err; | |
935 | int notify = 0;
936 | struct net_device *dev; |
937 | int update_isrouter = 0; | |
938 | ||
939 | write_lock_bh(&neigh->lock); | |
940 | ||
941 | dev = neigh->dev; | |
942 | old = neigh->nud_state; | |
943 | err = -EPERM; | |
944 | ||
945 | if (!(flags & NEIGH_UPDATE_F_ADMIN) && | |
946 | (old & (NUD_NOARP | NUD_PERMANENT))) | |
947 | goto out; | |
948 | ||
949 | if (!(new & NUD_VALID)) { | |
950 | neigh_del_timer(neigh); | |
951 | if (old & NUD_CONNECTED) | |
952 | neigh_suspect(neigh); | |
953 | neigh->nud_state = new; | |
954 | err = 0; | |
955 | notify = old & NUD_VALID;
956 | goto out; |
957 | } | |
958 | ||
959 | /* Compare new lladdr with cached one */ | |
960 | if (!dev->addr_len) { | |
961 | /* First case: device needs no address. */ | |
962 | lladdr = neigh->ha; | |
963 | } else if (lladdr) { | |
964 | /* The second case: if something is already cached | |
965 | and a new address is proposed: | |
966 | - compare new & old | |
967 | - if they are different, check override flag | |
968 | */ | |
969 | if ((old & NUD_VALID) && | |
970 | !memcmp(lladdr, neigh->ha, dev->addr_len)) | |
971 | lladdr = neigh->ha; | |
972 | } else { | |
973 | /* No address is supplied; if we know something, | |
974 | use it, otherwise discard the request. | |
975 | */ | |
976 | err = -EINVAL; | |
977 | if (!(old & NUD_VALID)) | |
978 | goto out; | |
979 | lladdr = neigh->ha; | |
980 | } | |
981 | ||
982 | if (new & NUD_CONNECTED) | |
983 | neigh->confirmed = jiffies; | |
984 | neigh->updated = jiffies; | |
985 | ||
986 | /* If entry was valid and address is not changed, | |
987 | do not change entry state, if new one is STALE. | |
988 | */ | |
989 | err = 0; | |
990 | update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER; | |
991 | if (old & NUD_VALID) { | |
992 | if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) { | |
993 | update_isrouter = 0; | |
994 | if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) && | |
995 | (old & NUD_CONNECTED)) { | |
996 | lladdr = neigh->ha; | |
997 | new = NUD_STALE; | |
998 | } else | |
999 | goto out; | |
1000 | } else { | |
1001 | if (lladdr == neigh->ha && new == NUD_STALE && | |
1002 | ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) || | |
1003 | (old & NUD_CONNECTED)) | |
1004 | ) | |
1005 | new = old; | |
1006 | } | |
1007 | } | |
1008 | ||
1009 | if (new != old) { | |
1010 | neigh_del_timer(neigh); | |
1011 | if (new & NUD_IN_TIMER) { | |
1012 | neigh_hold(neigh); | |
1013 | neigh_add_timer(neigh, (jiffies +
1014 | ((new & NUD_REACHABLE) ?
1015 | neigh->parms->reachable_time :
1016 | 0)));
1017 | } |
1018 | neigh->nud_state = new; | |
1019 | } | |
1020 | ||
1021 | if (lladdr != neigh->ha) { | |
1022 | memcpy(&neigh->ha, lladdr, dev->addr_len); | |
1023 | neigh_update_hhs(neigh); | |
1024 | if (!(new & NUD_CONNECTED)) | |
1025 | neigh->confirmed = jiffies - | |
1026 | (neigh->parms->base_reachable_time << 1); | |
1027 | notify = 1;
1028 | } |
1029 | if (new == old) | |
1030 | goto out; | |
1031 | if (new & NUD_CONNECTED) | |
1032 | neigh_connect(neigh); | |
1033 | else | |
1034 | neigh_suspect(neigh); | |
1035 | if (!(old & NUD_VALID)) { | |
1036 | struct sk_buff *skb; | |
1037 | ||
1038 | /* Again: avoid dead loop if something went wrong */ | |
1039 | ||
1040 | while (neigh->nud_state & NUD_VALID && | |
1041 | (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) { | |
1042 | struct neighbour *n1 = neigh; | |
1043 | write_unlock_bh(&neigh->lock); | |
1044 | /* On shaper/eql skb->dst->neighbour != neigh :( */ | |
1045 | if (skb->dst && skb->dst->neighbour) | |
1046 | n1 = skb->dst->neighbour; | |
1047 | n1->output(skb); | |
1048 | write_lock_bh(&neigh->lock); | |
1049 | } | |
1050 | skb_queue_purge(&neigh->arp_queue); | |
1051 | } | |
1052 | out: | |
1053 | if (update_isrouter) { | |
1054 | neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ? | |
1055 | (neigh->flags | NTF_ROUTER) : | |
1056 | (neigh->flags & ~NTF_ROUTER); | |
1057 | } | |
1058 | write_unlock_bh(&neigh->lock); | |
1059 |
1060 | if (notify)
1061 | call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
1062 | #ifdef CONFIG_ARPD |
1063 | if (notify && neigh->parms->app_probes) | |
1064 | neigh_app_notify(neigh); | |
1065 | #endif | |
1066 | return err; | |
1067 | } | |
1068 | ||
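/*
 * Illustrative sketch (not part of the original source): a typical protocol
 * call when a confirmed reply with a sender address arrives, simplified from
 * what IPv4 ARP processing does:
 *
 *	neigh_update(n, lladdr, NUD_REACHABLE, NEIGH_UPDATE_F_OVERRIDE);
 */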
1069 | struct neighbour *neigh_event_ns(struct neigh_table *tbl, | |
1070 | u8 *lladdr, void *saddr, | |
1071 | struct net_device *dev) | |
1072 | { | |
1073 | struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev, | |
1074 | lladdr || !dev->addr_len); | |
1075 | if (neigh) | |
1076 | neigh_update(neigh, lladdr, NUD_STALE, | |
1077 | NEIGH_UPDATE_F_OVERRIDE); | |
1078 | return neigh; | |
1079 | } | |
1080 | ||
1081 | static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst, | |
1082 | __be16 protocol)
1083 | { |
1084 | struct hh_cache *hh; | |
1085 | struct net_device *dev = dst->dev; | |
1086 | ||
1087 | for (hh = n->hh; hh; hh = hh->hh_next) | |
1088 | if (hh->hh_type == protocol) | |
1089 | break; | |
1090 | ||
1091 | if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
1092 | rwlock_init(&hh->hh_lock); |
1093 | hh->hh_type = protocol; | |
1094 | atomic_set(&hh->hh_refcnt, 0); | |
1095 | hh->hh_next = NULL; | |
1096 | if (dev->hard_header_cache(n, hh)) { | |
1097 | kfree(hh); | |
1098 | hh = NULL; | |
1099 | } else { | |
1100 | atomic_inc(&hh->hh_refcnt); | |
1101 | hh->hh_next = n->hh; | |
1102 | n->hh = hh; | |
1103 | if (n->nud_state & NUD_CONNECTED) | |
1104 | hh->hh_output = n->ops->hh_output; | |
1105 | else | |
1106 | hh->hh_output = n->ops->output; | |
1107 | } | |
1108 | } | |
1109 | if (hh) { | |
1110 | atomic_inc(&hh->hh_refcnt); | |
1111 | dst->hh = hh; | |
1112 | } | |
1113 | } | |
1114 | ||
1115 | /* This function can be used in contexts where only the old dev_queue_xmit
1116 | worked, e.g. if you want to override the normal output path (eql, shaper),
1117 | but resolution has not been made yet.
1118 | */ | |
1119 | ||
1120 | int neigh_compat_output(struct sk_buff *skb) | |
1121 | { | |
1122 | struct net_device *dev = skb->dev; | |
1123 | ||
1124 | __skb_pull(skb, skb->nh.raw - skb->data); | |
1125 | ||
1126 | if (dev->hard_header && | |
1127 | dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, | |
1128 | skb->len) < 0 && | |
1129 | dev->rebuild_header(skb)) | |
1130 | return 0; | |
1131 | ||
1132 | return dev_queue_xmit(skb); | |
1133 | } | |
1134 | ||
1135 | /* Slow and careful. */ | |
1136 | ||
1137 | int neigh_resolve_output(struct sk_buff *skb) | |
1138 | { | |
1139 | struct dst_entry *dst = skb->dst; | |
1140 | struct neighbour *neigh; | |
1141 | int rc = 0; | |
1142 | ||
1143 | if (!dst || !(neigh = dst->neighbour)) | |
1144 | goto discard; | |
1145 | ||
1146 | __skb_pull(skb, skb->nh.raw - skb->data); | |
1147 | ||
1148 | if (!neigh_event_send(neigh, skb)) { | |
1149 | int err; | |
1150 | struct net_device *dev = neigh->dev; | |
1151 | if (dev->hard_header_cache && !dst->hh) { | |
1152 | write_lock_bh(&neigh->lock); | |
1153 | if (!dst->hh) | |
1154 | neigh_hh_init(neigh, dst, dst->ops->protocol); | |
1155 | err = dev->hard_header(skb, dev, ntohs(skb->protocol), | |
1156 | neigh->ha, NULL, skb->len); | |
1157 | write_unlock_bh(&neigh->lock); | |
1158 | } else { | |
1159 | read_lock_bh(&neigh->lock); | |
1160 | err = dev->hard_header(skb, dev, ntohs(skb->protocol), | |
1161 | neigh->ha, NULL, skb->len); | |
1162 | read_unlock_bh(&neigh->lock); | |
1163 | } | |
1164 | if (err >= 0) | |
1165 | rc = neigh->ops->queue_xmit(skb); | |
1166 | else | |
1167 | goto out_kfree_skb; | |
1168 | } | |
1169 | out: | |
1170 | return rc; | |
1171 | discard: | |
1172 | NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n", | |
1173 | dst, dst ? dst->neighbour : NULL); | |
1174 | out_kfree_skb: | |
1175 | rc = -EINVAL; | |
1176 | kfree_skb(skb); | |
1177 | goto out; | |
1178 | } | |
1179 | ||
1180 | /* As fast as possible without hh cache */ | |
1181 | ||
1182 | int neigh_connected_output(struct sk_buff *skb) | |
1183 | { | |
1184 | int err; | |
1185 | struct dst_entry *dst = skb->dst; | |
1186 | struct neighbour *neigh = dst->neighbour; | |
1187 | struct net_device *dev = neigh->dev; | |
1188 | ||
1189 | __skb_pull(skb, skb->nh.raw - skb->data); | |
1190 | ||
1191 | read_lock_bh(&neigh->lock); | |
1192 | err = dev->hard_header(skb, dev, ntohs(skb->protocol), | |
1193 | neigh->ha, NULL, skb->len); | |
1194 | read_unlock_bh(&neigh->lock); | |
1195 | if (err >= 0) | |
1196 | err = neigh->ops->queue_xmit(skb); | |
1197 | else { | |
1198 | err = -EINVAL; | |
1199 | kfree_skb(skb); | |
1200 | } | |
1201 | return err; | |
1202 | } | |
1203 | ||
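/*
 * Illustrative sketch (not part of the original source): a protocol plugs
 * these generic output routines into its neigh_ops, roughly the way IPv4
 * ARP does; my_solicit and my_error_report are hypothetical:
 *
 *	static struct neigh_ops my_neigh_ops = {
 *		.family           = AF_INET,
 *		.solicit          = my_solicit,
 *		.error_report     = my_error_report,
 *		.output           = neigh_resolve_output,
 *		.connected_output = neigh_connected_output,
 *		.hh_output        = dev_queue_xmit,
 *		.queue_xmit       = dev_queue_xmit,
 *	};
 */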
1204 | static void neigh_proxy_process(unsigned long arg) | |
1205 | { | |
1206 | struct neigh_table *tbl = (struct neigh_table *)arg; | |
1207 | long sched_next = 0; | |
1208 | unsigned long now = jiffies; | |
1209 | struct sk_buff *skb; | |
1210 | ||
1211 | spin_lock(&tbl->proxy_queue.lock); | |
1212 | ||
1213 | skb = tbl->proxy_queue.next; | |
1214 | ||
1215 | while (skb != (struct sk_buff *)&tbl->proxy_queue) { | |
1216 | struct sk_buff *back = skb; | |
1217 | long tdif = NEIGH_CB(back)->sched_next - now;
1218 | |
1219 | skb = skb->next; | |
1220 | if (tdif <= 0) { | |
1221 | struct net_device *dev = back->dev; | |
1222 | __skb_unlink(back, &tbl->proxy_queue); | |
1223 | if (tbl->proxy_redo && netif_running(dev)) | |
1224 | tbl->proxy_redo(back); | |
1225 | else | |
1226 | kfree_skb(back); | |
1227 | ||
1228 | dev_put(dev); | |
1229 | } else if (!sched_next || tdif < sched_next) | |
1230 | sched_next = tdif; | |
1231 | } | |
1232 | del_timer(&tbl->proxy_timer); | |
1233 | if (sched_next) | |
1234 | mod_timer(&tbl->proxy_timer, jiffies + sched_next); | |
1235 | spin_unlock(&tbl->proxy_queue.lock); | |
1236 | } | |
1237 | ||
1238 | void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, | |
1239 | struct sk_buff *skb) | |
1240 | { | |
1241 | unsigned long now = jiffies; | |
1242 | unsigned long sched_next = now + (net_random() % p->proxy_delay); | |
1243 | ||
1244 | if (tbl->proxy_queue.qlen > p->proxy_qlen) { | |
1245 | kfree_skb(skb); | |
1246 | return; | |
1247 | } | |
1248 |
1249 | NEIGH_CB(skb)->sched_next = sched_next;
1250 | NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1251 | |
1252 | spin_lock(&tbl->proxy_queue.lock); | |
1253 | if (del_timer(&tbl->proxy_timer)) { | |
1254 | if (time_before(tbl->proxy_timer.expires, sched_next)) | |
1255 | sched_next = tbl->proxy_timer.expires; | |
1256 | } | |
1257 | dst_release(skb->dst); | |
1258 | skb->dst = NULL; | |
1259 | dev_hold(skb->dev); | |
1260 | __skb_queue_tail(&tbl->proxy_queue, skb); | |
1261 | mod_timer(&tbl->proxy_timer, sched_next); | |
1262 | spin_unlock(&tbl->proxy_queue.lock); | |
1263 | } | |
1264 | ||
1265 | ||
1266 | struct neigh_parms *neigh_parms_alloc(struct net_device *dev, | |
1267 | struct neigh_table *tbl) | |
1268 | { | |
1269 | struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL); | |
1270 | ||
1271 | if (p) { | |
1272 | memcpy(p, &tbl->parms, sizeof(*p)); | |
1273 | p->tbl = tbl; | |
1274 | atomic_set(&p->refcnt, 1); | |
1275 | INIT_RCU_HEAD(&p->rcu_head); | |
1276 | p->reachable_time = | |
1277 | neigh_rand_reach_time(p->base_reachable_time); | |
1278 | if (dev) { |
1279 | if (dev->neigh_setup && dev->neigh_setup(dev, p)) { | |
1280 | kfree(p); | |
1281 | return NULL; | |
1282 | } | |
1283 | ||
1284 | dev_hold(dev); | |
1285 | p->dev = dev; | |
1286 | } |
1287 | p->sysctl_table = NULL; | |
1288 | write_lock_bh(&tbl->lock); | |
1289 | p->next = tbl->parms.next; | |
1290 | tbl->parms.next = p; | |
1291 | write_unlock_bh(&tbl->lock); | |
1292 | } | |
1293 | return p; | |
1294 | } | |
1295 | ||
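/*
 * Illustrative sketch (not part of the original source): the dev->neigh_setup()
 * hook invoked above lets a driver tune its per-device parameters at
 * allocation time; my_neigh_setup is hypothetical:
 *
 *	static int my_neigh_setup(struct net_device *dev,
 *				  struct neigh_parms *parms)
 *	{
 *		parms->mcast_probes = 1;	-- fewer broadcast probes
 *		return 0;			-- non-zero aborts the allocation
 *	}
 */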
1296 | static void neigh_rcu_free_parms(struct rcu_head *head) | |
1297 | { | |
1298 | struct neigh_parms *parms = | |
1299 | container_of(head, struct neigh_parms, rcu_head); | |
1300 | ||
1301 | neigh_parms_put(parms); | |
1302 | } | |
1303 | ||
1304 | void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms) | |
1305 | { | |
1306 | struct neigh_parms **p; | |
1307 | ||
1308 | if (!parms || parms == &tbl->parms) | |
1309 | return; | |
1310 | write_lock_bh(&tbl->lock); | |
1311 | for (p = &tbl->parms.next; *p; p = &(*p)->next) { | |
1312 | if (*p == parms) { | |
1313 | *p = parms->next; | |
1314 | parms->dead = 1; | |
1315 | write_unlock_bh(&tbl->lock); | |
1316 | if (parms->dev) |
1317 | dev_put(parms->dev); | |
1318 | call_rcu(&parms->rcu_head, neigh_rcu_free_parms); |
1319 | return; | |
1320 | } | |
1321 | } | |
1322 | write_unlock_bh(&tbl->lock); | |
1323 | NEIGH_PRINTK1("neigh_parms_release: not found\n"); | |
1324 | } | |
1325 | ||
1326 | void neigh_parms_destroy(struct neigh_parms *parms) | |
1327 | { | |
1328 | kfree(parms); | |
1329 | } | |
1330 | ||
1331 | void neigh_table_init_no_netlink(struct neigh_table *tbl)
1332 | { |
1333 | unsigned long now = jiffies; | |
1334 | unsigned long phsize; | |
1335 | ||
1336 | atomic_set(&tbl->parms.refcnt, 1); | |
1337 | INIT_RCU_HEAD(&tbl->parms.rcu_head); | |
1338 | tbl->parms.reachable_time = | |
1339 | neigh_rand_reach_time(tbl->parms.base_reachable_time); | |
1340 | ||
1341 | if (!tbl->kmem_cachep) | |
1342 | tbl->kmem_cachep = |
1343 | kmem_cache_create(tbl->id, tbl->entry_size, 0, | |
1344 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, | |
1345 | NULL, NULL); | |
1346 | tbl->stats = alloc_percpu(struct neigh_statistics); |
1347 | if (!tbl->stats) | |
1348 | panic("cannot create neighbour cache statistics"); | |
1349 | ||
1350 | #ifdef CONFIG_PROC_FS | |
1351 | tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat); | |
1352 | if (!tbl->pde) | |
1353 | panic("cannot create neighbour proc dir entry"); | |
1354 | tbl->pde->proc_fops = &neigh_stat_seq_fops; | |
1355 | tbl->pde->data = tbl; | |
1356 | #endif | |
1357 | ||
1358 | tbl->hash_mask = 1; | |
1359 | tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1); | |
1360 | ||
1361 | phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *); | |
1362 | tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1363 | |
1364 | if (!tbl->hash_buckets || !tbl->phash_buckets) | |
1365 | panic("cannot allocate neighbour cache hashes"); | |
1366 | ||
1367 | get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd)); |
1368 | ||
1369 | rwlock_init(&tbl->lock); | |
1370 | init_timer(&tbl->gc_timer); | |
1371 | tbl->gc_timer.data = (unsigned long)tbl; | |
1372 | tbl->gc_timer.function = neigh_periodic_timer; | |
1373 | tbl->gc_timer.expires = now + 1; | |
1374 | add_timer(&tbl->gc_timer); | |
1375 | ||
1376 | init_timer(&tbl->proxy_timer); | |
1377 | tbl->proxy_timer.data = (unsigned long)tbl; | |
1378 | tbl->proxy_timer.function = neigh_proxy_process; | |
1379 | skb_queue_head_init(&tbl->proxy_queue); | |
1380 | ||
1381 | tbl->last_flush = now; | |
1382 | tbl->last_rand = now + tbl->parms.reachable_time * 20; | |
1383 | } |
1384 | ||
1385 | void neigh_table_init(struct neigh_table *tbl) | |
1386 | { | |
1387 | struct neigh_table *tmp; | |
1388 | ||
1389 | neigh_table_init_no_netlink(tbl); | |
1390 | write_lock(&neigh_tbl_lock);
1391 | for (tmp = neigh_tables; tmp; tmp = tmp->next) { |
1392 | if (tmp->family == tbl->family) | |
1393 | break; | |
1394 | } | |
1395 | tbl->next = neigh_tables; |
1396 | neigh_tables = tbl; | |
1397 | write_unlock(&neigh_tbl_lock); | |
1398 | |
1399 | if (unlikely(tmp)) { | |
1400 | printk(KERN_ERR "NEIGH: Registering multiple tables for " | |
1401 | "family %d\n", tbl->family); | |
1402 | dump_stack(); | |
1403 | } | |
1404 | } |
1405 | ||
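/*
 * Illustrative sketch (not part of the original source): a protocol registers
 * its cache by filling in a static neigh_table and calling neigh_table_init(),
 * roughly the way IPv4 ARP sets up arp_tbl; the my_* identifiers are
 * hypothetical and many fields are omitted:
 *
 *	static struct neigh_table my_tbl = {
 *		.family      = AF_INET,
 *		.entry_size  = sizeof(struct neighbour) + 4,
 *		.key_len     = 4,
 *		.hash        = my_hash,
 *		.constructor = my_constructor,
 *		.id          = "my_cache",
 *		.parms       = { .base_reachable_time = 30 * HZ, },
 *	};
 *
 *	neigh_table_init(&my_tbl);
 */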
1406 | int neigh_table_clear(struct neigh_table *tbl) | |
1407 | { | |
1408 | struct neigh_table **tp; | |
1409 | ||
1410 | /* It is not clean... Fix it to unload IPv6 module safely */ | |
1411 | del_timer_sync(&tbl->gc_timer); | |
1412 | del_timer_sync(&tbl->proxy_timer); | |
1413 | pneigh_queue_purge(&tbl->proxy_queue); | |
1414 | neigh_ifdown(tbl, NULL); | |
1415 | if (atomic_read(&tbl->entries)) | |
1416 | printk(KERN_CRIT "neighbour leakage\n"); | |
1417 | write_lock(&neigh_tbl_lock); | |
1418 | for (tp = &neigh_tables; *tp; tp = &(*tp)->next) { | |
1419 | if (*tp == tbl) { | |
1420 | *tp = tbl->next; | |
1421 | break; | |
1422 | } | |
1423 | } | |
1424 | write_unlock(&neigh_tbl_lock); | |
1425 | ||
1426 | neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1); | |
1427 | tbl->hash_buckets = NULL; | |
1428 | ||
1429 | kfree(tbl->phash_buckets); | |
1430 | tbl->phash_buckets = NULL; | |
1431 | ||
1432 | free_percpu(tbl->stats); |
1433 | tbl->stats = NULL; | |
1434 | ||
1435 | return 0; |
1436 | } | |
1437 | ||
1438 | int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |
1439 | { | |
1440 | struct ndmsg *ndm;
1441 | struct nlattr *dst_attr;
1442 | struct neigh_table *tbl;
1443 | struct net_device *dev = NULL;
1444 | int err = -EINVAL;
1445 |
1446 | if (nlmsg_len(nlh) < sizeof(*ndm))
1447 | goto out;
1448 |
1449 | dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST); |
1450 | if (dst_attr == NULL) | |
1451 | goto out; | |
1452 | ||
1453 | ndm = nlmsg_data(nlh); | |
1454 | if (ndm->ndm_ifindex) { | |
1455 | dev = dev_get_by_index(ndm->ndm_ifindex); | |
1456 | if (dev == NULL) { | |
1457 | err = -ENODEV; | |
1458 | goto out; | |
1459 | } | |
1460 | } | |
1461 | ||
1462 | read_lock(&neigh_tbl_lock);
1463 | for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1464 | struct neighbour *neigh;
1465 | |
1466 | if (tbl->family != ndm->ndm_family) | |
1467 | continue; | |
1468 | read_unlock(&neigh_tbl_lock); | |
1469 | ||
1470 | if (nla_len(dst_attr) < tbl->key_len)
1471 | goto out_dev_put; |
1472 | ||
1473 | if (ndm->ndm_flags & NTF_PROXY) { | |
1474 | err = pneigh_delete(tbl, nla_data(dst_attr), dev);
1475 | goto out_dev_put; |
1476 | } | |
1477 | ||
1478 | if (dev == NULL)
1479 | goto out_dev_put;
1480 |
1481 | neigh = neigh_lookup(tbl, nla_data(dst_attr), dev); |
1482 | if (neigh == NULL) { | |
1483 | err = -ENOENT; | |
1484 | goto out_dev_put; | |
1485 | }
1486 | |
1487 | err = neigh_update(neigh, NULL, NUD_FAILED, | |
1488 | NEIGH_UPDATE_F_OVERRIDE | | |
1489 | NEIGH_UPDATE_F_ADMIN); | |
1490 | neigh_release(neigh); | |
1491 | goto out_dev_put; |
1492 | } | |
1493 | read_unlock(&neigh_tbl_lock); | |
1494 | err = -EAFNOSUPPORT; |
1495 | ||
1496 | out_dev_put: |
1497 | if (dev) | |
1498 | dev_put(dev); | |
1499 | out: | |
1500 | return err; | |
1501 | } | |
1502 | ||
1503 | int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |
1504 | { | |
1505 | struct ndmsg *ndm;
1506 | struct nlattr *tb[NDA_MAX+1];
1507 | struct neigh_table *tbl;
1508 | struct net_device *dev = NULL;
1509 | int err;
1510 |
1511 | err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1512 | if (err < 0)
1513 | goto out;
1514 |
1515 | err = -EINVAL; |
1516 | if (tb[NDA_DST] == NULL) | |
1517 | goto out; | |
1518 | ||
1519 | ndm = nlmsg_data(nlh); | |
1520 | if (ndm->ndm_ifindex) { | |
1521 | dev = dev_get_by_index(ndm->ndm_ifindex); | |
1522 | if (dev == NULL) { | |
1523 | err = -ENODEV; | |
1524 | goto out; | |
1525 | } | |
1526 | ||
1527 | if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) | |
1528 | goto out_dev_put; | |
1529 | } | |
1530 | ||
1531 | read_lock(&neigh_tbl_lock); |
1532 | for (tbl = neigh_tables; tbl; tbl = tbl->next) { | |
1533 | int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE; |
1534 | struct neighbour *neigh; | |
1535 | void *dst, *lladdr; | |
1536 | |
1537 | if (tbl->family != ndm->ndm_family) | |
1538 | continue; | |
1539 | read_unlock(&neigh_tbl_lock); | |
1540 | ||
1541 | if (nla_len(tb[NDA_DST]) < tbl->key_len)
1542 | goto out_dev_put;
1543 | dst = nla_data(tb[NDA_DST]); |
1544 | lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL; | |
1545 | |
1546 | if (ndm->ndm_flags & NTF_PROXY) { | |
1547 | struct pneigh_entry *pn; |
1548 | ||
1549 | err = -ENOBUFS; | |
1550 | pn = pneigh_lookup(tbl, dst, dev, 1); | |
1551 | if (pn) { | |
1552 | pn->flags = ndm->ndm_flags; | |
1553 | err = 0; | |
1554 | } | |
1555 | goto out_dev_put; |
1556 | } | |
1557 | ||
1558 | if (dev == NULL)
1559 | goto out_dev_put;
1560 | |
1561 | neigh = neigh_lookup(tbl, dst, dev); | |
1562 | if (neigh == NULL) { | |
1563 | if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { | |
1564 | err = -ENOENT; | |
1565 | goto out_dev_put; | |
1566 | } | |
1567 |
1568 | neigh = __neigh_lookup_errno(tbl, dst, dev); |
1569 | if (IS_ERR(neigh)) { | |
1570 | err = PTR_ERR(neigh); | |
1571 | goto out_dev_put; |
1572 | } | |
1573 | } else {
1574 | if (nlh->nlmsg_flags & NLM_F_EXCL) { |
1575 | err = -EEXIST; | |
1576 | neigh_release(neigh); | |
1577 | goto out_dev_put; |
1578 | } | |
1579 |
1580 | if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) |
1581 | flags &= ~NEIGH_UPDATE_F_OVERRIDE; | |
1582 | } | |
1583 |
1584 | err = neigh_update(neigh, lladdr, ndm->ndm_state, flags); |
1585 | neigh_release(neigh); | |
1586 | goto out_dev_put; |
1587 | } | |
1588 | ||
1589 | read_unlock(&neigh_tbl_lock); | |
1590 | err = -EAFNOSUPPORT; |
1591 | ||
1592 | out_dev_put: |
1593 | if (dev) | |
1594 | dev_put(dev); | |
1595 | out: | |
1596 | return err; | |
1597 | } | |
1598 | ||
c7fb64db TG |
1599 | static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms) |
1600 | { | |
ca860fb3 TG |
1601 | struct nlattr *nest; |
1602 | ||
1603 | nest = nla_nest_start(skb, NDTA_PARMS); | |
1604 | if (nest == NULL) | |
1605 | return -ENOBUFS; | |
c7fb64db TG |
1606 | |
1607 | if (parms->dev) | |
ca860fb3 TG |
1608 | NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex); |
1609 | ||
1610 | NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)); | |
1611 | NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len); | |
1612 | NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen); | |
1613 | NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes); | |
1614 | NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes); | |
1615 | NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes); | |
1616 | NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time); | |
1617 | NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME, | |
c7fb64db | 1618 | parms->base_reachable_time); |
ca860fb3 TG |
1619 | NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime); |
1620 | NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time); | |
1621 | NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time); | |
1622 | NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay); | |
1623 | NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay); | |
1624 | NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime); | |
c7fb64db | 1625 | |
ca860fb3 | 1626 | return nla_nest_end(skb, nest); |
c7fb64db | 1627 | |
ca860fb3 TG |
1628 | nla_put_failure: |
1629 | return nla_nest_cancel(skb, nest); | |
c7fb64db TG |
1630 | } |
1631 | ||
ca860fb3 TG |
1632 | static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, |
1633 | u32 pid, u32 seq, int type, int flags) | |
c7fb64db TG |
1634 | { |
1635 | struct nlmsghdr *nlh; | |
1636 | struct ndtmsg *ndtmsg; | |
1637 | ||
ca860fb3 TG |
1638 | nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags); |
1639 | if (nlh == NULL) | |
1640 | return -ENOBUFS; | |
c7fb64db | 1641 | |
ca860fb3 | 1642 | ndtmsg = nlmsg_data(nlh); |
c7fb64db TG |
1643 | |
1644 | read_lock_bh(&tbl->lock); | |
1645 | ndtmsg->ndtm_family = tbl->family; | |
9ef1d4c7 PM |
1646 | ndtmsg->ndtm_pad1 = 0; |
1647 | ndtmsg->ndtm_pad2 = 0; | |
c7fb64db | 1648 | |
ca860fb3 TG |
1649 | NLA_PUT_STRING(skb, NDTA_NAME, tbl->id); |
1650 | NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval); | |
1651 | NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1); | |
1652 | NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2); | |
1653 | NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3); | |
c7fb64db TG |
1654 | |
1655 | { | |
1656 | unsigned long now = jiffies; | |
1657 | unsigned int flush_delta = now - tbl->last_flush; | |
1658 | unsigned int rand_delta = now - tbl->last_rand; | |
1659 | ||
1660 | struct ndt_config ndc = { | |
1661 | .ndtc_key_len = tbl->key_len, | |
1662 | .ndtc_entry_size = tbl->entry_size, | |
1663 | .ndtc_entries = atomic_read(&tbl->entries), | |
1664 | .ndtc_last_flush = jiffies_to_msecs(flush_delta), | |
1665 | .ndtc_last_rand = jiffies_to_msecs(rand_delta), | |
1666 | .ndtc_hash_rnd = tbl->hash_rnd, | |
1667 | .ndtc_hash_mask = tbl->hash_mask, | |
1668 | .ndtc_hash_chain_gc = tbl->hash_chain_gc, | |
1669 | .ndtc_proxy_qlen = tbl->proxy_queue.qlen, | |
1670 | }; | |
1671 | ||
ca860fb3 | 1672 | NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc); |
c7fb64db TG |
1673 | } |
1674 | ||
1675 | { | |
1676 | int cpu; | |
1677 | struct ndt_stats ndst; | |
1678 | ||
1679 | memset(&ndst, 0, sizeof(ndst)); | |
1680 | ||
6f912042 | 1681 | for_each_possible_cpu(cpu) { |
c7fb64db TG |
1682 | struct neigh_statistics *st; |
1683 | ||
c7fb64db TG |
1684 | st = per_cpu_ptr(tbl->stats, cpu); |
1685 | ndst.ndts_allocs += st->allocs; | |
1686 | ndst.ndts_destroys += st->destroys; | |
1687 | ndst.ndts_hash_grows += st->hash_grows; | |
1688 | ndst.ndts_res_failed += st->res_failed; | |
1689 | ndst.ndts_lookups += st->lookups; | |
1690 | ndst.ndts_hits += st->hits; | |
1691 | ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast; | |
1692 | ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast; | |
1693 | ndst.ndts_periodic_gc_runs += st->periodic_gc_runs; | |
1694 | ndst.ndts_forced_gc_runs += st->forced_gc_runs; | |
1695 | } | |
1696 | ||
ca860fb3 | 1697 | NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst); |
c7fb64db TG |
1698 | } |
1699 | ||
1700 | BUG_ON(tbl->parms.dev); | |
1701 | if (neightbl_fill_parms(skb, &tbl->parms) < 0) | |
ca860fb3 | 1702 | goto nla_put_failure; |
c7fb64db TG |
1703 | |
1704 | read_unlock_bh(&tbl->lock); | |
ca860fb3 | 1705 | return nlmsg_end(skb, nlh); |
c7fb64db | 1706 | |
ca860fb3 | 1707 | nla_put_failure: |
c7fb64db | 1708 | read_unlock_bh(&tbl->lock); |
ca860fb3 | 1709 | return nlmsg_cancel(skb, nlh); |
c7fb64db TG |
1710 | } |
1711 | ||
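/*
 * A complete RTM_NEWNEIGHTBL message therefore carries the ndtmsg header,
 * the table-wide NDTA_* scalars, an NDTA_CONFIG snapshot, an NDTA_STATS
 * blob summed over all possible CPUs, and one nested NDTA_PARMS block for
 * the table's default parameters; per-device parameters are reported
 * separately by neightbl_fill_param_info() below.
 */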
ca860fb3 TG |
1712 | static int neightbl_fill_param_info(struct sk_buff *skb, |
1713 | struct neigh_table *tbl, | |
c7fb64db | 1714 | struct neigh_parms *parms, |
ca860fb3 TG |
1715 | u32 pid, u32 seq, int type, |
1716 | unsigned int flags) | |
c7fb64db TG |
1717 | { |
1718 | struct ndtmsg *ndtmsg; | |
1719 | struct nlmsghdr *nlh; | |
1720 | ||
ca860fb3 TG |
1721 | nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags); |
1722 | if (nlh == NULL) | |
1723 | return -ENOBUFS; | |
c7fb64db | 1724 | |
ca860fb3 | 1725 | ndtmsg = nlmsg_data(nlh); |
c7fb64db TG |
1726 | |
1727 | read_lock_bh(&tbl->lock); | |
1728 | ndtmsg->ndtm_family = tbl->family; | |
9ef1d4c7 PM |
1729 | ndtmsg->ndtm_pad1 = 0; |
1730 | ndtmsg->ndtm_pad2 = 0; | |
c7fb64db | 1731 | |
ca860fb3 TG |
1732 | if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 || |
1733 | neightbl_fill_parms(skb, parms) < 0) | |
1734 | goto errout; | |
c7fb64db TG |
1735 | |
1736 | read_unlock_bh(&tbl->lock); | |
ca860fb3 TG |
1737 | return nlmsg_end(skb, nlh); |
1738 | errout: | |
c7fb64db | 1739 | read_unlock_bh(&tbl->lock); |
ca860fb3 | 1740 | return nlmsg_cancel(skb, nlh); |
c7fb64db TG |
1741 | } |
1742 | ||
1743 | static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl, | |
1744 | int ifindex) | |
1745 | { | |
1746 | struct neigh_parms *p; | |
1747 | ||
1748 | for (p = &tbl->parms; p; p = p->next) | |
1749 | if ((p->dev && p->dev->ifindex == ifindex) || | |
1750 | (!p->dev && !ifindex)) | |
1751 | return p; | |
1752 | ||
1753 | return NULL; | |
1754 | } | |
1755 | ||
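/*
 * An ifindex of zero selects the table's default parameters (&tbl->parms,
 * which has no device attached); any other value must match a per-device
 * neigh_parms entry or the lookup returns NULL.
 */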
6b3f8674 TG |
1756 | static struct nla_policy nl_neightbl_policy[NDTA_MAX+1] __read_mostly = { |
1757 | [NDTA_NAME] = { .type = NLA_STRING }, | |
1758 | [NDTA_THRESH1] = { .type = NLA_U32 }, | |
1759 | [NDTA_THRESH2] = { .type = NLA_U32 }, | |
1760 | [NDTA_THRESH3] = { .type = NLA_U32 }, | |
1761 | [NDTA_GC_INTERVAL] = { .type = NLA_U64 }, | |
1762 | [NDTA_PARMS] = { .type = NLA_NESTED }, | |
1763 | }; | |
1764 | ||
1765 | static struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] __read_mostly = { | |
1766 | [NDTPA_IFINDEX] = { .type = NLA_U32 }, | |
1767 | [NDTPA_QUEUE_LEN] = { .type = NLA_U32 }, | |
1768 | [NDTPA_PROXY_QLEN] = { .type = NLA_U32 }, | |
1769 | [NDTPA_APP_PROBES] = { .type = NLA_U32 }, | |
1770 | [NDTPA_UCAST_PROBES] = { .type = NLA_U32 }, | |
1771 | [NDTPA_MCAST_PROBES] = { .type = NLA_U32 }, | |
1772 | [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 }, | |
1773 | [NDTPA_GC_STALETIME] = { .type = NLA_U64 }, | |
1774 | [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 }, | |
1775 | [NDTPA_RETRANS_TIME] = { .type = NLA_U64 }, | |
1776 | [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 }, | |
1777 | [NDTPA_PROXY_DELAY] = { .type = NLA_U64 }, | |
1778 | [NDTPA_LOCKTIME] = { .type = NLA_U64 }, | |
1779 | }; | |
1780 | ||
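/*
 * The NLA_U64 entries above carry time values as milliseconds on the
 * wire; neightbl_set() below reads them with nla_get_msecs(), which
 * converts back to jiffies and so mirrors the NLA_PUT_MSECS() encoding
 * used in neightbl_fill_parms().
 */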
c7fb64db TG |
1781 | int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) |
1782 | { | |
1783 | struct neigh_table *tbl; | |
6b3f8674 TG |
1784 | struct ndtmsg *ndtmsg; |
1785 | struct nlattr *tb[NDTA_MAX+1]; | |
1786 | int err; | |
c7fb64db | 1787 | |
6b3f8674 TG |
1788 | err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX, |
1789 | nl_neightbl_policy); | |
1790 | if (err < 0) | |
1791 | goto errout; | |
c7fb64db | 1792 | |
6b3f8674 TG |
1793 | if (tb[NDTA_NAME] == NULL) { |
1794 | err = -EINVAL; | |
1795 | goto errout; | |
1796 | } | |
1797 | ||
1798 | ndtmsg = nlmsg_data(nlh); | |
c7fb64db TG |
1799 | read_lock(&neigh_tbl_lock); |
1800 | for (tbl = neigh_tables; tbl; tbl = tbl->next) { | |
1801 | if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family) | |
1802 | continue; | |
1803 | ||
6b3f8674 | 1804 | if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) |
c7fb64db TG |
1805 | break; |
1806 | } | |
1807 | ||
1808 | if (tbl == NULL) { | |
1809 | err = -ENOENT; | |
6b3f8674 | 1810 | goto errout_locked; |
c7fb64db TG |
1811 | } |
1812 | ||
1813 | /* |
1814 | * We acquire tbl->lock so that the periodic timers always |
1815 | * see a consistent set of values. |
1816 | */ |
1817 | write_lock_bh(&tbl->lock); | |
1818 | ||
6b3f8674 TG |
1819 | if (tb[NDTA_PARMS]) { |
1820 | struct nlattr *tbp[NDTPA_MAX+1]; | |
c7fb64db | 1821 | struct neigh_parms *p; |
6b3f8674 | 1822 | int i, ifindex = 0; |
c7fb64db | 1823 | |
6b3f8674 TG |
1824 | err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS], |
1825 | nl_ntbl_parm_policy); | |
1826 | if (err < 0) | |
1827 | goto errout_tbl_lock; | |
c7fb64db | 1828 | |
6b3f8674 TG |
1829 | if (tbp[NDTPA_IFINDEX]) |
1830 | ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]); | |
c7fb64db TG |
1831 | |
1832 | p = lookup_neigh_params(tbl, ifindex); | |
1833 | if (p == NULL) { | |
1834 | err = -ENOENT; | |
6b3f8674 | 1835 | goto errout_tbl_lock; |
c7fb64db | 1836 | } |
c7fb64db | 1837 | |
6b3f8674 TG |
1838 | for (i = 1; i <= NDTPA_MAX; i++) { |
1839 | if (tbp[i] == NULL) | |
1840 | continue; | |
c7fb64db | 1841 | |
6b3f8674 TG |
1842 | switch (i) { |
1843 | case NDTPA_QUEUE_LEN: | |
1844 | p->queue_len = nla_get_u32(tbp[i]); | |
1845 | break; | |
1846 | case NDTPA_PROXY_QLEN: | |
1847 | p->proxy_qlen = nla_get_u32(tbp[i]); | |
1848 | break; | |
1849 | case NDTPA_APP_PROBES: | |
1850 | p->app_probes = nla_get_u32(tbp[i]); | |
1851 | break; | |
1852 | case NDTPA_UCAST_PROBES: | |
1853 | p->ucast_probes = nla_get_u32(tbp[i]); | |
1854 | break; | |
1855 | case NDTPA_MCAST_PROBES: | |
1856 | p->mcast_probes = nla_get_u32(tbp[i]); | |
1857 | break; | |
1858 | case NDTPA_BASE_REACHABLE_TIME: | |
1859 | p->base_reachable_time = nla_get_msecs(tbp[i]); | |
1860 | break; | |
1861 | case NDTPA_GC_STALETIME: | |
1862 | p->gc_staletime = nla_get_msecs(tbp[i]); | |
1863 | break; | |
1864 | case NDTPA_DELAY_PROBE_TIME: | |
1865 | p->delay_probe_time = nla_get_msecs(tbp[i]); | |
1866 | break; | |
1867 | case NDTPA_RETRANS_TIME: | |
1868 | p->retrans_time = nla_get_msecs(tbp[i]); | |
1869 | break; | |
1870 | case NDTPA_ANYCAST_DELAY: | |
1871 | p->anycast_delay = nla_get_msecs(tbp[i]); | |
1872 | break; | |
1873 | case NDTPA_PROXY_DELAY: | |
1874 | p->proxy_delay = nla_get_msecs(tbp[i]); | |
1875 | break; | |
1876 | case NDTPA_LOCKTIME: | |
1877 | p->locktime = nla_get_msecs(tbp[i]); | |
1878 | break; | |
1879 | } | |
1880 | } | |
1881 | } | |
c7fb64db | 1882 | |
6b3f8674 TG |
1883 | if (tb[NDTA_THRESH1]) |
1884 | tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]); | |
c7fb64db | 1885 | |
6b3f8674 TG |
1886 | if (tb[NDTA_THRESH2]) |
1887 | tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]); | |
c7fb64db | 1888 | |
6b3f8674 TG |
1889 | if (tb[NDTA_THRESH3]) |
1890 | tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]); | |
c7fb64db | 1891 | |
6b3f8674 TG |
1892 | if (tb[NDTA_GC_INTERVAL]) |
1893 | tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]); | |
c7fb64db TG |
1894 | |
1895 | err = 0; | |
1896 | ||
6b3f8674 | 1897 | errout_tbl_lock: |
c7fb64db | 1898 | write_unlock_bh(&tbl->lock); |
6b3f8674 | 1899 | errout_locked: |
c7fb64db | 1900 | read_unlock(&neigh_tbl_lock); |
6b3f8674 | 1901 | errout: |
c7fb64db TG |
1902 | return err; |
1903 | } | |
1904 | ||
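/*
 * Userspace reaches this handler with RTM_SETNEIGHTBL messages; iproute2
 * exposes it as "ip ntable change" (for example, something like
 * "ip ntable change name arp_cache thresh1 512" -- command line shown for
 * illustration only).  NDTA_NAME is mandatory; everything else is
 * optional and applied field by field under tbl->lock.
 */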
1905 | int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) | |
1906 | { | |
ca860fb3 TG |
1907 | int family, tidx, nidx = 0; |
1908 | int tbl_skip = cb->args[0]; | |
1909 | int neigh_skip = cb->args[1]; | |
c7fb64db TG |
1910 | struct neigh_table *tbl; |
1911 | ||
ca860fb3 | 1912 | family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; |
c7fb64db TG |
1913 | |
1914 | read_lock(&neigh_tbl_lock); | |
ca860fb3 | 1915 | for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) { |
c7fb64db TG |
1916 | struct neigh_parms *p; |
1917 | ||
ca860fb3 | 1918 | if (tidx < tbl_skip || (family && tbl->family != family)) |
c7fb64db TG |
1919 | continue; |
1920 | ||
ca860fb3 TG |
1921 | if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid, |
1922 | cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL, | |
1923 | NLM_F_MULTI) <= 0) | |
c7fb64db TG |
1924 | break; |
1925 | ||
ca860fb3 TG |
1926 | for (nidx = 0, p = tbl->parms.next; p; p = p->next, nidx++) { |
1927 | if (nidx < neigh_skip) | |
c7fb64db TG |
1928 | continue; |
1929 | ||
ca860fb3 TG |
1930 | if (neightbl_fill_param_info(skb, tbl, p, |
1931 | NETLINK_CB(cb->skb).pid, | |
1932 | cb->nlh->nlmsg_seq, | |
1933 | RTM_NEWNEIGHTBL, | |
1934 | NLM_F_MULTI) <= 0) | |
c7fb64db TG |
1935 | goto out; |
1936 | } | |
1937 | ||
ca860fb3 | 1938 | neigh_skip = 0; |
c7fb64db TG |
1939 | } |
1940 | out: | |
1941 | read_unlock(&neigh_tbl_lock); | |
ca860fb3 TG |
1942 | cb->args[0] = tidx; |
1943 | cb->args[1] = nidx; | |
c7fb64db TG |
1944 | |
1945 | return skb->len; | |
1946 | } | |
1da177e4 | 1947 | |
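/*
 * Dump resume state lives in cb->args[]: args[0] is the table index and
 * args[1] the per-table parms index, so a dump that fills the skb picks
 * up exactly where it stopped on the next callback invocation.
 */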
8b8aec50 TG |
1948 | static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh, |
1949 | u32 pid, u32 seq, int type, unsigned int flags) | |
1da177e4 LT |
1950 | { |
1951 | unsigned long now = jiffies; | |
1da177e4 | 1952 | struct nda_cacheinfo ci; |
8b8aec50 TG |
1953 | struct nlmsghdr *nlh; |
1954 | struct ndmsg *ndm; | |
1955 | ||
1956 | nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags); | |
1957 | if (nlh == NULL) | |
1958 | return -ENOBUFS; | |
1da177e4 | 1959 | |
8b8aec50 TG |
1960 | ndm = nlmsg_data(nlh); |
1961 | ndm->ndm_family = neigh->ops->family; | |
9ef1d4c7 PM |
1962 | ndm->ndm_pad1 = 0; |
1963 | ndm->ndm_pad2 = 0; | |
8b8aec50 TG |
1964 | ndm->ndm_flags = neigh->flags; |
1965 | ndm->ndm_type = neigh->type; | |
1966 | ndm->ndm_ifindex = neigh->dev->ifindex; | |
1da177e4 | 1967 | |
8b8aec50 TG |
1968 | NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key); |
1969 | ||
1970 | read_lock_bh(&neigh->lock); | |
1971 | ndm->ndm_state = neigh->nud_state; | |
1972 | if ((neigh->nud_state & NUD_VALID) && | |
1973 | nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) { | |
1974 | read_unlock_bh(&neigh->lock); | |
1975 | goto nla_put_failure; | |
1976 | } | |
1977 | ||
1978 | ci.ndm_used = now - neigh->used; | |
1979 | ci.ndm_confirmed = now - neigh->confirmed; | |
1980 | ci.ndm_updated = now - neigh->updated; | |
1981 | ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1; | |
1982 | read_unlock_bh(&neigh->lock); | |
1983 | ||
1984 | NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes)); | |
1985 | NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci); | |
1986 | ||
1987 | return nlmsg_end(skb, nlh); | |
1988 | ||
1989 | nla_put_failure: | |
1990 | return nlmsg_cancel(skb, nlh); | |
1da177e4 LT |
1991 | } |
1992 | ||
1993 | ||
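/*
 * The resulting RTM_NEWNEIGH message is an ndmsg header followed by
 * NDA_DST (the protocol address), NDA_LLADDR (only while the entry is in
 * a NUD_VALID state), NDA_CACHEINFO and NDA_PROBES.  neigh->lock is held
 * just long enough to snapshot ndm_state, the link-layer address and the
 * cacheinfo timestamps, so those fields describe one consistent moment.
 */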
1994 | static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, | |
1995 | struct netlink_callback *cb) | |
1996 | { | |
1997 | struct neighbour *n; | |
1998 | int rc, h, s_h = cb->args[1]; | |
1999 | int idx, s_idx = idx = cb->args[2]; | |
2000 | ||
2001 | for (h = 0; h <= tbl->hash_mask; h++) { | |
2002 | if (h < s_h) | |
2003 | continue; | |
2004 | if (h > s_h) | |
2005 | s_idx = 0; | |
2006 | read_lock_bh(&tbl->lock); | |
2007 | for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) { | |
2008 | if (idx < s_idx) | |
2009 | continue; | |
2010 | if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid, | |
2011 | cb->nlh->nlmsg_seq, | |
b6544c0b JHS |
2012 | RTM_NEWNEIGH, |
2013 | NLM_F_MULTI) <= 0) { | |
1da177e4 LT |
2014 | read_unlock_bh(&tbl->lock); |
2015 | rc = -1; | |
2016 | goto out; | |
2017 | } | |
2018 | } | |
2019 | read_unlock_bh(&tbl->lock); | |
2020 | } | |
2021 | rc = skb->len; | |
2022 | out: | |
2023 | cb->args[1] = h; | |
2024 | cb->args[2] = idx; | |
2025 | return rc; | |
2026 | } | |
2027 | ||
2028 | int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) | |
2029 | { | |
2030 | struct neigh_table *tbl; | |
2031 | int t, family, s_t; | |
2032 | ||
2033 | read_lock(&neigh_tbl_lock); | |
8b8aec50 | 2034 | family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; |
1da177e4 LT |
2035 | s_t = cb->args[0]; |
2036 | ||
2037 | for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) { | |
2038 | if (t < s_t || (family && tbl->family != family)) | |
2039 | continue; | |
2040 | if (t > s_t) | |
2041 | memset(&cb->args[1], 0, sizeof(cb->args) - | |
2042 | sizeof(cb->args[0])); | |
2043 | if (neigh_dump_table(tbl, skb, cb) < 0) | |
2044 | break; | |
2045 | } | |
2046 | read_unlock(&neigh_tbl_lock); | |
2047 | ||
2048 | cb->args[0] = t; | |
2049 | return skb->len; | |
2050 | } | |
2051 | ||
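/*
 * For neighbour dumps the resume state is three-level: cb->args[0] is the
 * table, cb->args[1] the hash bucket and cb->args[2] the position inside
 * that bucket's chain, which is why neigh_dump_info() clears everything
 * past args[0] whenever it moves on to a new table.
 */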
2052 | void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie) | |
2053 | { | |
2054 | int chain; | |
2055 | ||
2056 | read_lock_bh(&tbl->lock); | |
2057 | for (chain = 0; chain <= tbl->hash_mask; chain++) { | |
2058 | struct neighbour *n; | |
2059 | ||
2060 | for (n = tbl->hash_buckets[chain]; n; n = n->next) | |
2061 | cb(n, cookie); | |
2062 | } | |
2063 | read_unlock_bh(&tbl->lock); | |
2064 | } | |
2065 | EXPORT_SYMBOL(neigh_for_each); | |
2066 | ||
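/*
 * Minimal usage sketch (hypothetical, not part of this file): a caller
 * could count the entries pointing at one device with a callback like the
 * one below and then run neigh_for_each(&arp_tbl, count_by_dev, &arg).
 * The callback executes with tbl->lock read-held and BHs disabled, so it
 * must not sleep or try to take the table lock again.
 */
struct count_by_dev_arg {		/* hypothetical helper type */
	struct net_device *dev;
	int count;
};

static void count_by_dev(struct neighbour *n, void *cookie)
{
	struct count_by_dev_arg *arg = cookie;

	if (n->dev == arg->dev)
		arg->count++;
}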
2067 | /* The tbl->lock must be held as a writer and BH disabled. */ | |
2068 | void __neigh_for_each_release(struct neigh_table *tbl, | |
2069 | int (*cb)(struct neighbour *)) | |
2070 | { | |
2071 | int chain; | |
2072 | ||
2073 | for (chain = 0; chain <= tbl->hash_mask; chain++) { | |
2074 | struct neighbour *n, **np; | |
2075 | ||
2076 | np = &tbl->hash_buckets[chain]; | |
2077 | while ((n = *np) != NULL) { | |
2078 | int release; | |
2079 | ||
2080 | write_lock(&n->lock); | |
2081 | release = cb(n); | |
2082 | if (release) { | |
2083 | *np = n->next; | |
2084 | n->dead = 1; | |
2085 | } else | |
2086 | np = &n->next; | |
2087 | write_unlock(&n->lock); | |
2088 | if (release) | |
2089 | neigh_release(n); | |
2090 | } | |
2091 | } | |
2092 | } | |
2093 | EXPORT_SYMBOL(__neigh_for_each_release); | |
2094 | ||
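/*
 * The callback handed to __neigh_for_each_release() returns non-zero to
 * have an entry unlinked, marked dead and released, or zero to leave it
 * alone; since entries are unhooked in bulk, the caller must already hold
 * tbl->lock for writing with BHs off, as the comment above requires.
 */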
2095 | #ifdef CONFIG_PROC_FS | |
2096 | ||
2097 | static struct neighbour *neigh_get_first(struct seq_file *seq) | |
2098 | { | |
2099 | struct neigh_seq_state *state = seq->private; | |
2100 | struct neigh_table *tbl = state->tbl; | |
2101 | struct neighbour *n = NULL; | |
2102 | int bucket = state->bucket; | |
2103 | ||
2104 | state->flags &= ~NEIGH_SEQ_IS_PNEIGH; | |
2105 | for (bucket = 0; bucket <= tbl->hash_mask; bucket++) { | |
2106 | n = tbl->hash_buckets[bucket]; | |
2107 | ||
2108 | while (n) { | |
2109 | if (state->neigh_sub_iter) { | |
2110 | loff_t fakep = 0; | |
2111 | void *v; | |
2112 | ||
2113 | v = state->neigh_sub_iter(state, n, &fakep); | |
2114 | if (!v) | |
2115 | goto next; | |
2116 | } | |
2117 | if (!(state->flags & NEIGH_SEQ_SKIP_NOARP)) | |
2118 | break; | |
2119 | if (n->nud_state & ~NUD_NOARP) | |
2120 | break; | |
2121 | next: | |
2122 | n = n->next; | |
2123 | } | |
2124 | ||
2125 | if (n) | |
2126 | break; | |
2127 | } | |
2128 | state->bucket = bucket; | |
2129 | ||
2130 | return n; | |
2131 | } | |
2132 | ||
2133 | static struct neighbour *neigh_get_next(struct seq_file *seq, | |
2134 | struct neighbour *n, | |
2135 | loff_t *pos) | |
2136 | { | |
2137 | struct neigh_seq_state *state = seq->private; | |
2138 | struct neigh_table *tbl = state->tbl; | |
2139 | ||
2140 | if (state->neigh_sub_iter) { | |
2141 | void *v = state->neigh_sub_iter(state, n, pos); | |
2142 | if (v) | |
2143 | return n; | |
2144 | } | |
2145 | n = n->next; | |
2146 | ||
2147 | while (1) { | |
2148 | while (n) { | |
2149 | if (state->neigh_sub_iter) { | |
2150 | void *v = state->neigh_sub_iter(state, n, pos); | |
2151 | if (v) | |
2152 | return n; | |
2153 | goto next; | |
2154 | } | |
2155 | if (!(state->flags & NEIGH_SEQ_SKIP_NOARP)) | |
2156 | break; | |
2157 | ||
2158 | if (n->nud_state & ~NUD_NOARP) | |
2159 | break; | |
2160 | next: | |
2161 | n = n->next; | |
2162 | } | |
2163 | ||
2164 | if (n) | |
2165 | break; | |
2166 | ||
2167 | if (++state->bucket > tbl->hash_mask) | |
2168 | break; | |
2169 | ||
2170 | n = tbl->hash_buckets[state->bucket]; | |
2171 | } | |
2172 | ||
2173 | if (n && pos) | |
2174 | --(*pos); | |
2175 | return n; | |
2176 | } | |
2177 | ||
2178 | static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos) | |
2179 | { | |
2180 | struct neighbour *n = neigh_get_first(seq); | |
2181 | ||
2182 | if (n) { | |
2183 | while (*pos) { | |
2184 | n = neigh_get_next(seq, n, pos); | |
2185 | if (!n) | |
2186 | break; | |
2187 | } | |
2188 | } | |
2189 | return *pos ? NULL : n; | |
2190 | } | |
2191 | ||
2192 | static struct pneigh_entry *pneigh_get_first(struct seq_file *seq) | |
2193 | { | |
2194 | struct neigh_seq_state *state = seq->private; | |
2195 | struct neigh_table *tbl = state->tbl; | |
2196 | struct pneigh_entry *pn = NULL; | |
2197 | int bucket = state->bucket; | |
2198 | ||
2199 | state->flags |= NEIGH_SEQ_IS_PNEIGH; | |
2200 | for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) { | |
2201 | pn = tbl->phash_buckets[bucket]; | |
2202 | if (pn) | |
2203 | break; | |
2204 | } | |
2205 | state->bucket = bucket; | |
2206 | ||
2207 | return pn; | |
2208 | } | |
2209 | ||
2210 | static struct pneigh_entry *pneigh_get_next(struct seq_file *seq, | |
2211 | struct pneigh_entry *pn, | |
2212 | loff_t *pos) | |
2213 | { | |
2214 | struct neigh_seq_state *state = seq->private; | |
2215 | struct neigh_table *tbl = state->tbl; | |
2216 | ||
2217 | pn = pn->next; | |
2218 | while (!pn) { | |
2219 | if (++state->bucket > PNEIGH_HASHMASK) | |
2220 | break; | |
2221 | pn = tbl->phash_buckets[state->bucket]; | |
2222 | if (pn) | |
2223 | break; | |
2224 | } | |
2225 | ||
2226 | if (pn && pos) | |
2227 | --(*pos); | |
2228 | ||
2229 | return pn; | |
2230 | } | |
2231 | ||
2232 | static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos) | |
2233 | { | |
2234 | struct pneigh_entry *pn = pneigh_get_first(seq); | |
2235 | ||
2236 | if (pn) { | |
2237 | while (*pos) { | |
2238 | pn = pneigh_get_next(seq, pn, pos); | |
2239 | if (!pn) | |
2240 | break; | |
2241 | } | |
2242 | } | |
2243 | return *pos ? NULL : pn; | |
2244 | } | |
2245 | ||
2246 | static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos) | |
2247 | { | |
2248 | struct neigh_seq_state *state = seq->private; | |
2249 | void *rc; | |
2250 | ||
2251 | rc = neigh_get_idx(seq, pos); | |
2252 | if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY)) | |
2253 | rc = pneigh_get_idx(seq, pos); | |
2254 | ||
2255 | return rc; | |
2256 | } | |
2257 | ||
2258 | void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags) | |
2259 | { | |
2260 | struct neigh_seq_state *state = seq->private; | |
2261 | loff_t pos_minus_one; | |
2262 | ||
2263 | state->tbl = tbl; | |
2264 | state->bucket = 0; | |
2265 | state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH); | |
2266 | ||
2267 | read_lock_bh(&tbl->lock); | |
2268 | ||
2269 | pos_minus_one = *pos - 1; | |
2270 | return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN; | |
2271 | } | |
2272 | EXPORT_SYMBOL(neigh_seq_start); | |
2273 | ||
2274 | void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
2275 | { | |
2276 | struct neigh_seq_state *state; | |
2277 | void *rc; | |
2278 | ||
2279 | if (v == SEQ_START_TOKEN) { | |
2280 | rc = neigh_get_idx(seq, pos); | |
2281 | goto out; | |
2282 | } | |
2283 | ||
2284 | state = seq->private; | |
2285 | if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) { | |
2286 | rc = neigh_get_next(seq, v, NULL); | |
2287 | if (rc) | |
2288 | goto out; | |
2289 | if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY)) | |
2290 | rc = pneigh_get_first(seq); | |
2291 | } else { | |
2292 | BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY); | |
2293 | rc = pneigh_get_next(seq, v, NULL); | |
2294 | } | |
2295 | out: | |
2296 | ++(*pos); | |
2297 | return rc; | |
2298 | } | |
2299 | EXPORT_SYMBOL(neigh_seq_next); | |
2300 | ||
2301 | void neigh_seq_stop(struct seq_file *seq, void *v) | |
2302 | { | |
2303 | struct neigh_seq_state *state = seq->private; | |
2304 | struct neigh_table *tbl = state->tbl; | |
2305 | ||
2306 | read_unlock_bh(&tbl->lock); | |
2307 | } | |
2308 | EXPORT_SYMBOL(neigh_seq_stop); | |
2309 | ||
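/*
 * Wiring sketch for a protocol's /proc seq_file (every name prefixed with
 * foo_ is hypothetical): the protocol supplies its own start and show
 * routines and reuses neigh_seq_next()/neigh_seq_stop() unchanged.
 */
static void *foo_seq_start(struct seq_file *seq, loff_t *pos)
{
	/* skip NUD_NOARP entries; without NEIGH_SEQ_NEIGH_ONLY the
	 * iteration falls through to the proxy (pneigh) entries too */
	return neigh_seq_start(seq, pos, &foo_tbl, NEIGH_SEQ_SKIP_NOARP);
}

static struct seq_operations foo_seq_ops = {
	.start = foo_seq_start,
	.next  = neigh_seq_next,
	.stop  = neigh_seq_stop,
	.show  = foo_seq_show,		/* protocol-specific formatting */
};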
2310 | /* statistics via seq_file */ | |
2311 | ||
2312 | static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos) | |
2313 | { | |
2314 | struct proc_dir_entry *pde = seq->private; | |
2315 | struct neigh_table *tbl = pde->data; | |
2316 | int cpu; | |
2317 | ||
2318 | if (*pos == 0) | |
2319 | return SEQ_START_TOKEN; | |
2320 | ||
2321 | for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) { | |
2322 | if (!cpu_possible(cpu)) | |
2323 | continue; | |
2324 | *pos = cpu+1; | |
2325 | return per_cpu_ptr(tbl->stats, cpu); | |
2326 | } | |
2327 | return NULL; | |
2328 | } | |
2329 | ||
2330 | static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
2331 | { | |
2332 | struct proc_dir_entry *pde = seq->private; | |
2333 | struct neigh_table *tbl = pde->data; | |
2334 | int cpu; | |
2335 | ||
2336 | for (cpu = *pos; cpu < NR_CPUS; ++cpu) { | |
2337 | if (!cpu_possible(cpu)) | |
2338 | continue; | |
2339 | *pos = cpu+1; | |
2340 | return per_cpu_ptr(tbl->stats, cpu); | |
2341 | } | |
2342 | return NULL; | |
2343 | } | |
2344 | ||
2345 | static void neigh_stat_seq_stop(struct seq_file *seq, void *v) | |
2346 | { | |
2347 | ||
2348 | } | |
2349 | ||
2350 | static int neigh_stat_seq_show(struct seq_file *seq, void *v) | |
2351 | { | |
2352 | struct proc_dir_entry *pde = seq->private; | |
2353 | struct neigh_table *tbl = pde->data; | |
2354 | struct neigh_statistics *st = v; | |
2355 | ||
2356 | if (v == SEQ_START_TOKEN) { | |
5bec0039 | 2357 | seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n"); |
1da177e4 LT |
2358 | return 0; |
2359 | } | |
2360 | ||
2361 | seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx " | |
2362 | "%08lx %08lx %08lx %08lx\n", | |
2363 | atomic_read(&tbl->entries), | |
2364 | ||
2365 | st->allocs, | |
2366 | st->destroys, | |
2367 | st->hash_grows, | |
2368 | ||
2369 | st->lookups, | |
2370 | st->hits, | |
2371 | ||
2372 | st->res_failed, | |
2373 | ||
2374 | st->rcv_probes_mcast, | |
2375 | st->rcv_probes_ucast, | |
2376 | ||
2377 | st->periodic_gc_runs, | |
2378 | st->forced_gc_runs | |
2379 | ); | |
2380 | ||
2381 | return 0; | |
2382 | } | |
2383 | ||
2384 | static struct seq_operations neigh_stat_seq_ops = { | |
2385 | .start = neigh_stat_seq_start, | |
2386 | .next = neigh_stat_seq_next, | |
2387 | .stop = neigh_stat_seq_stop, | |
2388 | .show = neigh_stat_seq_show, | |
2389 | }; | |
2390 | ||
2391 | static int neigh_stat_seq_open(struct inode *inode, struct file *file) | |
2392 | { | |
2393 | int ret = seq_open(file, &neigh_stat_seq_ops); | |
2394 | ||
2395 | if (!ret) { | |
2396 | struct seq_file *sf = file->private_data; | |
2397 | sf->private = PDE(inode); | |
2398 | } | |
2399 | return ret; | |
2400 | } |
2401 | ||
2402 | static struct file_operations neigh_stat_seq_fops = { | |
2403 | .owner = THIS_MODULE, | |
2404 | .open = neigh_stat_seq_open, | |
2405 | .read = seq_read, | |
2406 | .llseek = seq_lseek, | |
2407 | .release = seq_release, | |
2408 | }; | |
2409 | ||
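/*
 * These statistics surface as /proc/net/stat/<tbl->id> (the proc entry is
 * created when the table is initialised earlier in this file): one header
 * line, then one row of hex counters per possible CPU, with the first
 * column repeating the table's current entry count.
 */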
2410 | #endif /* CONFIG_PROC_FS */ | |
2411 | ||
2412 | #ifdef CONFIG_ARPD | |
b8673311 | 2413 | static void __neigh_notify(struct neighbour *n, int type, int flags) |
1da177e4 | 2414 | { |
8b8aec50 | 2415 | struct sk_buff *skb; |
b8673311 | 2416 | int err = -ENOBUFS; |
1da177e4 | 2417 | |
8b8aec50 TG |
2418 | skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); |
2419 | if (skb == NULL) | |
b8673311 | 2420 | goto errout; |
1da177e4 | 2421 | |
b8673311 TG |
2422 | err = neigh_fill_info(skb, n, 0, 0, type, flags); |
2423 | if (err < 0) { | |
1da177e4 | 2424 | kfree_skb(skb); |
b8673311 | 2425 | goto errout; |
1da177e4 | 2426 | } |
b8673311 TG |
2427 | |
2428 | err = rtnl_notify(skb, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); | |
2429 | errout: | |
2430 | if (err < 0) | |
2431 | rtnl_set_sk_err(RTNLGRP_NEIGH, err); | |
1da177e4 LT |
2432 | } |
2433 | ||
b8673311 | 2434 | void neigh_app_ns(struct neighbour *n) |
1da177e4 | 2435 | { |
b8673311 TG |
2436 | __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST); |
2437 | } | |
1da177e4 | 2438 | |
b8673311 TG |
2439 | static void neigh_app_notify(struct neighbour *n) |
2440 | { | |
2441 | __neigh_notify(n, RTM_NEWNEIGH, 0); | |
1da177e4 LT |
2442 | } |
2443 | ||
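/*
 * With CONFIG_ARPD, neigh_app_ns() asks a userspace daemon to resolve an
 * address by multicasting an RTM_GETNEIGH request on RTNLGRP_NEIGH, while
 * neigh_app_notify() announces updates as RTM_NEWNEIGH.  Both are
 * fire-and-forget: on allocation or fill failure the error is merely
 * recorded with rtnl_set_sk_err().
 */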
2444 | #endif /* CONFIG_ARPD */ | |
2445 | ||
2446 | #ifdef CONFIG_SYSCTL | |
2447 | ||
2448 | static struct neigh_sysctl_table { | |
2449 | struct ctl_table_header *sysctl_header; | |
2450 | ctl_table neigh_vars[__NET_NEIGH_MAX]; | |
2451 | ctl_table neigh_dev[2]; | |
2452 | ctl_table neigh_neigh_dir[2]; | |
2453 | ctl_table neigh_proto_dir[2]; | |
2454 | ctl_table neigh_root_dir[2]; | |
ab32ea5d | 2455 | } neigh_sysctl_template __read_mostly = { |
1da177e4 LT |
2456 | .neigh_vars = { |
2457 | { | |
2458 | .ctl_name = NET_NEIGH_MCAST_SOLICIT, | |
2459 | .procname = "mcast_solicit", | |
2460 | .maxlen = sizeof(int), | |
2461 | .mode = 0644, | |
2462 | .proc_handler = &proc_dointvec, | |
2463 | }, | |
2464 | { | |
2465 | .ctl_name = NET_NEIGH_UCAST_SOLICIT, | |
2466 | .procname = "ucast_solicit", | |
2467 | .maxlen = sizeof(int), | |
2468 | .mode = 0644, | |
2469 | .proc_handler = &proc_dointvec, | |
2470 | }, | |
2471 | { | |
2472 | .ctl_name = NET_NEIGH_APP_SOLICIT, | |
2473 | .procname = "app_solicit", | |
2474 | .maxlen = sizeof(int), | |
2475 | .mode = 0644, | |
2476 | .proc_handler = &proc_dointvec, | |
2477 | }, | |
2478 | { | |
2479 | .ctl_name = NET_NEIGH_RETRANS_TIME, | |
2480 | .procname = "retrans_time", | |
2481 | .maxlen = sizeof(int), | |
2482 | .mode = 0644, | |
2483 | .proc_handler = &proc_dointvec_userhz_jiffies, | |
2484 | }, | |
2485 | { | |
2486 | .ctl_name = NET_NEIGH_REACHABLE_TIME, | |
2487 | .procname = "base_reachable_time", | |
2488 | .maxlen = sizeof(int), | |
2489 | .mode = 0644, | |
2490 | .proc_handler = &proc_dointvec_jiffies, | |
2491 | .strategy = &sysctl_jiffies, | |
2492 | }, | |
2493 | { | |
2494 | .ctl_name = NET_NEIGH_DELAY_PROBE_TIME, | |
2495 | .procname = "delay_first_probe_time", | |
2496 | .maxlen = sizeof(int), | |
2497 | .mode = 0644, | |
2498 | .proc_handler = &proc_dointvec_jiffies, | |
2499 | .strategy = &sysctl_jiffies, | |
2500 | }, | |
2501 | { | |
2502 | .ctl_name = NET_NEIGH_GC_STALE_TIME, | |
2503 | .procname = "gc_stale_time", | |
2504 | .maxlen = sizeof(int), | |
2505 | .mode = 0644, | |
2506 | .proc_handler = &proc_dointvec_jiffies, | |
2507 | .strategy = &sysctl_jiffies, | |
2508 | }, | |
2509 | { | |
2510 | .ctl_name = NET_NEIGH_UNRES_QLEN, | |
2511 | .procname = "unres_qlen", | |
2512 | .maxlen = sizeof(int), | |
2513 | .mode = 0644, | |
2514 | .proc_handler = &proc_dointvec, | |
2515 | }, | |
2516 | { | |
2517 | .ctl_name = NET_NEIGH_PROXY_QLEN, | |
2518 | .procname = "proxy_qlen", | |
2519 | .maxlen = sizeof(int), | |
2520 | .mode = 0644, | |
2521 | .proc_handler = &proc_dointvec, | |
2522 | }, | |
2523 | { | |
2524 | .ctl_name = NET_NEIGH_ANYCAST_DELAY, | |
2525 | .procname = "anycast_delay", | |
2526 | .maxlen = sizeof(int), | |
2527 | .mode = 0644, | |
2528 | .proc_handler = &proc_dointvec_userhz_jiffies, | |
2529 | }, | |
2530 | { | |
2531 | .ctl_name = NET_NEIGH_PROXY_DELAY, | |
2532 | .procname = "proxy_delay", | |
2533 | .maxlen = sizeof(int), | |
2534 | .mode = 0644, | |
2535 | .proc_handler = &proc_dointvec_userhz_jiffies, | |
2536 | }, | |
2537 | { | |
2538 | .ctl_name = NET_NEIGH_LOCKTIME, | |
2539 | .procname = "locktime", | |
2540 | .maxlen = sizeof(int), | |
2541 | .mode = 0644, | |
2542 | .proc_handler = &proc_dointvec_userhz_jiffies, | |
2543 | }, | |
2544 | { | |
2545 | .ctl_name = NET_NEIGH_GC_INTERVAL, | |
2546 | .procname = "gc_interval", | |
2547 | .maxlen = sizeof(int), | |
2548 | .mode = 0644, | |
2549 | .proc_handler = &proc_dointvec_jiffies, | |
2550 | .strategy = &sysctl_jiffies, | |
2551 | }, | |
2552 | { | |
2553 | .ctl_name = NET_NEIGH_GC_THRESH1, | |
2554 | .procname = "gc_thresh1", | |
2555 | .maxlen = sizeof(int), | |
2556 | .mode = 0644, | |
2557 | .proc_handler = &proc_dointvec, | |
2558 | }, | |
2559 | { | |
2560 | .ctl_name = NET_NEIGH_GC_THRESH2, | |
2561 | .procname = "gc_thresh2", | |
2562 | .maxlen = sizeof(int), | |
2563 | .mode = 0644, | |
2564 | .proc_handler = &proc_dointvec, | |
2565 | }, | |
2566 | { | |
2567 | .ctl_name = NET_NEIGH_GC_THRESH3, | |
2568 | .procname = "gc_thresh3", | |
2569 | .maxlen = sizeof(int), | |
2570 | .mode = 0644, | |
2571 | .proc_handler = &proc_dointvec, | |
2572 | }, | |
2573 | { | |
2574 | .ctl_name = NET_NEIGH_RETRANS_TIME_MS, | |
2575 | .procname = "retrans_time_ms", | |
2576 | .maxlen = sizeof(int), | |
2577 | .mode = 0644, | |
2578 | .proc_handler = &proc_dointvec_ms_jiffies, | |
2579 | .strategy = &sysctl_ms_jiffies, | |
2580 | }, | |
2581 | { | |
2582 | .ctl_name = NET_NEIGH_REACHABLE_TIME_MS, | |
2583 | .procname = "base_reachable_time_ms", | |
2584 | .maxlen = sizeof(int), | |
2585 | .mode = 0644, | |
2586 | .proc_handler = &proc_dointvec_ms_jiffies, | |
2587 | .strategy = &sysctl_ms_jiffies, | |
2588 | }, | |
2589 | }, | |
2590 | .neigh_dev = { | |
2591 | { | |
2592 | .ctl_name = NET_PROTO_CONF_DEFAULT, | |
2593 | .procname = "default", | |
2594 | .mode = 0555, | |
2595 | }, | |
2596 | }, | |
2597 | .neigh_neigh_dir = { | |
2598 | { | |
2599 | .procname = "neigh", | |
2600 | .mode = 0555, | |
2601 | }, | |
2602 | }, | |
2603 | .neigh_proto_dir = { | |
2604 | { | |
2605 | .mode = 0555, | |
2606 | }, | |
2607 | }, | |
2608 | .neigh_root_dir = { | |
2609 | { | |
2610 | .ctl_name = CTL_NET, | |
2611 | .procname = "net", | |
2612 | .mode = 0555, | |
2613 | }, | |
2614 | }, | |
2615 | }; | |
2616 | ||
2617 | int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, | |
2618 | int p_id, int pdev_id, char *p_name, | |
2619 | proc_handler *handler, ctl_handler *strategy) | |
2620 | { | |
2621 | struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL); | |
2622 | const char *dev_name_source = NULL; | |
2623 | char *dev_name = NULL; | |
2624 | int err = 0; | |
2625 | ||
2626 | if (!t) | |
2627 | return -ENOBUFS; | |
2628 | memcpy(t, &neigh_sysctl_template, sizeof(*t)); | |
2629 | t->neigh_vars[0].data = &p->mcast_probes; | |
2630 | t->neigh_vars[1].data = &p->ucast_probes; | |
2631 | t->neigh_vars[2].data = &p->app_probes; | |
2632 | t->neigh_vars[3].data = &p->retrans_time; | |
2633 | t->neigh_vars[4].data = &p->base_reachable_time; | |
2634 | t->neigh_vars[5].data = &p->delay_probe_time; | |
2635 | t->neigh_vars[6].data = &p->gc_staletime; | |
2636 | t->neigh_vars[7].data = &p->queue_len; | |
2637 | t->neigh_vars[8].data = &p->proxy_qlen; | |
2638 | t->neigh_vars[9].data = &p->anycast_delay; | |
2639 | t->neigh_vars[10].data = &p->proxy_delay; | |
2640 | t->neigh_vars[11].data = &p->locktime; | |
2641 | ||
2642 | if (dev) { | |
2643 | dev_name_source = dev->name; | |
2644 | t->neigh_dev[0].ctl_name = dev->ifindex; | |
2645 | t->neigh_vars[12].procname = NULL; | |
2646 | t->neigh_vars[13].procname = NULL; | |
2647 | t->neigh_vars[14].procname = NULL; | |
2648 | t->neigh_vars[15].procname = NULL; | |
2649 | } else { | |
2650 | dev_name_source = t->neigh_dev[0].procname; | |
2651 | t->neigh_vars[12].data = (int *)(p + 1); | |
2652 | t->neigh_vars[13].data = (int *)(p + 1) + 1; | |
2653 | t->neigh_vars[14].data = (int *)(p + 1) + 2; | |
2654 | t->neigh_vars[15].data = (int *)(p + 1) + 3; | |
2655 | } | |
2656 | ||
2657 | t->neigh_vars[16].data = &p->retrans_time; | |
2658 | t->neigh_vars[17].data = &p->base_reachable_time; | |
2659 | ||
2660 | if (handler || strategy) { | |
2661 | /* RetransTime */ | |
2662 | t->neigh_vars[3].proc_handler = handler; | |
2663 | t->neigh_vars[3].strategy = strategy; | |
2664 | t->neigh_vars[3].extra1 = dev; | |
2665 | /* ReachableTime */ | |
2666 | t->neigh_vars[4].proc_handler = handler; | |
2667 | t->neigh_vars[4].strategy = strategy; | |
2668 | t->neigh_vars[4].extra1 = dev; | |
2669 | /* RetransTime (in milliseconds) */ |
2670 | t->neigh_vars[16].proc_handler = handler; | |
2671 | t->neigh_vars[16].strategy = strategy; | |
2672 | t->neigh_vars[16].extra1 = dev; | |
2673 | /* ReachableTime (in milliseconds) */ | |
2674 | t->neigh_vars[17].proc_handler = handler; | |
2675 | t->neigh_vars[17].strategy = strategy; | |
2676 | t->neigh_vars[17].extra1 = dev; | |
2677 | } | |
2678 | ||
543537bd | 2679 | dev_name = kstrdup(dev_name_source, GFP_KERNEL); |
1da177e4 LT |
2680 | if (!dev_name) { |
2681 | err = -ENOBUFS; | |
2682 | goto free; | |
2683 | } | |
2684 | ||
2685 | t->neigh_dev[0].procname = dev_name; | |
2686 | ||
2687 | t->neigh_neigh_dir[0].ctl_name = pdev_id; | |
2688 | ||
2689 | t->neigh_proto_dir[0].procname = p_name; | |
2690 | t->neigh_proto_dir[0].ctl_name = p_id; | |
2691 | ||
2692 | t->neigh_dev[0].child = t->neigh_vars; | |
2693 | t->neigh_neigh_dir[0].child = t->neigh_dev; | |
2694 | t->neigh_proto_dir[0].child = t->neigh_neigh_dir; | |
2695 | t->neigh_root_dir[0].child = t->neigh_proto_dir; | |
2696 | ||
2697 | t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0); | |
2698 | if (!t->sysctl_header) { | |
2699 | err = -ENOBUFS; | |
2700 | goto free_procname; | |
2701 | } | |
2702 | p->sysctl_table = t; | |
2703 | return 0; | |
2704 | ||
2705 | /* error path */ | |
2706 | free_procname: | |
2707 | kfree(dev_name); | |
2708 | free: | |
2709 | kfree(t); | |
2710 | ||
2711 | return err; | |
2712 | } | |
2713 | ||
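/*
 * Caller sketch (illustrative only; the ctl ids shown are the generic
 * NET_IPV4 ones, and the NULL handler/strategy pair keeps the default
 * jiffies conversions installed by the template):
 *
 *	err = neigh_sysctl_register(dev, p, NET_IPV4, NET_IPV4_NEIGH,
 *				    "ipv4", NULL, NULL);
 *
 * Protocols that need their own conversion for retrans_time and
 * base_reachable_time pass a proc_handler/ctl_handler pair instead, which
 * is exactly what the handler/strategy hooks above override.
 */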
2714 | void neigh_sysctl_unregister(struct neigh_parms *p) | |
2715 | { | |
2716 | if (p->sysctl_table) { | |
2717 | struct neigh_sysctl_table *t = p->sysctl_table; | |
2718 | p->sysctl_table = NULL; | |
2719 | unregister_sysctl_table(t->sysctl_header); | |
2720 | kfree(t->neigh_dev[0].procname); | |
2721 | kfree(t); | |
2722 | } | |
2723 | } | |
2724 | ||
2725 | #endif /* CONFIG_SYSCTL */ | |
2726 | ||
2727 | EXPORT_SYMBOL(__neigh_event_send); | |
1da177e4 LT |
2728 | EXPORT_SYMBOL(neigh_changeaddr); |
2729 | EXPORT_SYMBOL(neigh_compat_output); | |
2730 | EXPORT_SYMBOL(neigh_connected_output); | |
2731 | EXPORT_SYMBOL(neigh_create); | |
2732 | EXPORT_SYMBOL(neigh_delete); | |
2733 | EXPORT_SYMBOL(neigh_destroy); | |
2734 | EXPORT_SYMBOL(neigh_dump_info); | |
2735 | EXPORT_SYMBOL(neigh_event_ns); | |
2736 | EXPORT_SYMBOL(neigh_ifdown); | |
2737 | EXPORT_SYMBOL(neigh_lookup); | |
2738 | EXPORT_SYMBOL(neigh_lookup_nodev); | |
2739 | EXPORT_SYMBOL(neigh_parms_alloc); | |
2740 | EXPORT_SYMBOL(neigh_parms_release); | |
2741 | EXPORT_SYMBOL(neigh_rand_reach_time); | |
2742 | EXPORT_SYMBOL(neigh_resolve_output); | |
2743 | EXPORT_SYMBOL(neigh_table_clear); | |
2744 | EXPORT_SYMBOL(neigh_table_init); | |
bd89efc5 | 2745 | EXPORT_SYMBOL(neigh_table_init_no_netlink); |
1da177e4 | 2746 | EXPORT_SYMBOL(neigh_update); |
1da177e4 LT |
2747 | EXPORT_SYMBOL(pneigh_enqueue); |
2748 | EXPORT_SYMBOL(pneigh_lookup); | |
2749 | ||
2750 | #ifdef CONFIG_ARPD | |
2751 | EXPORT_SYMBOL(neigh_app_ns); | |
2752 | #endif | |
2753 | #ifdef CONFIG_SYSCTL | |
2754 | EXPORT_SYMBOL(neigh_sysctl_register); | |
2755 | EXPORT_SYMBOL(neigh_sysctl_unregister); | |
2756 | #endif |