2 * Generic address resolution entity
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/netdevice.h>
23 #include <linux/proc_fs.h>
25 #include <linux/sysctl.h>
27 #include <linux/times.h>
28 #include <net/net_namespace.h>
29 #include <net/neighbour.h>
32 #include <net/netevent.h>
33 #include <net/netlink.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/random.h>
36 #include <linux/string.h>
37 #include <linux/log2.h>
/* Leveled debug-print macros.  NEIGH_PRINTK0 is always on; PRINTK1/PRINTK2
 * default to no-ops and are re-defined as real printks below — presumably
 * inside #if NEIGH_DEBUG >= N guards that are not visible in this extract;
 * TODO confirm against the full file.
 */
41 #define NEIGH_PRINTK(x...) printk(x)
42 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
43 #define NEIGH_PRINTK0 NEIGH_PRINTK
44 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
45 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
49 #define NEIGH_PRINTK1 NEIGH_PRINTK
53 #define NEIGH_PRINTK2 NEIGH_PRINTK
/* Proxy-neighbour hash: mask 0xF => 16 buckets (see pneigh_lookup). */
56 #define PNEIGH_HASHMASK 0xF
/* Forward declarations for helpers defined later in this file, plus the
 * file-scope list head of all registered neighbour tables (protected by
 * neigh_tbl_lock, defined below).
 */
58 static void neigh_timer_handler(unsigned long arg);
59 static void __neigh_notify(struct neighbour *n, int type, int flags);
60 static void neigh_update_notify(struct neighbour *neigh);
61 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
62 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
64 static struct neigh_table *neigh_tables;
66 static const struct file_operations neigh_stat_seq_fops;
70 Neighbour hash table buckets are protected with rwlock tbl->lock.
72 - All the scans/updates to hash buckets MUST be made under this lock.
73 - NOTHING clever should be made under this lock: no callbacks
74 to protocol backends, no attempts to send something to the network.
75 It will result in deadlocks, if backend/driver wants to use neighbour
77 - If the entry requires some non-trivial actions, increase
78 its reference count and release table lock.
80 Neighbour entries are protected:
81 - with reference count.
82 - with rwlock neigh->lock
84 Reference count prevents destruction.
86 neigh->lock mainly serializes ll address data and its validity state.
87 * However, the same lock is used to protect other entry fields:
91 Again, nothing clever shall be made under neigh->lock,
92 the most complicated procedure, which we allow is dev->hard_header.
93 It is assumed that dev->hard_header is simplistic and does
94 not make callbacks to neighbour tables.
96 The last lock is neigh_tbl_lock. It is pure SMP lock, protecting
97 list of neighbour tables. This list is used only in process context,
100 static DEFINE_RWLOCK(neigh_tbl_lock);
102 static int neigh_blackhole(struct sk_buff *skb)
/* Final teardown path for an entry removed from the table: run the
 * per-parms cleanup hook (if any), notify listeners of the deletion via
 * rtnetlink (RTM_DELNEIGH), then drop our reference.
 */
108 static void neigh_cleanup_and_release(struct neighbour *neigh)
110 	if (neigh->parms->neigh_cleanup)
111 		neigh->parms->neigh_cleanup(neigh);
113 	__neigh_notify(neigh, RTM_DELNEIGH, 0);
114 	neigh_release(neigh);
118  * It is a random distribution in the interval (1/2)*base...(3/2)*base.
119  * It corresponds to the default IPv6 settings and is not overridable,
120  * because it is a really reasonable choice.
123 unsigned long neigh_rand_reach_time(unsigned long base)
	/* base/2 + uniform[0, base); a base of 0 yields 0 (randomization off). */
125 	return (base ? (net_random() % base) + (base >> 1) : 0);
/* Synchronous garbage collection: walk every hash bucket under the table
 * write lock and release entries that are unreferenced (refcnt == 1) and
 * not NUD_PERMANENT.  Called from neigh_alloc() when the table exceeds
 * its gc thresholds.  The unlink-from-chain and return statements are
 * elided in this extract.
 */
129 static int neigh_forced_gc(struct neigh_table *tbl)
134 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
136 	write_lock_bh(&tbl->lock);
137 	for (i = 0; i <= tbl->hash_mask; i++) {
138 		struct neighbour *n, **np;
140 		np = &tbl->hash_buckets[i];
141 		while ((n = *np) != NULL) {
142 			/* Neighbour record may be discarded if:
143 			 * - nobody refers to it.
144 			 * - it is not permanent
			 */
146 			write_lock(&n->lock);
147 			if (atomic_read(&n->refcnt) == 1 &&
148 			    !(n->nud_state & NUD_PERMANENT)) {
				/* Per-entry lock dropped before release; the
				 * unlink happens in lines elided here. */
152 				write_unlock(&n->lock);
153 				neigh_cleanup_and_release(n);
156 			write_unlock(&n->lock);
	/* Record when we last shrank the table (rate-limits forced GC). */
161 	tbl->last_flush = jiffies;
163 	write_unlock_bh(&tbl->lock);
/* Arm the per-entry state-machine timer.  mod_timer() returning nonzero
 * means the timer was already pending — that is a bug here, hence the
 * warning (rest of the message/recovery elided in this extract).
 */
168 static void neigh_add_timer(struct neighbour *n, unsigned long when)
171 	if (unlikely(mod_timer(&n->timer, when))) {
172 		printk("NEIGH: BUG, double timer add, state is %x\n",
/* Cancel the entry's timer if it is in a timed NUD state.  Returns
 * nonzero when a pending timer was actually deleted (the caller then
 * drops the reference the timer held — elided in this extract).
 */
178 static int neigh_del_timer(struct neighbour *n)
180 	if ((n->nud_state & NUD_IN_TIMER) &&
181 	    del_timer(&n->timer)) {
/* Drain and free every skb queued for delayed proxy processing
 * (per-skb cleanup lines elided in this extract).
 */
188 static void pneigh_queue_purge(struct sk_buff_head *list)
192 	while ((skb = skb_dequeue(list)) != NULL) {
/* Detach every entry belonging to @dev (or all entries when @dev is NULL)
 * from the hash table.  Caller holds tbl->lock for writing (see
 * neigh_changeaddr / neigh_ifdown).  Entries still referenced elsewhere
 * cannot be freed yet, so they are neutered instead: queue purged, output
 * redirected to neigh_blackhole, state downgraded.
 */
198 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
202 	for (i = 0; i <= tbl->hash_mask; i++) {
203 		struct neighbour *n, **np = &tbl->hash_buckets[i];
205 		while ((n = *np) != NULL) {
			/* Skip entries for other devices. */
206 			if (dev && n->dev != dev) {
211 			write_lock(&n->lock);
215 			if (atomic_read(&n->refcnt) != 1) {
216 				/* The most unpleasant situation.
217 				   We must destroy neighbour entry,
218 				   but someone still uses it.
220 				   The destroy will be delayed until
221 				   the last user releases us, but
222 				   we must kill timers etc. and move
				   it to safe state. */
225 				skb_queue_purge(&n->arp_queue);
226 				n->output = neigh_blackhole;
227 				if (n->nud_state & NUD_VALID)
228 					n->nud_state = NUD_NOARP;
230 					n->nud_state = NUD_NONE;
231 				NEIGH_PRINTK2("neigh %p is stray.\n", n);
233 			write_unlock(&n->lock);
234 			neigh_cleanup_and_release(n);
/* Flush all cached entries for @dev after its hardware address changed.
 * Takes the table write lock around the bucket walk.
 */
239 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
241 	write_lock_bh(&tbl->lock);
242 	neigh_flush_dev(tbl, dev);
243 	write_unlock_bh(&tbl->lock);
/* Device-down handler: flush the device's cache entries and its proxy
 * entries, then stop the proxy timer and drop any queued proxy skbs.
 * del_timer_sync() is safe here because we are in process context.
 */
246 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
248 	write_lock_bh(&tbl->lock);
249 	neigh_flush_dev(tbl, dev);
250 	pneigh_ifdown(tbl, dev);
251 	write_unlock_bh(&tbl->lock);
253 	del_timer_sync(&tbl->proxy_timer);
254 	pneigh_queue_purge(&tbl->proxy_queue);
/* Allocate and minimally initialize a new neighbour entry for @tbl.
 * Enforces the gc thresholds first: over gc_thresh3 (or over gc_thresh2
 * with no flush in the last 5s) forces a synchronous GC, and allocation
 * is refused if that fails to make room (failure path partly elided).
 * Returns the new entry with refcnt 1, or NULL.
 */
258 static struct neighbour *neigh_alloc(struct neigh_table *tbl)
260 	struct neighbour *n = NULL;
261 	unsigned long now = jiffies;
264 	entries = atomic_inc_return(&tbl->entries) - 1;
265 	if (entries >= tbl->gc_thresh3 ||
266 	    (entries >= tbl->gc_thresh2 &&
267 	     time_after(now, tbl->last_flush + 5 * HZ))) {
268 		if (!neigh_forced_gc(tbl) &&
269 		    entries >= tbl->gc_thresh3)
273 	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
277 	skb_queue_head_init(&n->arp_queue);
278 	rwlock_init(&n->lock);
279 	n->updated	  = n->used = now;
280 	n->nud_state	  = NUD_NONE;
	/* No link-layer address yet: anything transmitted is dropped. */
281 	n->output	  = neigh_blackhole;
282 	n->parms	  = neigh_parms_clone(&tbl->parms);
283 	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
285 	NEIGH_CACHE_STAT_INC(tbl, allocs);
287 	atomic_set(&n->refcnt, 1);
	/* Error path: undo the speculative entries increment. */
293 	atomic_dec(&tbl->entries);
/* Allocate a zeroed bucket array for @entries chain heads.  Small tables
 * come from kzalloc; larger ones fall back to whole zeroed pages so the
 * allocation stays physically contiguous.  Paired with neigh_hash_free().
 */
297 static struct neighbour **neigh_hash_alloc(unsigned int entries)
299 	unsigned long size = entries * sizeof(struct neighbour *);
300 	struct neighbour **ret;
302 	if (size <= PAGE_SIZE) {
303 		ret = kzalloc(size, GFP_ATOMIC);
305 		ret = (struct neighbour **)
306 		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
/* Free a bucket array obtained from neigh_hash_alloc(), using the same
 * size threshold to pick kfree vs. free_pages (kfree branch elided in
 * this extract).
 */
311 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
313 	unsigned long size = entries * sizeof(struct neighbour *);
315 	if (size <= PAGE_SIZE)
318 		free_pages((unsigned long)hash, get_order(size));
/* Grow the hash table to @new_entries buckets (must be a power of two).
 * Caller holds tbl->lock for writing (called from neigh_create).  A new
 * hash_rnd is drawn, every entry is rehashed into the new array, and the
 * old array is freed.  On allocation failure the early-return is elided
 * in this extract.
 */
321 static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
323 	struct neighbour **new_hash, **old_hash;
324 	unsigned int i, new_hash_mask, old_entries;
326 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
328 	BUG_ON(!is_power_of_2(new_entries));
329 	new_hash = neigh_hash_alloc(new_entries);
333 	old_entries = tbl->hash_mask + 1;
334 	new_hash_mask = new_entries - 1;
335 	old_hash = tbl->hash_buckets;
	/* Fresh random seed defeats hash-collision attacks on the new table. */
337 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
338 	for (i = 0; i < old_entries; i++) {
339 		struct neighbour *n, *next;
341 		for (n = old_hash[i]; n; n = next) {
342 			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
344 			hash_val &= new_hash_mask;
			/* Push onto the head of the new chain. */
347 			n->next = new_hash[hash_val];
348 			new_hash[hash_val] = n;
351 	tbl->hash_buckets = new_hash;
352 	tbl->hash_mask = new_hash_mask;
354 	neigh_hash_free(old_hash, old_entries);
/* Look up the entry keyed by (@pkey, @dev).  Takes the table read lock;
 * on a hit the refcount bump happens in a line elided from this extract,
 * before the pointer is returned to the caller.
 */
357 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
358 			       struct net_device *dev)
361 	int key_len = tbl->key_len;
362 	u32 hash_val = tbl->hash(pkey, dev);
364 	NEIGH_CACHE_STAT_INC(tbl, lookups);
366 	read_lock_bh(&tbl->lock);
367 	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
368 		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
370 			NEIGH_CACHE_STAT_INC(tbl, hits);
374 	read_unlock_bh(&tbl->lock);
/* Like neigh_lookup() but matched on key alone, ignoring the device
 * (and hashing with a NULL device).
 */
378 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
381 	int key_len = tbl->key_len;
382 	u32 hash_val = tbl->hash(pkey, NULL);
384 	NEIGH_CACHE_STAT_INC(tbl, lookups);
386 	read_lock_bh(&tbl->lock);
387 	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
388 		if (!memcmp(n->primary_key, pkey, key_len)) {
390 			NEIGH_CACHE_STAT_INC(tbl, hits);
394 	read_unlock_bh(&tbl->lock);
/* Create (or find, if raced) the entry for (@pkey, @dev).  Allocates via
 * neigh_alloc, runs the protocol constructor and the per-device
 * neigh_setup hook, then inserts under the table write lock — rechecking
 * for a concurrent insert of the same key first.  Returns the entry or
 * an ERR_PTR.  Several error/duplicate-hit lines are elided in this
 * extract.
 */
398 struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
399 			       struct net_device *dev)
402 	int key_len = tbl->key_len;
404 	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
407 	rc = ERR_PTR(-ENOBUFS);
411 	memcpy(n->primary_key, pkey, key_len);
415 	/* Protocol specific setup. */
416 	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
418 		goto out_neigh_release;
421 	/* Device specific setup. */
422 	if (n->parms->neigh_setup &&
423 	    (error = n->parms->neigh_setup(n)) < 0) {
425 		goto out_neigh_release;
	/* Backdate confirmed so the new entry is immediately suspect. */
428 	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
430 	write_lock_bh(&tbl->lock);
432 	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
433 		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
435 	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
	/* parms marked dead means the device went away under us. */
437 	if (n->parms->dead) {
438 		rc = ERR_PTR(-EINVAL);
	/* Lost a race with a concurrent creator? Return the winner. */
442 	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
443 		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
450 	n->next = tbl->hash_buckets[hash_val];
451 	tbl->hash_buckets[hash_val] = n;
454 	write_unlock_bh(&tbl->lock);
455 	NEIGH_PRINTK2("neigh %p is created.\n", n);
460 	write_unlock_bh(&tbl->lock);
/* Look up a proxy-neighbour entry by key, optionally creating it when
 * @creat is nonzero.  Proxy entries live in a small (16-bucket,
 * PNEIGH_HASHMASK) separate hash folded down from the last 4 key bytes.
 * A wildcard entry (n->dev == NULL) matches any device.  Creation runs
 * in process context (GFP_KERNEL); some failure-path lines are elided
 * in this extract.
 */
466 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
467 				    struct net_device *dev, int creat)
469 	struct pneigh_entry *n;
470 	int key_len = tbl->key_len;
471 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
473 	hash_val ^= (hash_val >> 16);
474 	hash_val ^= hash_val >> 8;
475 	hash_val ^= hash_val >> 4;
476 	hash_val &= PNEIGH_HASHMASK;
478 	read_lock_bh(&tbl->lock);
480 	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
481 		if (!memcmp(n->key, pkey, key_len) &&
482 		    (n->dev == dev || !n->dev)) {
483 			read_unlock_bh(&tbl->lock);
487 	read_unlock_bh(&tbl->lock);
	/* Not found: allocate with room for the key tail. */
494 	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
498 	memcpy(n->key, pkey, key_len);
503 	if (tbl->pconstructor && tbl->pconstructor(n)) {
511 	write_lock_bh(&tbl->lock);
512 	n->next = tbl->phash_buckets[hash_val];
513 	tbl->phash_buckets[hash_val] = n;
514 	write_unlock_bh(&tbl->lock);
/* Remove the proxy entry exactly matching (@pkey, @dev).  Uses the same
 * key-folding hash as pneigh_lookup.  On a match the entry is unlinked
 * under the write lock, then destroyed outside it via the optional
 * pdestructor (unlink/free lines elided in this extract).  Returns an
 * error when nothing matched (not-found return elided).
 */
520 int pneigh_delete(struct neigh_table *tbl, const void *pkey,
521 		  struct net_device *dev)
523 	struct pneigh_entry *n, **np;
524 	int key_len = tbl->key_len;
525 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
527 	hash_val ^= (hash_val >> 16);
528 	hash_val ^= hash_val >> 8;
529 	hash_val ^= hash_val >> 4;
530 	hash_val &= PNEIGH_HASHMASK;
532 	write_lock_bh(&tbl->lock);
533 	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
535 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
537 			write_unlock_bh(&tbl->lock);
538 			if (tbl->pdestructor)
546 	write_unlock_bh(&tbl->lock);
/* Delete every proxy entry bound to @dev — or all entries when @dev is
 * NULL — across all PNEIGH buckets.  Caller holds tbl->lock for writing
 * (see neigh_ifdown); the unlink/free lines are elided in this extract.
 */
550 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
552 	struct pneigh_entry *n, **np;
555 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
556 		np = &tbl->phash_buckets[h];
557 		while ((n = *np) != NULL) {
558 			if (!dev || n->dev == dev) {
560 				if (tbl->pdestructor)
575  *	neighbour must already be out of the table;
  *	(called when the last reference is dropped — warns if the entry
  *	still looks alive, kills timers, poisons and releases any cached
  *	hardware-header entries, then frees the object.)
578 void neigh_destroy(struct neighbour *neigh)
582 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
586 		       "Destroying alive neighbour %p\n", neigh);
591 	if (neigh_del_timer(neigh))
592 		printk(KERN_WARNING "Impossible event.\n");
	/* Detach every hh_cache entry; anyone still using one now gets
	 * neigh_blackhole as its output. */
594 	while ((hh = neigh->hh) != NULL) {
595 		neigh->hh = hh->hh_next;
598 		write_seqlock_bh(&hh->hh_lock);
599 		hh->hh_output = neigh_blackhole;
600 		write_sequnlock_bh(&hh->hh_lock);
601 		if (atomic_dec_and_test(&hh->hh_refcnt))
605 	skb_queue_purge(&neigh->arp_queue);
608 	neigh_parms_put(neigh->parms);
610 	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
612 	atomic_dec(&neigh->tbl->entries);
613 	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
616 /* Neighbour state is suspicious;
     (switch output paths to the slow, resolving ->output so reachability
     is re-verified before further traffic.)
619    Called with write_locked neigh.
 */
621 static void neigh_suspect(struct neighbour *neigh)
625 	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
627 	neigh->output = neigh->ops->output;
629 	for (hh = neigh->hh; hh; hh = hh->hh_next)
630 		hh->hh_output = neigh->ops->output;
633 /* Neighbour state is OK;
     (switch output paths to the fast connected/hh variants that skip
     resolution checks.)
636    Called with write_locked neigh.
 */
638 static void neigh_connect(struct neighbour *neigh)
642 	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
644 	neigh->output = neigh->ops->connected_output;
646 	for (hh = neigh->hh; hh; hh = hh->hh_next)
647 		hh->hh_output = neigh->ops->hh_output;
/* Periodic GC timer: runs in softirq context (hence plain write_lock on
 * tbl->lock).  Each invocation scans ONE hash chain (hash_chain_gc walks
 * round-robin), releasing idle unreferenced entries, and re-arms itself
 * so the whole table is covered every base_reachable_time/2.  Also
 * refreshes every parms' randomized reachable_time every 300 seconds.
 */
650 static void neigh_periodic_timer(unsigned long arg)
652 	struct neigh_table *tbl = (struct neigh_table *)arg;
653 	struct neighbour *n, **np;
654 	unsigned long expire, now = jiffies;
656 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
658 	write_lock(&tbl->lock);
661 	 *	periodically recompute ReachableTime from random function
664 	if (time_after(now, tbl->last_rand + 300 * HZ)) {
665 		struct neigh_parms *p;
666 		tbl->last_rand = now;
667 		for (p = &tbl->parms; p; p = p->next)
669 				neigh_rand_reach_time(p->base_reachable_time);
672 	np = &tbl->hash_buckets[tbl->hash_chain_gc];
673 	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
675 	while ((n = *np) != NULL) {
678 		write_lock(&n->lock);
680 		state = n->nud_state;
		/* Permanent entries and entries with a live timer are
		 * left alone. */
681 		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
682 			write_unlock(&n->lock);
686 		if (time_before(n->used, n->confirmed))
687 			n->used = n->confirmed;
		/* Unreferenced and failed-or-stale-too-long: reclaim. */
689 		if (atomic_read(&n->refcnt) == 1 &&
690 		    (state == NUD_FAILED ||
691 		     time_after(now, n->used + n->parms->gc_staletime))) {
694 			write_unlock(&n->lock);
695 			neigh_cleanup_and_release(n);
698 		write_unlock(&n->lock);
704 	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
705 	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
706 	 * base_reachable_time.
	 */
708 	expire = tbl->parms.base_reachable_time >> 1;
709 	expire /= (tbl->hash_mask + 1);
714 		mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
716 		mod_timer(&tbl->gc_timer, now + expire);
718 	write_unlock(&tbl->lock);
/* Total solicitation attempts allowed before declaring the entry failed.
 * NUD_PROBE uses a different budget (first branch's expression elided in
 * this extract) than INCOMPLETE, which sums unicast, application, and
 * multicast probes.
 */
721 static __inline__ int neigh_max_probes(struct neighbour *n)
723 	struct neigh_parms *p = n->parms;
724 	return (n->nud_state & NUD_PROBE ?
726 		p->ucast_probes + p->app_probes + p->mcast_probes);
729 /* Called when a timer expires for a neighbour entry. */
/* Drives the NUD state machine (REACHABLE -> DELAY -> PROBE ->
 * FAILED, per RFC 2461-style Neighbour Unreachability Detection):
 * computes the next state from elapsed confirmed/used times, sends
 * solicitations while probing, fails the entry after neigh_max_probes()
 * attempts, re-arms the timer for timed states, and notifies on change.
 * Runs in timer (softirq) context — plain write_lock on neigh->lock.
 */
731 static void neigh_timer_handler(unsigned long arg)
733 	unsigned long now, next;
734 	struct neighbour *neigh = (struct neighbour *)arg;
738 	write_lock(&neigh->lock);
740 	state = neigh->nud_state;
	/* Timer fired for a non-timed state: stale timer, just bail. */
744 	if (!(state & NUD_IN_TIMER)) {
746 		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
751 	if (state & NUD_REACHABLE) {
752 		if (time_before_eq(now,
753 				   neigh->confirmed + neigh->parms->reachable_time)) {
754 			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
755 			next = neigh->confirmed + neigh->parms->reachable_time;
756 		} else if (time_before_eq(now,
757 					  neigh->used + neigh->parms->delay_probe_time)) {
758 			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
759 			neigh->nud_state = NUD_DELAY;
760 			neigh->updated = jiffies;
761 			neigh_suspect(neigh);
762 			next = now + neigh->parms->delay_probe_time;
			/* else: unconfirmed and unused — demote to STALE. */
764 			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
765 			neigh->nud_state = NUD_STALE;
766 			neigh->updated = jiffies;
767 			neigh_suspect(neigh);
770 	} else if (state & NUD_DELAY) {
		/* Confirmation arrived while delaying: back to REACHABLE. */
771 		if (time_before_eq(now,
772 				   neigh->confirmed + neigh->parms->delay_probe_time)) {
773 			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
774 			neigh->nud_state = NUD_REACHABLE;
775 			neigh->updated = jiffies;
776 			neigh_connect(neigh);
778 			next = neigh->confirmed + neigh->parms->reachable_time;
780 			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
781 			neigh->nud_state = NUD_PROBE;
782 			neigh->updated = jiffies;
783 			atomic_set(&neigh->probes, 0);
784 			next = now + neigh->parms->retrans_time;
787 		/* NUD_PROBE|NUD_INCOMPLETE */
788 		next = now + neigh->parms->retrans_time;
	/* Out of probe attempts: give up on this entry. */
791 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
792 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
795 		neigh->nud_state = NUD_FAILED;
796 		neigh->updated = jiffies;
798 		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
799 		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
801 		/* It is very thin place. report_unreachable is very complicated
802 		   routine. Particularly, it can hit the same neighbour entry!
804 		   So that, we try to be accurate and avoid dead loop. --ANK
		 */
806 		while (neigh->nud_state == NUD_FAILED &&
807 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			/* Drop the lock around the callback to avoid
			 * deadlock/recursion into this entry. */
808 			write_unlock(&neigh->lock);
809 			neigh->ops->error_report(neigh, skb);
810 			write_lock(&neigh->lock);
812 		skb_queue_purge(&neigh->arp_queue);
815 	if (neigh->nud_state & NUD_IN_TIMER) {
		/* Clamp re-arm interval to at least HZ/2 from now. */
816 		if (time_before(next, jiffies + HZ/2))
817 			next = jiffies + HZ/2;
818 		if (!mod_timer(&neigh->timer, next))
821 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
822 		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
823 		/* keep skb alive even if arp_queue overflows */
		/* Solicit with the lock dropped, same reentrancy concern
		 * as error_report above. */
826 		write_unlock(&neigh->lock);
827 		neigh->ops->solicit(neigh, skb);
828 		atomic_inc(&neigh->probes);
833 		write_unlock(&neigh->lock);
837 		neigh_update_notify(neigh);
839 	neigh_release(neigh);
/* Slow path of neigh_event_send(): kick off resolution for an entry that
 * is not usable yet, queueing @skb (bounded by parms->queue_len) until
 * the address resolves.  Returns 0 when the caller may transmit
 * immediately; nonzero when the skb was queued or dropped (several
 * return statements elided in this extract).
 */
842 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
847 	write_lock_bh(&neigh->lock);
	/* Already usable (or being verified): nothing to do. */
850 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
855 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		/* Start resolution only if some probe type is enabled
		 * (plus ucast_probes — line elided in this extract). */
856 		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
857 			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
858 			neigh->nud_state     = NUD_INCOMPLETE;
859 			neigh->updated = jiffies;
860 			neigh_add_timer(neigh, now + 1);
			/* No probing configured: fail immediately. */
862 			neigh->nud_state = NUD_FAILED;
863 			neigh->updated = jiffies;
864 			write_unlock_bh(&neigh->lock);
870 	} else if (neigh->nud_state & NUD_STALE) {
871 		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
872 		neigh->nud_state = NUD_DELAY;
873 		neigh->updated = jiffies;
874 		neigh_add_timer(neigh,
875 				jiffies + neigh->parms->delay_probe_time);
878 	if (neigh->nud_state == NUD_INCOMPLETE) {
			/* Queue full: drop the oldest to make room. */
880 			if (skb_queue_len(&neigh->arp_queue) >=
881 			    neigh->parms->queue_len) {
882 				struct sk_buff *buff;
883 				buff = neigh->arp_queue.next;
884 				__skb_unlink(buff, &neigh->arp_queue);
887 			__skb_queue_tail(&neigh->arp_queue, skb);
892 	write_unlock_bh(&neigh->lock);
/* Refresh every cached hardware header after the link-layer address
 * changed, via the device's header_ops->cache_update callback, each
 * under its hh_lock seqlock.  Assumes header_ops and cache_update are
 * non-NULL here — presumably guaranteed by the caller (neigh_update);
 * TODO confirm against the elided lines.
 */
896 static void neigh_update_hhs(struct neighbour *neigh)
899 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
900 		= neigh->dev->header_ops->cache_update;
903 		for (hh = neigh->hh; hh; hh = hh->hh_next) {
904 			write_seqlock_bh(&hh->hh_lock);
905 			update(hh, neigh->dev, neigh->ha);
906 			write_sequnlock_bh(&hh->hh_lock);
913 /* Generic update routine.
914    -- lladdr is new lladdr or NULL, if it is not supplied.
     -- new is the new NUD state.
     -- flags modify the behaviour:
917 	NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
919 	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
920 	lladdr instead of overriding it
922 	It also allows to retain current state
923 	if lladdr is unchanged.
924 	NEIGH_UPDATE_F_ADMIN means that the change is administrative.
926 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
	(NTF_ROUTER flag.)
928 	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
	(a router.)
931    Caller MUST hold reference count on the entry.
 */
934 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
940 	struct net_device *dev;
941 	int update_isrouter = 0;
943 	write_lock_bh(&neigh->lock);
946 	old    = neigh->nud_state;
	/* Non-admin updates may not touch NOARP/PERMANENT entries. */
949 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
950 	    (old & (NUD_NOARP | NUD_PERMANENT)))
	/* Transition to an invalid state: stop timers, suspect, done. */
953 	if (!(new & NUD_VALID)) {
954 		neigh_del_timer(neigh);
955 		if (old & NUD_CONNECTED)
956 			neigh_suspect(neigh);
957 		neigh->nud_state = new;
959 		notify = old & NUD_VALID;
963 	/* Compare new lladdr with cached one */
964 	if (!dev->addr_len) {
965 		/* First case: device needs no address. */
968 		/* The second case: if something is already cached
969 		   and a new address is proposed:
971 		   - if they are different, check override flag
		 */
973 		if ((old & NUD_VALID) &&
974 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
977 		/* No address is supplied; if we know something,
978 		   use it, otherwise discard the request.
		 */
981 		if (!(old & NUD_VALID))
986 	if (new & NUD_CONNECTED)
987 		neigh->confirmed = jiffies;
988 	neigh->updated = jiffies;
990 	/* If entry was valid and address is not changed,
991 	   do not change entry state, if new one is STALE.
	 */
994 	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
995 	if (old & NUD_VALID) {
996 		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
998 			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
999 			    (old & NUD_CONNECTED)) {
1005 		if (lladdr == neigh->ha && new == NUD_STALE &&
1006 		    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1007 		     (old & NUD_CONNECTED))
	/* State actually changes: re-arm timer for timed states. */
1014 		neigh_del_timer(neigh);
1015 		if (new & NUD_IN_TIMER)
1016 			neigh_add_timer(neigh, (jiffies +
1017 						((new & NUD_REACHABLE) ?
1018 						 neigh->parms->reachable_time :
1020 		neigh->nud_state = new;
	/* New hardware address: copy it and refresh hh caches. */
1023 	if (lladdr != neigh->ha) {
1024 		memcpy(&neigh->ha, lladdr, dev->addr_len);
1025 		neigh_update_hhs(neigh);
1026 		if (!(new & NUD_CONNECTED))
1027 			neigh->confirmed = jiffies -
1028 				      (neigh->parms->base_reachable_time << 1);
1033 	if (new & NUD_CONNECTED)
1034 		neigh_connect(neigh);
1036 		neigh_suspect(neigh);
	/* Entry just became valid: flush the skbs parked during
	 * resolution. */
1037 	if (!(old & NUD_VALID)) {
1038 		struct sk_buff *skb;
1040 		/* Again: avoid dead loop if something went wrong */
1042 		while (neigh->nud_state & NUD_VALID &&
1043 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1044 			struct neighbour *n1 = neigh;
1045 			write_unlock_bh(&neigh->lock);
1046 			/* On shaper/eql skb->dst->neighbour != neigh :( */
1047 			if (skb->dst && skb->dst->neighbour)
1048 				n1 = skb->dst->neighbour;
1050 			write_lock_bh(&neigh->lock);
1052 		skb_queue_purge(&neigh->arp_queue);
1055 	if (update_isrouter) {
1056 		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1057 			(neigh->flags | NTF_ROUTER) :
1058 			(neigh->flags & ~NTF_ROUTER);
1060 	write_unlock_bh(&neigh->lock);
1063 		neigh_update_notify(neigh);
/* Handle an incoming neighbour solicitation: look up (or create — the
 * second argument to __neigh_lookup is the "create" flag) the entry for
 * @saddr and mark it STALE with the supplied link-layer address.
 * Returns the entry (NULL-check elided in this extract).
 */
1068 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1069 				 u8 *lladdr, void *saddr,
1070 				 struct net_device *dev)
1072 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1073 						 lladdr || !dev->addr_len);
1075 		neigh_update(neigh, lladdr, NUD_STALE,
1076 			     NEIGH_UPDATE_F_OVERRIDE);
/* Attach a cached hardware header for @protocol to entry @n (and, in
 * elided lines, to @dst).  Reuses an existing hh of the same type if one
 * is already chained; otherwise builds one via header_ops->cache and
 * links it in, choosing the fast hh_output only for connected entries.
 */
1080 static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
1083 	struct hh_cache	*hh;
1084 	struct net_device *dev = dst->dev;
1086 	for (hh = n->hh; hh; hh = hh->hh_next)
1087 		if (hh->hh_type == protocol)
1090 	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
1091 		seqlock_init(&hh->hh_lock);
1092 		hh->hh_type = protocol;
1093 		atomic_set(&hh->hh_refcnt, 0);
		/* Device fills in the cached header; failure path elided. */
1096 		if (dev->header_ops->cache(n, hh)) {
			/* Reference held by the n->hh chain. */
1100 			atomic_inc(&hh->hh_refcnt);
1101 			hh->hh_next = n->hh;
1103 			if (n->nud_state & NUD_CONNECTED)
1104 				hh->hh_output = n->ops->hh_output;
1106 				hh->hh_output = n->ops->output;
	/* Second reference — presumably for dst->hh (assignment elided
	 * in this extract); TODO confirm. */
1110 		atomic_inc(&hh->hh_refcnt);
1115 /* This function can be used in contexts, where only old dev_queue_xmit
1116    worked, f.e. if you want to override normal output path (eql, shaper),
1117    but resolution is not made yet.
 */
1120 int neigh_compat_output(struct sk_buff *skb)
1122 	struct net_device *dev = skb->dev;
1124 	__skb_pull(skb, skb_network_offset(skb));
	/* Build the link-layer header without a destination address; if
	 * that fails, fall back to header_ops->rebuild (failure handling
	 * elided in this extract). */
1126 	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1128 	    dev->header_ops->rebuild(skb))
1131 	return dev_queue_xmit(skb);
1134 /* Slow and careful. */
/* Output path for unresolved entries: triggers resolution via
 * neigh_event_send(); once the entry is usable, builds the hardware
 * header (initializing the hh cache on first use for devices that
 * support it) and hands the skb to ops->queue_xmit.
 */
1136 int neigh_resolve_output(struct sk_buff *skb)
1138 	struct dst_entry *dst = skb->dst;
1139 	struct neighbour *neigh;
1142 	if (!dst || !(neigh = dst->neighbour))
1145 	__skb_pull(skb, skb_network_offset(skb));
	/* Zero return: entry usable now; nonzero: skb was queued/dropped
	 * by the resolution machinery. */
1147 	if (!neigh_event_send(neigh, skb)) {
1149 		struct net_device *dev = neigh->dev;
1150 		if (dev->header_ops->cache && !dst->hh) {
			/* First packet on this dst: set up the hh cache
			 * under the write lock. */
1151 			write_lock_bh(&neigh->lock);
1153 				neigh_hh_init(neigh, dst, dst->ops->protocol);
1154 			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1155 					      neigh->ha, NULL, skb->len);
1156 			write_unlock_bh(&neigh->lock);
			/* Read lock suffices when only reading neigh->ha. */
1158 			read_lock_bh(&neigh->lock);
1159 			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1160 					      neigh->ha, NULL, skb->len);
1161 			read_unlock_bh(&neigh->lock);
1164 			rc = neigh->ops->queue_xmit(skb);
1171 	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1172 		      dst, dst ? dst->neighbour : NULL);
1179 /* As fast as possible without hh cache */
/* Output path for connected entries without a cached header: build the
 * link-layer header from neigh->ha under the read lock, then transmit.
 */
1181 int neigh_connected_output(struct sk_buff *skb)
1184 	struct dst_entry *dst = skb->dst;
1185 	struct neighbour *neigh = dst->neighbour;
1186 	struct net_device *dev = neigh->dev;
1188 	__skb_pull(skb, skb_network_offset(skb));
1190 	read_lock_bh(&neigh->lock);
1191 	err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1192 			      neigh->ha, NULL, skb->len);
1193 	read_unlock_bh(&neigh->lock);
1195 		err = neigh->ops->queue_xmit(skb);
/* Proxy timer: walk the delayed proxy queue, replay skbs whose scheduled
 * time (NEIGH_CB->sched_next) has arrived via tbl->proxy_redo, and re-arm
 * the timer for the earliest remaining deadline.  Runs in timer context;
 * skb-advance and drop-path lines are elided in this extract.
 */
1203 static void neigh_proxy_process(unsigned long arg)
1205 	struct neigh_table *tbl = (struct neigh_table *)arg;
1206 	long sched_next = 0;
1207 	unsigned long now = jiffies;
1208 	struct sk_buff *skb;
1210 	spin_lock(&tbl->proxy_queue.lock);
1212 	skb = tbl->proxy_queue.next;
1214 	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
1215 		struct sk_buff *back = skb;
1216 		long tdif = NEIGH_CB(back)->sched_next - now;
		/* Due (tdif <= 0): replay now if the device is still up. */
1220 			struct net_device *dev = back->dev;
1221 			__skb_unlink(back, &tbl->proxy_queue);
1222 			if (tbl->proxy_redo && netif_running(dev))
1223 				tbl->proxy_redo(back);
		/* Not due: track the soonest remaining deadline. */
1228 		} else if (!sched_next || tdif < sched_next)
1231 	del_timer(&tbl->proxy_timer);
1233 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1234 	spin_unlock(&tbl->proxy_queue.lock);
/* Queue @skb for delayed proxy handling, randomly jittered within
 * parms->proxy_delay.  Drops the skb when the proxy queue is over
 * proxy_qlen (drop lines elided in this extract).  Pulls the proxy
 * timer forward if it was armed for a later deadline.
 */
1237 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1238 		    struct sk_buff *skb)
1240 	unsigned long now = jiffies;
1241 	unsigned long sched_next = now + (net_random() % p->proxy_delay);
1243 	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1248 	NEIGH_CB(skb)->sched_next = sched_next;
1249 	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1251 	spin_lock(&tbl->proxy_queue.lock);
	/* Keep the earlier of the pending expiry and our deadline. */
1252 	if (del_timer(&tbl->proxy_timer)) {
1253 		if (time_before(tbl->proxy_timer.expires, sched_next))
1254 			sched_next = tbl->proxy_timer.expires;
	/* The skb does not need its route while parked. */
1256 	dst_release(skb->dst);
1259 	__skb_queue_tail(&tbl->proxy_queue, skb);
1260 	mod_timer(&tbl->proxy_timer, sched_next);
1261 	spin_unlock(&tbl->proxy_queue.lock);
/* Clone the table's default parms for @dev, run the device's
 * neigh_setup hook, and link the result onto tbl->parms.next under the
 * table write lock.  Returns the new parms or NULL (NULL-checks and
 * dev_hold elided in this extract).
 */
1265 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1266 				      struct neigh_table *tbl)
1268 	struct neigh_parms *p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1272 		atomic_set(&p->refcnt, 1);
1273 		INIT_RCU_HEAD(&p->rcu_head);
1275 			neigh_rand_reach_time(p->base_reachable_time);
1277 		if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
1285 		p->sysctl_table = NULL;
1286 		write_lock_bh(&tbl->lock);
1287 		p->next		= tbl->parms.next;
1288 		tbl->parms.next = p;
1289 		write_unlock_bh(&tbl->lock);
/* RCU callback: drop the reference held by the tbl parms list once all
 * readers of the old list are done (scheduled by neigh_parms_release).
 */
1294 static void neigh_rcu_free_parms(struct rcu_head *head)
1296 	struct neigh_parms *parms =
1297 		container_of(head, struct neigh_parms, rcu_head);
1299 	neigh_parms_put(parms);
/* Unlink @parms from the table's parms list (the table's own default
 * parms are never released) and free it after an RCU grace period.  The
 * match-and-unlink inside the loop is elided in this extract; the final
 * printk is the not-found fall-through.
 */
1302 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1304 	struct neigh_parms **p;
1306 	if (!parms || parms == &tbl->parms)
1308 	write_lock_bh(&tbl->lock);
1309 	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1313 			write_unlock_bh(&tbl->lock);
1315 				dev_put(parms->dev);
1316 			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1320 	write_unlock_bh(&tbl->lock);
1321 	NEIGH_PRINTK1("neigh_parms_release: not found\n");
1324 void neigh_parms_destroy(struct neigh_parms *parms)
/* Distinct lockdep class for the proxy queue's internal spinlock. */
1329 static struct lock_class_key neigh_table_proxy_queue_class;
/* Core initialization of a neighbour table: default parms, entry slab,
 * per-CPU stats, /proc stats file, both hash arrays, lock, and the GC
 * and proxy timers.  Allocation failures panic — tables are registered
 * once at protocol init.  Does NOT add the table to the global list
 * (see neigh_table_init).
 */
1331 void neigh_table_init_no_netlink(struct neigh_table *tbl)
1333 	unsigned long now = jiffies;
1334 	unsigned long phsize;
1336 	atomic_set(&tbl->parms.refcnt, 1);
1337 	INIT_RCU_HEAD(&tbl->parms.rcu_head);
1338 	tbl->parms.reachable_time =
1339 			  neigh_rand_reach_time(tbl->parms.base_reachable_time);
1341 	if (!tbl->kmem_cachep)
1343 			kmem_cache_create(tbl->id, tbl->entry_size, 0,
1344 					  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1346 	tbl->stats = alloc_percpu(struct neigh_statistics);
1348 		panic("cannot create neighbour cache statistics");
1350 #ifdef CONFIG_PROC_FS
1351 	tbl->pde = create_proc_entry(tbl->id, 0, init_net.proc_net_stat);
1353 		panic("cannot create neighbour proc dir entry");
1354 	tbl->pde->proc_fops = &neigh_stat_seq_fops;
1355 	tbl->pde->data = tbl;
	/* hash_mask setup is elided in this extract; buckets = mask + 1. */
1359 	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
1361 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1362 	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1364 	if (!tbl->hash_buckets || !tbl->phash_buckets)
1365 		panic("cannot allocate neighbour cache hashes");
1367 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1369 	rwlock_init(&tbl->lock);
1370 	setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
1371 	tbl->gc_timer.expires  = now + 1;
1372 	add_timer(&tbl->gc_timer);
1374 	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1375 	skb_queue_head_init_class(&tbl->proxy_queue,
1376 			&neigh_table_proxy_queue_class);
1378 	tbl->last_flush = now;
	/* Push last_rand into the future so the periodic timer does not
	 * immediately re-randomize reachable_time. */
1379 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
/* Public table registration: initialize the table, then link it onto the
 * global neigh_tables list under neigh_tbl_lock.  A duplicate
 * registration for the same address family is detected (tmp non-NULL)
 * and reported after the fact.
 */
1382 void neigh_table_init(struct neigh_table *tbl)
1384 	struct neigh_table *tmp;
1386 	neigh_table_init_no_netlink(tbl);
1387 	write_lock(&neigh_tbl_lock);
1388 	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1389 		if (tmp->family == tbl->family)
1392 	tbl->next	= neigh_tables;
1394 	write_unlock(&neigh_tbl_lock);
1396 	if (unlikely(tmp)) {
1397 		printk(KERN_ERR "NEIGH: Registering multiple tables for "
1398 		       "family %d\n", tbl->family);
/* Tear down a registered table: stop both timers, drain the proxy queue,
 * flush every entry, unlink the table from the global list, and free the
 * hash arrays, stats, proc entry, and slab cache.  Leaked entries (still
 * referenced at teardown) are only reported, not reclaimed.
 */
1403 int neigh_table_clear(struct neigh_table *tbl)
1405 	struct neigh_table **tp;
1407 	/* It is not clean... Fix it to unload IPv6 module safely */
1408 	del_timer_sync(&tbl->gc_timer);
1409 	del_timer_sync(&tbl->proxy_timer);
1410 	pneigh_queue_purge(&tbl->proxy_queue);
1411 	neigh_ifdown(tbl, NULL);
1412 	if (atomic_read(&tbl->entries))
1413 		printk(KERN_CRIT "neighbour leakage\n");
1414 	write_lock(&neigh_tbl_lock);
1415 	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1421 	write_unlock(&neigh_tbl_lock);
1423 	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1424 	tbl->hash_buckets = NULL;
1426 	kfree(tbl->phash_buckets);
1427 	tbl->phash_buckets = NULL;
1429 	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1431 	free_percpu(tbl->stats);
1434 	kmem_cache_destroy(tbl->kmem_cachep);
1435 	tbl->kmem_cachep = NULL;
/* RTM_DELNEIGH netlink handler: validate the message, find the table for
 * the requested address family, and either delete the proxy entry
 * (NTF_PROXY) or mark the cache entry NUD_FAILED via an administrative
 * neigh_update.  Only the initial namespace is supported here.  Various
 * error-return and dev_put lines are elided in this extract.
 */
1440 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1442 	struct net *net = skb->sk->sk_net;
1444 	struct nlattr *dst_attr;
1445 	struct neigh_table *tbl;
1446 	struct net_device *dev = NULL;
1449 	if (net != &init_net)
1452 	if (nlmsg_len(nlh) < sizeof(*ndm))
1455 	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1456 	if (dst_attr == NULL)
1459 	ndm = nlmsg_data(nlh);
1460 	if (ndm->ndm_ifindex) {
1461 		dev = dev_get_by_index(net, ndm->ndm_ifindex);
1468 	read_lock(&neigh_tbl_lock);
1469 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1470 		struct neighbour *neigh;
1472 		if (tbl->family != ndm->ndm_family)
		/* Found the family's table; safe to drop the list lock. */
1474 		read_unlock(&neigh_tbl_lock);
1476 		if (nla_len(dst_attr) < tbl->key_len)
1479 		if (ndm->ndm_flags & NTF_PROXY) {
1480 			err = pneigh_delete(tbl, nla_data(dst_attr), dev);
1487 		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1488 		if (neigh == NULL) {
1493 		err = neigh_update(neigh, NULL, NUD_FAILED,
1494 				   NEIGH_UPDATE_F_OVERRIDE |
1495 				   NEIGH_UPDATE_F_ADMIN);
1496 		neigh_release(neigh);
	/* No table matched the requested family. */
1499 	read_unlock(&neigh_tbl_lock);
1500 	err = -EAFNOSUPPORT;
/* RTM_NEWNEIGH netlink handler: parse attributes, find the table for the
 * address family, and create/update either a proxy entry (NTF_PROXY) or
 * a cache entry.  NLM_F_CREATE / NLM_F_EXCL / NLM_F_REPLACE have their
 * usual netlink create/exclusive/replace semantics.  Only the initial
 * namespace is supported.  Several error-return lines are elided in this
 * extract.
 */
1509 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1511 	struct net *net = skb->sk->sk_net;
1513 	struct nlattr *tb[NDA_MAX+1];
1514 	struct neigh_table *tbl;
1515 	struct net_device *dev = NULL;
1518 	if (net != &init_net)
1521 	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1526 	if (tb[NDA_DST] == NULL)
1529 	ndm = nlmsg_data(nlh);
1530 	if (ndm->ndm_ifindex) {
1531 		dev = dev_get_by_index(net, ndm->ndm_ifindex);
		/* A supplied lladdr must be at least addr_len bytes. */
1537 		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1541 	read_lock(&neigh_tbl_lock);
1542 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1543 		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1544 		struct neighbour *neigh;
1547 		if (tbl->family != ndm->ndm_family)
1549 		read_unlock(&neigh_tbl_lock);
1551 		if (nla_len(tb[NDA_DST]) < tbl->key_len)
1553 		dst = nla_data(tb[NDA_DST]);
1554 		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1556 		if (ndm->ndm_flags & NTF_PROXY) {
1557 			struct pneigh_entry *pn;
			/* Create-or-get the proxy entry, record its flags. */
1560 			pn = pneigh_lookup(tbl, dst, dev, 1);
1562 				pn->flags = ndm->ndm_flags;
1571 		neigh = neigh_lookup(tbl, dst, dev);
1572 		if (neigh == NULL) {
1573 			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1578 			neigh = __neigh_lookup_errno(tbl, dst, dev);
1579 			if (IS_ERR(neigh)) {
1580 				err = PTR_ERR(neigh);
			/* Entry exists and caller demanded exclusivity. */
1584 			if (nlh->nlmsg_flags & NLM_F_EXCL) {
1586 				neigh_release(neigh);
			/* Without REPLACE, do not override an existing
			 * link-layer address. */
1590 			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1591 				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1594 		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1595 		neigh_release(neigh);
	/* No table matched the requested family. */
1599 	read_unlock(&neigh_tbl_lock);
1600 	err = -EAFNOSUPPORT;
/*
 * neightbl_fill_parms - dump one neigh_parms set as a nested NDTA_PARMS
 * attribute block on @skb.
 *
 * Returns the result of nla_nest_end() on success; on overflow the
 * partially-written nest is cancelled via nla_nest_cancel().
 */
1609 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1611 struct nlattr *nest;
1613 nest = nla_nest_start(skb, NDTA_PARMS);
/* Per-device parms carry the ifindex so userspace can tell them apart. */
1618 NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1620 NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1621 NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1622 NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1623 NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1624 NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1625 NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
/* Time-valued parms are exported in milliseconds. */
1626 NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1627 NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1628 parms->base_reachable_time);
1629 NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1630 NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1631 NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1632 NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1633 NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1634 NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1636 return nla_nest_end(skb, nest);
/* NLA_PUT* jump here on overflow: undo the whole nest. */
1639 return nla_nest_cancel(skb, nest);
/*
 * neightbl_fill_info - build one RTM_NEWNEIGHTBL message describing a
 * whole neigh_table: thresholds, config snapshot, per-CPU statistics
 * totals, and the table's default parms.
 *
 * Reads are done under tbl->lock (read, BH-disabled) so the values form
 * a consistent snapshot with respect to the periodic timers.
 */
1642 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1643 u32 pid, u32 seq, int type, int flags)
1645 struct nlmsghdr *nlh;
1646 struct ndtmsg *ndtmsg;
1648 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1652 ndtmsg = nlmsg_data(nlh);
1654 read_lock_bh(&tbl->lock);
1655 ndtmsg->ndtm_family = tbl->family;
1656 ndtmsg->ndtm_pad1 = 0;
1657 ndtmsg->ndtm_pad2 = 0;
1659 NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1660 NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1661 NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1662 NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1663 NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1666 unsigned long now = jiffies;
/* Ages since the last flush/rand-rekey, exported in msecs below. */
1667 unsigned int flush_delta = now - tbl->last_flush;
1668 unsigned int rand_delta = now - tbl->last_rand;
1670 struct ndt_config ndc = {
1671 .ndtc_key_len = tbl->key_len,
1672 .ndtc_entry_size = tbl->entry_size,
1673 .ndtc_entries = atomic_read(&tbl->entries),
1674 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1675 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1676 .ndtc_hash_rnd = tbl->hash_rnd,
1677 .ndtc_hash_mask = tbl->hash_mask,
1678 .ndtc_hash_chain_gc = tbl->hash_chain_gc,
1679 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1682 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1687 struct ndt_stats ndst;
1689 memset(&ndst, 0, sizeof(ndst));
/* Sum the per-CPU counters into a single ndt_stats for userspace. */
1691 for_each_possible_cpu(cpu) {
1692 struct neigh_statistics *st;
1694 st = per_cpu_ptr(tbl->stats, cpu);
1695 ndst.ndts_allocs += st->allocs;
1696 ndst.ndts_destroys += st->destroys;
1697 ndst.ndts_hash_grows += st->hash_grows;
1698 ndst.ndts_res_failed += st->res_failed;
1699 ndst.ndts_lookups += st->lookups;
1700 ndst.ndts_hits += st->hits;
1701 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1702 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1703 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1704 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1707 NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
/* tbl->parms is the table default set and must not be device-bound. */
1710 BUG_ON(tbl->parms.dev);
1711 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1712 goto nla_put_failure;
1714 read_unlock_bh(&tbl->lock);
1715 return nlmsg_end(skb, nlh);
/* Overflow path: drop the lock and cancel the half-built message. */
1718 read_unlock_bh(&tbl->lock);
1719 nlmsg_cancel(skb, nlh);
/*
 * neightbl_fill_param_info - build an RTM_NEWNEIGHTBL message carrying
 * only the table name plus one specific parms set (used when dumping
 * per-device parameter sets, as opposed to the full-table dump above).
 */
1723 static int neightbl_fill_param_info(struct sk_buff *skb,
1724 struct neigh_table *tbl,
1725 struct neigh_parms *parms,
1726 u32 pid, u32 seq, int type,
1729 struct ndtmsg *ndtmsg;
1730 struct nlmsghdr *nlh;
1732 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1736 ndtmsg = nlmsg_data(nlh);
1738 read_lock_bh(&tbl->lock);
1739 ndtmsg->ndtm_family = tbl->family;
1740 ndtmsg->ndtm_pad1 = 0;
1741 ndtmsg->ndtm_pad2 = 0;
1743 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1744 neightbl_fill_parms(skb, parms) < 0)
1747 read_unlock_bh(&tbl->lock);
1748 return nlmsg_end(skb, nlh);
/* Overflow path: cancel the half-built message. */
1750 read_unlock_bh(&tbl->lock);
1751 nlmsg_cancel(skb, nlh);
/*
 * lookup_neigh_params - find the parms set for @ifindex in @tbl's list.
 * ifindex == 0 selects the table-default set (the one with no device).
 */
1755 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1758 struct neigh_parms *p;
1760 for (p = &tbl->parms; p; p = p->next)
1761 if ((p->dev && p->dev->ifindex == ifindex) ||
1762 (!p->dev && !ifindex))
/* Netlink attribute policy for RTM_SETNEIGHTBL (table-level attributes). */
1768 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1769 [NDTA_NAME] = { .type = NLA_STRING },
1770 [NDTA_THRESH1] = { .type = NLA_U32 },
1771 [NDTA_THRESH2] = { .type = NLA_U32 },
1772 [NDTA_THRESH3] = { .type = NLA_U32 },
1773 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1774 [NDTA_PARMS] = { .type = NLA_NESTED },
/* Policy for the nested NDTA_PARMS attributes (per-parms-set values). */
1777 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1778 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1779 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1780 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1781 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1782 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1783 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
/* Time values are carried as 64-bit millisecond quantities. */
1784 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1785 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1786 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1787 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1788 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1789 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1790 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
/*
 * neightbl_set - RTM_SETNEIGHTBL netlink handler: update a neighbour
 * table's GC thresholds/interval and, optionally, one parms set named
 * by NDTPA_IFINDEX inside a nested NDTA_PARMS attribute.
 *
 * The target table is found by NDTA_NAME (and family, if nonzero)
 * under neigh_tbl_lock; updates are applied under tbl->lock so the
 * periodic timers always see a consistent set of values.
 */
1793 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1795 struct net *net = skb->sk->sk_net;
1796 struct neigh_table *tbl;
1797 struct ndtmsg *ndtmsg;
1798 struct nlattr *tb[NDTA_MAX+1];
/* Only the initial network namespace is supported here. */
1801 if (net != &init_net)
1804 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1805 nl_neightbl_policy);
/* The table name is mandatory — it is the lookup key. */
1809 if (tb[NDTA_NAME] == NULL) {
1814 ndtmsg = nlmsg_data(nlh);
1815 read_lock(&neigh_tbl_lock);
1816 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
/* ndtm_family == 0 acts as a wildcard. */
1817 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1820 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1830 * We acquire tbl->lock to be nice to the periodic timers and
1831 * make sure they always see a consistent set of values.
1833 write_lock_bh(&tbl->lock);
1835 if (tb[NDTA_PARMS]) {
1836 struct nlattr *tbp[NDTPA_MAX+1];
1837 struct neigh_parms *p;
1840 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1841 nl_ntbl_parm_policy);
1843 goto errout_tbl_lock;
/* NDTPA_IFINDEX selects a per-device set; default is the table set. */
1845 if (tbp[NDTPA_IFINDEX])
1846 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1848 p = lookup_neigh_params(tbl, ifindex);
1851 goto errout_tbl_lock;
/* Apply every attribute present in the nest to the chosen parms. */
1854 for (i = 1; i <= NDTPA_MAX; i++) {
1859 case NDTPA_QUEUE_LEN:
1860 p->queue_len = nla_get_u32(tbp[i]);
1862 case NDTPA_PROXY_QLEN:
1863 p->proxy_qlen = nla_get_u32(tbp[i]);
1865 case NDTPA_APP_PROBES:
1866 p->app_probes = nla_get_u32(tbp[i]);
1868 case NDTPA_UCAST_PROBES:
1869 p->ucast_probes = nla_get_u32(tbp[i]);
1871 case NDTPA_MCAST_PROBES:
1872 p->mcast_probes = nla_get_u32(tbp[i]);
/* Millisecond attributes are converted to jiffies by nla_get_msecs. */
1874 case NDTPA_BASE_REACHABLE_TIME:
1875 p->base_reachable_time = nla_get_msecs(tbp[i]);
1877 case NDTPA_GC_STALETIME:
1878 p->gc_staletime = nla_get_msecs(tbp[i]);
1880 case NDTPA_DELAY_PROBE_TIME:
1881 p->delay_probe_time = nla_get_msecs(tbp[i]);
1883 case NDTPA_RETRANS_TIME:
1884 p->retrans_time = nla_get_msecs(tbp[i]);
1886 case NDTPA_ANYCAST_DELAY:
1887 p->anycast_delay = nla_get_msecs(tbp[i]);
1889 case NDTPA_PROXY_DELAY:
1890 p->proxy_delay = nla_get_msecs(tbp[i]);
1892 case NDTPA_LOCKTIME:
1893 p->locktime = nla_get_msecs(tbp[i]);
/* Table-wide GC knobs, each optional. */
1899 if (tb[NDTA_THRESH1])
1900 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
1902 if (tb[NDTA_THRESH2])
1903 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
1905 if (tb[NDTA_THRESH3])
1906 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
1908 if (tb[NDTA_GC_INTERVAL])
1909 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
1914 write_unlock_bh(&tbl->lock);
1916 read_unlock(&neigh_tbl_lock);
/*
 * neightbl_dump_info - RTM_GETNEIGHTBL dump callback: emit one message
 * per table plus one per device-specific parms set, resuming from
 * cb->args[0] (table index) / cb->args[1] (parms index) across calls.
 */
1921 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1923 struct net *net = skb->sk->sk_net;
1924 int family, tidx, nidx = 0;
1925 int tbl_skip = cb->args[0];
1926 int neigh_skip = cb->args[1];
1927 struct neigh_table *tbl;
/* Only the initial network namespace is supported here. */
1929 if (net != &init_net)
/* family == 0 (AF_UNSPEC) dumps all tables. */
1932 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
1934 read_lock(&neigh_tbl_lock);
1935 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
1936 struct neigh_parms *p;
1938 if (tidx < tbl_skip || (family && tbl->family != family))
1941 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
1942 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
/* Skip the default set (&tbl->parms); start at the device sets. */
1946 for (nidx = 0, p = tbl->parms.next; p; p = p->next, nidx++) {
1947 if (nidx < neigh_skip)
1950 if (neightbl_fill_param_info(skb, tbl, p,
1951 NETLINK_CB(cb->skb).pid,
1961 read_unlock(&neigh_tbl_lock);
/*
 * neigh_fill_info - encode one neighbour entry as a netlink ndmsg with
 * NDA_DST, NDA_LLADDR (only while NUD_VALID), NDA_CACHEINFO and
 * NDA_PROBES attributes.
 *
 * The volatile fields (state, ha, timestamps) are copied under
 * neigh->lock so the snapshot is internally consistent.
 */
1968 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
1969 u32 pid, u32 seq, int type, unsigned int flags)
1971 unsigned long now = jiffies;
1972 struct nda_cacheinfo ci;
1973 struct nlmsghdr *nlh;
1976 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
1980 ndm = nlmsg_data(nlh);
1981 ndm->ndm_family = neigh->ops->family;
1984 ndm->ndm_flags = neigh->flags;
1985 ndm->ndm_type = neigh->type;
1986 ndm->ndm_ifindex = neigh->dev->ifindex;
1988 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
1990 read_lock_bh(&neigh->lock);
1991 ndm->ndm_state = neigh->nud_state;
/* Only a valid entry has a meaningful hardware address to report. */
1992 if ((neigh->nud_state & NUD_VALID) &&
1993 nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
1994 read_unlock_bh(&neigh->lock);
1995 goto nla_put_failure;
/* Ages (in jiffies) since last use/confirmation/update. */
1998 ci.ndm_used = now - neigh->used;
1999 ci.ndm_confirmed = now - neigh->confirmed;
2000 ci.ndm_updated = now - neigh->updated;
/* Exclude the reference the caller of this dump holds. */
2001 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2002 read_unlock_bh(&neigh->lock);
2004 NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
2005 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
2007 return nlmsg_end(skb, nlh);
2010 nlmsg_cancel(skb, nlh);
/*
 * neigh_update_notify - fan out a neighbour change: first to in-kernel
 * netevent listeners, then to userspace via an RTM_NEWNEIGH multicast.
 */
2014 static void neigh_update_notify(struct neighbour *neigh)
2016 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2017 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
/*
 * neigh_dump_table - dump all neighbours of one table into @skb,
 * resuming from cb->args[1] (hash bucket) / cb->args[2] (chain index).
 * Walks the hash buckets under tbl->lock (read, BH-disabled).
 */
2020 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2021 struct netlink_callback *cb)
2023 struct neighbour *n;
2024 int rc, h, s_h = cb->args[1];
2025 int idx, s_idx = idx = cb->args[2];
2027 read_lock_bh(&tbl->lock);
2028 for (h = 0; h <= tbl->hash_mask; h++) {
2033 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
2036 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
/* skb full: stop here, resume at this bucket/index next call. */
2039 NLM_F_MULTI) <= 0) {
2040 read_unlock_bh(&tbl->lock);
2046 read_unlock_bh(&tbl->lock);
/*
 * neigh_dump_info - RTM_GETNEIGH dump callback: iterate all tables
 * (optionally filtered by family) and dump each via neigh_dump_table().
 */
2054 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2056 struct net *net = skb->sk->sk_net;
2057 struct neigh_table *tbl;
/* Only the initial network namespace is supported here. */
2060 if (net != &init_net)
2063 read_lock(&neigh_tbl_lock);
2064 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2067 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2068 if (t < s_t || (family && tbl->family != family))
/* Starting a new table: clear the per-table resume cursors. */
2071 memset(&cb->args[1], 0, sizeof(cb->args) -
2072 sizeof(cb->args[0]));
2073 if (neigh_dump_table(tbl, skb, cb) < 0)
2076 read_unlock(&neigh_tbl_lock);
/*
 * neigh_for_each - invoke @cb(neighbour, cookie) on every entry in the
 * table, holding tbl->lock (read, BH-disabled) for the whole walk.
 */
2082 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2086 read_lock_bh(&tbl->lock);
2087 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2088 struct neighbour *n;
2090 for (n = tbl->hash_buckets[chain]; n; n = n->next)
2093 read_unlock_bh(&tbl->lock);
2095 EXPORT_SYMBOL(neigh_for_each);
2097 /* The tbl->lock must be held as a writer and BH disabled. */
/*
 * __neigh_for_each_release - walk every hash chain and let @cb decide,
 * per entry, whether to unlink it; unlinked entries are released via
 * neigh_cleanup_and_release(). Each entry is examined under its own
 * n->lock.
 */
2098 void __neigh_for_each_release(struct neigh_table *tbl,
2099 int (*cb)(struct neighbour *))
2103 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2104 struct neighbour *n, **np;
/* np tracks the link pointer so unlinking is O(1) during the walk. */
2106 np = &tbl->hash_buckets[chain];
2107 while ((n = *np) != NULL) {
2110 write_lock(&n->lock);
2117 write_unlock(&n->lock);
2119 neigh_cleanup_and_release(n);
2123 EXPORT_SYMBOL(__neigh_for_each_release);
2125 #ifdef CONFIG_PROC_FS
/*
 * neigh_get_first - seq_file helper: find the first neighbour entry,
 * scanning buckets from 0 and honouring the NEIGH_SEQ_SKIP_NOARP
 * filter and the optional per-protocol neigh_sub_iter hook.
 */
2127 static struct neighbour *neigh_get_first(struct seq_file *seq)
2129 struct neigh_seq_state *state = seq->private;
2130 struct neigh_table *tbl = state->tbl;
2131 struct neighbour *n = NULL;
2132 int bucket = state->bucket;
/* We are in the neighbour (not proxy) phase of the iteration. */
2134 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2135 for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2136 n = tbl->hash_buckets[bucket];
2139 if (state->neigh_sub_iter) {
2143 v = state->neigh_sub_iter(state, n, &fakep);
2147 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
/* Skip entries that are purely NUD_NOARP when filtering. */
2149 if (n->nud_state & ~NUD_NOARP)
2158 state->bucket = bucket;
/*
 * neigh_get_next - seq_file helper: advance from @n to the next
 * neighbour entry, continuing along the chain and then into later
 * buckets, applying the same sub-iter/NOARP filtering as
 * neigh_get_first().
 */
2163 static struct neighbour *neigh_get_next(struct seq_file *seq,
2164 struct neighbour *n,
2167 struct neigh_seq_state *state = seq->private;
2168 struct neigh_table *tbl = state->tbl;
2170 if (state->neigh_sub_iter) {
2171 void *v = state->neigh_sub_iter(state, n, pos);
2179 if (state->neigh_sub_iter) {
2180 void *v = state->neigh_sub_iter(state, n, pos);
2185 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2188 if (n->nud_state & ~NUD_NOARP)
/* Chain exhausted: move on to the next hash bucket. */
2197 if (++state->bucket > tbl->hash_mask)
2200 n = tbl->hash_buckets[state->bucket];
/*
 * neigh_get_idx - seq_file helper: return the neighbour at position
 * *pos, stepping forward from the first entry; NULL past the end.
 */
2208 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2210 struct neighbour *n = neigh_get_first(seq);
2214 n = neigh_get_next(seq, n, pos);
2219 return *pos ? NULL : n;
/*
 * pneigh_get_first - seq_file helper: first proxy-neighbour entry,
 * scanning the PNEIGH_HASHMASK+1 proxy hash buckets from 0.
 */
2222 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2224 struct neigh_seq_state *state = seq->private;
2225 struct neigh_table *tbl = state->tbl;
2226 struct pneigh_entry *pn = NULL;
2227 int bucket = state->bucket;
/* Mark that the iterator has switched to the proxy phase. */
2229 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2230 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2231 pn = tbl->phash_buckets[bucket];
2235 state->bucket = bucket;
/*
 * pneigh_get_next - seq_file helper: advance from @pn to the next
 * proxy entry, rolling over into subsequent proxy hash buckets.
 */
2240 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2241 struct pneigh_entry *pn,
2244 struct neigh_seq_state *state = seq->private;
2245 struct neigh_table *tbl = state->tbl;
2249 if (++state->bucket > PNEIGH_HASHMASK)
2251 pn = tbl->phash_buckets[state->bucket];
/*
 * pneigh_get_idx - seq_file helper: proxy entry at position *pos,
 * mirroring neigh_get_idx() for the proxy phase.
 */
2262 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2264 struct pneigh_entry *pn = pneigh_get_first(seq);
2268 pn = pneigh_get_next(seq, pn, pos);
2273 return *pos ? NULL : pn;
/*
 * neigh_get_idx_any - position lookup across both phases: regular
 * neighbours first, then (unless NEIGH_SEQ_NEIGH_ONLY) proxy entries.
 */
2276 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2278 struct neigh_seq_state *state = seq->private;
2281 rc = neigh_get_idx(seq, pos);
2282 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2283 rc = pneigh_get_idx(seq, pos);
/*
 * neigh_seq_start - seq_file ->start for neighbour /proc files.
 * Takes tbl->lock (read, BH-disabled); it is released in
 * neigh_seq_stop(). Position 0 yields SEQ_START_TOKEN for the header.
 */
2288 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2290 struct neigh_seq_state *state = seq->private;
2291 loff_t pos_minus_one;
2295 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2297 read_lock_bh(&tbl->lock);
/* *pos counts the header token, so the entry index is *pos - 1. */
2299 pos_minus_one = *pos - 1;
2300 return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2302 EXPORT_SYMBOL(neigh_seq_start);
/*
 * neigh_seq_next - seq_file ->next: step from the header token into the
 * neighbour phase, then within it, then hand over to the proxy phase
 * when the regular entries run out (unless NEIGH_SEQ_NEIGH_ONLY).
 */
2304 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2306 struct neigh_seq_state *state;
2309 if (v == SEQ_START_TOKEN) {
2310 rc = neigh_get_idx(seq, pos);
2314 state = seq->private;
2315 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2316 rc = neigh_get_next(seq, v, NULL);
/* Regular entries exhausted: fall through to proxy entries. */
2319 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2320 rc = pneigh_get_first(seq);
/* Already in the proxy phase — must not be a neigh-only iterator. */
2322 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2323 rc = pneigh_get_next(seq, v, NULL);
2329 EXPORT_SYMBOL(neigh_seq_next);
/*
 * neigh_seq_stop - seq_file ->stop: drop the table lock taken by
 * neigh_seq_start().
 */
2331 void neigh_seq_stop(struct seq_file *seq, void *v)
2333 struct neigh_seq_state *state = seq->private;
2334 struct neigh_table *tbl = state->tbl;
2336 read_unlock_bh(&tbl->lock);
2338 EXPORT_SYMBOL(neigh_seq_stop);
2340 /* statistics via seq_file */
/*
 * neigh_stat_seq_start - /proc/net/stat ->start: position 0 is the
 * header token; positions >= 1 map to the per-CPU statistics of the
 * (pos-1)'th possible CPU.
 */
2342 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2344 struct proc_dir_entry *pde = seq->private;
2345 struct neigh_table *tbl = pde->data;
2349 return SEQ_START_TOKEN;
2351 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
/* Skip CPU ids that cannot exist on this system. */
2352 if (!cpu_possible(cpu))
2355 return per_cpu_ptr(tbl->stats, cpu);
/*
 * neigh_stat_seq_next - advance to the next possible CPU's statistics
 * block; NULL once all CPUs have been visited.
 */
2360 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2362 struct proc_dir_entry *pde = seq->private;
2363 struct neigh_table *tbl = pde->data;
2366 for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2367 if (!cpu_possible(cpu))
2370 return per_cpu_ptr(tbl->stats, cpu);
/* No iteration state to release — intentionally empty ->stop hook. */
2375 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
/*
 * neigh_stat_seq_show - print one row: the column header for the start
 * token, otherwise one line of per-CPU neighbour statistics.
 */
2380 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2382 struct proc_dir_entry *pde = seq->private;
2383 struct neigh_table *tbl = pde->data;
2384 struct neigh_statistics *st = v;
2386 if (v == SEQ_START_TOKEN) {
2387 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
/* entries is table-global; the remaining columns are per-CPU. */
2391 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2392 "%08lx %08lx %08lx %08lx\n",
2393 atomic_read(&tbl->entries),
2404 st->rcv_probes_mcast,
2405 st->rcv_probes_ucast,
2407 st->periodic_gc_runs,
/* seq_file operations for the per-table statistics /proc file. */
2414 static const struct seq_operations neigh_stat_seq_ops = {
2415 .start = neigh_stat_seq_start,
2416 .next = neigh_stat_seq_next,
2417 .stop = neigh_stat_seq_stop,
2418 .show = neigh_stat_seq_show,
/*
 * neigh_stat_seq_open - ->open hook: start the seq iteration and stash
 * the proc entry (which carries the table pointer in ->data) as the
 * seq_file's private state.
 */
2421 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2423 int ret = seq_open(file, &neigh_stat_seq_ops);
2426 struct seq_file *sf = file->private_data;
2427 sf->private = PDE(inode);
/* file_operations for the statistics /proc entry (seq_file based). */
2432 static const struct file_operations neigh_stat_seq_fops = {
2433 .owner = THIS_MODULE,
2434 .open = neigh_stat_seq_open,
2436 .llseek = seq_lseek,
2437 .release = seq_release,
2440 #endif /* CONFIG_PROC_FS */
/*
 * neigh_nlmsg_size - worst-case size of an RTM_NEWNEIGH message,
 * used to pre-allocate the notification skb in __neigh_notify().
 * MAX_ADDR_LEN bounds both the key (NDA_DST) and lladdr attributes.
 */
2442 static inline size_t neigh_nlmsg_size(void)
2444 return NLMSG_ALIGN(sizeof(struct ndmsg))
2445 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2446 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2447 + nla_total_size(sizeof(struct nda_cacheinfo))
2448 + nla_total_size(4); /* NDA_PROBES */
/*
 * __neigh_notify - broadcast a neighbour event to the RTNLGRP_NEIGH
 * multicast group. Allocation is GFP_ATOMIC since callers may hold
 * locks / run in softirq context.
 */
2451 static void __neigh_notify(struct neighbour *n, int type, int flags)
2453 struct sk_buff *skb;
2456 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2460 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2462 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2463 WARN_ON(err == -EMSGSIZE);
2467 err = rtnl_notify(skb, &init_net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
/* On failure, record the error on the rtnetlink socket for listeners. */
2470 rtnl_set_sk_err(&init_net, RTNLGRP_NEIGH, err);
/*
 * neigh_app_ns - ask userspace (ARPD) to resolve this neighbour by
 * sending an RTM_GETNEIGH request notification.
 */
2474 void neigh_app_ns(struct neighbour *n)
2476 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2478 #endif /* CONFIG_ARPD */
2480 #ifdef CONFIG_SYSCTL
/*
 * Template sysctl table for per-protocol/per-device neighbour tuning.
 * neigh_sysctl_register() kmemdup()s this and patches each entry's
 * .data pointer to the target neigh_parms fields (see the fixed
 * neigh_vars[0..17] indices used there).
 */
2482 static struct neigh_sysctl_table {
2483 struct ctl_table_header *sysctl_header;
2484 struct ctl_table neigh_vars[__NET_NEIGH_MAX];
2486 } neigh_sysctl_template __read_mostly = {
2489 .ctl_name = NET_NEIGH_MCAST_SOLICIT,
2490 .procname = "mcast_solicit",
2491 .maxlen = sizeof(int),
2493 .proc_handler = &proc_dointvec,
2496 .ctl_name = NET_NEIGH_UCAST_SOLICIT,
2497 .procname = "ucast_solicit",
2498 .maxlen = sizeof(int),
2500 .proc_handler = &proc_dointvec,
2503 .ctl_name = NET_NEIGH_APP_SOLICIT,
2504 .procname = "app_solicit",
2505 .maxlen = sizeof(int),
2507 .proc_handler = &proc_dointvec,
/* Delay values exposed in user-HZ units. */
2510 .procname = "retrans_time",
2511 .maxlen = sizeof(int),
2513 .proc_handler = &proc_dointvec_userhz_jiffies,
2516 .ctl_name = NET_NEIGH_REACHABLE_TIME,
2517 .procname = "base_reachable_time",
2518 .maxlen = sizeof(int),
2520 .proc_handler = &proc_dointvec_jiffies,
2521 .strategy = &sysctl_jiffies,
2524 .ctl_name = NET_NEIGH_DELAY_PROBE_TIME,
2525 .procname = "delay_first_probe_time",
2526 .maxlen = sizeof(int),
2528 .proc_handler = &proc_dointvec_jiffies,
2529 .strategy = &sysctl_jiffies,
2532 .ctl_name = NET_NEIGH_GC_STALE_TIME,
2533 .procname = "gc_stale_time",
2534 .maxlen = sizeof(int),
2536 .proc_handler = &proc_dointvec_jiffies,
2537 .strategy = &sysctl_jiffies,
2540 .ctl_name = NET_NEIGH_UNRES_QLEN,
2541 .procname = "unres_qlen",
2542 .maxlen = sizeof(int),
2544 .proc_handler = &proc_dointvec,
2547 .ctl_name = NET_NEIGH_PROXY_QLEN,
2548 .procname = "proxy_qlen",
2549 .maxlen = sizeof(int),
2551 .proc_handler = &proc_dointvec,
2554 .procname = "anycast_delay",
2555 .maxlen = sizeof(int),
2557 .proc_handler = &proc_dointvec_userhz_jiffies,
2560 .procname = "proxy_delay",
2561 .maxlen = sizeof(int),
2563 .proc_handler = &proc_dointvec_userhz_jiffies,
2566 .procname = "locktime",
2567 .maxlen = sizeof(int),
2569 .proc_handler = &proc_dointvec_userhz_jiffies,
/* Millisecond mirrors of retrans_time / base_reachable_time. */
2572 .ctl_name = NET_NEIGH_RETRANS_TIME_MS,
2573 .procname = "retrans_time_ms",
2574 .maxlen = sizeof(int),
2576 .proc_handler = &proc_dointvec_ms_jiffies,
2577 .strategy = &sysctl_ms_jiffies,
2580 .ctl_name = NET_NEIGH_REACHABLE_TIME_MS,
2581 .procname = "base_reachable_time_ms",
2582 .maxlen = sizeof(int),
2584 .proc_handler = &proc_dointvec_ms_jiffies,
2585 .strategy = &sysctl_ms_jiffies,
/* Table-wide GC knobs — only present on the "default" (non-device) dir. */
2588 .ctl_name = NET_NEIGH_GC_INTERVAL,
2589 .procname = "gc_interval",
2590 .maxlen = sizeof(int),
2592 .proc_handler = &proc_dointvec_jiffies,
2593 .strategy = &sysctl_jiffies,
2596 .ctl_name = NET_NEIGH_GC_THRESH1,
2597 .procname = "gc_thresh1",
2598 .maxlen = sizeof(int),
2600 .proc_handler = &proc_dointvec,
2603 .ctl_name = NET_NEIGH_GC_THRESH2,
2604 .procname = "gc_thresh2",
2605 .maxlen = sizeof(int),
2607 .proc_handler = &proc_dointvec,
2610 .ctl_name = NET_NEIGH_GC_THRESH3,
2611 .procname = "gc_thresh3",
2612 .maxlen = sizeof(int),
2614 .proc_handler = &proc_dointvec,
/*
 * neigh_sysctl_register - create the net/<proto>/neigh/<dev|default>/
 * sysctl directory for one neigh_parms set.
 *
 * Duplicates neigh_sysctl_template, points each entry's .data at the
 * matching field of @p, optionally overrides the time-valued entries'
 * handlers with the caller-provided @handler/@strategy, and registers
 * the tree via register_sysctl_paths(). The resulting table is stored
 * in p->sysctl_table for neigh_sysctl_unregister().
 */
2620 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2621 int p_id, int pdev_id, char *p_name,
2622 proc_handler *handler, ctl_handler *strategy)
2624 struct neigh_sysctl_table *t;
2625 const char *dev_name_source = NULL;
/* Indices into neigh_path[] below. */
2627 #define NEIGH_CTL_PATH_ROOT 0
2628 #define NEIGH_CTL_PATH_PROTO 1
2629 #define NEIGH_CTL_PATH_NEIGH 2
2630 #define NEIGH_CTL_PATH_DEV 3
2632 struct ctl_path neigh_path[] = {
2633 { .procname = "net", .ctl_name = CTL_NET, },
2634 { .procname = "proto", .ctl_name = 0, },
2635 { .procname = "neigh", .ctl_name = 0, },
2636 { .procname = "default", .ctl_name = NET_PROTO_CONF_DEFAULT, },
2640 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
/* Wire each template slot to the corresponding neigh_parms field. */
2644 t->neigh_vars[0].data = &p->mcast_probes;
2645 t->neigh_vars[1].data = &p->ucast_probes;
2646 t->neigh_vars[2].data = &p->app_probes;
2647 t->neigh_vars[3].data = &p->retrans_time;
2648 t->neigh_vars[4].data = &p->base_reachable_time;
2649 t->neigh_vars[5].data = &p->delay_probe_time;
2650 t->neigh_vars[6].data = &p->gc_staletime;
2651 t->neigh_vars[7].data = &p->queue_len;
2652 t->neigh_vars[8].data = &p->proxy_qlen;
2653 t->neigh_vars[9].data = &p->anycast_delay;
2654 t->neigh_vars[10].data = &p->proxy_delay;
2655 t->neigh_vars[11].data = &p->locktime;
/* Slots 12/13 are the *_ms mirrors of retrans/base_reachable_time. */
2656 t->neigh_vars[12].data = &p->retrans_time;
2657 t->neigh_vars[13].data = &p->base_reachable_time;
2660 dev_name_source = dev->name;
2661 neigh_path[NEIGH_CTL_PATH_DEV].ctl_name = dev->ifindex;
2662 /* Terminate the table early */
/* Per-device dirs do not get the table-wide GC entries (14..17). */
2663 memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
2665 dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
/* Default dir: GC knobs live just past the parms struct (see callers;
 * NOTE(review): layout dependency on data following *p — confirm). */
2666 t->neigh_vars[14].data = (int *)(p + 1);
2667 t->neigh_vars[15].data = (int *)(p + 1) + 1;
2668 t->neigh_vars[16].data = (int *)(p + 1) + 2;
2669 t->neigh_vars[17].data = (int *)(p + 1) + 3;
/* Protocol-specific override for the four time-valued entries. */
2673 if (handler || strategy) {
2675 t->neigh_vars[3].proc_handler = handler;
2676 t->neigh_vars[3].strategy = strategy;
2677 t->neigh_vars[3].extra1 = dev;
2679 t->neigh_vars[3].ctl_name = CTL_UNNUMBERED;
2681 t->neigh_vars[4].proc_handler = handler;
2682 t->neigh_vars[4].strategy = strategy;
2683 t->neigh_vars[4].extra1 = dev;
2685 t->neigh_vars[4].ctl_name = CTL_UNNUMBERED;
2686 /* RetransTime (in milliseconds)*/
2687 t->neigh_vars[12].proc_handler = handler;
2688 t->neigh_vars[12].strategy = strategy;
2689 t->neigh_vars[12].extra1 = dev;
2691 t->neigh_vars[12].ctl_name = CTL_UNNUMBERED;
2692 /* ReachableTime (in milliseconds) */
2693 t->neigh_vars[13].proc_handler = handler;
2694 t->neigh_vars[13].strategy = strategy;
2695 t->neigh_vars[13].extra1 = dev;
2697 t->neigh_vars[13].ctl_name = CTL_UNNUMBERED;
/* Own copy of the directory name; freed on unregister. */
2700 t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2704 neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
2705 neigh_path[NEIGH_CTL_PATH_NEIGH].ctl_name = pdev_id;
2706 neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2707 neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id;
2709 t->sysctl_header = register_sysctl_paths(neigh_path, t->neigh_vars);
2710 if (!t->sysctl_header)
2713 p->sysctl_table = t;
/*
 * neigh_sysctl_unregister - tear down the sysctl tree created by
 * neigh_sysctl_register(). Clears p->sysctl_table first so the parms
 * set no longer references the table while it is being unregistered.
 */
2724 void neigh_sysctl_unregister(struct neigh_parms *p)
2726 if (p->sysctl_table) {
2727 struct neigh_sysctl_table *t = p->sysctl_table;
2728 p->sysctl_table = NULL;
2729 unregister_sysctl_table(t->sysctl_header);
2735 #endif /* CONFIG_SYSCTL */
/*
 * neigh_init - register the PF_UNSPEC rtnetlink handlers for neighbour
 * and neighbour-table messages; runs at subsys_initcall time.
 */
2737 static int __init neigh_init(void)
2739 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
2740 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
2741 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
2743 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
2744 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
2749 subsys_initcall(neigh_init);
2751 EXPORT_SYMBOL(__neigh_event_send);
2752 EXPORT_SYMBOL(neigh_changeaddr);
2753 EXPORT_SYMBOL(neigh_compat_output);
2754 EXPORT_SYMBOL(neigh_connected_output);
2755 EXPORT_SYMBOL(neigh_create);
2756 EXPORT_SYMBOL(neigh_destroy);
2757 EXPORT_SYMBOL(neigh_event_ns);
2758 EXPORT_SYMBOL(neigh_ifdown);
2759 EXPORT_SYMBOL(neigh_lookup);
2760 EXPORT_SYMBOL(neigh_lookup_nodev);
2761 EXPORT_SYMBOL(neigh_parms_alloc);
2762 EXPORT_SYMBOL(neigh_parms_release);
2763 EXPORT_SYMBOL(neigh_rand_reach_time);
2764 EXPORT_SYMBOL(neigh_resolve_output);
2765 EXPORT_SYMBOL(neigh_table_clear);
2766 EXPORT_SYMBOL(neigh_table_init);
2767 EXPORT_SYMBOL(neigh_table_init_no_netlink);
2768 EXPORT_SYMBOL(neigh_update);
2769 EXPORT_SYMBOL(pneigh_enqueue);
2770 EXPORT_SYMBOL(pneigh_lookup);
2773 EXPORT_SYMBOL(neigh_app_ns);
2775 #ifdef CONFIG_SYSCTL
2776 EXPORT_SYMBOL(neigh_sysctl_register);
2777 EXPORT_SYMBOL(neigh_sysctl_unregister);