Pull hp-machvec into release branch
[pandora-kernel.git] / net / core / neighbour.c
1 /*
2  *      Generic address resolution entity
3  *
4  *      Authors:
5  *      Pedro Roque             <roque@di.fc.ul.pt>
6  *      Alexey Kuznetsov        <kuznet@ms2.inr.ac.ru>
7  *
8  *      This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *      Fixes:
14  *      Vitaly E. Lavrov        releasing NULL neighbor in neigh_add.
15  *      Harald Welte            Add neighbour cache statistics like rtstat
16  */
17
18 #include <linux/config.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/socket.h>
23 #include <linux/sched.h>
24 #include <linux/netdevice.h>
25 #include <linux/proc_fs.h>
26 #ifdef CONFIG_SYSCTL
27 #include <linux/sysctl.h>
28 #endif
29 #include <linux/times.h>
30 #include <net/neighbour.h>
31 #include <net/dst.h>
32 #include <net/sock.h>
33 #include <linux/rtnetlink.h>
34 #include <linux/random.h>
35 #include <linux/string.h>
36
37 #define NEIGH_DEBUG 1
38
39 #define NEIGH_PRINTK(x...) printk(x)
40 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
41 #define NEIGH_PRINTK0 NEIGH_PRINTK
42 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
43 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
44
45 #if NEIGH_DEBUG >= 1
46 #undef NEIGH_PRINTK1
47 #define NEIGH_PRINTK1 NEIGH_PRINTK
48 #endif
49 #if NEIGH_DEBUG >= 2
50 #undef NEIGH_PRINTK2
51 #define NEIGH_PRINTK2 NEIGH_PRINTK
52 #endif
53
54 #define PNEIGH_HASHMASK         0xF
55
56 static void neigh_timer_handler(unsigned long arg);
57 #ifdef CONFIG_ARPD
58 static void neigh_app_notify(struct neighbour *n);
59 #endif
60 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
61 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
62
63 static struct neigh_table *neigh_tables;
64 #ifdef CONFIG_PROC_FS
65 static struct file_operations neigh_stat_seq_fops;
66 #endif
67
68 /*
69    Neighbour hash table buckets are protected with rwlock tbl->lock.
70
71    - All the scans/updates to hash buckets MUST be made under this lock.
72    - NOTHING clever should be made under this lock: no callbacks
73      to protocol backends, no attempts to send something to network.
74      It will result in deadlocks, if backend/driver wants to use neighbour
75      cache.
76    - If the entry requires some non-trivial actions, increase
77      its reference count and release table lock.
78
79    Neighbour entries are protected:
80    - with reference count.
81    - with rwlock neigh->lock
82
83    Reference count prevents destruction.
84
85    neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
87     - timer
88     - resolution queue
89
   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
93    not make callbacks to neighbour tables.
94
95    The last lock is neigh_tbl_lock. It is pure SMP lock, protecting
   list of neighbour tables. This list is used only in process context.
97  */
98
99 static DEFINE_RWLOCK(neigh_tbl_lock);
100
/* Stand-in output function for dead/unresolvable entries: drop the
 * packet and report the neighbour as unreachable. */
static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}
106
107 /*
108  * It is random distribution in the interval (1/2)*base...(3/2)*base.
109  * It corresponds to default IPv6 settings and is not overridable,
110  * because it is really reasonable choice.
111  */
112
/* Pick a random value in [base/2, 3*base/2); 0 when base is 0. */
unsigned long neigh_rand_reach_time(unsigned long base)
{
	if (!base)
		return 0;
	return (net_random() % base) + (base >> 1);
}
117
118
/*
 * Walk every hash bucket and drop entries that are unreferenced and
 * not permanent.  Takes tbl->lock for writing; called from
 * neigh_alloc() when the table exceeds its gc thresholds.
 * Returns 1 if at least one entry was reclaimed, 0 otherwise.
 */
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				/* refcnt == 1: only the hash table holds it;
				 * unlink and drop that last reference. */
				*np     = n->next;
				n->dead = 1;
				shrunk  = 1;
				write_unlock(&n->lock);
				neigh_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	/* Remember the flush time so neigh_alloc() can rate-limit
	 * forced GC to once per 5 seconds at gc_thresh2. */
	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
157
158 static int neigh_del_timer(struct neighbour *n)
159 {
160         if ((n->nud_state & NUD_IN_TIMER) &&
161             del_timer(&n->timer)) {
162                 neigh_release(n);
163                 return 1;
164         }
165         return 0;
166 }
167
168 static void pneigh_queue_purge(struct sk_buff_head *list)
169 {
170         struct sk_buff *skb;
171
172         while ((skb = skb_dequeue(list)) != NULL) {
173                 dev_put(skb->dev);
174                 kfree_skb(skb);
175         }
176 }
177
/* Unlink and release every cache entry belonging to @dev (all devices
 * when @dev is NULL).  Caller must hold tbl->lock for writing. */
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			/* Unlink from the bucket before mutating the entry. */
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			/* Drop the hash table's reference; destruction happens
			 * here or when the last external user releases it. */
			neigh_release(n);
		}
	}
}
218
/* Flush all entries for @dev from @tbl after its hardware address
 * changed, under the table write lock. */
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
225
/* Device teardown: flush both the neighbour cache and the proxy
 * entries for @dev, then stop the proxy timer and drain its queue.
 * del_timer_sync() runs only after tbl->lock is dropped — presumably
 * because the proxy timer handler takes the same lock (confirm against
 * neigh_proxy_process).  Always returns 0. */
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
237
/*
 * Allocate and minimally initialize a neighbour entry for @tbl.
 *
 * If the table is at/above gc_thresh3, or above gc_thresh2 with no
 * flush in the last 5 seconds, a synchronous forced GC runs first;
 * allocation fails when nothing could be reclaimed and gc_thresh3 is
 * still exceeded.  Returns the new entry (refcnt 1, dead == 1, not
 * yet hashed — neigh_create() links it in and clears dead) or NULL.
 */
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	/* Reserve our slot in the entry count up front; the
	 * out_entries path undoes this on any failure. */
	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
	if (!n)
		goto out_entries;

	memset(n, 0, tbl->entry_size);

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data	  = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	/* Marked dead until neigh_create() hashes it. */
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
280
281 static struct neighbour **neigh_hash_alloc(unsigned int entries)
282 {
283         unsigned long size = entries * sizeof(struct neighbour *);
284         struct neighbour **ret;
285
286         if (size <= PAGE_SIZE) {
287                 ret = kmalloc(size, GFP_ATOMIC);
288         } else {
289                 ret = (struct neighbour **)
290                         __get_free_pages(GFP_ATOMIC, get_order(size));
291         }
292         if (ret)
293                 memset(ret, 0, size);
294
295         return ret;
296 }
297
298 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
299 {
300         unsigned long size = entries * sizeof(struct neighbour *);
301
302         if (size <= PAGE_SIZE)
303                 kfree(hash);
304         else
305                 free_pages((unsigned long)hash, get_order(size));
306 }
307
/* Rehash @tbl into @new_entries buckets (must be a power of two).
 * Caller must hold tbl->lock for writing.  On allocation failure the
 * old table is kept unchanged. */
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(new_entries & (new_entries - 1));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	/* Re-seed on every grow; presumably tbl->hash() mixes hash_rnd
	 * into the bucket choice — not visible here, confirm. */
	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}
343
344 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
345                                struct net_device *dev)
346 {
347         struct neighbour *n;
348         int key_len = tbl->key_len;
349         u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
350         
351         NEIGH_CACHE_STAT_INC(tbl, lookups);
352
353         read_lock_bh(&tbl->lock);
354         for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
355                 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
356                         neigh_hold(n);
357                         NEIGH_CACHE_STAT_INC(tbl, hits);
358                         break;
359                 }
360         }
361         read_unlock_bh(&tbl->lock);
362         return n;
363 }
364
365 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
366 {
367         struct neighbour *n;
368         int key_len = tbl->key_len;
369         u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;
370
371         NEIGH_CACHE_STAT_INC(tbl, lookups);
372
373         read_lock_bh(&tbl->lock);
374         for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
375                 if (!memcmp(n->primary_key, pkey, key_len)) {
376                         neigh_hold(n);
377                         NEIGH_CACHE_STAT_INC(tbl, hits);
378                         break;
379                 }
380         }
381         read_unlock_bh(&tbl->lock);
382         return n;
383 }
384
/*
 * Allocate, construct and hash a new entry for (@pkey, @dev).
 * Returns the entry with an extra reference held for the caller, the
 * already-existing entry if another CPU inserted one for the same key
 * while we ran unlocked, or an ERR_PTR() on failure.
 */
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Backdate confirmation so the fresh entry is not treated as
	 * recently confirmed. */
	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	/* Hash AFTER a possible grow: mask and hash seed may have changed. */
	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	/* Re-check for a duplicate inserted while we ran without the lock. */
	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	/* One reference stays with the hash table, one goes to the caller. */
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
452
/*
 * Look up the proxy-neighbour entry for (@pkey, @dev); when @creat is
 * set, allocate and insert one if none exists (may sleep: GFP_KERNEL).
 * Returns NULL when not found, or on allocation/constructor failure.
 *
 * NOTE(review): the table lock is dropped between the failed lookup
 * and the insertion, so two concurrent creators could insert duplicate
 * entries — confirm callers serialize creation (e.g. under RTNL).
 */
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	/* Fold the key's last 4 bytes down to a PNEIGH_HASHMASK bucket. */
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	/* A device-less entry (n->dev == NULL) matches any device. */
	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	/* Key bytes are stored inline after the struct. */
	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
503
504
/* Remove and free the proxy entry matching (@pkey, @dev) exactly.
 * Returns 0 on success, -ENOENT when no such entry exists. */
int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	/* Same hash as pneigh_lookup(): fold the key's last 4 bytes. */
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			*np = n->next;
			/* Unlock before the destructor — presumably it may
			 * block or re-enter the table; confirm. */
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
534
/* Drop all proxy entries for @dev (every device when @dev is NULL).
 * Caller holds tbl->lock for writing (see neigh_ifdown()).  Always
 * returns -ENOENT; the only visible caller ignores the result. */
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
557
558
/*
 *	neighbour must already be out of the table (dead == 1);
 *	releases everything the entry owns: cached hardware headers,
 *	queued packets, the device and parms references, and finally
 *	the slab object itself.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	/* Refuse to free an entry still linked into the table. */
	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	/* Detach cached hardware headers; point their output at
	 * neigh_blackhole so any late user just drops packets. */
	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;
		write_lock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_unlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	if (neigh->ops && neigh->ops->destructor)
		(neigh->ops->destructor)(neigh);

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
602
603 /* Neighbour state is suspicious;
604    disable fast path.
605
606    Called with write_locked neigh.
607  */
608 static void neigh_suspect(struct neighbour *neigh)
609 {
610         struct hh_cache *hh;
611
612         NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
613
614         neigh->output = neigh->ops->output;
615
616         for (hh = neigh->hh; hh; hh = hh->hh_next)
617                 hh->hh_output = neigh->ops->output;
618 }
619
620 /* Neighbour state is OK;
621    enable fast path.
622
623    Called with write_locked neigh.
624  */
625 static void neigh_connect(struct neighbour *neigh)
626 {
627         struct hh_cache *hh;
628
629         NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
630
631         neigh->output = neigh->ops->connected_output;
632
633         for (hh = neigh->hh; hh; hh = hh->hh_next)
634                 hh->hh_output = neigh->ops->hh_output;
635 }
636
/*
 * Per-table periodic GC timer: re-randomizes reachable_time for every
 * parms instance at most once per 300s, then scans ONE hash chain per
 * run, dropping unreferenced entries that are FAILED or idle longer
 * than gc_staletime.  Re-arms itself so all buckets are covered every
 * base_reachable_time/2 ticks.
 */
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	/* Advance the rover: each run handles exactly one bucket. */
	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		/* Leave permanent entries and entries with a pending
		 * resolution timer alone. */
		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
704
705 static __inline__ int neigh_max_probes(struct neighbour *n)
706 {
707         struct neigh_parms *p = n->parms;
708         return (n->nud_state & NUD_PROBE ?
709                 p->ucast_probes :
710                 p->ucast_probes + p->app_probes + p->mcast_probes);
711 }
712
713 static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
714 {
715         if (unlikely(mod_timer(&n->timer, when))) {
716                 printk("NEIGH: BUG, double timer add, state is %x\n",
717                        n->nud_state);
718                 dump_stack();
719         }
720 }
721
722 /* Called when a timer expires for a neighbour entry. */
723
/* Called when a timer expires for a neighbour entry.
 *
 * Drives the NUD state machine: REACHABLE may decay to DELAY or STALE,
 * DELAY may recover to REACHABLE or escalate to PROBE, and PROBE /
 * INCOMPLETE entries that exhaust neigh_max_probes() become FAILED.
 * The timer holds its own reference on the entry; it is released at
 * the end, and re-taken if the timer is re-armed. */
static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now, 
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh_suspect(neigh);
		}
	} else if (state & NUD_DELAY) {
		/* A confirmation that arrived during DELAY revalidates
		 * the entry without probing. */
		if (time_before_eq(now, 
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh_connect(neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* It is very thin place. report_unreachable is very complicated
		   routine. Particularly, it can hit the same neighbour entry!

		   So that, we try to be accurate and avoid dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		/* Clamp the re-arm interval to at least HZ/2 from now. */
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	/* Drop the reference the (now-expired) timer held. */
	neigh_release(neigh);
}
828
/*
 * Slow path of event send: decide whether @skb may be transmitted now.
 * Returns 0 when the caller can transmit immediately, 1 when
 * resolution is pending (@skb queued, possibly evicting the oldest
 * queued packet when the queue is full) or has just failed (@skb
 * freed here).  Caller must hold a reference on @neigh.
 */
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	/* Already usable (or being verified): transmit right away. */
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;
	
	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			/* Start resolution; the timer takes its own ref. */
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state     = NUD_INCOMPLETE;
			neigh_hold(neigh);
			neigh_add_timer(neigh, now + 1);
		} else {
			/* No way to probe at all: fail immediately. */
			neigh->nud_state = NUD_FAILED;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh_hold(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			/* Queue full: drop the oldest packet to make room. */
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
881
882 static __inline__ void neigh_update_hhs(struct neighbour *neigh)
883 {
884         struct hh_cache *hh;
885         void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
886                 neigh->dev->header_cache_update;
887
888         if (update) {
889                 for (hh = neigh->hh; hh; hh = hh->hh_next) {
890                         write_lock_bh(&hh->hh_lock);
891                         update(hh, neigh->dev, neigh->ha);
892                         write_unlock_bh(&hh->hh_lock);
893                 }
894         }
895 }
896
897
898
899 /* Generic update routine.
900    -- lladdr is new lladdr or NULL, if it is not supplied.
901    -- new    is new state.
902    -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will mark an existing "connected"
				lladdr as suspect instead of overriding it
				if it is different.
				It also allows retaining the current state
				if the lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER indicates that the neighbour is known to be
				a router.
916
917    Caller MUST hold reference count on the entry.
918  */
919
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
#ifdef CONFIG_ARPD
	int notify = 0;
#endif
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	/* Non-administrative updates must not touch NOARP or PERMANENT
	 * entries. */
	if (!(flags & NEIGH_UPDATE_F_ADMIN) && 
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	/* Transition to an invalid state: no lladdr handling is needed. */
	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
#ifdef CONFIG_ARPD
		/* Only notify userspace if the entry was previously usable. */
		notify = old & NUD_VALID;
#endif
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) && 
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				/* Keep the old address but demote the entry
				 * to STALE so it gets re-verified. */
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			/* Take a reference for the pending timer. */
			neigh_hold(neigh);
			neigh_add_timer(neigh, (jiffies + 
						((new & NUD_REACHABLE) ? 
						 neigh->parms->reachable_time :
						 0)));
		}
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		/* Refresh all cached hard headers with the new address. */
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			/* Set "confirmed" well into the past since the new
			 * address has not been confirmed in this state. */
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
#ifdef CONFIG_ARPD
		notify = 1;
#endif
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			/* Drop the lock across the output call; re-check
			 * nud_state each iteration after retaking it. */
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	return err;
}
1061
1062 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1063                                  u8 *lladdr, void *saddr,
1064                                  struct net_device *dev)
1065 {
1066         struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1067                                                  lladdr || !dev->addr_len);
1068         if (neigh)
1069                 neigh_update(neigh, lladdr, NUD_STALE, 
1070                              NEIGH_UPDATE_F_OVERRIDE);
1071         return neigh;
1072 }
1073
/* Attach a cached hard header of type @protocol to @dst, reusing the
 * neighbour's existing hh entry for that protocol when present.
 * Called with neigh->lock write-held by the caller (see
 * neigh_resolve_output). On any failure dst->hh simply stays NULL. */
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  u16 protocol)
{
	struct hh_cache *hh;
	struct net_device *dev = dst->dev;

	/* Reuse an existing cache entry for this protocol if one exists. */
	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		memset(hh, 0, sizeof(struct hh_cache));
		rwlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		/* Let the device fill in the cached header; non-zero
		 * return means it could not. */
		if (dev->hard_header_cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			/* One reference for the neighbour's hh list. */
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh       = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		/* And one reference for the dst entry. */
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}
1108
1109 /* This function can be used in contexts, where only old dev_queue_xmit
1110    worked, f.e. if you want to override normal output path (eql, shaper),
1111    but resolution is not made yet.
1112  */
1113
1114 int neigh_compat_output(struct sk_buff *skb)
1115 {
1116         struct net_device *dev = skb->dev;
1117
1118         __skb_pull(skb, skb->nh.raw - skb->data);
1119
1120         if (dev->hard_header &&
1121             dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1122                              skb->len) < 0 &&
1123             dev->rebuild_header(skb))
1124                 return 0;
1125
1126         return dev_queue_xmit(skb);
1127 }
1128
/* Slow and careful output path: resolve the neighbour first if needed,
 * optionally installing a hard-header cache on the dst, then build the
 * link-layer header and transmit. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb->nh.raw - skb->data);

	/* neigh_event_send() returns 0 when the entry is usable now;
	 * otherwise the skb has been queued (or dropped) pending
	 * resolution and we are done here. */
	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			/* Re-check under the lock: another CPU may have
			 * installed dst->hh meanwhile. */
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			/* Read lock suffices here: we only read neigh->ha. */
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
1173
1174 /* As fast as possible without hh cache */
1175
1176 int neigh_connected_output(struct sk_buff *skb)
1177 {
1178         int err;
1179         struct dst_entry *dst = skb->dst;
1180         struct neighbour *neigh = dst->neighbour;
1181         struct net_device *dev = neigh->dev;
1182
1183         __skb_pull(skb, skb->nh.raw - skb->data);
1184
1185         read_lock_bh(&neigh->lock);
1186         err = dev->hard_header(skb, dev, ntohs(skb->protocol),
1187                                neigh->ha, NULL, skb->len);
1188         read_unlock_bh(&neigh->lock);
1189         if (err >= 0)
1190                 err = neigh->ops->queue_xmit(skb);
1191         else {
1192                 err = -EINVAL;
1193                 kfree_skb(skb);
1194         }
1195         return err;
1196 }
1197
/* Timer handler for a table's proxy queue: hand every skb whose
 * scheduled time has arrived to the protocol's proxy_redo hook (or
 * drop it if the device is down), then re-arm the timer for the
 * earliest remaining entry. */
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	/* Walk the queue by hand since entries may be unlinked as we go;
	 * the list head itself marks the end. */
	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		/* <= 0 means this entry is due now. */
		long tdif = NEIGH_CB(back)->sched_next - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			/* Drop the reference taken in pneigh_enqueue(). */
			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
1231
/* Queue @skb on the table's proxy queue to be answered after a random
 * delay of up to p->proxy_delay jiffies. Drops the skb outright when
 * the queue already exceeds p->proxy_qlen. */
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	/* NOTE(review): a proxy_delay of 0 would make this a modulo by
	 * zero — presumably callers only enqueue when proxy_delay is
	 * non-zero; verify against the protocol code. */
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	/* If the timer is already pending, keep the earlier expiry. */
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	/* The route is not needed while the skb sits in the queue. */
	dst_release(skb->dst);
	skb->dst = NULL;
	/* Hold the device until neigh_proxy_process() releases it. */
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
1258
1259
1260 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1261                                       struct neigh_table *tbl)
1262 {
1263         struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);
1264
1265         if (p) {
1266                 memcpy(p, &tbl->parms, sizeof(*p));
1267                 p->tbl            = tbl;
1268                 atomic_set(&p->refcnt, 1);
1269                 INIT_RCU_HEAD(&p->rcu_head);
1270                 p->reachable_time =
1271                                 neigh_rand_reach_time(p->base_reachable_time);
1272                 if (dev) {
1273                         if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
1274                                 kfree(p);
1275                                 return NULL;
1276                         }
1277
1278                         dev_hold(dev);
1279                         p->dev = dev;
1280                 }
1281                 p->sysctl_table = NULL;
1282                 write_lock_bh(&tbl->lock);
1283                 p->next         = tbl->parms.next;
1284                 tbl->parms.next = p;
1285                 write_unlock_bh(&tbl->lock);
1286         }
1287         return p;
1288 }
1289
1290 static void neigh_rcu_free_parms(struct rcu_head *head)
1291 {
1292         struct neigh_parms *parms =
1293                 container_of(head, struct neigh_parms, rcu_head);
1294
1295         neigh_parms_put(parms);
1296 }
1297
1298 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1299 {
1300         struct neigh_parms **p;
1301
1302         if (!parms || parms == &tbl->parms)
1303                 return;
1304         write_lock_bh(&tbl->lock);
1305         for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1306                 if (*p == parms) {
1307                         *p = parms->next;
1308                         parms->dead = 1;
1309                         write_unlock_bh(&tbl->lock);
1310                         if (parms->dev)
1311                                 dev_put(parms->dev);
1312                         call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1313                         return;
1314                 }
1315         }
1316         write_unlock_bh(&tbl->lock);
1317         NEIGH_PRINTK1("neigh_parms_release: not found\n");
1318 }
1319
/* Final destructor for a neigh_parms block, invoked via
 * neigh_parms_put() once the refcount drops to zero. */
void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
1324
1325
/* Initialize and register a protocol's neighbour table: slab cache,
 * per-cpu statistics, neighbour and proxy hash tables, GC and proxy
 * timers, and linkage onto the global neigh_tables list. Allocation
 * failures at boot are fatal (panic). */
void neigh_table_init(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep = kmem_cache_create(tbl->id,
						     tbl->entry_size,
						     0, SLAB_HWCACHE_ALIGN,
						     NULL, NULL);

	if (!tbl->kmem_cachep)
		panic("cannot create neighbour cache");

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");
	
#ifdef CONFIG_PROC_FS
	/* Export per-cpu statistics under /proc/net/stat/<id>. */
	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
	if (!tbl->pde) 
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	/* Start with a minimal two-bucket neighbour hash. */
	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	memset(tbl->phash_buckets, 0, phsize);

	/* Random seed for the neighbour hash function. */
	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data     = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	/* The proxy timer is armed on demand by pneigh_enqueue(). */
	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data     = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init(&tbl->proxy_queue);

	tbl->last_flush = now;
	tbl->last_rand  = now + tbl->parms.reachable_time * 20;
	write_lock(&neigh_tbl_lock);
	tbl->next       = neigh_tables;
	neigh_tables    = tbl;
	write_unlock(&neigh_tbl_lock);
}
1389
/* Tear down a neighbour table (protocol-module unload): stop the
 * timers, purge the proxy queue, flush all entries, unlink the table
 * from the global list and free the hash tables. Always returns 0. */
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	/* Entries still outstanding indicate a refcount leak. */
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	return 0;
}
1418
/* RTM_DELNEIGH netlink handler: delete a neighbour or proxy entry.
 * A normal entry is not freed directly; it is forced to NUD_FAILED
 * (administrative override) and left to the garbage collector. */
int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		/* Found the table for this family; the loop exits on all
		 * paths below, so the list lock can be dropped now. */
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, RTA_DATA(dst_attr), dev);
			goto out_dev_put;
		}

		/* Non-proxy deletion needs a specific device. */
		if (!dev)
			goto out;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			/* ADMIN allows overriding NOARP/PERMANENT too. */
			err = neigh_update(n, NULL, NUD_FAILED, 
					   NEIGH_UPDATE_F_OVERRIDE|
					   NEIGH_UPDATE_F_ADMIN);
			neigh_release(n);
		}
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
1469
/* RTM_NEWNEIGH netlink handler: create or update a neighbour (or
 * proxy) entry, honouring NLM_F_CREATE / NLM_F_EXCL / NLM_F_REPLACE
 * semantics. */
int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *lladdr_attr = nda[NDA_LLADDR - 1];
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		int override = 1;
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		/* Found the table for this family; the loop exits on all
		 * paths below, so the list lock can be dropped now. */
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			/* Proxy entries: lookup-with-create only. */
			err = -ENOBUFS;
			if (pneigh_lookup(tbl, RTA_DATA(dst_attr), dev, 1))
				err = 0;
			goto out_dev_put;
		}

		err = -EINVAL;
		if (!dev)
			goto out;
		if (lladdr_attr && RTA_PAYLOAD(lladdr_attr) < dev->addr_len)
			goto out_dev_put;
	
		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(n);
				goto out_dev_put;
			}
			
			/* Existing entry: only force the lladdr through
			 * when the caller asked to replace it. */
			override = nlh->nlmsg_flags & NLM_F_REPLACE;
		} else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out_dev_put;
		} else {
			n = __neigh_lookup_errno(tbl, RTA_DATA(dst_attr), dev);
			if (IS_ERR(n)) {
				err = PTR_ERR(n);
				goto out_dev_put;
			}
		}

		err = neigh_update(n,
				   lladdr_attr ? RTA_DATA(lladdr_attr) : NULL,
				   ndm->ndm_state,
				   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
				   NEIGH_UPDATE_F_ADMIN);

		neigh_release(n);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
1548
/* Dump one neigh_parms block as a nested NDTA_PARMS attribute on @skb.
 * The RTA_PUT* macros jump to rtattr_failure when the skb runs out of
 * room, in which case the partially-built nest is cancelled. */
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct rtattr *nest = NULL;
	
	nest = RTA_NEST(skb, NDTA_PARMS);

	/* Device-bound parms identify themselves by ifindex; the
	 * table-default block omits the attribute. */
	if (parms->dev)
		RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	/* Time values are exported in milliseconds. */
	RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return RTA_NEST_END(skb, nest);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, nest);
}
1579
/* Build one RTM_NEWNEIGHTBL dump message for @tbl: name, GC settings,
 * a snapshot of the table configuration, aggregated per-cpu stats and
 * the table-default parms. The RTA_PUT*/NLMSG_* macros jump to the
 * failure labels when @skb runs out of room. */
static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	/* Hold the table lock so the dumped values are consistent. */
	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		/* Sum the per-cpu counters into one struct for export. */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			struct neigh_statistics	*st;

			if (!cpu_possible(cpu))
				continue;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	/* Only the device-less default parms belong in this message. */
	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);
 
nlmsg_failure:
	return -1;
}
1664
/* Like neightbl_fill_info() but dumps only the table name plus one
 * specific (typically device-bound) parms block. */
static int neightbl_fill_param_info(struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	/* Hold the table lock so the dumped values are consistent. */
	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;
	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);

	if (neightbl_fill_parms(skb, parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}
1697  
1698 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1699                                                       int ifindex)
1700 {
1701         struct neigh_parms *p;
1702         
1703         for (p = &tbl->parms; p; p = p->next)
1704                 if ((p->dev && p->dev->ifindex == ifindex) ||
1705                     (!p->dev && !ifindex))
1706                         return p;
1707
1708         return NULL;
1709 }
1710
/*
 * RTM_SETNEIGHTBL handler: update the tunables of a neighbour table,
 * and optionally of one of its neigh_parms instances, from a netlink
 * request.
 *
 * @skb: the request skb (not used beyond the parsed attributes)
 * @nlh: request header; NLMSG_DATA() holds the struct ndtmsg
 * @arg: array of struct rtattr *, indexed by NDTA_* - 1
 *
 * Returns 0 on success, -EINVAL on malformed requests, -ENOENT when no
 * matching table or parms instance exists.
 */
int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
	struct rtattr **tb = arg;
	int err = -EINVAL;

	/* The table name is mandatory: it is the lookup key. */
	if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
		return -EINVAL;

	read_lock(&neigh_tbl_lock);
	/* Locate the table by name, optionally filtered by family. */
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;

		if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout;
	}

	/* 
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	/*
	 * Note: the RTA_GET_U32()/RTA_GET_MSECS() macros below jump to
	 * rtattr_failure on a malformed attribute, which unlocks and
	 * returns err (still -EINVAL at that point).
	 */
	if (tb[NDTA_THRESH1 - 1])
		tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);

	if (tb[NDTA_THRESH2 - 1])
		tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);

	if (tb[NDTA_THRESH3 - 1])
		tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);

	if (tb[NDTA_GC_INTERVAL - 1])
		tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);

	/* Optional nested block updating one neigh_parms instance. */
	if (tb[NDTA_PARMS - 1]) {
		struct rtattr *tbp[NDTPA_MAX];
		struct neigh_parms *p;
		u32 ifindex = 0;

		if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
			goto rtattr_failure;

		/* ifindex 0 selects the table's default parms. */
		if (tbp[NDTPA_IFINDEX - 1])
			ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);

		p = lookup_neigh_params(tbl, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto rtattr_failure;
		}
	
		if (tbp[NDTPA_QUEUE_LEN - 1])
			p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);

		if (tbp[NDTPA_PROXY_QLEN - 1])
			p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);

		if (tbp[NDTPA_APP_PROBES - 1])
			p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);

		if (tbp[NDTPA_UCAST_PROBES - 1])
			p->ucast_probes =
			   RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);

		if (tbp[NDTPA_MCAST_PROBES - 1])
			p->mcast_probes =
			   RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);

		if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
			p->base_reachable_time =
			   RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);

		if (tbp[NDTPA_GC_STALETIME - 1])
			p->gc_staletime =
			   RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);

		if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
			p->delay_probe_time =
			   RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);

		if (tbp[NDTPA_RETRANS_TIME - 1])
			p->retrans_time =
			   RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);

		if (tbp[NDTPA_ANYCAST_DELAY - 1])
			p->anycast_delay =
			   RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);

		if (tbp[NDTPA_PROXY_DELAY - 1])
			p->proxy_delay =
			   RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);

		if (tbp[NDTPA_LOCKTIME - 1])
			p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
	}

	err = 0;

rtattr_failure:
	write_unlock_bh(&tbl->lock);
errout:
	read_unlock(&neigh_tbl_lock);
	return err;
}
1823
/*
 * RTM_GETNEIGHTBL dump handler: emit one RTM_NEWNEIGHTBL message per
 * neighbour table, followed by one message per additional (per-device)
 * neigh_parms instance attached to it.  cb->args[0] carries a flat
 * resume index across invocations.
 */
int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx, family;
	int s_idx = cb->args[0];
	struct neigh_table *tbl;

	/* Optional family filter from the request. */
	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
		struct neigh_parms *p;

		/*
		 * NOTE(review): idx is not advanced when a table is
		 * skipped here, so on a resumed dump (s_idx > 0) this
		 * condition appears to hold for every table - verify
		 * the resume logic against later upstream versions.
		 */
		if (idx < s_idx || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(tbl, skb, cb) <= 0)
			break;

		/* Index 0 is the table itself; per-device parms follow. */
		for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
			if (idx < s_idx)
				continue;

			if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
				goto out;
		}

	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = idx;

	return skb->len;
}
1857
/*
 * Build an RTM neighbour message (ndmsg header plus NDA_* attributes)
 * for entry @n into @skb.  Returns skb->len on success; when the skb
 * runs out of space the partial message is trimmed away and -1 is
 * returned.  @locked tracks whether n->lock is held so the shared
 * failure path (reached via the RTA_PUT()/NLMSG_NEW() goto labels)
 * can release it.
 */
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
			   u32 pid, u32 seq, int event, unsigned int flags)
{
	unsigned long now = jiffies;
	unsigned char *b = skb->tail;	/* message start, for trim/length */
	struct nda_cacheinfo ci;
	int locked = 0;
	u32 probes;
	struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
					 sizeof(struct ndmsg), flags);
	struct ndmsg *ndm = NLMSG_DATA(nlh);

	ndm->ndm_family  = n->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = n->flags;
	ndm->ndm_type    = n->type;
	ndm->ndm_ifindex = n->dev->ifindex;
	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
	/* Snapshot the mutable state under n->lock. */
	read_lock_bh(&n->lock);
	locked           = 1;
	ndm->ndm_state   = n->nud_state;
	/* Only report a link-layer address once it is valid. */
	if (n->nud_state & NUD_VALID)
		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
	/* Timestamps are reported as ages relative to now. */
	ci.ndm_used      = now - n->used;
	ci.ndm_confirmed = now - n->confirmed;
	ci.ndm_updated   = now - n->updated;
	ci.ndm_refcnt    = atomic_read(&n->refcnt) - 1;
	probes = atomic_read(&n->probes);
	read_unlock_bh(&n->lock);
	locked           = 0;
	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
	RTA_PUT(skb, NDA_PROBES, sizeof(probes), &probes);
	nlh->nlmsg_len   = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	if (locked)
		read_unlock_bh(&n->lock);
	/* Remove the partially built message. */
	skb_trim(skb, b - skb->data);
	return -1;
}
1901
1902
/*
 * Dump all entries of one neighbour table into @skb, resuming from
 * cb->args[1] (hash bucket) and cb->args[2] (index within bucket).
 * Returns skb->len on a complete pass or -1 when the skb filled up;
 * the position reached is saved back into cb->args[] either way.
 */
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];

	for (h = 0; h <= tbl->hash_mask; h++) {
		if (h < s_h)
			continue;
		/* Only the resume bucket keeps its saved entry offset. */
		if (h > s_h)
			s_idx = 0;
		read_lock_bh(&tbl->lock);
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
			if (idx < s_idx)
				continue;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) <= 0) {
				/* skb full: record position and bail out. */
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		}
		read_unlock_bh(&tbl->lock);
	}
	rc = skb->len;
out:
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
1936
/*
 * Top-level RTM_GETNEIGH dump handler: walk all neighbour tables
 * (optionally filtered by family) and dump each one through
 * neigh_dump_table().  cb->args[0] is the table resume index; the
 * remaining args belong to the per-table walk and are cleared once
 * we move past the table the previous invocation stopped in.
 */
int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		/* Fresh table: forget the per-table resume state. */
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}
1960
1961 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
1962 {
1963         int chain;
1964
1965         read_lock_bh(&tbl->lock);
1966         for (chain = 0; chain <= tbl->hash_mask; chain++) {
1967                 struct neighbour *n;
1968
1969                 for (n = tbl->hash_buckets[chain]; n; n = n->next)
1970                         cb(n, cookie);
1971         }
1972         read_unlock_bh(&tbl->lock);
1973 }
1974 EXPORT_SYMBOL(neigh_for_each);
1975
1976 /* The tbl->lock must be held as a writer and BH disabled. */
/*
 * Walk every hash chain of @tbl and let @cb decide, per entry, whether
 * the entry should be unhashed and released (non-zero return) or kept.
 * Unhooking happens with both tbl->lock (writer) and n->lock held; the
 * reference is dropped only after n->lock is released.
 */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;

	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				/* Unhash and mark dead while locked. */
				*np = n->next;
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				/* Drop the hash table's reference. */
				neigh_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
2003
2004 #ifdef CONFIG_PROC_FS
2005
/*
 * Return the first neighbour entry for the /proc seq walk, scanning
 * hash buckets from 0 and honouring the iteration filters:
 *  - state->neigh_sub_iter, if set, must accept the entry;
 *  - with NEIGH_SEQ_SKIP_NOARP, entries in a pure NUD_NOARP state
 *    are skipped.
 * The bucket reached is recorded in state->bucket.  tbl->lock is
 * held by the caller (taken in neigh_seq_start()).
 */
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	/* This is the neighbour half of the walk, not the proxy half. */
	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				/* Probe at offset 0: does the sub-iterator
				 * yield anything for this entry? */
				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
2041
/*
 * Advance the /proc seq walk from entry @n to the next acceptable
 * neighbour, applying the same filters as neigh_get_first() and
 * crossing hash-bucket boundaries as needed.  When a sub-iterator is
 * set, the current entry is asked for further positions first.  If an
 * entry is found and @pos is non-NULL, *pos is decremented (used by
 * neigh_get_idx() to count down to the requested offset).
 */
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	/* The current entry may expand to more positions via sub_iter. */
	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		/* Chain exhausted: move on to the next bucket. */
		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}
2086
2087 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2088 {
2089         struct neighbour *n = neigh_get_first(seq);
2090
2091         if (n) {
2092                 while (*pos) {
2093                         n = neigh_get_next(seq, n, pos);
2094                         if (!n)
2095                                 break;
2096                 }
2097         }
2098         return *pos ? NULL : n;
2099 }
2100
2101 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2102 {
2103         struct neigh_seq_state *state = seq->private;
2104         struct neigh_table *tbl = state->tbl;
2105         struct pneigh_entry *pn = NULL;
2106         int bucket = state->bucket;
2107
2108         state->flags |= NEIGH_SEQ_IS_PNEIGH;
2109         for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2110                 pn = tbl->phash_buckets[bucket];
2111                 if (pn)
2112                         break;
2113         }
2114         state->bucket = bucket;
2115
2116         return pn;
2117 }
2118
2119 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2120                                             struct pneigh_entry *pn,
2121                                             loff_t *pos)
2122 {
2123         struct neigh_seq_state *state = seq->private;
2124         struct neigh_table *tbl = state->tbl;
2125
2126         pn = pn->next;
2127         while (!pn) {
2128                 if (++state->bucket > PNEIGH_HASHMASK)
2129                         break;
2130                 pn = tbl->phash_buckets[state->bucket];
2131                 if (pn)
2132                         break;
2133         }
2134
2135         if (pn && pos)
2136                 --(*pos);
2137
2138         return pn;
2139 }
2140
2141 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2142 {
2143         struct pneigh_entry *pn = pneigh_get_first(seq);
2144
2145         if (pn) {
2146                 while (*pos) {
2147                         pn = pneigh_get_next(seq, pn, pos);
2148                         if (!pn)
2149                                 break;
2150                 }
2151         }
2152         return *pos ? NULL : pn;
2153 }
2154
2155 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2156 {
2157         struct neigh_seq_state *state = seq->private;
2158         void *rc;
2159
2160         rc = neigh_get_idx(seq, pos);
2161         if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2162                 rc = pneigh_get_idx(seq, pos);
2163
2164         return rc;
2165 }
2166
2167 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2168 {
2169         struct neigh_seq_state *state = seq->private;
2170         loff_t pos_minus_one;
2171
2172         state->tbl = tbl;
2173         state->bucket = 0;
2174         state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2175
2176         read_lock_bh(&tbl->lock);
2177
2178         pos_minus_one = *pos - 1;
2179         return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2180 }
2181 EXPORT_SYMBOL(neigh_seq_start);
2182
2183 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2184 {
2185         struct neigh_seq_state *state;
2186         void *rc;
2187
2188         if (v == SEQ_START_TOKEN) {
2189                 rc = neigh_get_idx(seq, pos);
2190                 goto out;
2191         }
2192
2193         state = seq->private;
2194         if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2195                 rc = neigh_get_next(seq, v, NULL);
2196                 if (rc)
2197                         goto out;
2198                 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2199                         rc = pneigh_get_first(seq);
2200         } else {
2201                 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2202                 rc = pneigh_get_next(seq, v, NULL);
2203         }
2204 out:
2205         ++(*pos);
2206         return rc;
2207 }
2208 EXPORT_SYMBOL(neigh_seq_next);
2209
/* seq_file stop callback: drop the table lock taken in neigh_seq_start(). */
void neigh_seq_stop(struct seq_file *seq, void *v)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
2218
2219 /* statistics via seq_file */
2220
2221 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2222 {
2223         struct proc_dir_entry *pde = seq->private;
2224         struct neigh_table *tbl = pde->data;
2225         int cpu;
2226
2227         if (*pos == 0)
2228                 return SEQ_START_TOKEN;
2229         
2230         for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2231                 if (!cpu_possible(cpu))
2232                         continue;
2233                 *pos = cpu+1;
2234                 return per_cpu_ptr(tbl->stats, cpu);
2235         }
2236         return NULL;
2237 }
2238
2239 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2240 {
2241         struct proc_dir_entry *pde = seq->private;
2242         struct neigh_table *tbl = pde->data;
2243         int cpu;
2244
2245         for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2246                 if (!cpu_possible(cpu))
2247                         continue;
2248                 *pos = cpu+1;
2249                 return per_cpu_ptr(tbl->stats, cpu);
2250         }
2251         return NULL;
2252 }
2253
/* seq_file stop callback: nothing to release, the stats walk takes no locks. */
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
2258
/*
 * seq_file show callback: print the column header for the
 * SEQ_START_TOKEN row, otherwise one line of counters for the per-CPU
 * statistics block @v.  Note that tbl->entries is a table-global
 * value, repeated on every per-CPU line.
 */
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs\n");
		return 0;
	}

	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
			"%08lx %08lx  %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs
		   );

	return 0;
}
2292
/* seq_file iterator for /proc neighbour-cache statistics. */
static struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};
2299
2300 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2301 {
2302         int ret = seq_open(file, &neigh_stat_seq_ops);
2303
2304         if (!ret) {
2305                 struct seq_file *sf = file->private_data;
2306                 sf->private = PDE(inode);
2307         }
2308         return ret;
2309 };
2310
/* File operations for the per-table statistics /proc file. */
static struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
2318
2319 #endif /* CONFIG_PROC_FS */
2320
2321 #ifdef CONFIG_ARPD
/*
 * Ask user space (arpd) to resolve neighbour @n: build an
 * RTM_GETNEIGH message for it and broadcast it to the RTNLGRP_NEIGH
 * netlink group.  Best effort - allocation or fill failures are
 * silently dropped.
 */
void neigh_app_ns(struct neighbour *n)
{
	struct nlmsghdr  *nlh;
	/* Room for the ndmsg plus up to 256 bytes of attributes. */
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
		kfree_skb(skb);
		return;
	}
	/* Mark the message as a request so the daemon answers it. */
	nlh			   = (struct nlmsghdr *)skb->data;
	nlh->nlmsg_flags	   = NLM_F_REQUEST;
	NETLINK_CB(skb).dst_group  = RTNLGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
}
2340
/*
 * Notify user space of a neighbour change: broadcast an RTM_NEWNEIGH
 * message describing @n to the RTNLGRP_NEIGH netlink group.  Best
 * effort - allocation or fill failures are silently dropped.
 */
static void neigh_app_notify(struct neighbour *n)
{
	struct nlmsghdr *nlh;
	/* Room for the ndmsg plus up to 256 bytes of attributes. */
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh			   = (struct nlmsghdr *)skb->data;
	NETLINK_CB(skb).dst_group  = RTNLGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
}
2358
2359 #endif /* CONFIG_ARPD */
2360
2361 #ifdef CONFIG_SYSCTL
2362
/*
 * Template for the per-protocol / per-device neighbour sysctl tree.
 * neigh_sysctl_register() copies this template and rebinds the .data
 * pointers of neigh_vars[] BY INDEX (see the [n] markers below), so
 * the order of entries here is an implicit contract with that
 * function and must not change.
 */
static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	ctl_table		neigh_vars[__NET_NEIGH_MAX];
	ctl_table		neigh_dev[2];
	ctl_table		neigh_neigh_dir[2];
	ctl_table		neigh_proto_dir[2];
	ctl_table		neigh_root_dir[2];
} neigh_sysctl_template = {
	.neigh_vars = {
		/* [0] -> parms->mcast_probes */
		{
			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [1] -> parms->ucast_probes */
		{
			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [2] -> parms->app_probes */
		{
			.ctl_name	= NET_NEIGH_APP_SOLICIT,
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [3] -> parms->retrans_time */
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME,
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		/* [4] -> parms->base_reachable_time */
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		/* [5] -> parms->delay_probe_time */
		{
			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		/* [6] -> parms->gc_staletime */
		{
			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		/* [7] -> parms->queue_len */
		{
			.ctl_name	= NET_NEIGH_UNRES_QLEN,
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [8] -> parms->proxy_qlen */
		{
			.ctl_name	= NET_NEIGH_PROXY_QLEN,
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [9] -> parms->anycast_delay */
		{
			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		/* [10] -> parms->proxy_delay */
		{
			.ctl_name	= NET_NEIGH_PROXY_DELAY,
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		/* [11] -> parms->locktime */
		{
			.ctl_name	= NET_NEIGH_LOCKTIME,
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		/* [12]-[15]: table-wide GC tunables; hidden (procname
		 * cleared) in the per-device copies of this template. */
		{
			.ctl_name	= NET_NEIGH_GC_INTERVAL,
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH1,
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH2,
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH3,
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [16] -> parms->retrans_time (millisecond view) */
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
			.procname	= "retrans_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
		/* [17] -> parms->base_reachable_time (millisecond view) */
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
			.procname	= "base_reachable_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
	},
	/* Directory level for the default (device-less) parms;
	 * neigh_sysctl_register() overrides name/ctl_name per device. */
	.neigh_dev = {
		{
			.ctl_name	= NET_PROTO_CONF_DEFAULT,
			.procname	= "default",
			.mode		= 0555,
		},
	},
	.neigh_neigh_dir = {
		{
			.procname	= "neigh",
			.mode		= 0555,
		},
	},
	/* Protocol directory; name filled in at registration time. */
	.neigh_proto_dir = {
		{
			.mode		= 0555,
		},
	},
	.neigh_root_dir = {
		{
			.ctl_name	= CTL_NET,
			.procname	= "net",
			.mode		= 0555,
		},
	},
};
2531
2532 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2533                           int p_id, int pdev_id, char *p_name, 
2534                           proc_handler *handler, ctl_handler *strategy)
2535 {
2536         struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
2537         const char *dev_name_source = NULL;
2538         char *dev_name = NULL;
2539         int err = 0;
2540
2541         if (!t)
2542                 return -ENOBUFS;
2543         memcpy(t, &neigh_sysctl_template, sizeof(*t));
2544         t->neigh_vars[0].data  = &p->mcast_probes;
2545         t->neigh_vars[1].data  = &p->ucast_probes;
2546         t->neigh_vars[2].data  = &p->app_probes;
2547         t->neigh_vars[3].data  = &p->retrans_time;
2548         t->neigh_vars[4].data  = &p->base_reachable_time;
2549         t->neigh_vars[5].data  = &p->delay_probe_time;
2550         t->neigh_vars[6].data  = &p->gc_staletime;
2551         t->neigh_vars[7].data  = &p->queue_len;
2552         t->neigh_vars[8].data  = &p->proxy_qlen;
2553         t->neigh_vars[9].data  = &p->anycast_delay;
2554         t->neigh_vars[10].data = &p->proxy_delay;
2555         t->neigh_vars[11].data = &p->locktime;
2556
2557         if (dev) {
2558                 dev_name_source = dev->name;
2559                 t->neigh_dev[0].ctl_name = dev->ifindex;
2560                 t->neigh_vars[12].procname = NULL;
2561                 t->neigh_vars[13].procname = NULL;
2562                 t->neigh_vars[14].procname = NULL;
2563                 t->neigh_vars[15].procname = NULL;
2564         } else {
2565                 dev_name_source = t->neigh_dev[0].procname;
2566                 t->neigh_vars[12].data = (int *)(p + 1);
2567                 t->neigh_vars[13].data = (int *)(p + 1) + 1;
2568                 t->neigh_vars[14].data = (int *)(p + 1) + 2;
2569                 t->neigh_vars[15].data = (int *)(p + 1) + 3;
2570         }
2571
2572         t->neigh_vars[16].data  = &p->retrans_time;
2573         t->neigh_vars[17].data  = &p->base_reachable_time;
2574
2575         if (handler || strategy) {
2576                 /* RetransTime */
2577                 t->neigh_vars[3].proc_handler = handler;
2578                 t->neigh_vars[3].strategy = strategy;
2579                 t->neigh_vars[3].extra1 = dev;
2580                 /* ReachableTime */
2581                 t->neigh_vars[4].proc_handler = handler;
2582                 t->neigh_vars[4].strategy = strategy;
2583                 t->neigh_vars[4].extra1 = dev;
2584                 /* RetransTime (in milliseconds)*/
2585                 t->neigh_vars[16].proc_handler = handler;
2586                 t->neigh_vars[16].strategy = strategy;
2587                 t->neigh_vars[16].extra1 = dev;
2588                 /* ReachableTime (in milliseconds) */
2589                 t->neigh_vars[17].proc_handler = handler;
2590                 t->neigh_vars[17].strategy = strategy;
2591                 t->neigh_vars[17].extra1 = dev;
2592         }
2593
2594         dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2595         if (!dev_name) {
2596                 err = -ENOBUFS;
2597                 goto free;
2598         }
2599
2600         t->neigh_dev[0].procname = dev_name;
2601
2602         t->neigh_neigh_dir[0].ctl_name = pdev_id;
2603
2604         t->neigh_proto_dir[0].procname = p_name;
2605         t->neigh_proto_dir[0].ctl_name = p_id;
2606
2607         t->neigh_dev[0].child          = t->neigh_vars;
2608         t->neigh_neigh_dir[0].child    = t->neigh_dev;
2609         t->neigh_proto_dir[0].child    = t->neigh_neigh_dir;
2610         t->neigh_root_dir[0].child     = t->neigh_proto_dir;
2611
2612         t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
2613         if (!t->sysctl_header) {
2614                 err = -ENOBUFS;
2615                 goto free_procname;
2616         }
2617         p->sysctl_table = t;
2618         return 0;
2619
2620         /* error path */
2621  free_procname:
2622         kfree(dev_name);
2623  free:
2624         kfree(t);
2625
2626         return err;
2627 }
2628
2629 void neigh_sysctl_unregister(struct neigh_parms *p)
2630 {
2631         if (p->sysctl_table) {
2632                 struct neigh_sysctl_table *t = p->sysctl_table;
2633                 p->sysctl_table = NULL;
2634                 unregister_sysctl_table(t->sysctl_header);
2635                 kfree(t->neigh_dev[0].procname);
2636                 kfree(t);
2637         }
2638 }
2639
#endif  /* CONFIG_SYSCTL */

/* Exported entry points for protocol modules (e.g. IPv4 ARP, IPv6 NDISC). */
EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_add);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_delete);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_dump_info);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(neigh_update_hhs);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);
EXPORT_SYMBOL(neightbl_dump_info);
EXPORT_SYMBOL(neightbl_set);

/* Only available when the corresponding feature is compiled in. */
#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
#endif
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif