Merge upstream (approx. 2.6.12-git8) into 'janitor' branch of netdev-2.6.
[pandora-kernel.git] / net / core / neighbour.c
1 /*
2  *      Generic address resolution entity
3  *
4  *      Authors:
5  *      Pedro Roque             <roque@di.fc.ul.pt>
6  *      Alexey Kuznetsov        <kuznet@ms2.inr.ac.ru>
7  *
8  *      This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *      Fixes:
14  *      Vitaly E. Lavrov        releasing NULL neighbor in neigh_add.
15  *      Harald Welte            Add neighbour cache statistics like rtstat
16  */
17
18 #include <linux/config.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/socket.h>
23 #include <linux/sched.h>
24 #include <linux/netdevice.h>
25 #include <linux/proc_fs.h>
26 #ifdef CONFIG_SYSCTL
27 #include <linux/sysctl.h>
28 #endif
29 #include <linux/times.h>
30 #include <net/neighbour.h>
31 #include <net/dst.h>
32 #include <net/sock.h>
33 #include <linux/rtnetlink.h>
34 #include <linux/random.h>
35 #include <linux/string.h>
36
/* Compile-time debug verbosity for this file: levels <= NEIGH_DEBUG print. */
#define NEIGH_DEBUG 1

/* Unconditional and no-op printk wrappers; the numbered aliases below
 * are remapped onto the real printk according to NEIGH_DEBUG. */
#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

/* Proxy-neighbour hash bucket mask: 16 buckets. */
#define PNEIGH_HASHMASK         0xF
55
/* Forward declarations for routines defined later in this file. */
static void neigh_timer_handler(unsigned long arg);
#ifdef CONFIG_ARPD
static void neigh_app_notify(struct neighbour *n);
#endif
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);

/* List of all registered neighbour tables, guarded by neigh_tbl_lock. */
static struct neigh_table *neigh_tables;
static struct file_operations neigh_stat_seq_fops;
65
66 /*
67    Neighbour hash table buckets are protected with rwlock tbl->lock.
68
69    - All the scans/updates to hash buckets MUST be made under this lock.
70    - NOTHING clever should be made under this lock: no callbacks
71      to protocol backends, no attempts to send something to network.
72      It will result in deadlocks, if backend/driver wants to use neighbour
73      cache.
74    - If the entry requires some non-trivial actions, increase
75      its reference count and release table lock.
76
77    Neighbour entries are protected:
78    - with reference count.
79    - with rwlock neigh->lock
80
81    Reference count prevents destruction.
82
83    neigh->lock mainly serializes ll address data and its validity state.
 *      However, the same lock also protects other fields of the entry:
85     - timer
86     - resolution queue
87
88    Again, nothing clever shall be made under neigh->lock,
89    the most complicated procedure, which we allow is dev->hard_header.
90    It is supposed, that dev->hard_header is simplistic and does
91    not make callbacks to neighbour tables.
92
93    The last lock is neigh_tbl_lock. It is pure SMP lock, protecting
   list of neighbour tables. This list is used only in process context.
95  */
96
97 static DEFINE_RWLOCK(neigh_tbl_lock);
98
/* Output stub installed on dead or unresolvable entries: drop the
 * packet and report -ENETDOWN to the caller. */
static int neigh_blackhole(struct sk_buff *skb)
{
        kfree_skb(skb);
        return -ENETDOWN;
}
104
/*
 * Pick a random reachable-time in the interval [base/2, 3*base/2),
 * matching the default (and deliberately non-overridable) IPv6
 * behaviour.  A zero base yields zero.
 */
unsigned long neigh_rand_reach_time(unsigned long base)
{
	unsigned long half;

	if (!base)
		return 0;
	half = base >> 1;
	return half + (net_random() % base);
}
115
116
/*
 * Walk every hash bucket and free all unreferenced, non-permanent
 * entries.  Invoked from neigh_alloc() when the table exceeds the
 * gc_thresh2/gc_thresh3 watermarks.  Returns 1 if at least one entry
 * was reclaimed, 0 otherwise.
 */
static int neigh_forced_gc(struct neigh_table *tbl)
{
        int shrunk = 0;
        int i;

        NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

        write_lock_bh(&tbl->lock);
        for (i = 0; i <= tbl->hash_mask; i++) {
                struct neighbour *n, **np;

                np = &tbl->hash_buckets[i];
                while ((n = *np) != NULL) {
                        /* Neighbour record may be discarded if:
                         * - nobody refers to it.
                         * - it is not permanent
                         */
                        write_lock(&n->lock);
                        if (atomic_read(&n->refcnt) == 1 &&
                            !(n->nud_state & NUD_PERMANENT)) {
                                *np     = n->next;
                                n->dead = 1;
                                shrunk  = 1;
                                write_unlock(&n->lock);
                                neigh_release(n);
                                continue;
                        }
                        write_unlock(&n->lock);
                        np = &n->next;
                }
        }

        /* Remember when we last flushed so neigh_alloc() can rate-limit
         * gc_thresh2-level collections to one per 5 seconds. */
        tbl->last_flush = jiffies;

        write_unlock_bh(&tbl->lock);

        return shrunk;
}
155
156 static int neigh_del_timer(struct neighbour *n)
157 {
158         if ((n->nud_state & NUD_IN_TIMER) &&
159             del_timer(&n->timer)) {
160                 neigh_release(n);
161                 return 1;
162         }
163         return 0;
164 }
165
166 static void pneigh_queue_purge(struct sk_buff_head *list)
167 {
168         struct sk_buff *skb;
169
170         while ((skb = skb_dequeue(list)) != NULL) {
171                 dev_put(skb->dev);
172                 kfree_skb(skb);
173         }
174 }
175
176 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
177 {
178         int i;
179
180         write_lock_bh(&tbl->lock);
181
182         for (i=0; i <= tbl->hash_mask; i++) {
183                 struct neighbour *n, **np;
184
185                 np = &tbl->hash_buckets[i];
186                 while ((n = *np) != NULL) {
187                         if (dev && n->dev != dev) {
188                                 np = &n->next;
189                                 continue;
190                         }
191                         *np = n->next;
192                         write_lock_bh(&n->lock);
193                         n->dead = 1;
194                         neigh_del_timer(n);
195                         write_unlock_bh(&n->lock);
196                         neigh_release(n);
197                 }
198         }
199
200         write_unlock_bh(&tbl->lock);
201 }
202
/*
 * Tear down all entries for @dev (or every device when @dev is NULL):
 * unlink, cancel timers, and neutralise entries still referenced
 * elsewhere so late users hit neigh_blackhole().  Also flushes the
 * proxy hash and the proxy queue.  Always returns 0.
 */
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
        int i;

        write_lock_bh(&tbl->lock);

        for (i = 0; i <= tbl->hash_mask; i++) {
                struct neighbour *n, **np = &tbl->hash_buckets[i];

                while ((n = *np) != NULL) {
                        if (dev && n->dev != dev) {
                                np = &n->next;
                                continue;
                        }
                        *np = n->next;
                        write_lock(&n->lock);
                        neigh_del_timer(n);
                        n->dead = 1;

                        if (atomic_read(&n->refcnt) != 1) {
                                /* The most unpleasant situation.
                                   We must destroy neighbour entry,
                                   but someone still uses it.

                                   The destroy will be delayed until
                                   the last user releases us, but
                                   we must kill timers etc. and move
                                   it to safe state.
                                 */
                                skb_queue_purge(&n->arp_queue);
                                n->output = neigh_blackhole;
                                /* Demote to a state that needs no timer:
                                 * NOARP if it was valid, NONE otherwise. */
                                if (n->nud_state & NUD_VALID)
                                        n->nud_state = NUD_NOARP;
                                else
                                        n->nud_state = NUD_NONE;
                                NEIGH_PRINTK2("neigh %p is stray.\n", n);
                        }
                        write_unlock(&n->lock);
                        neigh_release(n);
                }
        }

        pneigh_ifdown(tbl, dev);
        write_unlock_bh(&tbl->lock);

        /* Must run outside tbl->lock: del_timer_sync() may have to wait
         * for a concurrently executing proxy timer handler. */
        del_timer_sync(&tbl->proxy_timer);
        pneigh_queue_purge(&tbl->proxy_queue);
        return 0;
}
252
/*
 * Allocate and minimally initialise a neighbour entry for @tbl.
 * Runs forced garbage collection when the table is above gc_thresh2
 * (rate-limited to once per 5 s) or gc_thresh3, and returns NULL when
 * the table cannot be shrunk below gc_thresh3 or the slab allocation
 * fails.  The new entry has refcnt 1 and dead == 1; neigh_create()
 * clears `dead' once the entry is hashed.
 */
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
        struct neighbour *n = NULL;
        unsigned long now = jiffies;
        int entries;

        /* Speculatively account for the new entry; undone on failure. */
        entries = atomic_inc_return(&tbl->entries) - 1;
        if (entries >= tbl->gc_thresh3 ||
            (entries >= tbl->gc_thresh2 &&
             time_after(now, tbl->last_flush + 5 * HZ))) {
                if (!neigh_forced_gc(tbl) &&
                    entries >= tbl->gc_thresh3)
                        goto out_entries;
        }

        n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
        if (!n)
                goto out_entries;

        memset(n, 0, tbl->entry_size);

        skb_queue_head_init(&n->arp_queue);
        rwlock_init(&n->lock);
        n->updated        = n->used = now;
        n->nud_state      = NUD_NONE;
        n->output         = neigh_blackhole;
        n->parms          = neigh_parms_clone(&tbl->parms);
        init_timer(&n->timer);
        n->timer.function = neigh_timer_handler;
        n->timer.data     = (unsigned long)n;

        NEIGH_CACHE_STAT_INC(tbl, allocs);
        n->tbl            = tbl;
        atomic_set(&n->refcnt, 1);
        n->dead           = 1;  /* not yet visible in the hash table */
out:
        return n;

out_entries:
        atomic_dec(&tbl->entries);      /* undo speculative increment */
        goto out;
}
295
296 static struct neighbour **neigh_hash_alloc(unsigned int entries)
297 {
298         unsigned long size = entries * sizeof(struct neighbour *);
299         struct neighbour **ret;
300
301         if (size <= PAGE_SIZE) {
302                 ret = kmalloc(size, GFP_ATOMIC);
303         } else {
304                 ret = (struct neighbour **)
305                         __get_free_pages(GFP_ATOMIC, get_order(size));
306         }
307         if (ret)
308                 memset(ret, 0, size);
309
310         return ret;
311 }
312
313 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
314 {
315         unsigned long size = entries * sizeof(struct neighbour *);
316
317         if (size <= PAGE_SIZE)
318                 kfree(hash);
319         else
320                 free_pages((unsigned long)hash, get_order(size));
321 }
322
/*
 * Rehash @tbl into a table of @new_entries buckets (must be a power
 * of two).  A fresh hash_rnd is drawn so chain placement is not
 * predictable across grows.  Caller must hold tbl->lock for writing;
 * on allocation failure the old table is silently kept.
 */
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
        struct neighbour **new_hash, **old_hash;
        unsigned int i, new_hash_mask, old_entries;

        NEIGH_CACHE_STAT_INC(tbl, hash_grows);

        BUG_ON(new_entries & (new_entries - 1));        /* power of two only */
        new_hash = neigh_hash_alloc(new_entries);
        if (!new_hash)
                return;

        old_entries = tbl->hash_mask + 1;
        new_hash_mask = new_entries - 1;
        old_hash = tbl->hash_buckets;

        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
        for (i = 0; i < old_entries; i++) {
                struct neighbour *n, *next;

                for (n = old_hash[i]; n; n = next) {
                        unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

                        hash_val &= new_hash_mask;
                        next = n->next;

                        /* Push onto the head of the new chain. */
                        n->next = new_hash[hash_val];
                        new_hash[hash_val] = n;
                }
        }
        tbl->hash_buckets = new_hash;
        tbl->hash_mask = new_hash_mask;

        neigh_hash_free(old_hash, old_entries);
}
358
359 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
360                                struct net_device *dev)
361 {
362         struct neighbour *n;
363         int key_len = tbl->key_len;
364         u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
365         
366         NEIGH_CACHE_STAT_INC(tbl, lookups);
367
368         read_lock_bh(&tbl->lock);
369         for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
370                 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
371                         neigh_hold(n);
372                         NEIGH_CACHE_STAT_INC(tbl, hits);
373                         break;
374                 }
375         }
376         read_unlock_bh(&tbl->lock);
377         return n;
378 }
379
380 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
381 {
382         struct neighbour *n;
383         int key_len = tbl->key_len;
384         u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;
385
386         NEIGH_CACHE_STAT_INC(tbl, lookups);
387
388         read_lock_bh(&tbl->lock);
389         for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
390                 if (!memcmp(n->primary_key, pkey, key_len)) {
391                         neigh_hold(n);
392                         NEIGH_CACHE_STAT_INC(tbl, hits);
393                         break;
394                 }
395         }
396         read_unlock_bh(&tbl->lock);
397         return n;
398 }
399
/*
 * Create (or find) the entry for @pkey/@dev in @tbl.  Runs the
 * protocol constructor and the device's neigh_setup callback before
 * taking the table lock, then inserts the entry unless a concurrent
 * creator won the race, in which case the existing entry is returned
 * instead.  Returns the entry with an extra reference held for the
 * caller, or an ERR_PTR() on failure.
 */
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
                               struct net_device *dev)
{
        u32 hash_val;
        int key_len = tbl->key_len;
        int error;
        struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

        if (!n) {
                rc = ERR_PTR(-ENOBUFS);
                goto out;
        }

        memcpy(n->primary_key, pkey, key_len);
        n->dev = dev;
        dev_hold(dev);

        /* Protocol specific setup. */
        if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
                rc = ERR_PTR(error);
                goto out_neigh_release;
        }

        /* Device specific setup. */
        if (n->parms->neigh_setup &&
            (error = n->parms->neigh_setup(n)) < 0) {
                rc = ERR_PTR(error);
                goto out_neigh_release;
        }

        /* Backdate `confirmed' so the fresh entry is not treated as
         * recently verified. */
        n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

        write_lock_bh(&tbl->lock);

        /* Grow the hash table once the load factor exceeds 1. */
        if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
                neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

        hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

        /* parms may have been marked dead by a concurrent teardown. */
        if (n->parms->dead) {
                rc = ERR_PTR(-EINVAL);
                goto out_tbl_unlock;
        }

        /* Somebody may have created the entry while we allocated ours. */
        for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
                if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
                        neigh_hold(n1);
                        rc = n1;
                        goto out_tbl_unlock;
                }
        }

        n->next = tbl->hash_buckets[hash_val];
        tbl->hash_buckets[hash_val] = n;
        n->dead = 0;
        neigh_hold(n);
        write_unlock_bh(&tbl->lock);
        NEIGH_PRINTK2("neigh %p is created.\n", n);
        rc = n;
out:
        return rc;
out_tbl_unlock:
        write_unlock_bh(&tbl->lock);
out_neigh_release:
        neigh_release(n);
        goto out;
}
467
/*
 * Look up a proxy-neighbour entry for @pkey/@dev, optionally creating
 * it when @creat is non-zero.  An existing entry with a NULL device
 * matches any device.  Returns NULL when not found or on allocation /
 * pconstructor failure.  May sleep (GFP_KERNEL) when creating.
 */
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
                                    struct net_device *dev, int creat)
{
        struct pneigh_entry *n;
        int key_len = tbl->key_len;
        /* Fold the last four key bytes into a PNEIGH_HASHMASK bucket. */
        u32 hash_val = *(u32 *)(pkey + key_len - 4);

        hash_val ^= (hash_val >> 16);
        hash_val ^= hash_val >> 8;
        hash_val ^= hash_val >> 4;
        hash_val &= PNEIGH_HASHMASK;

        read_lock_bh(&tbl->lock);

        for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
                if (!memcmp(n->key, pkey, key_len) &&
                    (n->dev == dev || !n->dev)) {
                        read_unlock_bh(&tbl->lock);
                        goto out;
                }
        }
        read_unlock_bh(&tbl->lock);
        n = NULL;
        if (!creat)
                goto out;

        /* Entry stores the key inline after the struct. */
        n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
        if (!n)
                goto out;

        memcpy(n->key, pkey, key_len);
        n->dev = dev;
        if (dev)
                dev_hold(dev);

        if (tbl->pconstructor && tbl->pconstructor(n)) {
                if (dev)
                        dev_put(dev);
                kfree(n);
                n = NULL;
                goto out;
        }

        write_lock_bh(&tbl->lock);
        n->next = tbl->phash_buckets[hash_val];
        tbl->phash_buckets[hash_val] = n;
        write_unlock_bh(&tbl->lock);
out:
        return n;
}
518
519
/*
 * Remove and free the proxy entry matching @pkey/@dev exactly (unlike
 * pneigh_lookup(), a wildcard NULL-device entry does not match a real
 * device here).  Returns 0 on success, -ENOENT when nothing matched.
 */
int pneigh_delete(struct neigh_table *tbl, const void *pkey,
                  struct net_device *dev)
{
        struct pneigh_entry *n, **np;
        int key_len = tbl->key_len;
        /* Same last-four-bytes hash as pneigh_lookup(). */
        u32 hash_val = *(u32 *)(pkey + key_len - 4);

        hash_val ^= (hash_val >> 16);
        hash_val ^= hash_val >> 8;
        hash_val ^= hash_val >> 4;
        hash_val &= PNEIGH_HASHMASK;

        write_lock_bh(&tbl->lock);
        for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
             np = &n->next) {
                if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
                        *np = n->next;
                        /* Unlock before the destructor: it may sleep or
                         * re-enter neighbour code. */
                        write_unlock_bh(&tbl->lock);
                        if (tbl->pdestructor)
                                tbl->pdestructor(n);
                        if (n->dev)
                                dev_put(n->dev);
                        kfree(n);
                        return 0;
                }
        }
        write_unlock_bh(&tbl->lock);
        return -ENOENT;
}
549
550 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
551 {
552         struct pneigh_entry *n, **np;
553         u32 h;
554
555         for (h = 0; h <= PNEIGH_HASHMASK; h++) {
556                 np = &tbl->phash_buckets[h];
557                 while ((n = *np) != NULL) {
558                         if (!dev || n->dev == dev) {
559                                 *np = n->next;
560                                 if (tbl->pdestructor)
561                                         tbl->pdestructor(n);
562                                 if (n->dev)
563                                         dev_put(n->dev);
564                                 kfree(n);
565                                 continue;
566                         }
567                         np = &n->next;
568                 }
569         }
570         return -ENOENT;
571 }
572
573
/*
 *	neighbour must already be out of the table;
 *	frees the cached hardware headers, runs the protocol
 *	destructor, purges the arp queue and releases the slab object.
 */
void neigh_destroy(struct neighbour *neigh)
{
        struct hh_cache *hh;

        NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

        /* Refuse to free an entry that is still hashed. */
        if (!neigh->dead) {
                printk(KERN_WARNING
                       "Destroying alive neighbour %p\n", neigh);
                dump_stack();
                return;
        }

        if (neigh_del_timer(neigh))
                printk(KERN_WARNING "Impossible event.\n");

        /* Detach every cached hardware header; point its output hook
         * at neigh_blackhole so racing users fail fast rather than use
         * the dying entry. */
        while ((hh = neigh->hh) != NULL) {
                neigh->hh = hh->hh_next;
                hh->hh_next = NULL;
                write_lock_bh(&hh->hh_lock);
                hh->hh_output = neigh_blackhole;
                write_unlock_bh(&hh->hh_lock);
                if (atomic_dec_and_test(&hh->hh_refcnt))
                        kfree(hh);
        }

        if (neigh->ops && neigh->ops->destructor)
                (neigh->ops->destructor)(neigh);

        skb_queue_purge(&neigh->arp_queue);

        dev_put(neigh->dev);
        neigh_parms_put(neigh->parms);

        NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

        atomic_dec(&neigh->tbl->entries);
        kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
617
618 /* Neighbour state is suspicious;
619    disable fast path.
620
621    Called with write_locked neigh.
622  */
623 static void neigh_suspect(struct neighbour *neigh)
624 {
625         struct hh_cache *hh;
626
627         NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
628
629         neigh->output = neigh->ops->output;
630
631         for (hh = neigh->hh; hh; hh = hh->hh_next)
632                 hh->hh_output = neigh->ops->output;
633 }
634
635 /* Neighbour state is OK;
636    enable fast path.
637
638    Called with write_locked neigh.
639  */
640 static void neigh_connect(struct neighbour *neigh)
641 {
642         struct hh_cache *hh;
643
644         NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
645
646         neigh->output = neigh->ops->connected_output;
647
648         for (hh = neigh->hh; hh; hh = hh->hh_next)
649                 hh->hh_output = neigh->ops->hh_output;
650 }
651
/*
 * Periodic garbage collector: each run scans one hash chain, dropping
 * unreferenced entries that are NUD_FAILED or idle longer than
 * gc_staletime, and re-randomises reachable_time every 300 seconds.
 * Runs from the table's gc_timer with tbl->lock taken for writing.
 */
static void neigh_periodic_timer(unsigned long arg)
{
        struct neigh_table *tbl = (struct neigh_table *)arg;
        struct neighbour *n, **np;
        unsigned long expire, now = jiffies;

        NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

        write_lock(&tbl->lock);

        /*
         *      periodically recompute ReachableTime from random function
         */

        if (time_after(now, tbl->last_rand + 300 * HZ)) {
                struct neigh_parms *p;
                tbl->last_rand = now;
                for (p = &tbl->parms; p; p = p->next)
                        p->reachable_time =
                                neigh_rand_reach_time(p->base_reachable_time);
        }

        /* Process one chain per run, advancing the cursor for next time. */
        np = &tbl->hash_buckets[tbl->hash_chain_gc];
        tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

        while ((n = *np) != NULL) {
                unsigned int state;

                write_lock(&n->lock);

                state = n->nud_state;
                /* Permanent entries and entries with a live timer are
                 * never collected here. */
                if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
                        write_unlock(&n->lock);
                        goto next_elt;
                }

                if (time_before(n->used, n->confirmed))
                        n->used = n->confirmed;

                if (atomic_read(&n->refcnt) == 1 &&
                    (state == NUD_FAILED ||
                     time_after(now, n->used + n->parms->gc_staletime))) {
                        *np = n->next;
                        n->dead = 1;
                        write_unlock(&n->lock);
                        neigh_release(n);
                        continue;
                }
                write_unlock(&n->lock);

next_elt:
                np = &n->next;
        }

        /* Cycle through all hash buckets every base_reachable_time/2 ticks.
         * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
         * base_reachable_time.
         */
        expire = tbl->parms.base_reachable_time >> 1;
        expire /= (tbl->hash_mask + 1);
        if (!expire)
                expire = 1;

        mod_timer(&tbl->gc_timer, now + expire);

        write_unlock(&tbl->lock);
}
719
720 static __inline__ int neigh_max_probes(struct neighbour *n)
721 {
722         struct neigh_parms *p = n->parms;
723         return (n->nud_state & NUD_PROBE ?
724                 p->ucast_probes :
725                 p->ucast_probes + p->app_probes + p->mcast_probes);
726 }
727
728
/* Called when a timer expires for a neighbour entry: advances the NUD
 * state machine (REACHABLE -> DELAY/STALE, DELAY -> REACHABLE/PROBE),
 * declares failure when probes are exhausted, re-arms the timer for
 * timed states, and sends the next solicitation when still resolving. */

static void neigh_timer_handler(unsigned long arg)
{
        unsigned long now, next;
        struct neighbour *neigh = (struct neighbour *)arg;
        unsigned state;
        int notify = 0;

        write_lock(&neigh->lock);

        state = neigh->nud_state;
        now = jiffies;
        next = now + HZ;        /* default: re-check in one second */

        if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
                /* On SMP a concurrent state change can legally race the
                 * timer, so only warn on UP. */
                printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
                goto out;
        }

        if (state & NUD_REACHABLE) {
                if (time_before_eq(now, 
                                   neigh->confirmed + neigh->parms->reachable_time)) {
                        NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
                        next = neigh->confirmed + neigh->parms->reachable_time;
                } else if (time_before_eq(now,
                                          neigh->used + neigh->parms->delay_probe_time)) {
                        NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
                        neigh->nud_state = NUD_DELAY;
                        neigh_suspect(neigh);
                        next = now + neigh->parms->delay_probe_time;
                } else {
                        NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
                        neigh->nud_state = NUD_STALE;
                        neigh_suspect(neigh);
                }
        } else if (state & NUD_DELAY) {
                if (time_before_eq(now, 
                                   neigh->confirmed + neigh->parms->delay_probe_time)) {
                        /* Confirmed while delaying: back to REACHABLE. */
                        NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
                        neigh->nud_state = NUD_REACHABLE;
                        neigh_connect(neigh);
                        next = neigh->confirmed + neigh->parms->reachable_time;
                } else {
                        NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
                        neigh->nud_state = NUD_PROBE;
                        atomic_set(&neigh->probes, 0);
                        next = now + neigh->parms->retrans_time;
                }
        } else {
                /* NUD_PROBE|NUD_INCOMPLETE */
                next = now + neigh->parms->retrans_time;
        }

        /* Out of probes: declare resolution failed and bounce queued
         * packets through the protocol's error_report hook. */
        if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
            atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
                struct sk_buff *skb;

                neigh->nud_state = NUD_FAILED;
                notify = 1;
                NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
                NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

                /* It is very thin place. report_unreachable is very complicated
                   routine. Particularly, it can hit the same neighbour entry!

                   So that, we try to be accurate and avoid dead loop. --ANK
                 */
                while (neigh->nud_state == NUD_FAILED &&
                       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
                        write_unlock(&neigh->lock);
                        neigh->ops->error_report(neigh, skb);
                        write_lock(&neigh->lock);
                }
                skb_queue_purge(&neigh->arp_queue);
        }

        if (neigh->nud_state & NUD_IN_TIMER) {
                neigh_hold(neigh);      /* reference for the re-armed timer */
                if (time_before(next, jiffies + HZ/2))
                        next = jiffies + HZ/2;
                neigh->timer.expires = next;
                add_timer(&neigh->timer);
        }
        if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
                struct sk_buff *skb = skb_peek(&neigh->arp_queue);
                /* keep skb alive even if arp_queue overflows */
                if (skb)
                        skb_get(skb);
                write_unlock(&neigh->lock);
                neigh->ops->solicit(neigh, skb);
                atomic_inc(&neigh->probes);
                if (skb)
                        kfree_skb(skb);
        } else {
out:
                write_unlock(&neigh->lock);
        }

#ifdef CONFIG_ARPD
        if (notify && neigh->parms->app_probes)
                neigh_app_notify(neigh);
#endif
        neigh_release(neigh);   /* drop the reference this run consumed */
}
836
/*
 * Slow path of neigh_event_send(): decide whether @skb must wait for
 * address resolution.  Starts resolution (NUD_INCOMPLETE) or reprobing
 * (NUD_DELAY) as needed.  Returns 0 when the caller may transmit
 * immediately, 1 when the skb was queued or dropped after a
 * transition to NUD_FAILED.
 */
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
        int rc;
        unsigned long now;

        write_lock_bh(&neigh->lock);

        rc = 0;
        /* Already resolved or being verified: transmit right away. */
        if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
                goto out_unlock_bh;

        now = jiffies;
        
        if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
                if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
                        atomic_set(&neigh->probes, neigh->parms->ucast_probes);
                        neigh->nud_state     = NUD_INCOMPLETE;
                        neigh_hold(neigh);      /* reference for the timer */
                        neigh->timer.expires = now + 1;
                        add_timer(&neigh->timer);
                } else {
                        /* No probing mechanism at all: fail immediately. */
                        neigh->nud_state = NUD_FAILED;
                        write_unlock_bh(&neigh->lock);

                        if (skb)
                                kfree_skb(skb);
                        return 1;
                }
        } else if (neigh->nud_state & NUD_STALE) {
                NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
                neigh_hold(neigh);
                neigh->nud_state = NUD_DELAY;
                neigh->timer.expires = jiffies + neigh->parms->delay_probe_time;
                add_timer(&neigh->timer);
        }

        if (neigh->nud_state == NUD_INCOMPLETE) {
                if (skb) {
                        /* arp_queue is bounded by queue_len: drop the
                         * oldest packet to make room for this one. */
                        if (skb_queue_len(&neigh->arp_queue) >=
                            neigh->parms->queue_len) {
                                struct sk_buff *buff;
                                buff = neigh->arp_queue.next;
                                __skb_unlink(buff, &neigh->arp_queue);
                                kfree_skb(buff);
                        }
                        __skb_queue_tail(&neigh->arp_queue, skb);
                }
                rc = 1;
        }
out_unlock_bh:
        write_unlock_bh(&neigh->lock);
        return rc;
}
890
891 static __inline__ void neigh_update_hhs(struct neighbour *neigh)
892 {
893         struct hh_cache *hh;
894         void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
895                 neigh->dev->header_cache_update;
896
897         if (update) {
898                 for (hh = neigh->hh; hh; hh = hh->hh_next) {
899                         write_lock_bh(&hh->hh_lock);
900                         update(hh, neigh->dev, neigh->ha);
901                         write_unlock_bh(&hh->hh_lock);
902                 }
903         }
904 }
905
906
907
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if none is supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
				It also allows retaining the current state
				if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				to be a router.

   The caller MUST hold a reference count on the entry.
 */
928
/*
 * neigh_update - apply a state/address change to a neighbour entry
 * @neigh:  entry to update; caller must hold a reference on it
 * @lladdr: new link-layer address, or NULL if none was supplied
 * @new:    requested NUD state
 * @flags:  NEIGH_UPDATE_F_* bits (see the flag description above)
 *
 * Returns 0 on success, -EPERM when a non-admin update targets a
 * NOARP/PERMANENT entry, -EINVAL when no usable address is known.
 */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
#ifdef CONFIG_ARPD
	int notify = 0;	/* set when the ARP daemon should hear about this */
#endif
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	/* Only administrative updates may touch NOARP/PERMANENT entries. */
	if (!(flags & NEIGH_UPDATE_F_ADMIN) && 
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	/* Transition to an invalid state: cancel the timer, mark the
	   entry suspect if it was connected, and bail out early. */
	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
#ifdef CONFIG_ARPD
		notify = old & NUD_VALID;
#endif
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) && 
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			/* Address differs and we may not override: either
			   demote a connected entry to STALE (weak override)
			   or reject the update entirely. */
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			/* Same address: keep the old (possibly "better")
			   state rather than demoting to STALE. */
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			/* The running timer holds its own reference. */
			neigh_hold(neigh);
			neigh->timer.expires = jiffies + 
						((new & NUD_REACHABLE) ? 
						 neigh->parms->reachable_time : 0);
			add_timer(&neigh->timer);
		}
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		/* Backdate the confirmation so the new address gets
		   revalidated soon unless we are already connected. */
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
#ifdef CONFIG_ARPD
		notify = 1;
#endif
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		/* Flush packets that queued up while the entry was
		   unresolved; drop the lock around each transmit. */
		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	return err;
}
1070
1071 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1072                                  u8 *lladdr, void *saddr,
1073                                  struct net_device *dev)
1074 {
1075         struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1076                                                  lladdr || !dev->addr_len);
1077         if (neigh)
1078                 neigh_update(neigh, lladdr, NUD_STALE, 
1079                              NEIGH_UPDATE_F_OVERRIDE);
1080         return neigh;
1081 }
1082
/* Attach a cached hardware header for @protocol to @dst, creating and
 * linking a new hh_cache entry on the neighbour if none exists yet.
 * Called with neigh->lock write-held (see neigh_resolve_output()). */
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  u16 protocol)
{
	struct hh_cache *hh;
	struct net_device *dev = dst->dev;

	/* Reuse an existing entry for this protocol if there is one. */
	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		memset(hh, 0, sizeof(struct hh_cache));
		rwlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		if (dev->hard_header_cache(n, hh)) {
			/* Device refused to fill the header: discard. */
			kfree(hh);
			hh = NULL;
		} else {
			/* Link into the neighbour's list; this reference
			   belongs to n->hh. */
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh       = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		/* Second reference is taken for dst->hh. */
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}
1117
1118 /* This function can be used in contexts, where only old dev_queue_xmit
1119    worked, f.e. if you want to override normal output path (eql, shaper),
1120    but resolution is not made yet.
1121  */
1122
1123 int neigh_compat_output(struct sk_buff *skb)
1124 {
1125         struct net_device *dev = skb->dev;
1126
1127         __skb_pull(skb, skb->nh.raw - skb->data);
1128
1129         if (dev->hard_header &&
1130             dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1131                              skb->len) < 0 &&
1132             dev->rebuild_header(skb))
1133                 return 0;
1134
1135         return dev_queue_xmit(skb);
1136 }
1137
1138 /* Slow and careful. */
1139
/* Resolve-then-transmit output path: kick neighbour resolution if
 * needed, build the hardware header (installing the hh cache on the
 * first use) and queue the packet.  Returns the transmit result, 0
 * if the skb was queued awaiting resolution, or -EINVAL on error. */
int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	/* Rewind to the network header before prepending link headers. */
	__skb_pull(skb, skb->nh.raw - skb->data);

	/* neigh_event_send() returns 0 when the entry is usable now;
	   otherwise it has queued or dropped the skb for us. */
	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && !dst->hh) {
			/* Write lock needed to install dst->hh; re-check
			   under the lock in case we raced. */
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			/* Read lock suffices to copy neigh->ha. */
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
1182
1183 /* As fast as possible without hh cache */
1184
1185 int neigh_connected_output(struct sk_buff *skb)
1186 {
1187         int err;
1188         struct dst_entry *dst = skb->dst;
1189         struct neighbour *neigh = dst->neighbour;
1190         struct net_device *dev = neigh->dev;
1191
1192         __skb_pull(skb, skb->nh.raw - skb->data);
1193
1194         read_lock_bh(&neigh->lock);
1195         err = dev->hard_header(skb, dev, ntohs(skb->protocol),
1196                                neigh->ha, NULL, skb->len);
1197         read_unlock_bh(&neigh->lock);
1198         if (err >= 0)
1199                 err = neigh->ops->queue_xmit(skb);
1200         else {
1201                 err = -EINVAL;
1202                 kfree_skb(skb);
1203         }
1204         return err;
1205 }
1206
/* Proxy timer handler: answer queued proxy requests whose delay has
 * elapsed and re-arm the timer for the earliest remaining one.  Each
 * skb's deadline is stashed in skb->stamp.tv_usec by pneigh_enqueue(). */
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;	/* delay to next due entry; 0 = none left */
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	/* Walk the queue by hand so entries can be unlinked mid-walk. */
	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = back->stamp.tv_usec - now;

		skb = skb->next;
		if (tdif <= 0) {
			/* Due: hand back to the protocol, or drop it if
			   the device is gone/down or there is no redo. */
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
1240
1241 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1242                     struct sk_buff *skb)
1243 {
1244         unsigned long now = jiffies;
1245         unsigned long sched_next = now + (net_random() % p->proxy_delay);
1246
1247         if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1248                 kfree_skb(skb);
1249                 return;
1250         }
1251         skb->stamp.tv_sec  = LOCALLY_ENQUEUED;
1252         skb->stamp.tv_usec = sched_next;
1253
1254         spin_lock(&tbl->proxy_queue.lock);
1255         if (del_timer(&tbl->proxy_timer)) {
1256                 if (time_before(tbl->proxy_timer.expires, sched_next))
1257                         sched_next = tbl->proxy_timer.expires;
1258         }
1259         dst_release(skb->dst);
1260         skb->dst = NULL;
1261         dev_hold(skb->dev);
1262         __skb_queue_tail(&tbl->proxy_queue, skb);
1263         mod_timer(&tbl->proxy_timer, sched_next);
1264         spin_unlock(&tbl->proxy_queue.lock);
1265 }
1266
1267
1268 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1269                                       struct neigh_table *tbl)
1270 {
1271         struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);
1272
1273         if (p) {
1274                 memcpy(p, &tbl->parms, sizeof(*p));
1275                 p->tbl            = tbl;
1276                 atomic_set(&p->refcnt, 1);
1277                 INIT_RCU_HEAD(&p->rcu_head);
1278                 p->reachable_time =
1279                                 neigh_rand_reach_time(p->base_reachable_time);
1280                 if (dev) {
1281                         if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
1282                                 kfree(p);
1283                                 return NULL;
1284                         }
1285
1286                         dev_hold(dev);
1287                         p->dev = dev;
1288                 }
1289                 p->sysctl_table = NULL;
1290                 write_lock_bh(&tbl->lock);
1291                 p->next         = tbl->parms.next;
1292                 tbl->parms.next = p;
1293                 write_unlock_bh(&tbl->lock);
1294         }
1295         return p;
1296 }
1297
1298 static void neigh_rcu_free_parms(struct rcu_head *head)
1299 {
1300         struct neigh_parms *parms =
1301                 container_of(head, struct neigh_parms, rcu_head);
1302
1303         neigh_parms_put(parms);
1304 }
1305
/* Unlink @parms from @tbl's list, drop its device reference and free
 * it via RCU.  The default parms embedded in the table are never
 * released through this path. */
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			/* Defer the final put until RCU readers finish. */
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
1327
/* Final destructor for a parms block once its refcount has dropped. */
void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
1332
1333
/* One-time initialisation of a protocol's neighbour table: slab cache,
 * per-CPU statistics, /proc entry, hash tables, gc/proxy timers, and
 * registration on the global neigh_tables list. */
void neigh_table_init(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep = kmem_cache_create(tbl->id,
						     tbl->entry_size,
						     0, SLAB_HWCACHE_ALIGN,
						     NULL, NULL);

	if (!tbl->kmem_cachep)
		panic("cannot create neighbour cache");

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	/* Expose the per-table statistics under /proc/net/stat/<id>. */
	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
	if (!tbl->pde) 
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	/* Start with a two-bucket hash; it is grown on demand. */
	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	memset(tbl->phash_buckets, 0, phsize);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data     = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	/* Proxy timer is armed lazily by pneigh_enqueue(). */
	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data     = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init(&tbl->proxy_queue);

	tbl->last_flush = now;
	tbl->last_rand  = now + tbl->parms.reachable_time * 20;
	write_lock(&neigh_tbl_lock);
	tbl->next       = neigh_tables;
	neigh_tables    = tbl;
	write_unlock(&neigh_tbl_lock);
}
1397
/* Tear down a neighbour table (module unload path): stop its timers,
 * purge queues and entries, unlink it from the global list and free
 * the hash tables.  Reverses neigh_table_init(). */
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	return 0;
}
1426
/* RTM_DELNEIGH handler: delete the neighbour (or proxy) entry named
 * by the netlink request.  A plain entry is "deleted" by forcing it
 * into NUD_FAILED.  Returns 0 or a negative errno. */
int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		/* Matching table found; drop the list lock before doing
		   any real work.  The loop is exited on every path below. */
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, RTA_DATA(dst_attr), dev);
			goto out_dev_put;
		}

		if (!dev)
			goto out;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			err = neigh_update(n, NULL, NUD_FAILED, 
					   NEIGH_UPDATE_F_OVERRIDE|
					   NEIGH_UPDATE_F_ADMIN);
			neigh_release(n);
		}
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
1477
/* RTM_NEWNEIGH handler: create or update a neighbour (or proxy) entry
 * according to the request's NLM_F_EXCL/CREATE/REPLACE flags.
 * Returns 0 or a negative errno. */
int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *lladdr_attr = nda[NDA_LLADDR - 1];
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		int override = 1;
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		/* Matching table found; drop the list lock.  The loop is
		   exited on every path below. */
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			/* Proxy entry: just make sure one exists. */
			err = -ENOBUFS;
			if (pneigh_lookup(tbl, RTA_DATA(dst_attr), dev, 1))
				err = 0;
			goto out_dev_put;
		}

		err = -EINVAL;
		if (!dev)
			goto out;
		if (lladdr_attr && RTA_PAYLOAD(lladdr_attr) < dev->addr_len)
			goto out_dev_put;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(n);
				goto out_dev_put;
			}

			/* Existing entry: only override on NLM_F_REPLACE. */
			override = nlh->nlmsg_flags & NLM_F_REPLACE;
		} else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out_dev_put;
		} else {
			n = __neigh_lookup_errno(tbl, RTA_DATA(dst_attr), dev);
			if (IS_ERR(n)) {
				err = PTR_ERR(n);
				goto out_dev_put;
			}
		}

		err = neigh_update(n,
				   lladdr_attr ? RTA_DATA(lladdr_attr) : NULL,
				   ndm->ndm_state,
				   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
				   NEIGH_UPDATE_F_ADMIN);

		neigh_release(n);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
1556
/* Dump one neigh_parms set as a nested NDTA_PARMS attribute.  Note
 * the RTA_PUT* macros jump to the rtattr_failure label when the skb
 * runs out of room; the partial nest is then cancelled. */
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct rtattr *nest = NULL;

	nest = RTA_NEST(skb, NDTA_PARMS);

	/* Device-bound parms carry the ifindex; defaults omit it. */
	if (parms->dev)
		RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return RTA_NEST_END(skb, nest);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, nest);
}
1587
/* Dump the full state of one neighbour table (name, gc settings,
 * config snapshot, summed statistics and default parms) as one
 * RTM_NEWNEIGHTBL message.  NLMSG_NEW_ANSWER / RTA_PUT* jump to the
 * failure labels on skb overflow. */
static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	/* Hold the table lock so the dumped values are consistent. */
	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;

	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		/* Sum the per-CPU counters over all possible CPUs. */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			struct neigh_statistics *st;

			if (!cpu_possible(cpu))
				continue;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	/* Table-wide defaults must not be bound to a device. */
	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}
1670
/* Dump a single neigh_parms set (table name plus nested parms) as an
 * RTM_NEWNEIGHTBL message.  NLMSG_NEW_ANSWER / RTA_PUT_STRING jump to
 * the failure labels on skb overflow. */
static int neightbl_fill_param_info(struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	/* Lock keeps the parms values consistent while dumping. */
	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);

	if (neightbl_fill_parms(skb, parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}
1701  
1702 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1703                                                       int ifindex)
1704 {
1705         struct neigh_parms *p;
1706         
1707         for (p = &tbl->parms; p; p = p->next)
1708                 if ((p->dev && p->dev->ifindex == ifindex) ||
1709                     (!p->dev && !ifindex))
1710                         return p;
1711
1712         return NULL;
1713 }
1714
/*
 * RTM_SETNEIGHTBL handler: update tunables of the table named by
 * NDTA_NAME, and optionally one of its parameter sets supplied as a
 * nested NDTA_PARMS attribute (selected via NDTPA_IFINDEX; 0 selects
 * the default parms).
 *
 * Returns 0 on success, -ENOENT when no table/parms match, and
 * -EINVAL for malformed requests — note the RTA_GET_* macros jump to
 * rtattr_failure on bad attribute sizes, leaving err at its initial
 * -EINVAL.
 */
int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
	struct rtattr **tb = arg;
	int err = -EINVAL;

	if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
		return -EINVAL;

	/* Find the table by name, optionally constrained by family. */
	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;

		if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout;
	}

	/* 
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_THRESH1 - 1])
		tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);

	if (tb[NDTA_THRESH2 - 1])
		tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);

	if (tb[NDTA_THRESH3 - 1])
		tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);

	if (tb[NDTA_GC_INTERVAL - 1])
		tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);

	if (tb[NDTA_PARMS - 1]) {
		struct rtattr *tbp[NDTPA_MAX];
		struct neigh_parms *p;
		u32 ifindex = 0;

		if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
			goto rtattr_failure;

		if (tbp[NDTPA_IFINDEX - 1])
			ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);

		p = lookup_neigh_params(tbl, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto rtattr_failure;
		}
	
		/* Each attribute is optional; only present ones are applied. */
		if (tbp[NDTPA_QUEUE_LEN - 1])
			p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);

		if (tbp[NDTPA_PROXY_QLEN - 1])
			p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);

		if (tbp[NDTPA_APP_PROBES - 1])
			p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);

		if (tbp[NDTPA_UCAST_PROBES - 1])
			p->ucast_probes =
			   RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);

		if (tbp[NDTPA_MCAST_PROBES - 1])
			p->mcast_probes =
			   RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);

		if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
			p->base_reachable_time =
			   RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);

		if (tbp[NDTPA_GC_STALETIME - 1])
			p->gc_staletime =
			   RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);

		if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
			p->delay_probe_time =
			   RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);

		if (tbp[NDTPA_RETRANS_TIME - 1])
			p->retrans_time =
			   RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);

		if (tbp[NDTPA_ANYCAST_DELAY - 1])
			p->anycast_delay =
			   RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);

		if (tbp[NDTPA_PROXY_DELAY - 1])
			p->proxy_delay =
			   RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);

		if (tbp[NDTPA_LOCKTIME - 1])
			p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
	}

	err = 0;

rtattr_failure:
	write_unlock_bh(&tbl->lock);
errout:
	read_unlock(&neigh_tbl_lock);
	return err;
}
1827
/*
 * RTM_GETNEIGHTBL dump handler: for every registered table (optionally
 * restricted to one family) emit one message for the table itself plus
 * one per additional per-device parms entry.  cb->args[0] holds the
 * flat resume index across successive netlink reads.
 */
int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx, family;
	int s_idx = cb->args[0];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
		struct neigh_parms *p;

		/* NOTE(review): idx does not advance when a table is skipped
		 * here, so a resume with s_idx > 0 appears unable to catch
		 * up — compare with later upstream rework of this loop. */
		if (idx < s_idx || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(tbl, skb, cb) <= 0)
			break;

		/* The table itself consumed one idx slot; its extra
		 * per-device parms entries follow. */
		for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
			if (idx < s_idx)
				continue;

			if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
				goto out;
		}

	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = idx;

	return skb->len;
}
1861
/*
 * Build a netlink message of type @event (RTM_NEWNEIGH etc.) for
 * neighbour @n into @skb.  Returns skb->len on success; on overflow
 * the partially-built message is trimmed off the skb and -1 returned.
 */
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
			   u32 pid, u32 seq, int event, unsigned int flags)
{
	unsigned long now = jiffies;
	unsigned char *b = skb->tail;	/* message start, for trim on failure */
	struct nda_cacheinfo ci;
	int locked = 0;		/* tracks n->lock across RTA_PUT failure gotos */
	u32 probes;
	struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
					 sizeof(struct ndmsg), flags);
	struct ndmsg *ndm = NLMSG_DATA(nlh);

	ndm->ndm_family  = n->ops->family;
	ndm->ndm_flags   = n->flags;
	ndm->ndm_type    = n->type;
	ndm->ndm_ifindex = n->dev->ifindex;
	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
	/* n->lock guards nud_state, ha and the timestamps copied below.
	 * The RTA_PUT macros can jump to rtattr_failure while it is held,
	 * hence the 'locked' bookkeeping around this region. */
	read_lock_bh(&n->lock);
	locked           = 1;
	ndm->ndm_state   = n->nud_state;
	if (n->nud_state & NUD_VALID)
		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
	ci.ndm_used      = now - n->used;
	ci.ndm_confirmed = now - n->confirmed;
	ci.ndm_updated   = now - n->updated;
	ci.ndm_refcnt    = atomic_read(&n->refcnt) - 1;
	probes = atomic_read(&n->probes);
	read_unlock_bh(&n->lock);
	locked           = 0;
	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
	RTA_PUT(skb, NDA_PROBES, sizeof(probes), &probes);
	nlh->nlmsg_len   = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	if (locked)
		read_unlock_bh(&n->lock);
	skb_trim(skb, b - skb->data);
	return -1;
}
1903
1904
/*
 * Dump the entries of @tbl into @skb as RTM_NEWNEIGH multipart
 * messages.  cb->args[1] (bucket) and cb->args[2] (index within the
 * bucket) record the resume point for the next invocation.  Returns
 * skb->len when the table was fully walked, -1 when the skb filled up.
 */
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];

	for (h = 0; h <= tbl->hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;	/* in-bucket offset only applies to bucket s_h */
		/* The table lock is taken per bucket so we never hold it
		 * while the caller processes the skb. */
		read_lock_bh(&tbl->lock);
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
			if (idx < s_idx)
				continue;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) <= 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		}
		read_unlock_bh(&tbl->lock);
	}
	rc = skb->len;
out:
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
1938
/*
 * RTM_GETNEIGH dump handler: walk the registered tables, optionally
 * restricted to one address family, dumping each via
 * neigh_dump_table().  cb->args[0] is the resume table index; the
 * remaining args (per-table resume state) are cleared whenever we
 * advance past the table the previous dump stopped in.
 */
int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}
1962
1963 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
1964 {
1965         int chain;
1966
1967         read_lock_bh(&tbl->lock);
1968         for (chain = 0; chain <= tbl->hash_mask; chain++) {
1969                 struct neighbour *n;
1970
1971                 for (n = tbl->hash_buckets[chain]; n; n = n->next)
1972                         cb(n, cookie);
1973         }
1974         read_unlock_bh(&tbl->lock);
1975 }
1976 EXPORT_SYMBOL(neigh_for_each);
1977
1978 /* The tbl->lock must be held as a writer and BH disabled. */
1979 void __neigh_for_each_release(struct neigh_table *tbl,
1980                               int (*cb)(struct neighbour *))
1981 {
1982         int chain;
1983
1984         for (chain = 0; chain <= tbl->hash_mask; chain++) {
1985                 struct neighbour *n, **np;
1986
1987                 np = &tbl->hash_buckets[chain];
1988                 while ((n = *np) != NULL) {
1989                         int release;
1990
1991                         write_lock(&n->lock);
1992                         release = cb(n);
1993                         if (release) {
1994                                 *np = n->next;
1995                                 n->dead = 1;
1996                         } else
1997                                 np = &n->next;
1998                         write_unlock(&n->lock);
1999                         if (release)
2000                                 neigh_release(n);
2001                 }
2002         }
2003 }
2004 EXPORT_SYMBOL(__neigh_for_each_release);
2005
2006 #ifdef CONFIG_PROC_FS
2007
/*
 * Return the first neighbour of the table that passes the seq state's
 * filters, scanning hash buckets from 0 upward.  Leaves the bucket the
 * entry was found in (or one past the last bucket) in state->bucket.
 * Returns NULL when no entry matches.
 */
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket = state->bucket;	/* dead init: the loop below restarts at 0 */

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			/* Let the protocol's sub-iterator veto this entry. */
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			/* With SKIP_NOARP set, only accept entries whose
			 * state has bits besides NUD_NOARP. */
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
2043
/*
 * Advance from neighbour @n to the next entry passing the seq state's
 * filters, moving on to later hash buckets as needed.  When @pos is
 * non-NULL it is decremented for each entry returned, which lets
 * neigh_get_idx() count down to a target position.  Returns NULL when
 * the table is exhausted.
 */
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	/* A sub-iterator may have more sub-entries under the current n. */
	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}
2088
2089 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2090 {
2091         struct neighbour *n = neigh_get_first(seq);
2092
2093         if (n) {
2094                 while (*pos) {
2095                         n = neigh_get_next(seq, n, pos);
2096                         if (!n)
2097                                 break;
2098                 }
2099         }
2100         return *pos ? NULL : n;
2101 }
2102
2103 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2104 {
2105         struct neigh_seq_state *state = seq->private;
2106         struct neigh_table *tbl = state->tbl;
2107         struct pneigh_entry *pn = NULL;
2108         int bucket = state->bucket;
2109
2110         state->flags |= NEIGH_SEQ_IS_PNEIGH;
2111         for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2112                 pn = tbl->phash_buckets[bucket];
2113                 if (pn)
2114                         break;
2115         }
2116         state->bucket = bucket;
2117
2118         return pn;
2119 }
2120
2121 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2122                                             struct pneigh_entry *pn,
2123                                             loff_t *pos)
2124 {
2125         struct neigh_seq_state *state = seq->private;
2126         struct neigh_table *tbl = state->tbl;
2127
2128         pn = pn->next;
2129         while (!pn) {
2130                 if (++state->bucket > PNEIGH_HASHMASK)
2131                         break;
2132                 pn = tbl->phash_buckets[state->bucket];
2133                 if (pn)
2134                         break;
2135         }
2136
2137         if (pn && pos)
2138                 --(*pos);
2139
2140         return pn;
2141 }
2142
2143 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2144 {
2145         struct pneigh_entry *pn = pneigh_get_first(seq);
2146
2147         if (pn) {
2148                 while (*pos) {
2149                         pn = pneigh_get_next(seq, pn, pos);
2150                         if (!pn)
2151                                 break;
2152                 }
2153         }
2154         return *pos ? NULL : pn;
2155 }
2156
2157 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2158 {
2159         struct neigh_seq_state *state = seq->private;
2160         void *rc;
2161
2162         rc = neigh_get_idx(seq, pos);
2163         if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2164                 rc = pneigh_get_idx(seq, pos);
2165
2166         return rc;
2167 }
2168
2169 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2170 {
2171         struct neigh_seq_state *state = seq->private;
2172         loff_t pos_minus_one;
2173
2174         state->tbl = tbl;
2175         state->bucket = 0;
2176         state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2177
2178         read_lock_bh(&tbl->lock);
2179
2180         pos_minus_one = *pos - 1;
2181         return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2182 }
2183 EXPORT_SYMBOL(neigh_seq_start);
2184
/*
 * seq_file ->next for the combined neigh/pneigh sequence.  Neighbour
 * entries are walked first; when they run out (and NEIGH_SEQ_NEIGH_ONLY
 * is not set) iteration switches to the proxy entries.  The
 * NEIGH_SEQ_IS_PNEIGH bit in state->flags records which phase @v
 * belongs to.
 */
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	/* Leaving the header slot: position on the first real entry. */
	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_idx(seq, pos);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
2211
/* seq_file ->stop: drop the table lock taken in neigh_seq_start(). */
void neigh_seq_stop(struct seq_file *seq, void *v)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
2220
2221 /* statistics via seq_file */
2222
2223 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2224 {
2225         struct proc_dir_entry *pde = seq->private;
2226         struct neigh_table *tbl = pde->data;
2227         int cpu;
2228
2229         if (*pos == 0)
2230                 return SEQ_START_TOKEN;
2231         
2232         for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2233                 if (!cpu_possible(cpu))
2234                         continue;
2235                 *pos = cpu+1;
2236                 return per_cpu_ptr(tbl->stats, cpu);
2237         }
2238         return NULL;
2239 }
2240
2241 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2242 {
2243         struct proc_dir_entry *pde = seq->private;
2244         struct neigh_table *tbl = pde->data;
2245         int cpu;
2246
2247         for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2248                 if (!cpu_possible(cpu))
2249                         continue;
2250                 *pos = cpu+1;
2251                 return per_cpu_ptr(tbl->stats, cpu);
2252         }
2253         return NULL;
2254 }
2255
/* seq_file ->stop for the statistics file: the iteration takes no
 * locks, so there is nothing to release. */
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
2260
/*
 * seq_file ->show for the statistics file: print the header line for
 * SEQ_START_TOKEN, otherwise one row of counters for the CPU whose
 * neigh_statistics block @v points at.  tbl->entries is a global
 * count, repeated on every row.
 */
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs\n");
		return 0;
	}

	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
			"%08lx %08lx  %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs
		   );

	return 0;
}
2294
/* Iterator over the per-CPU neigh_statistics blocks (one row per CPU). */
static struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};
2301
2302 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2303 {
2304         int ret = seq_open(file, &neigh_stat_seq_ops);
2305
2306         if (!ret) {
2307                 struct seq_file *sf = file->private_data;
2308                 sf->private = PDE(inode);
2309         }
2310         return ret;
2311 };
2312
/* file_operations for the per-table statistics file in /proc. */
static struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
2320
2321 #endif /* CONFIG_PROC_FS */
2322
2323 #ifdef CONFIG_ARPD
/*
 * ARPD support: broadcast an RTM_GETNEIGH message (with NLM_F_REQUEST
 * set) describing neighbour @n to the RTMGRP_NEIGH netlink group, so a
 * user-space listener can act on it.  Best effort: allocation or fill
 * failures drop the notification silently.
 */
void neigh_app_ns(struct neighbour *n)
{
	struct nlmsghdr  *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh			   = (struct nlmsghdr *)skb->data;
	nlh->nlmsg_flags	   = NLM_F_REQUEST;
	NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
}
2342
/*
 * ARPD support: broadcast an RTM_NEWNEIGH update for neighbour @n to
 * the RTMGRP_NEIGH netlink group.  Best effort like neigh_app_ns(),
 * but without NLM_F_REQUEST — this is a notification, not a request.
 */
static void neigh_app_notify(struct neighbour *n)
{
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh			   = (struct nlmsghdr *)skb->data;
	NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
}
2360
2361 #endif /* CONFIG_ARPD */
2362
2363 #ifdef CONFIG_SYSCTL
2364
/*
 * Template for the sysctl tree of one neigh_parms instance, cloned by
 * neigh_sysctl_register().  NOTE: neigh_sysctl_register() wires up the
 * .data pointers by numeric position in neigh_vars[] (indices 0..17),
 * so the order of the entries below must not change.
 */
static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	ctl_table		neigh_vars[__NET_NEIGH_MAX];
	ctl_table		neigh_dev[2];
	ctl_table		neigh_neigh_dir[2];
	ctl_table		neigh_proto_dir[2];
	ctl_table		neigh_root_dir[2];
} neigh_sysctl_template = {
	.neigh_vars = {
		{
			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_APP_SOLICIT,
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			/* stored in jiffies, exposed in USER_HZ ticks */
			.ctl_name	= NET_NEIGH_RETRANS_TIME,
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_UNRES_QLEN,
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_QLEN,
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_DELAY,
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_LOCKTIME,
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		/* Entries 12-15 are per-table (not per-device) tunables;
		 * neigh_sysctl_register() blanks their procnames for
		 * device-specific registrations. */
		{
			.ctl_name	= NET_NEIGH_GC_INTERVAL,
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH1,
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH2,
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH3,
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* Millisecond views of entries 3 and 4. */
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
			.procname	= "retrans_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
			.procname	= "base_reachable_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
	},
	.neigh_dev = {
		{
			.ctl_name	= NET_PROTO_CONF_DEFAULT,
			.procname	= "default",
			.mode		= 0555,
		},
	},
	.neigh_neigh_dir = {
		{
			.procname	= "neigh",
			.mode		= 0555,
		},
	},
	.neigh_proto_dir = {
		{
			/* procname/ctl_name filled per protocol at register time */
			.mode		= 0555,
		},
	},
	.neigh_root_dir = {
		{
			.ctl_name	= CTL_NET,
			.procname	= "net",
			.mode		= 0555,
		},
	},
};
2533
2534 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2535                           int p_id, int pdev_id, char *p_name, 
2536                           proc_handler *handler, ctl_handler *strategy)
2537 {
2538         struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
2539         const char *dev_name_source = NULL;
2540         char *dev_name = NULL;
2541         int err = 0;
2542
2543         if (!t)
2544                 return -ENOBUFS;
2545         memcpy(t, &neigh_sysctl_template, sizeof(*t));
2546         t->neigh_vars[0].data  = &p->mcast_probes;
2547         t->neigh_vars[1].data  = &p->ucast_probes;
2548         t->neigh_vars[2].data  = &p->app_probes;
2549         t->neigh_vars[3].data  = &p->retrans_time;
2550         t->neigh_vars[4].data  = &p->base_reachable_time;
2551         t->neigh_vars[5].data  = &p->delay_probe_time;
2552         t->neigh_vars[6].data  = &p->gc_staletime;
2553         t->neigh_vars[7].data  = &p->queue_len;
2554         t->neigh_vars[8].data  = &p->proxy_qlen;
2555         t->neigh_vars[9].data  = &p->anycast_delay;
2556         t->neigh_vars[10].data = &p->proxy_delay;
2557         t->neigh_vars[11].data = &p->locktime;
2558
2559         if (dev) {
2560                 dev_name_source = dev->name;
2561                 t->neigh_dev[0].ctl_name = dev->ifindex;
2562                 t->neigh_vars[12].procname = NULL;
2563                 t->neigh_vars[13].procname = NULL;
2564                 t->neigh_vars[14].procname = NULL;
2565                 t->neigh_vars[15].procname = NULL;
2566         } else {
2567                 dev_name_source = t->neigh_dev[0].procname;
2568                 t->neigh_vars[12].data = (int *)(p + 1);
2569                 t->neigh_vars[13].data = (int *)(p + 1) + 1;
2570                 t->neigh_vars[14].data = (int *)(p + 1) + 2;
2571                 t->neigh_vars[15].data = (int *)(p + 1) + 3;
2572         }
2573
2574         t->neigh_vars[16].data  = &p->retrans_time;
2575         t->neigh_vars[17].data  = &p->base_reachable_time;
2576
2577         if (handler || strategy) {
2578                 /* RetransTime */
2579                 t->neigh_vars[3].proc_handler = handler;
2580                 t->neigh_vars[3].strategy = strategy;
2581                 t->neigh_vars[3].extra1 = dev;
2582                 /* ReachableTime */
2583                 t->neigh_vars[4].proc_handler = handler;
2584                 t->neigh_vars[4].strategy = strategy;
2585                 t->neigh_vars[4].extra1 = dev;
2586                 /* RetransTime (in milliseconds)*/
2587                 t->neigh_vars[16].proc_handler = handler;
2588                 t->neigh_vars[16].strategy = strategy;
2589                 t->neigh_vars[16].extra1 = dev;
2590                 /* ReachableTime (in milliseconds) */
2591                 t->neigh_vars[17].proc_handler = handler;
2592                 t->neigh_vars[17].strategy = strategy;
2593                 t->neigh_vars[17].extra1 = dev;
2594         }
2595
2596         dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2597         if (!dev_name) {
2598                 err = -ENOBUFS;
2599                 goto free;
2600         }
2601
2602         t->neigh_dev[0].procname = dev_name;
2603
2604         t->neigh_neigh_dir[0].ctl_name = pdev_id;
2605
2606         t->neigh_proto_dir[0].procname = p_name;
2607         t->neigh_proto_dir[0].ctl_name = p_id;
2608
2609         t->neigh_dev[0].child          = t->neigh_vars;
2610         t->neigh_neigh_dir[0].child    = t->neigh_dev;
2611         t->neigh_proto_dir[0].child    = t->neigh_neigh_dir;
2612         t->neigh_root_dir[0].child     = t->neigh_proto_dir;
2613
2614         t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
2615         if (!t->sysctl_header) {
2616                 err = -ENOBUFS;
2617                 goto free_procname;
2618         }
2619         p->sysctl_table = t;
2620         return 0;
2621
2622         /* error path */
2623  free_procname:
2624         kfree(dev_name);
2625  free:
2626         kfree(t);
2627
2628         return err;
2629 }
2630
2631 void neigh_sysctl_unregister(struct neigh_parms *p)
2632 {
2633         if (p->sysctl_table) {
2634                 struct neigh_sysctl_table *t = p->sysctl_table;
2635                 p->sysctl_table = NULL;
2636                 unregister_sysctl_table(t->sysctl_header);
2637                 kfree(t->neigh_dev[0].procname);
2638                 kfree(t);
2639         }
2640 }
2641
2642 #endif  /* CONFIG_SYSCTL */
2643
/*
 * Public interface of the generic neighbour cache: table lifecycle,
 * entry lookup/create/update/destroy, the output paths, rtnetlink
 * dump/set handlers and the proxy-neighbour (pneigh) helpers.
 */
EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_add);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_delete);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_dump_info);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(neigh_update_hhs);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);
EXPORT_SYMBOL(neightbl_dump_info);
EXPORT_SYMBOL(neightbl_set);

/* userspace ARP daemon notification hook */
#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
#endif
/* per-protocol /proc/sys/net/<proto>/neigh registration */
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif