[pandora-kernel.git] / net/ipv4/route.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              ROUTE - implementation of the IP router.
7  *
8  * Authors:     Ross Biro
9  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
11  *              Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12  *              Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13  *
14  * Fixes:
15  *              Alan Cox        :       Verify area fixes.
16  *              Alan Cox        :       cli() protects routing changes
17  *              Rui Oliveira    :       ICMP routing table updates
18  *              (rco@di.uminho.pt)      Routing table insertion and update
19  *              Linus Torvalds  :       Rewrote bits to be sensible
20  *              Alan Cox        :       Added BSD route gw semantics
21  *              Alan Cox        :       Super /proc >4K
22  *              Alan Cox        :       MTU in route table
23  *              Alan Cox        :       MSS actually. Also added the window
24  *                                      clamper.
25  *              Sam Lantinga    :       Fixed route matching in rt_del()
26  *              Alan Cox        :       Routing cache support.
27  *              Alan Cox        :       Removed compatibility cruft.
28  *              Alan Cox        :       RTF_REJECT support.
29  *              Alan Cox        :       TCP irtt support.
30  *              Jonathan Naylor :       Added Metric support.
31  *      Miquel van Smoorenburg  :       BSD API fixes.
32  *      Miquel van Smoorenburg  :       Metrics.
33  *              Alan Cox        :       Use __u32 properly
34  *              Alan Cox        :       Aligned routing errors more closely with BSD
35  *                                      though our system is still very different.
36  *              Alan Cox        :       Faster /proc handling
37  *      Alexey Kuznetsov        :       Massive rework to support tree based routing,
38  *                                      routing caches and better behaviour.
39  *
40  *              Olaf Erb        :       irtt wasn't being copied right.
41  *              Bjorn Ekwall    :       Kerneld route support.
42  *              Alan Cox        :       Multicast fixed (I hope)
43  *              Pavel Krauz     :       Limited broadcast fixed
44  *              Mike McLagan    :       Routing by source
45  *      Alexey Kuznetsov        :       End of old history. Split to fib.c and
46  *                                      route.c and rewritten from scratch.
47  *              Andi Kleen      :       Load-limit warning messages.
48  *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
49  *      Vitaly E. Lavrov        :       Race condition in ip_route_input_slow.
50  *      Tobias Ringstrom        :       Uninitialized res.type in ip_route_output_slow.
51  *      Vladimir V. Ivanov      :       IP rule info (flowid) is really useful.
52  *              Marc Boucher    :       routing by fwmark
53  *      Robert Olsson           :       Added rt_cache statistics
54  *      Arnaldo C. Melo         :       Convert proc stuff to seq_file
55  *      Eric Dumazet            :       hashed spinlocks and rt_check_expire() fixes.
56  *      Ilia Sotnikov           :       Ignore TOS on PMTUD and Redirect
57  *      Ilia Sotnikov           :       Removed TOS from hash calculations
58  *
59  *              This program is free software; you can redistribute it and/or
60  *              modify it under the terms of the GNU General Public License
61  *              as published by the Free Software Foundation; either version
62  *              2 of the License, or (at your option) any later version.
63  */
64
65 #include <linux/module.h>
66 #include <asm/uaccess.h>
67 #include <asm/system.h>
68 #include <linux/bitops.h>
69 #include <linux/types.h>
70 #include <linux/kernel.h>
71 #include <linux/mm.h>
72 #include <linux/bootmem.h>
73 #include <linux/string.h>
74 #include <linux/socket.h>
75 #include <linux/sockios.h>
76 #include <linux/errno.h>
77 #include <linux/in.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/proc_fs.h>
81 #include <linux/init.h>
82 #include <linux/workqueue.h>
83 #include <linux/skbuff.h>
84 #include <linux/inetdevice.h>
85 #include <linux/igmp.h>
86 #include <linux/pkt_sched.h>
87 #include <linux/mroute.h>
88 #include <linux/netfilter_ipv4.h>
89 #include <linux/random.h>
90 #include <linux/jhash.h>
91 #include <linux/rcupdate.h>
92 #include <linux/times.h>
93 #include <linux/slab.h>
94 #include <net/dst.h>
95 #include <net/net_namespace.h>
96 #include <net/protocol.h>
97 #include <net/ip.h>
98 #include <net/route.h>
99 #include <net/inetpeer.h>
100 #include <net/sock.h>
101 #include <net/ip_fib.h>
102 #include <net/arp.h>
103 #include <net/tcp.h>
104 #include <net/icmp.h>
105 #include <net/xfrm.h>
106 #include <net/netevent.h>
107 #include <net/rtnetlink.h>
108 #ifdef CONFIG_SYSCTL
109 #include <linux/sysctl.h>
110 #endif
111 #include <net/atmclip.h>
112
113 #define RT_FL_TOS(oldflp4) \
114     ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
115
116 #define IP_MAX_MTU      0xFFF0
117
118 #define RT_GC_TIMEOUT (300*HZ)
119
120 static int ip_rt_max_size;
121 static int ip_rt_gc_timeout __read_mostly       = RT_GC_TIMEOUT;
122 static int ip_rt_gc_interval __read_mostly      = 60 * HZ;
123 static int ip_rt_gc_min_interval __read_mostly  = HZ / 2;
124 static int ip_rt_redirect_number __read_mostly  = 9;
125 static int ip_rt_redirect_load __read_mostly    = HZ / 50;
126 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
127 static int ip_rt_error_cost __read_mostly       = HZ;
128 static int ip_rt_error_burst __read_mostly      = 5 * HZ;
129 static int ip_rt_gc_elasticity __read_mostly    = 8;
130 static int ip_rt_mtu_expires __read_mostly      = 10 * 60 * HZ;
131 static int ip_rt_min_pmtu __read_mostly         = 512 + 20 + 20;
132 static int ip_rt_min_advmss __read_mostly       = 256;
133 static int rt_chain_length_max __read_mostly    = 20;
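/*
 * Editor's note (assumption, not in the original file): most of the tunables
 * above are exported read/write through the sysctl table defined later in
 * this file, typically under /proc/sys/net/ipv4/route/ (gc_timeout,
 * redirect_load, min_pmtu, ...), so the initializers here are only defaults.
 */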
134
135 /*
136  *      Interface to generic destination cache.
137  */
138
139 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
140 static unsigned int      ipv4_default_advmss(const struct dst_entry *dst);
141 static unsigned int      ipv4_default_mtu(const struct dst_entry *dst);
142 static void              ipv4_dst_destroy(struct dst_entry *dst);
143 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
144 static void              ipv4_link_failure(struct sk_buff *skb);
145 static void              ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
146 static int rt_garbage_collect(struct dst_ops *ops);
147
148 static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
149                             int how)
150 {
151 }
152
153 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
154 {
155         struct rtable *rt = (struct rtable *) dst;
156         struct inet_peer *peer;
157         u32 *p = NULL;
158
159         if (!rt->peer)
160                 rt_bind_peer(rt, rt->rt_dst, 1);
161
162         peer = rt->peer;
163         if (peer) {
164                 u32 *old_p = __DST_METRICS_PTR(old);
165                 unsigned long prev, new;
166
167                 p = peer->metrics;
168                 if (inet_metrics_new(peer))
169                         memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
170
171                 new = (unsigned long) p;
172                 prev = cmpxchg(&dst->_metrics, old, new);
173
174                 if (prev != old) {
175                         p = __DST_METRICS_PTR(prev);
176                         if (prev & DST_METRICS_READ_ONLY)
177                                 p = NULL;
178                 } else {
179                         if (rt->fi) {
180                                 fib_info_put(rt->fi);
181                                 rt->fi = NULL;
182                         }
183                 }
184         }
185         return p;
186 }
187
188 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr);
189
190 static struct dst_ops ipv4_dst_ops = {
191         .family =               AF_INET,
192         .protocol =             cpu_to_be16(ETH_P_IP),
193         .gc =                   rt_garbage_collect,
194         .check =                ipv4_dst_check,
195         .default_advmss =       ipv4_default_advmss,
196         .default_mtu =          ipv4_default_mtu,
197         .cow_metrics =          ipv4_cow_metrics,
198         .destroy =              ipv4_dst_destroy,
199         .ifdown =               ipv4_dst_ifdown,
200         .negative_advice =      ipv4_negative_advice,
201         .link_failure =         ipv4_link_failure,
202         .update_pmtu =          ip_rt_update_pmtu,
203         .local_out =            __ip_local_out,
204         .neigh_lookup =         ipv4_neigh_lookup,
205 };
206
207 #define ECN_OR_COST(class)      TC_PRIO_##class
208
209 const __u8 ip_tos2prio[16] = {
210         TC_PRIO_BESTEFFORT,
211         ECN_OR_COST(BESTEFFORT),
212         TC_PRIO_BESTEFFORT,
213         ECN_OR_COST(BESTEFFORT),
214         TC_PRIO_BULK,
215         ECN_OR_COST(BULK),
216         TC_PRIO_BULK,
217         ECN_OR_COST(BULK),
218         TC_PRIO_INTERACTIVE,
219         ECN_OR_COST(INTERACTIVE),
220         TC_PRIO_INTERACTIVE,
221         ECN_OR_COST(INTERACTIVE),
222         TC_PRIO_INTERACTIVE_BULK,
223         ECN_OR_COST(INTERACTIVE_BULK),
224         TC_PRIO_INTERACTIVE_BULK,
225         ECN_OR_COST(INTERACTIVE_BULK)
226 };
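/*
 * Editor's note (illustrative, assuming the rt_tos2priority() helper from
 * include/net/route.h): the table is indexed with the four TOS bits, i.e.
 * ip_tos2prio[IPTOS_TOS(tos) >> 1].  For example, a socket sending with
 * IPTOS_LOWDELAY (0x10) maps to index 8 and is queued at TC_PRIO_INTERACTIVE.
 */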
227
228
229 /*
230  * Route cache.
231  */
232
233 /* The locking scheme is rather straightforward:
234  *
235  * 1) Read-Copy Update protects the buckets of the central route hash.
236  * 2) Only writers remove entries, and they hold the lock
237  *    as they look at rtable reference counts.
238  * 3) Only readers acquire references to rtable entries,
239  *    they do so with atomic increments and with the
240  *    lock held.
241  */
242
243 struct rt_hash_bucket {
244         struct rtable __rcu     *chain;
245 };
246
247 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
248         defined(CONFIG_PROVE_LOCKING)
249 /*
250  * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
251  * The size of this table is a power of two and depends on the number of CPUS.
252  * (on lockdep we have a quite big spinlock_t, so keep the size down there)
253  */
254 #ifdef CONFIG_LOCKDEP
255 # define RT_HASH_LOCK_SZ        256
256 #else
257 # if NR_CPUS >= 32
258 #  define RT_HASH_LOCK_SZ       4096
259 # elif NR_CPUS >= 16
260 #  define RT_HASH_LOCK_SZ       2048
261 # elif NR_CPUS >= 8
262 #  define RT_HASH_LOCK_SZ       1024
263 # elif NR_CPUS >= 4
264 #  define RT_HASH_LOCK_SZ       512
265 # else
266 #  define RT_HASH_LOCK_SZ       256
267 # endif
268 #endif
269
270 static spinlock_t       *rt_hash_locks;
271 # define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
272
273 static __init void rt_hash_lock_init(void)
274 {
275         int i;
276
277         rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
278                         GFP_KERNEL);
279         if (!rt_hash_locks)
280                 panic("IP: failed to allocate rt_hash_locks\n");
281
282         for (i = 0; i < RT_HASH_LOCK_SZ; i++)
283                 spin_lock_init(&rt_hash_locks[i]);
284 }
285 #else
286 # define rt_hash_lock_addr(slot) NULL
287
288 static inline void rt_hash_lock_init(void)
289 {
290 }
291 #endif
292
293 static struct rt_hash_bucket    *rt_hash_table __read_mostly;
294 static unsigned                 rt_hash_mask __read_mostly;
295 static unsigned int             rt_hash_log  __read_mostly;
296
297 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
298 #define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
299
300 static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
301                                    int genid)
302 {
303         return jhash_3words((__force u32)daddr, (__force u32)saddr,
304                             idx, genid)
305                 & rt_hash_mask;
306 }
307
308 static inline int rt_genid(struct net *net)
309 {
310         return atomic_read(&net->ipv4.rt_genid);
311 }
312
313 #ifdef CONFIG_PROC_FS
314 struct rt_cache_iter_state {
315         struct seq_net_private p;
316         int bucket;
317         int genid;
318 };
319
320 static struct rtable *rt_cache_get_first(struct seq_file *seq)
321 {
322         struct rt_cache_iter_state *st = seq->private;
323         struct rtable *r = NULL;
324
325         for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
326                 if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
327                         continue;
328                 rcu_read_lock_bh();
329                 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
330                 while (r) {
331                         if (dev_net(r->dst.dev) == seq_file_net(seq) &&
332                             r->rt_genid == st->genid)
333                                 return r;
334                         r = rcu_dereference_bh(r->dst.rt_next);
335                 }
336                 rcu_read_unlock_bh();
337         }
338         return r;
339 }
340
341 static struct rtable *__rt_cache_get_next(struct seq_file *seq,
342                                           struct rtable *r)
343 {
344         struct rt_cache_iter_state *st = seq->private;
345
346         r = rcu_dereference_bh(r->dst.rt_next);
347         while (!r) {
348                 rcu_read_unlock_bh();
349                 do {
350                         if (--st->bucket < 0)
351                                 return NULL;
352                 } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
353                 rcu_read_lock_bh();
354                 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
355         }
356         return r;
357 }
358
359 static struct rtable *rt_cache_get_next(struct seq_file *seq,
360                                         struct rtable *r)
361 {
362         struct rt_cache_iter_state *st = seq->private;
363         while ((r = __rt_cache_get_next(seq, r)) != NULL) {
364                 if (dev_net(r->dst.dev) != seq_file_net(seq))
365                         continue;
366                 if (r->rt_genid == st->genid)
367                         break;
368         }
369         return r;
370 }
371
372 static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
373 {
374         struct rtable *r = rt_cache_get_first(seq);
375
376         if (r)
377                 while (pos && (r = rt_cache_get_next(seq, r)))
378                         --pos;
379         return pos ? NULL : r;
380 }
381
382 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
383 {
384         struct rt_cache_iter_state *st = seq->private;
385         if (*pos)
386                 return rt_cache_get_idx(seq, *pos - 1);
387         st->genid = rt_genid(seq_file_net(seq));
388         return SEQ_START_TOKEN;
389 }
390
391 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
392 {
393         struct rtable *r;
394
395         if (v == SEQ_START_TOKEN)
396                 r = rt_cache_get_first(seq);
397         else
398                 r = rt_cache_get_next(seq, v);
399         ++*pos;
400         return r;
401 }
402
403 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
404 {
405         if (v && v != SEQ_START_TOKEN)
406                 rcu_read_unlock_bh();
407 }
408
409 static int rt_cache_seq_show(struct seq_file *seq, void *v)
410 {
411         if (v == SEQ_START_TOKEN)
412                 seq_printf(seq, "%-127s\n",
413                            "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
414                            "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
415                            "HHUptod\tSpecDst");
416         else {
417                 struct rtable *r = v;
418                 struct neighbour *n;
419                 int len;
420
421                 n = dst_get_neighbour(&r->dst);
422                 seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
423                               "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
424                         r->dst.dev ? r->dst.dev->name : "*",
425                         (__force u32)r->rt_dst,
426                         (__force u32)r->rt_gateway,
427                         r->rt_flags, atomic_read(&r->dst.__refcnt),
428                         r->dst.__use, 0, (__force u32)r->rt_src,
429                         dst_metric_advmss(&r->dst) + 40,
430                         dst_metric(&r->dst, RTAX_WINDOW),
431                         (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
432                               dst_metric(&r->dst, RTAX_RTTVAR)),
433                         r->rt_key_tos,
434                         -1,
435                         (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0,
436                         r->rt_spec_dst, &len);
437
438                 seq_printf(seq, "%*s\n", 127 - len, "");
439         }
440         return 0;
441 }
442
443 static const struct seq_operations rt_cache_seq_ops = {
444         .start  = rt_cache_seq_start,
445         .next   = rt_cache_seq_next,
446         .stop   = rt_cache_seq_stop,
447         .show   = rt_cache_seq_show,
448 };
449
450 static int rt_cache_seq_open(struct inode *inode, struct file *file)
451 {
452         return seq_open_net(inode, file, &rt_cache_seq_ops,
453                         sizeof(struct rt_cache_iter_state));
454 }
455
456 static const struct file_operations rt_cache_seq_fops = {
457         .owner   = THIS_MODULE,
458         .open    = rt_cache_seq_open,
459         .read    = seq_read,
460         .llseek  = seq_lseek,
461         .release = seq_release_net,
462 };
463
464
465 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
466 {
467         int cpu;
468
469         if (*pos == 0)
470                 return SEQ_START_TOKEN;
471
472         for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
473                 if (!cpu_possible(cpu))
474                         continue;
475                 *pos = cpu+1;
476                 return &per_cpu(rt_cache_stat, cpu);
477         }
478         return NULL;
479 }
480
481 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
482 {
483         int cpu;
484
485         for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
486                 if (!cpu_possible(cpu))
487                         continue;
488                 *pos = cpu+1;
489                 return &per_cpu(rt_cache_stat, cpu);
490         }
491         return NULL;
492
493 }
494
495 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
496 {
497
498 }
499
500 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
501 {
502         struct rt_cache_stat *st = v;
503
504         if (v == SEQ_START_TOKEN) {
505                 seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
506                 return 0;
507         }
508
509         seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
510                    " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
511                    dst_entries_get_slow(&ipv4_dst_ops),
512                    st->in_hit,
513                    st->in_slow_tot,
514                    st->in_slow_mc,
515                    st->in_no_route,
516                    st->in_brd,
517                    st->in_martian_dst,
518                    st->in_martian_src,
519
520                    st->out_hit,
521                    st->out_slow_tot,
522                    st->out_slow_mc,
523
524                    st->gc_total,
525                    st->gc_ignored,
526                    st->gc_goal_miss,
527                    st->gc_dst_overflow,
528                    st->in_hlist_search,
529                    st->out_hlist_search
530                 );
531         return 0;
532 }
533
534 static const struct seq_operations rt_cpu_seq_ops = {
535         .start  = rt_cpu_seq_start,
536         .next   = rt_cpu_seq_next,
537         .stop   = rt_cpu_seq_stop,
538         .show   = rt_cpu_seq_show,
539 };
540
541
542 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
543 {
544         return seq_open(file, &rt_cpu_seq_ops);
545 }
546
547 static const struct file_operations rt_cpu_seq_fops = {
548         .owner   = THIS_MODULE,
549         .open    = rt_cpu_seq_open,
550         .read    = seq_read,
551         .llseek  = seq_lseek,
552         .release = seq_release,
553 };
554
555 #ifdef CONFIG_IP_ROUTE_CLASSID
556 static int rt_acct_proc_show(struct seq_file *m, void *v)
557 {
558         struct ip_rt_acct *dst, *src;
559         unsigned int i, j;
560
561         dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
562         if (!dst)
563                 return -ENOMEM;
564
565         for_each_possible_cpu(i) {
566                 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
567                 for (j = 0; j < 256; j++) {
568                         dst[j].o_bytes   += src[j].o_bytes;
569                         dst[j].o_packets += src[j].o_packets;
570                         dst[j].i_bytes   += src[j].i_bytes;
571                         dst[j].i_packets += src[j].i_packets;
572                 }
573         }
574
575         seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
576         kfree(dst);
577         return 0;
578 }
579
580 static int rt_acct_proc_open(struct inode *inode, struct file *file)
581 {
582         return single_open(file, rt_acct_proc_show, NULL);
583 }
584
585 static const struct file_operations rt_acct_proc_fops = {
586         .owner          = THIS_MODULE,
587         .open           = rt_acct_proc_open,
588         .read           = seq_read,
589         .llseek         = seq_lseek,
590         .release        = single_release,
591 };
592 #endif
593
594 static int __net_init ip_rt_do_proc_init(struct net *net)
595 {
596         struct proc_dir_entry *pde;
597
598         pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
599                         &rt_cache_seq_fops);
600         if (!pde)
601                 goto err1;
602
603         pde = proc_create("rt_cache", S_IRUGO,
604                           net->proc_net_stat, &rt_cpu_seq_fops);
605         if (!pde)
606                 goto err2;
607
608 #ifdef CONFIG_IP_ROUTE_CLASSID
609         pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
610         if (!pde)
611                 goto err3;
612 #endif
613         return 0;
614
615 #ifdef CONFIG_IP_ROUTE_CLASSID
616 err3:
617         remove_proc_entry("rt_cache", net->proc_net_stat);
618 #endif
619 err2:
620         remove_proc_entry("rt_cache", net->proc_net);
621 err1:
622         return -ENOMEM;
623 }
624
625 static void __net_exit ip_rt_do_proc_exit(struct net *net)
626 {
627         remove_proc_entry("rt_cache", net->proc_net_stat);
628         remove_proc_entry("rt_cache", net->proc_net);
629 #ifdef CONFIG_IP_ROUTE_CLASSID
630         remove_proc_entry("rt_acct", net->proc_net);
631 #endif
632 }
633
634 static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
635         .init = ip_rt_do_proc_init,
636         .exit = ip_rt_do_proc_exit,
637 };
638
639 static int __init ip_rt_proc_init(void)
640 {
641         return register_pernet_subsys(&ip_rt_proc_ops);
642 }
643
644 #else
645 static inline int ip_rt_proc_init(void)
646 {
647         return 0;
648 }
649 #endif /* CONFIG_PROC_FS */
650
651 static inline void rt_free(struct rtable *rt)
652 {
653         call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
654 }
655
656 static inline void rt_drop(struct rtable *rt)
657 {
658         ip_rt_put(rt);
659         call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
660 }
661
662 static inline int rt_fast_clean(struct rtable *rth)
663 {
664         /* Kill broadcast/multicast entries very aggressively, if they
665            collide in the hash table with more useful entries */
666         return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
667                 rt_is_input_route(rth) && rth->dst.rt_next;
668 }
669
670 static inline int rt_valuable(struct rtable *rth)
671 {
672         return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
673                 (rth->peer && rth->peer->pmtu_expires);
674 }
675
676 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
677 {
678         unsigned long age;
679         int ret = 0;
680
681         if (atomic_read(&rth->dst.__refcnt))
682                 goto out;
683
684         age = jiffies - rth->dst.lastuse;
685         if ((age <= tmo1 && !rt_fast_clean(rth)) ||
686             (age <= tmo2 && rt_valuable(rth)))
687                 goto out;
688         ret = 1;
689 out:    return ret;
690 }
691
692 /* Bits of score are:
693  * 31: very valuable
694  * 30: not quite useless
695  * 29..0: usage counter
696  */
697 static inline u32 rt_score(struct rtable *rt)
698 {
699         u32 score = jiffies - rt->dst.lastuse;
700
701         score = ~score & ~(3<<30);
702
703         if (rt_valuable(rt))
704                 score |= (1<<31);
705
706         if (rt_is_output_route(rt) ||
707             !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
708                 score |= (1<<30);
709
710         return score;
711 }
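/*
 * Editor's note (illustrative): since the low 30 bits hold ~age, a recently
 * used entry scores higher than a stale one, and bits 31/30 make "valuable"
 * and unicast/output routes outrank broadcast or multicast input routes.
 * rt_intern_hash() evicts the lowest-scoring unreferenced entry when a
 * chain exceeds ip_rt_gc_elasticity.
 */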
712
713 static inline bool rt_caching(const struct net *net)
714 {
715         return net->ipv4.current_rt_cache_rebuild_count <=
716                 net->ipv4.sysctl_rt_cache_rebuild_count;
717 }
718
719 static inline bool compare_hash_inputs(const struct rtable *rt1,
720                                        const struct rtable *rt2)
721 {
722         return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
723                 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
724                 (rt1->rt_iif ^ rt2->rt_iif)) == 0);
725 }
726
727 static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
728 {
729         return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
730                 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
731                 (rt1->rt_mark ^ rt2->rt_mark) |
732                 (rt1->rt_key_tos ^ rt2->rt_key_tos) |
733                 (rt1->rt_oif ^ rt2->rt_oif) |
734                 (rt1->rt_iif ^ rt2->rt_iif)) == 0;
735 }
736
737 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
738 {
739         return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
740 }
741
742 static inline int rt_is_expired(struct rtable *rth)
743 {
744         return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
745 }
746
747 /*
748  * Perform a full scan of the hash table and free all entries.
749  * Can be called by a softirq or a process.
750  * In the latter case, we want to reschedule if necessary.
751  */
752 static void rt_do_flush(struct net *net, int process_context)
753 {
754         unsigned int i;
755         struct rtable *rth, *next;
756
757         for (i = 0; i <= rt_hash_mask; i++) {
758                 struct rtable __rcu **pprev;
759                 struct rtable *list;
760
761                 if (process_context && need_resched())
762                         cond_resched();
763                 rth = rcu_dereference_raw(rt_hash_table[i].chain);
764                 if (!rth)
765                         continue;
766
767                 spin_lock_bh(rt_hash_lock_addr(i));
768
769                 list = NULL;
770                 pprev = &rt_hash_table[i].chain;
771                 rth = rcu_dereference_protected(*pprev,
772                         lockdep_is_held(rt_hash_lock_addr(i)));
773
774                 while (rth) {
775                         next = rcu_dereference_protected(rth->dst.rt_next,
776                                 lockdep_is_held(rt_hash_lock_addr(i)));
777
778                         if (!net ||
779                             net_eq(dev_net(rth->dst.dev), net)) {
780                                 rcu_assign_pointer(*pprev, next);
781                                 rcu_assign_pointer(rth->dst.rt_next, list);
782                                 list = rth;
783                         } else {
784                                 pprev = &rth->dst.rt_next;
785                         }
786                         rth = next;
787                 }
788
789                 spin_unlock_bh(rt_hash_lock_addr(i));
790
791                 for (; list; list = next) {
792                         next = rcu_dereference_protected(list->dst.rt_next, 1);
793                         rt_free(list);
794                 }
795         }
796 }
797
798 /*
799  * While freeing expired entries, we compute average chain length
800  * and standard deviation, using fixed-point arithmetic.
801  * This gives an estimate of rt_chain_length_max:
802  *  rt_chain_length_max = max(elasticity, AVG + 4*SD)
803  * We use 3 bits for the fractional part, and 29 (or 61) for the magnitude.
804  */
805
806 #define FRACT_BITS 3
807 #define ONE (1UL << FRACT_BITS)
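/*
 * Editor's note (illustrative): with FRACT_BITS == 3 these are 3-bit
 * fixed-point values, so ONE == 8 stands for 1.0 and 12 would stand for 1.5.
 * has_noalias() below contributes ONE per distinct flow, and
 * slow_chain_length() shifts the accumulated sum right by FRACT_BITS to get
 * back an integer count.
 */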
808
809 /*
810  * Given a hash chain and an item in this hash chain,
811  * find if a previous entry has the same hash_inputs
812  * (but differs on tos, mark or oif)
813  * Returns 0 if an alias is found.
814  * Returns ONE if rth has no alias before itself.
815  */
816 static int has_noalias(const struct rtable *head, const struct rtable *rth)
817 {
818         const struct rtable *aux = head;
819
820         while (aux != rth) {
821                 if (compare_hash_inputs(aux, rth))
822                         return 0;
823                 aux = rcu_dereference_protected(aux->dst.rt_next, 1);
824         }
825         return ONE;
826 }
827
828 /*
829  * Perturbation of rt_genid by a small quantity [1..256]
830  * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
831  * many times (2^24) without repeating a recent rt_genid.
832  * The Jenkins hash is strong enough that little changes of rt_genid are OK.
833  */
834 static void rt_cache_invalidate(struct net *net)
835 {
836         unsigned char shuffle;
837
838         get_random_bytes(&shuffle, sizeof(shuffle));
839         atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
840 }
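/*
 * Editor's note (illustrative): bumping net->ipv4.rt_genid by a random value
 * in [1..256] leaves the hash chains untouched; entries created before the
 * bump now fail the rt_is_expired() check above, so lookups skip them and
 * they are reaped lazily (or synchronously via rt_do_flush()).
 */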
841
842 /*
843  * delay < 0  : invalidate cache (fast : entries will be deleted later)
844  * delay >= 0 : invalidate & flush cache (can be long)
845  */
846 void rt_cache_flush(struct net *net, int delay)
847 {
848         rt_cache_invalidate(net);
849         if (delay >= 0)
850                 rt_do_flush(net, !in_softirq());
851 }
852
853 /* Flush previous cache invalidated entries from the cache */
854 void rt_cache_flush_batch(struct net *net)
855 {
856         rt_do_flush(net, !in_softirq());
857 }
858
859 static void rt_emergency_hash_rebuild(struct net *net)
860 {
861         if (net_ratelimit())
862                 printk(KERN_WARNING "Route hash chain too long!\n");
863         rt_cache_invalidate(net);
864 }
865
866 /*
867    Short description of GC goals.
868
869    We want to build an algorithm which keeps the routing cache
870    at some equilibrium point, where the number of aged-off entries
871    is kept approximately equal to newly generated ones.
872
873    The current expiration strength is the variable "expire".
874    We try to adjust it dynamically, so that if networking
875    is idle, expire is large enough to keep enough warm entries,
876    and when load increases it shrinks to limit the cache size.
877  */
878
879 static int rt_garbage_collect(struct dst_ops *ops)
880 {
881         static unsigned long expire = RT_GC_TIMEOUT;
882         static unsigned long last_gc;
883         static int rover;
884         static int equilibrium;
885         struct rtable *rth;
886         struct rtable __rcu **rthp;
887         unsigned long now = jiffies;
888         int goal;
889         int entries = dst_entries_get_fast(&ipv4_dst_ops);
890
891         /*
892          * Garbage collection is pretty expensive,
893          * do not make it too frequently.
894          */
895
896         RT_CACHE_STAT_INC(gc_total);
897
898         if (now - last_gc < ip_rt_gc_min_interval &&
899             entries < ip_rt_max_size) {
900                 RT_CACHE_STAT_INC(gc_ignored);
901                 goto out;
902         }
903
904         entries = dst_entries_get_slow(&ipv4_dst_ops);
905         /* Calculate number of entries, which we want to expire now. */
906         goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
907         if (goal <= 0) {
908                 if (equilibrium < ipv4_dst_ops.gc_thresh)
909                         equilibrium = ipv4_dst_ops.gc_thresh;
910                 goal = entries - equilibrium;
911                 if (goal > 0) {
912                         equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
913                         goal = entries - equilibrium;
914                 }
915         } else {
916                 /* We are in a dangerous area. Try to reduce the cache really
917                  * aggressively.
918                  */
919                 goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
920                 equilibrium = entries - goal;
921         }
922
923         if (now - last_gc >= ip_rt_gc_min_interval)
924                 last_gc = now;
925
926         if (goal <= 0) {
927                 equilibrium += goal;
928                 goto work_done;
929         }
930
931         do {
932                 int i, k;
933
934                 for (i = rt_hash_mask, k = rover; i >= 0; i--) {
935                         unsigned long tmo = expire;
936
937                         k = (k + 1) & rt_hash_mask;
938                         rthp = &rt_hash_table[k].chain;
939                         spin_lock_bh(rt_hash_lock_addr(k));
940                         while ((rth = rcu_dereference_protected(*rthp,
941                                         lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
942                                 if (!rt_is_expired(rth) &&
943                                         !rt_may_expire(rth, tmo, expire)) {
944                                         tmo >>= 1;
945                                         rthp = &rth->dst.rt_next;
946                                         continue;
947                                 }
948                                 *rthp = rth->dst.rt_next;
949                                 rt_free(rth);
950                                 goal--;
951                         }
952                         spin_unlock_bh(rt_hash_lock_addr(k));
953                         if (goal <= 0)
954                                 break;
955                 }
956                 rover = k;
957
958                 if (goal <= 0)
959                         goto work_done;
960
961                 /* The goal is not achieved. We stop the process if:
962
963                    - expire has been reduced to zero; otherwise, expire is halved.
964                    - the table is not full.
965                    - we are called from interrupt context.
966                    - the jiffies check is just a fallback/debug loop breaker;
967                      we will not spin here for a long time in any case.
968                  */
969
970                 RT_CACHE_STAT_INC(gc_goal_miss);
971
972                 if (expire == 0)
973                         break;
974
975                 expire >>= 1;
976
977                 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
978                         goto out;
979         } while (!in_softirq() && time_before_eq(jiffies, now));
980
981         if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
982                 goto out;
983         if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
984                 goto out;
985         if (net_ratelimit())
986                 printk(KERN_WARNING "dst cache overflow\n");
987         RT_CACHE_STAT_INC(gc_dst_overflow);
988         return 1;
989
990 work_done:
991         expire += ip_rt_gc_min_interval;
992         if (expire > ip_rt_gc_timeout ||
993             dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
994             dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
995                 expire = ip_rt_gc_timeout;
996 out:    return 0;
997 }
998
999 /*
1000  * Returns number of entries in a hash chain that have different hash_inputs
1001  */
1002 static int slow_chain_length(const struct rtable *head)
1003 {
1004         int length = 0;
1005         const struct rtable *rth = head;
1006
1007         while (rth) {
1008                 length += has_noalias(head, rth);
1009                 rth = rcu_dereference_protected(rth->dst.rt_next, 1);
1010         }
1011         return length >> FRACT_BITS;
1012 }
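/*
 * Editor's note (worked example): for a bucket holding five rtables of which
 * only three have distinct hash_inputs, the loop accumulates 3 * ONE = 24,
 * and 24 >> FRACT_BITS yields 3, the number of genuinely different flows that
 * rt_chain_length_max is compared against in rt_intern_hash().
 */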
1013
1014 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr)
1015 {
1016         struct neigh_table *tbl = &arp_tbl;
1017         static const __be32 inaddr_any = 0;
1018         struct net_device *dev = dst->dev;
1019         const __be32 *pkey = daddr;
1020         struct neighbour *n;
1021
1022 #if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
1023         if (dev->type == ARPHRD_ATM)
1024                 tbl = clip_tbl_hook;
1025 #endif
1026         if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
1027                 pkey = &inaddr_any;
1028
1029         n = __ipv4_neigh_lookup(tbl, dev, *(__force u32 *)pkey);
1030         if (n)
1031                 return n;
1032         return neigh_create(tbl, pkey, dev);
1033 }
1034
1035 static int rt_bind_neighbour(struct rtable *rt)
1036 {
1037         struct neighbour *n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
1038         if (IS_ERR(n))
1039                 return PTR_ERR(n);
1040         dst_set_neighbour(&rt->dst, n);
1041
1042         return 0;
1043 }
1044
1045 static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
1046                                      struct sk_buff *skb, int ifindex)
1047 {
1048         struct rtable   *rth, *cand;
1049         struct rtable __rcu **rthp, **candp;
1050         unsigned long   now;
1051         u32             min_score;
1052         int             chain_length;
1053         int attempts = !in_softirq();
1054
1055 restart:
1056         chain_length = 0;
1057         min_score = ~(u32)0;
1058         cand = NULL;
1059         candp = NULL;
1060         now = jiffies;
1061
1062         if (!rt_caching(dev_net(rt->dst.dev))) {
1063                 /*
1064                  * If we're not caching, just tell the caller we
1065                  * were successful and don't touch the route.  The
1066                  * caller holds the sole reference to the cache entry, and
1067                  * it will be released when the caller is done with it.
1068                  * If we drop it here, the callers have no way to resolve routes
1069                  * when we're not caching.  Instead, just point *rp at rt, so
1070                  * the caller gets a single use out of the route
1071                  * Note that we do rt_free on this new route entry, so that
1072                  * once its refcount hits zero, we are still able to reap it
1073                  * (Thanks Alexey)
1074                  * Note: To avoid expensive rcu stuff for this uncached dst,
1075                  * we set DST_NOCACHE so that dst_release() can free dst without
1076                  * waiting a grace period.
1077                  * waiting for a grace period.
1078
1079                 rt->dst.flags |= DST_NOCACHE;
1080                 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
1081                         int err = rt_bind_neighbour(rt);
1082                         if (err) {
1083                                 if (net_ratelimit())
1084                                         printk(KERN_WARNING
1085                                             "Neighbour table failure & not caching routes.\n");
1086                                 ip_rt_put(rt);
1087                                 return ERR_PTR(err);
1088                         }
1089                 }
1090
1091                 goto skip_hashing;
1092         }
1093
1094         rthp = &rt_hash_table[hash].chain;
1095
1096         spin_lock_bh(rt_hash_lock_addr(hash));
1097         while ((rth = rcu_dereference_protected(*rthp,
1098                         lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
1099                 if (rt_is_expired(rth)) {
1100                         *rthp = rth->dst.rt_next;
1101                         rt_free(rth);
1102                         continue;
1103                 }
1104                 if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
1105                         /* Put it first */
1106                         *rthp = rth->dst.rt_next;
1107                         /*
1108                          * Since lookup is lockfree, the deletion
1109                          * must be visible to another weakly ordered CPU before
1110                          * the insertion at the start of the hash chain.
1111                          */
1112                         rcu_assign_pointer(rth->dst.rt_next,
1113                                            rt_hash_table[hash].chain);
1114                         /*
1115                          * Since lookup is lockfree, the update writes
1116                          * must be ordered for consistency on SMP.
1117                          */
1118                         rcu_assign_pointer(rt_hash_table[hash].chain, rth);
1119
1120                         dst_use(&rth->dst, now);
1121                         spin_unlock_bh(rt_hash_lock_addr(hash));
1122
1123                         rt_drop(rt);
1124                         if (skb)
1125                                 skb_dst_set(skb, &rth->dst);
1126                         return rth;
1127                 }
1128
1129                 if (!atomic_read(&rth->dst.__refcnt)) {
1130                         u32 score = rt_score(rth);
1131
1132                         if (score <= min_score) {
1133                                 cand = rth;
1134                                 candp = rthp;
1135                                 min_score = score;
1136                         }
1137                 }
1138
1139                 chain_length++;
1140
1141                 rthp = &rth->dst.rt_next;
1142         }
1143
1144         if (cand) {
1145                 /* ip_rt_gc_elasticity used to be the average chain length;
1146                  * when exceeded, gc becomes really aggressive.
1147                  *
1148                  * The second limit is less certain. At the moment it allows
1149                  * only 2 entries per bucket. We will see.
1150                  */
1151                 if (chain_length > ip_rt_gc_elasticity) {
1152                         *candp = cand->dst.rt_next;
1153                         rt_free(cand);
1154                 }
1155         } else {
1156                 if (chain_length > rt_chain_length_max &&
1157                     slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
1158                         struct net *net = dev_net(rt->dst.dev);
1159                         int num = ++net->ipv4.current_rt_cache_rebuild_count;
1160                         if (!rt_caching(net)) {
1161                                 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
1162                                         rt->dst.dev->name, num);
1163                         }
1164                         rt_emergency_hash_rebuild(net);
1165                         spin_unlock_bh(rt_hash_lock_addr(hash));
1166
1167                         hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1168                                         ifindex, rt_genid(net));
1169                         goto restart;
1170                 }
1171         }
1172
1173         /* Try to bind the route to arp only if it is an output
1174            route or on the unicast forwarding path.
1175          */
1176         if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
1177                 int err = rt_bind_neighbour(rt);
1178                 if (err) {
1179                         spin_unlock_bh(rt_hash_lock_addr(hash));
1180
1181                         if (err != -ENOBUFS) {
1182                                 rt_drop(rt);
1183                                 return ERR_PTR(err);
1184                         }
1185
1186                         /* Neighbour tables are full and nothing
1187                            can be released. Try to shrink the route cache;
1188                            it most likely holds some neighbour records.
1189                          */
1190                         if (attempts-- > 0) {
1191                                 int saved_elasticity = ip_rt_gc_elasticity;
1192                                 int saved_int = ip_rt_gc_min_interval;
1193                                 ip_rt_gc_elasticity     = 1;
1194                                 ip_rt_gc_min_interval   = 0;
1195                                 rt_garbage_collect(&ipv4_dst_ops);
1196                                 ip_rt_gc_min_interval   = saved_int;
1197                                 ip_rt_gc_elasticity     = saved_elasticity;
1198                                 goto restart;
1199                         }
1200
1201                         if (net_ratelimit())
1202                                 printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
1203                         rt_drop(rt);
1204                         return ERR_PTR(-ENOBUFS);
1205                 }
1206         }
1207
1208         rt->dst.rt_next = rt_hash_table[hash].chain;
1209
1210         /*
1211          * Since lookup is lockfree, we must make sure
1212          * previous writes to rt are committed to memory
1213          * before making rt visible to other CPUS.
1214          */
1215         rcu_assign_pointer(rt_hash_table[hash].chain, rt);
1216
1217         spin_unlock_bh(rt_hash_lock_addr(hash));
1218
1219 skip_hashing:
1220         if (skb)
1221                 skb_dst_set(skb, &rt->dst);
1222         return rt;
1223 }
1224
1225 static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
1226
1227 static u32 rt_peer_genid(void)
1228 {
1229         return atomic_read(&__rt_peer_genid);
1230 }
1231
1232 void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
1233 {
1234         struct inet_peer *peer;
1235
1236         peer = inet_getpeer_v4(daddr, create);
1237
1238         if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
1239                 inet_putpeer(peer);
1240         else
1241                 rt->rt_peer_genid = rt_peer_genid();
1242 }
1243
1244 /*
1245  * Peer allocation may fail only in serious out-of-memory conditions.  However
1246  * we can still generate some output.
1247  * Random ID selection looks a bit dangerous because we have no chance of
1248  * selecting an ID that stays unique over a reasonable period of time.
1249  * But a broken packet identifier may be better than no packet at all.
1250  */
1251 static void ip_select_fb_ident(struct iphdr *iph)
1252 {
1253         static DEFINE_SPINLOCK(ip_fb_id_lock);
1254         static u32 ip_fallback_id;
1255         u32 salt;
1256
1257         spin_lock_bh(&ip_fb_id_lock);
1258         salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
1259         iph->id = htons(salt & 0xFFFF);
1260         ip_fallback_id = salt;
1261         spin_unlock_bh(&ip_fb_id_lock);
1262 }
1263
1264 void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1265 {
1266         struct rtable *rt = (struct rtable *) dst;
1267
1268         if (rt) {
1269                 if (rt->peer == NULL)
1270                         rt_bind_peer(rt, rt->rt_dst, 1);
1271
1272                 /* If peer is attached to destination, it is never detached,
1273                    so we do not need to grab a lock to dereference it.
1274                  */
1275                 if (rt->peer) {
1276                         iph->id = htons(inet_getid(rt->peer, more));
1277                         return;
1278                 }
1279         } else
1280                 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
1281                        __builtin_return_address(0));
1282
1283         ip_select_fb_ident(iph);
1284 }
1285 EXPORT_SYMBOL(__ip_select_ident);
1286
1287 static void rt_del(unsigned hash, struct rtable *rt)
1288 {
1289         struct rtable __rcu **rthp;
1290         struct rtable *aux;
1291
1292         rthp = &rt_hash_table[hash].chain;
1293         spin_lock_bh(rt_hash_lock_addr(hash));
1294         ip_rt_put(rt);
1295         while ((aux = rcu_dereference_protected(*rthp,
1296                         lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
1297                 if (aux == rt || rt_is_expired(aux)) {
1298                         *rthp = aux->dst.rt_next;
1299                         rt_free(aux);
1300                         continue;
1301                 }
1302                 rthp = &aux->dst.rt_next;
1303         }
1304         spin_unlock_bh(rt_hash_lock_addr(hash));
1305 }
1306
1307 /* called in rcu_read_lock() section */
1308 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1309                     __be32 saddr, struct net_device *dev)
1310 {
1311         struct in_device *in_dev = __in_dev_get_rcu(dev);
1312         struct inet_peer *peer;
1313         struct net *net;
1314
1315         if (!in_dev)
1316                 return;
1317
1318         net = dev_net(dev);
1319         if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
1320             ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
1321             ipv4_is_zeronet(new_gw))
1322                 goto reject_redirect;
1323
1324         if (!IN_DEV_SHARED_MEDIA(in_dev)) {
1325                 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
1326                         goto reject_redirect;
1327                 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
1328                         goto reject_redirect;
1329         } else {
1330                 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
1331                         goto reject_redirect;
1332         }
1333
1334         peer = inet_getpeer_v4(daddr, 1);
1335         if (peer) {
1336                 peer->redirect_learned.a4 = new_gw;
1337
1338                 inet_putpeer(peer);
1339
1340                 atomic_inc(&__rt_peer_genid);
1341         }
1342         return;
1343
1344 reject_redirect:
1345 #ifdef CONFIG_IP_ROUTE_VERBOSE
1346         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
1347                 printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
1348                         "  Advised path = %pI4 -> %pI4\n",
1349                        &old_gw, dev->name, &new_gw,
1350                        &saddr, &daddr);
1351 #endif
1352         ;
1353 }
1354
1355 static bool peer_pmtu_expired(struct inet_peer *peer)
1356 {
1357         unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
1358
1359         return orig &&
1360                time_after_eq(jiffies, orig) &&
1361                cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
1362 }
1363
1364 static bool peer_pmtu_cleaned(struct inet_peer *peer)
1365 {
1366         unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
1367
1368         return orig &&
1369                cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
1370 }
1371
1372 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1373 {
1374         struct rtable *rt = (struct rtable *)dst;
1375         struct dst_entry *ret = dst;
1376
1377         if (rt) {
1378                 if (dst->obsolete > 0) {
1379                         ip_rt_put(rt);
1380                         ret = NULL;
1381                 } else if (rt->rt_flags & RTCF_REDIRECTED) {
1382                         unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1383                                                 rt->rt_oif,
1384                                                 rt_genid(dev_net(dst->dev)));
1385                         rt_del(hash, rt);
1386                         ret = NULL;
1387                 } else if (rt->peer && peer_pmtu_expired(rt->peer)) {
1388                         dst_metric_set(dst, RTAX_MTU, rt->peer->pmtu_orig);
1389                 }
1390         }
1391         return ret;
1392 }
1393
1394 /*
1395  * Algorithm:
1396  *      1. The first ip_rt_redirect_number redirects are sent
1397  *         with exponential backoff, then we stop sending them at all,
1398  *         assuming that the host ignores our redirects.
1399  *      2. If we did not see packets requiring redirects
1400  *         during ip_rt_redirect_silence, we assume that the host
1401  *         forgot the redirected route and start sending redirects again.
1402  *
1403  * This algorithm is much cheaper and more intelligent than dumb load limiting
1404  * in icmp.c.
1405  *
1406  * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
1407  * and "frag. need" (breaks PMTU discovery) in icmp.c.
1408  */
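/*
 * Editor's note (worked example using the defaults declared at the top of
 * this file): with ip_rt_redirect_load = HZ/50 and ip_rt_redirect_number = 9,
 * the first redirect goes out immediately and each later one waits at least
 * (HZ/50) << rate_tokens jiffies (40 ms, 80 ms, 160 ms, ...), until nine have
 * been sent.  Only ip_rt_redirect_silence = (HZ/50) << 10, roughly 20
 * seconds without packets needing a redirect, resets the counter.
 */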
1409
1410 void ip_rt_send_redirect(struct sk_buff *skb)
1411 {
1412         struct rtable *rt = skb_rtable(skb);
1413         struct in_device *in_dev;
1414         struct inet_peer *peer;
1415         int log_martians;
1416
1417         rcu_read_lock();
1418         in_dev = __in_dev_get_rcu(rt->dst.dev);
1419         if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
1420                 rcu_read_unlock();
1421                 return;
1422         }
1423         log_martians = IN_DEV_LOG_MARTIANS(in_dev);
1424         rcu_read_unlock();
1425
1426         if (!rt->peer)
1427                 rt_bind_peer(rt, rt->rt_dst, 1);
1428         peer = rt->peer;
1429         if (!peer) {
1430                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1431                 return;
1432         }
1433
1434         /* No redirected packets during ip_rt_redirect_silence;
1435          * reset the algorithm.
1436          */
1437         if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
1438                 peer->rate_tokens = 0;
1439
1440         /* Too many ignored redirects; do not send anything.
1441          * Set dst.rate_last to the last seen redirected packet.
1442          */
1443         if (peer->rate_tokens >= ip_rt_redirect_number) {
1444                 peer->rate_last = jiffies;
1445                 return;
1446         }
1447
1448         /* Check for load limit; set rate_last to the latest sent
1449          * redirect.
1450          */
1451         if (peer->rate_tokens == 0 ||
1452             time_after(jiffies,
1453                        (peer->rate_last +
1454                         (ip_rt_redirect_load << peer->rate_tokens)))) {
1455                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1456                 peer->rate_last = jiffies;
1457                 ++peer->rate_tokens;
1458 #ifdef CONFIG_IP_ROUTE_VERBOSE
1459                 if (log_martians &&
1460                     peer->rate_tokens == ip_rt_redirect_number &&
1461                     net_ratelimit())
1462                         printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
1463                                &ip_hdr(skb)->saddr, rt->rt_iif,
1464                                 &rt->rt_dst, &rt->rt_gateway);
1465 #endif
1466         }
1467 }
1468
1469 static int ip_error(struct sk_buff *skb)
1470 {
1471         struct rtable *rt = skb_rtable(skb);
1472         struct inet_peer *peer;
1473         unsigned long now;
1474         bool send;
1475         int code;
1476
1477         switch (rt->dst.error) {
1478         case EINVAL:
1479         default:
1480                 goto out;
1481         case EHOSTUNREACH:
1482                 code = ICMP_HOST_UNREACH;
1483                 break;
1484         case ENETUNREACH:
1485                 code = ICMP_NET_UNREACH;
1486                 IP_INC_STATS_BH(dev_net(rt->dst.dev),
1487                                 IPSTATS_MIB_INNOROUTES);
1488                 break;
1489         case EACCES:
1490                 code = ICMP_PKT_FILTERED;
1491                 break;
1492         }
1493
1494         if (!rt->peer)
1495                 rt_bind_peer(rt, rt->rt_dst, 1);
1496         peer = rt->peer;
1497
1498         send = true;
1499         if (peer) {
1500                 now = jiffies;
1501                 peer->rate_tokens += now - peer->rate_last;
1502                 if (peer->rate_tokens > ip_rt_error_burst)
1503                         peer->rate_tokens = ip_rt_error_burst;
1504                 peer->rate_last = now;
1505                 if (peer->rate_tokens >= ip_rt_error_cost)
1506                         peer->rate_tokens -= ip_rt_error_cost;
1507                 else
1508                         send = false;
1509         }
1510         if (send)
1511                 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1512
1513 out:    kfree_skb(skb);
1514         return 0;
1515 }
1516
1517 /*
1518  *      The last two values are not from the RFC but
1519  *      are needed for AMPRnet AX.25 paths.
1520  */
1521
1522 static const unsigned short mtu_plateau[] =
1523 {32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
1524
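/* guess_mtu() - return the largest plateau value strictly below old_mtu,
 * falling back to the 68 byte minimum when nothing matches.
 */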
1525 static inline unsigned short guess_mtu(unsigned short old_mtu)
1526 {
1527         int i;
1528
1529         for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
1530                 if (old_mtu > mtu_plateau[i])
1531                         return mtu_plateau[i];
1532         return 68;
1533 }
1534
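/* ip_rt_frag_needed() - handle an ICMP "fragmentation needed" report:
 * estimate the path MTU if the reported one is unusable, record it in the
 * destination's inet_peer entry with an expiry time, and return the MTU
 * that was learned (or new_mtu when nothing was recorded).
 */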
1535 unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
1536                                  unsigned short new_mtu,
1537                                  struct net_device *dev)
1538 {
1539         unsigned short old_mtu = ntohs(iph->tot_len);
1540         unsigned short est_mtu = 0;
1541         struct inet_peer *peer;
1542
1543         peer = inet_getpeer_v4(iph->daddr, 1);
1544         if (peer) {
1545                 unsigned short mtu = new_mtu;
1546
1547                 if (new_mtu < 68 || new_mtu >= old_mtu) {
1548                         /* BSD 4.2 derived systems incorrectly adjust
1549                          * tot_len by the IP header length, and report
1550                          * a zero MTU in the ICMP message.
1551                          */
1552                         if (mtu == 0 &&
1553                             old_mtu >= 68 + (iph->ihl << 2))
1554                                 old_mtu -= iph->ihl << 2;
1555                         mtu = guess_mtu(old_mtu);
1556                 }
1557
1558                 if (mtu < ip_rt_min_pmtu)
1559                         mtu = ip_rt_min_pmtu;
1560                 if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
1561                         unsigned long pmtu_expires;
1562
1563                         pmtu_expires = jiffies + ip_rt_mtu_expires;
1564                         if (!pmtu_expires)
1565                                 pmtu_expires = 1UL;
1566
1567                         est_mtu = mtu;
1568                         peer->pmtu_learned = mtu;
1569                         peer->pmtu_expires = pmtu_expires;
1570                 }
1571
1572                 inet_putpeer(peer);
1573
1574                 atomic_inc(&__rt_peer_genid);
1575         }
1576         return est_mtu ? : new_mtu;
1577 }
1578
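/* check_peer_pmtu() - apply an unexpired, smaller peer-learned PMTU to the
 * dst's RTAX_MTU metric (saving the original first), or restore the
 * original MTU once the learned value has expired.
 */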
1579 static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
1580 {
1581         unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);
1582
1583         if (!expires)
1584                 return;
1585         if (time_before(jiffies, expires)) {
1586                 u32 orig_dst_mtu = dst_mtu(dst);
1587                 if (peer->pmtu_learned < orig_dst_mtu) {
1588                         if (!peer->pmtu_orig)
1589                                 peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
1590                         dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
1591                 }
1592         } else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
1593                 dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
1594 }
1595
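/* ip_rt_update_pmtu() - dst_ops.update_pmtu callback: record a new,
 * smaller path MTU in the bound inet_peer and bump the peer generation so
 * other cached routes pick it up via check_peer_pmtu().
 */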
1596 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1597 {
1598         struct rtable *rt = (struct rtable *) dst;
1599         struct inet_peer *peer;
1600
1601         dst_confirm(dst);
1602
1603         if (!rt->peer)
1604                 rt_bind_peer(rt, rt->rt_dst, 1);
1605         peer = rt->peer;
1606         if (peer) {
1607                 unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);
1608
1609                 if (mtu < ip_rt_min_pmtu)
1610                         mtu = ip_rt_min_pmtu;
1611                 if (!pmtu_expires || mtu < peer->pmtu_learned) {
1612
1613                         pmtu_expires = jiffies + ip_rt_mtu_expires;
1614                         if (!pmtu_expires)
1615                                 pmtu_expires = 1UL;
1616
1617                         peer->pmtu_learned = mtu;
1618                         peer->pmtu_expires = pmtu_expires;
1619
1620                         atomic_inc(&__rt_peer_genid);
1621                         rt->rt_peer_genid = rt_peer_genid();
1622                 }
1623                 check_peer_pmtu(dst, peer);
1624         }
1625 }
1626
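/* check_peer_redir() - switch the route to the gateway learned from an
 * ICMP redirect and rebind its neighbour entry; revert to the original
 * gateway and return -EAGAIN if no valid neighbour can be obtained.
 */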
1627 static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
1628 {
1629         struct rtable *rt = (struct rtable *) dst;
1630         __be32 orig_gw = rt->rt_gateway;
1631         struct neighbour *n;
1632
1633         dst_confirm(&rt->dst);
1634
1635         neigh_release(dst_get_neighbour(&rt->dst));
1636         dst_set_neighbour(&rt->dst, NULL);
1637
1638         rt->rt_gateway = peer->redirect_learned.a4;
1639         rt_bind_neighbour(rt);
1640         n = dst_get_neighbour(&rt->dst);
1641         if (!n || !(n->nud_state & NUD_VALID)) {
1642                 if (n)
1643                         neigh_event_send(n, NULL);
1644                 rt->rt_gateway = orig_gw;
1645                 return -EAGAIN;
1646         } else {
1647                 rt->rt_flags |= RTCF_REDIRECTED;
1648                 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
1649         }
1650         return 0;
1651 }
1652
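/* ipv4_dst_check() - dst_ops.check callback: a cached route remains valid
 * only while its generation ids match; pending peer PMTU and redirect
 * updates are folded in before the dst is handed back.
 */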
1653 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1654 {
1655         struct rtable *rt = (struct rtable *) dst;
1656
1657         if (rt_is_expired(rt))
1658                 return NULL;
1659         if (rt->rt_peer_genid != rt_peer_genid()) {
1660                 struct inet_peer *peer;
1661
1662                 if (!rt->peer)
1663                         rt_bind_peer(rt, rt->rt_dst, 0);
1664
1665                 peer = rt->peer;
1666                 if (peer) {
1667                         check_peer_pmtu(dst, peer);
1668
1669                         if (peer->redirect_learned.a4 &&
1670                             peer->redirect_learned.a4 != rt->rt_gateway) {
1671                                 if (check_peer_redir(dst, peer))
1672                                         return NULL;
1673                         }
1674                 }
1675
1676                 rt->rt_peer_genid = rt_peer_genid();
1677         }
1678         return dst;
1679 }
1680
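/* ipv4_dst_destroy() - release the fib_info and inet_peer references held
 * by a routing cache entry when its dst is freed.
 */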
1681 static void ipv4_dst_destroy(struct dst_entry *dst)
1682 {
1683         struct rtable *rt = (struct rtable *) dst;
1684         struct inet_peer *peer = rt->peer;
1685
1686         if (rt->fi) {
1687                 fib_info_put(rt->fi);
1688                 rt->fi = NULL;
1689         }
1690         if (peer) {
1691                 rt->peer = NULL;
1692                 inet_putpeer(peer);
1693         }
1694 }
1695
1696
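/* ipv4_link_failure() - report a dead link with ICMP host unreachable and
 * roll back any peer-learned PMTU clamp on the affected route.
 */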
1697 static void ipv4_link_failure(struct sk_buff *skb)
1698 {
1699         struct rtable *rt;
1700
1701         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1702
1703         rt = skb_rtable(skb);
1704         if (rt && rt->peer && peer_pmtu_cleaned(rt->peer))
1705                 dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
1706 }
1707
1708 static int ip_rt_bug(struct sk_buff *skb)
1709 {
1710         printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
1711                 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1712                 skb->dev ? skb->dev->name : "?");
1713         kfree_skb(skb);
1714         WARN_ON(1);
1715         return 0;
1716 }
1717
1718 /*
1719    We do not cache the source address of the outgoing interface,
1720    because it is used only by the IP RR, TS and SRR options,
1721    so it is out of the fast path.
1722
1723    BTW remember: "addr" is allowed to be unaligned
1724    in IP options!
1725  */
1726
1727 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1728 {
1729         __be32 src;
1730
1731         if (rt_is_output_route(rt))
1732                 src = ip_hdr(skb)->saddr;
1733         else {
1734                 struct fib_result res;
1735                 struct flowi4 fl4;
1736                 struct iphdr *iph;
1737
1738                 iph = ip_hdr(skb);
1739
1740                 memset(&fl4, 0, sizeof(fl4));
1741                 fl4.daddr = iph->daddr;
1742                 fl4.saddr = iph->saddr;
1743                 fl4.flowi4_tos = RT_TOS(iph->tos);
1744                 fl4.flowi4_oif = rt->dst.dev->ifindex;
1745                 fl4.flowi4_iif = skb->dev->ifindex;
1746                 fl4.flowi4_mark = skb->mark;
1747
1748                 rcu_read_lock();
1749                 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
1750                         src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
1751                 else
1752                         src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
1753                                         RT_SCOPE_UNIVERSE);
1754                 rcu_read_unlock();
1755         }
1756         memcpy(addr, &src, 4);
1757 }
1758
1759 #ifdef CONFIG_IP_ROUTE_CLASSID
1760 static void set_class_tag(struct rtable *rt, u32 tag)
1761 {
1762         if (!(rt->dst.tclassid & 0xFFFF))
1763                 rt->dst.tclassid |= tag & 0xFFFF;
1764         if (!(rt->dst.tclassid & 0xFFFF0000))
1765                 rt->dst.tclassid |= tag & 0xFFFF0000;
1766 }
1767 #endif
1768
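/* ipv4_default_advmss() - advertised MSS when no RTAX_ADVMSS metric is
 * set: the device MTU minus 40 bytes of IPv4 + TCP headers, bounded below
 * by ip_rt_min_advmss and above by 65535 - 40.
 */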
1769 static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1770 {
1771         unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1772
1773         if (advmss == 0) {
1774                 advmss = max_t(unsigned int, dst->dev->mtu - 40,
1775                                ip_rt_min_advmss);
1776                 if (advmss > 65535 - 40)
1777                         advmss = 65535 - 40;
1778         }
1779         return advmss;
1780 }
1781
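/* ipv4_default_mtu() - path MTU when no RTAX_MTU metric is set: the
 * device MTU, clamped to 576 for gatewayed routes whose MTU metric is
 * locked, and capped at IP_MAX_MTU.
 */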
1782 static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
1783 {
1784         unsigned int mtu = dst->dev->mtu;
1785
1786         if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1787                 const struct rtable *rt = (const struct rtable *) dst;
1788
1789                 if (rt->rt_gateway != rt->rt_dst && mtu > 576)
1790                         mtu = 576;
1791         }
1792
1793         if (mtu > IP_MAX_MTU)
1794                 mtu = IP_MAX_MTU;
1795
1796         return mtu;
1797 }
1798
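/* rt_init_metrics() - point the route's metrics at the shared inet_peer
 * copy when a peer is available (creating one for pre-COW flows),
 * otherwise at the read-only fib_info metrics.
 */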
1799 static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
1800                             struct fib_info *fi)
1801 {
1802         struct inet_peer *peer;
1803         int create = 0;
1804
1805         /* If a peer entry exists for this destination, we must hook
1806          * it up in order to get at cached metrics.
1807          */
1808         if (fl4 && (fl4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
1809                 create = 1;
1810
1811         rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
1812         if (peer) {
1813                 rt->rt_peer_genid = rt_peer_genid();
1814                 if (inet_metrics_new(peer))
1815                         memcpy(peer->metrics, fi->fib_metrics,
1816                                sizeof(u32) * RTAX_MAX);
1817                 dst_init_metrics(&rt->dst, peer->metrics, false);
1818
1819                 check_peer_pmtu(&rt->dst, peer);
1820                 if (peer->redirect_learned.a4 &&
1821                     peer->redirect_learned.a4 != rt->rt_gateway) {
1822                         rt->rt_gateway = peer->redirect_learned.a4;
1823                         rt->rt_flags |= RTCF_REDIRECTED;
1824                 }
1825         } else {
1826                 if (fi->fib_metrics != (u32 *) dst_default_metrics) {
1827                         rt->fi = fi;
1828                         atomic_inc(&fi->fib_clntref);
1829                 }
1830                 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
1831         }
1832 }
1833
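/* rt_set_nexthop() - fill in the gateway, metrics and classid from the
 * FIB lookup result and sanity-cap the MTU and advmss metrics.
 */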
1834 static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
1835                            const struct fib_result *res,
1836                            struct fib_info *fi, u16 type, u32 itag)
1837 {
1838         struct dst_entry *dst = &rt->dst;
1839
1840         if (fi) {
1841                 if (FIB_RES_GW(*res) &&
1842                     FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1843                         rt->rt_gateway = FIB_RES_GW(*res);
1844                 rt_init_metrics(rt, fl4, fi);
1845 #ifdef CONFIG_IP_ROUTE_CLASSID
1846                 dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1847 #endif
1848         }
1849
1850         if (dst_mtu(dst) > IP_MAX_MTU)
1851                 dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
1852         if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
1853                 dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
1854
1855 #ifdef CONFIG_IP_ROUTE_CLASSID
1856 #ifdef CONFIG_IP_MULTIPLE_TABLES
1857         set_class_tag(rt, fib_rules_tclass(res));
1858 #endif
1859         set_class_tag(rt, itag);
1860 #endif
1861 }
1862
1863 static struct rtable *rt_dst_alloc(struct net_device *dev,
1864                                    bool nopolicy, bool noxfrm)
1865 {
1866         return dst_alloc(&ipv4_dst_ops, dev, 1, -1,
1867                          DST_HOST |
1868                          (nopolicy ? DST_NOPOLICY : 0) |
1869                          (noxfrm ? DST_NOXFRM : 0));
1870 }
1871
1872 /* called in rcu_read_lock() section */
1873 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1874                                 u8 tos, struct net_device *dev, int our)
1875 {
1876         unsigned int hash;
1877         struct rtable *rth;
1878         __be32 spec_dst;
1879         struct in_device *in_dev = __in_dev_get_rcu(dev);
1880         u32 itag = 0;
1881         int err;
1882
1883         /* Primary sanity checks. */
1884
1885         if (in_dev == NULL)
1886                 return -EINVAL;
1887
1888         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1889             ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
1890                 goto e_inval;
1891
1892         if (ipv4_is_zeronet(saddr)) {
1893                 if (!ipv4_is_local_multicast(daddr))
1894                         goto e_inval;
1895                 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1896         } else {
1897                 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
1898                                           &itag);
1899                 if (err < 0)
1900                         goto e_err;
1901         }
1902         rth = rt_dst_alloc(init_net.loopback_dev,
1903                            IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1904         if (!rth)
1905                 goto e_nobufs;
1906
1907 #ifdef CONFIG_IP_ROUTE_CLASSID
1908         rth->dst.tclassid = itag;
1909 #endif
1910         rth->dst.output = ip_rt_bug;
1911
1912         rth->rt_key_dst = daddr;
1913         rth->rt_key_src = saddr;
1914         rth->rt_genid   = rt_genid(dev_net(dev));
1915         rth->rt_flags   = RTCF_MULTICAST;
1916         rth->rt_type    = RTN_MULTICAST;
1917         rth->rt_key_tos = tos;
1918         rth->rt_dst     = daddr;
1919         rth->rt_src     = saddr;
1920         rth->rt_route_iif = dev->ifindex;
1921         rth->rt_iif     = dev->ifindex;
1922         rth->rt_oif     = 0;
1923         rth->rt_mark    = skb->mark;
1924         rth->rt_gateway = daddr;
1925         rth->rt_spec_dst= spec_dst;
1926         rth->rt_peer_genid = 0;
1927         rth->peer = NULL;
1928         rth->fi = NULL;
1929         if (our) {
1930                 rth->dst.input= ip_local_deliver;
1931                 rth->rt_flags |= RTCF_LOCAL;
1932         }
1933
1934 #ifdef CONFIG_IP_MROUTE
1935         if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1936                 rth->dst.input = ip_mr_input;
1937 #endif
1938         RT_CACHE_STAT_INC(in_slow_mc);
1939
1940         hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1941         rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
1942         return IS_ERR(rth) ? PTR_ERR(rth) : 0;
1943
1944 e_nobufs:
1945         return -ENOBUFS;
1946 e_inval:
1947         return -EINVAL;
1948 e_err:
1949         return err;
1950 }
1951
1952
1953 static void ip_handle_martian_source(struct net_device *dev,
1954                                      struct in_device *in_dev,
1955                                      struct sk_buff *skb,
1956                                      __be32 daddr,
1957                                      __be32 saddr)
1958 {
1959         RT_CACHE_STAT_INC(in_martian_src);
1960 #ifdef CONFIG_IP_ROUTE_VERBOSE
1961         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1962                 /*
1963                  *      RFC 1812 recommendation: if the source is martian,
1964                  *      the only hint is the MAC header.
1965                  */
1966                 printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
1967                         &daddr, &saddr, dev->name);
1968                 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1969                         int i;
1970                         const unsigned char *p = skb_mac_header(skb);
1971                         printk(KERN_WARNING "ll header: ");
1972                         for (i = 0; i < dev->hard_header_len; i++, p++) {
1973                                 printk("%02x", *p);
1974                                 if (i < (dev->hard_header_len - 1))
1975                                         printk(":");
1976                         }
1977                         printk("\n");
1978                 }
1979         }
1980 #endif
1981 }
1982
1983 /* called in rcu_read_lock() section */
1984 static int __mkroute_input(struct sk_buff *skb,
1985                            const struct fib_result *res,
1986                            struct in_device *in_dev,
1987                            __be32 daddr, __be32 saddr, u32 tos,
1988                            struct rtable **result)
1989 {
1990         struct rtable *rth;
1991         int err;
1992         struct in_device *out_dev;
1993         unsigned int flags = 0;
1994         __be32 spec_dst;
1995         u32 itag;
1996
1997         /* get a working reference to the output device */
1998         out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1999         if (out_dev == NULL) {
2000                 if (net_ratelimit())
2001                         printk(KERN_CRIT "Bug in ip_route_input" \
2002                                "_slow(). Please, report\n");
2003                 return -EINVAL;
2004         }
2005
2006
2007         err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
2008                                   in_dev->dev, &spec_dst, &itag);
2009         if (err < 0) {
2010                 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
2011                                          saddr);
2012
2013                 goto cleanup;
2014         }
2015
2016         if (err)
2017                 flags |= RTCF_DIRECTSRC;
2018
2019         if (out_dev == in_dev && err &&
2020             (IN_DEV_SHARED_MEDIA(out_dev) ||
2021              inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
2022                 flags |= RTCF_DOREDIRECT;
2023
2024         if (skb->protocol != htons(ETH_P_IP)) {
2025                 /* Not IP (i.e. ARP). Do not create a route if it is
2026                  * invalid for proxy ARP. DNAT routes are always valid.
2027                  *
2028                  * The proxy ARP feature has been extended to allow ARP
2029                  * replies back on the same interface, to support
2030                  * Private VLAN switch technologies. See arp.c.
2031                  */
2032                 if (out_dev == in_dev &&
2033                     IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
2034                         err = -EINVAL;
2035                         goto cleanup;
2036                 }
2037         }
2038
2039         rth = rt_dst_alloc(out_dev->dev,
2040                            IN_DEV_CONF_GET(in_dev, NOPOLICY),
2041                            IN_DEV_CONF_GET(out_dev, NOXFRM));
2042         if (!rth) {
2043                 err = -ENOBUFS;
2044                 goto cleanup;
2045         }
2046
2047         rth->rt_key_dst = daddr;
2048         rth->rt_key_src = saddr;
2049         rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
2050         rth->rt_flags = flags;
2051         rth->rt_type = res->type;
2052         rth->rt_key_tos = tos;
2053         rth->rt_dst     = daddr;
2054         rth->rt_src     = saddr;
2055         rth->rt_route_iif = in_dev->dev->ifindex;
2056         rth->rt_iif     = in_dev->dev->ifindex;
2057         rth->rt_oif     = 0;
2058         rth->rt_mark    = skb->mark;
2059         rth->rt_gateway = daddr;
2060         rth->rt_spec_dst= spec_dst;
2061         rth->rt_peer_genid = 0;
2062         rth->peer = NULL;
2063         rth->fi = NULL;
2064
2065         rth->dst.input = ip_forward;
2066         rth->dst.output = ip_output;
2067
2068         rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
2069
2070         *result = rth;
2071         err = 0;
2072  cleanup:
2073         return err;
2074 }
2075
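/* ip_mkroute_input() - resolve multipath (when configured), build the
 * forwarding cache entry via __mkroute_input() and insert it into the
 * route hash.
 */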
2076 static int ip_mkroute_input(struct sk_buff *skb,
2077                             struct fib_result *res,
2078                             const struct flowi4 *fl4,
2079                             struct in_device *in_dev,
2080                             __be32 daddr, __be32 saddr, u32 tos)
2081 {
2082         struct rtable* rth = NULL;
2083         int err;
2084         unsigned hash;
2085
2086 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2087         if (res->fi && res->fi->fib_nhs > 1)
2088                 fib_select_multipath(res);
2089 #endif
2090
2091         /* create a routing cache entry */
2092         err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
2093         if (err)
2094                 return err;
2095
2096         /* put it into the cache */
2097         hash = rt_hash(daddr, saddr, fl4->flowi4_iif,
2098                        rt_genid(dev_net(rth->dst.dev)));
2099         rth = rt_intern_hash(hash, rth, skb, fl4->flowi4_iif);
2100         if (IS_ERR(rth))
2101                 return PTR_ERR(rth);
2102         return 0;
2103 }
2104
2105 /*
2106  *      NOTE. We drop all packets that have a local source
2107  *      address, because every properly looped-back packet
2108  *      must already have the correct destination attached by the output routine.
2109  *
2110  *      This approach solves two big problems:
2111  *      1. Non-simplex devices are handled properly.
2112  *      2. IP spoofing attempts are filtered with a 100% guarantee.
2113  *      Called with rcu_read_lock().
2114  */
2115
2116 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2117                                u8 tos, struct net_device *dev)
2118 {
2119         struct fib_result res;
2120         struct in_device *in_dev = __in_dev_get_rcu(dev);
2121         struct flowi4   fl4;
2122         unsigned        flags = 0;
2123         u32             itag = 0;
2124         struct rtable * rth;
2125         unsigned        hash;
2126         __be32          spec_dst;
2127         int             err = -EINVAL;
2128         struct net    * net = dev_net(dev);
2129
2130         /* IP on this device is disabled. */
2131
2132         if (!in_dev)
2133                 goto out;
2134
2135         /* Check for the most weird martians, which cannot be detected
2136            by fib_lookup.
2137          */
2138
2139         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
2140             ipv4_is_loopback(saddr))
2141                 goto martian_source;
2142
2143         if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
2144                 goto brd_input;
2145
2146         /* Accept zero addresses only for limited broadcast;
2147          * I do not even know whether to fix this or not. Waiting for complaints :-)
2148          */
2149         if (ipv4_is_zeronet(saddr))
2150                 goto martian_source;
2151
2152         if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
2153                 goto martian_destination;
2154
2155         /*
2156          *      Now we are ready to route packet.
2157          */
2158         fl4.flowi4_oif = 0;
2159         fl4.flowi4_iif = dev->ifindex;
2160         fl4.flowi4_mark = skb->mark;
2161         fl4.flowi4_tos = tos;
2162         fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2163         fl4.daddr = daddr;
2164         fl4.saddr = saddr;
2165         err = fib_lookup(net, &fl4, &res);
2166         if (err != 0) {
2167                 if (!IN_DEV_FORWARD(in_dev))
2168                         goto e_hostunreach;
2169                 goto no_route;
2170         }
2171
2172         RT_CACHE_STAT_INC(in_slow_tot);
2173
2174         if (res.type == RTN_BROADCAST)
2175                 goto brd_input;
2176
2177         if (res.type == RTN_LOCAL) {
2178                 err = fib_validate_source(skb, saddr, daddr, tos,
2179                                           net->loopback_dev->ifindex,
2180                                           dev, &spec_dst, &itag);
2181                 if (err < 0)
2182                         goto martian_source_keep_err;
2183                 if (err)
2184                         flags |= RTCF_DIRECTSRC;
2185                 spec_dst = daddr;
2186                 goto local_input;
2187         }
2188
2189         if (!IN_DEV_FORWARD(in_dev))
2190                 goto e_hostunreach;
2191         if (res.type != RTN_UNICAST)
2192                 goto martian_destination;
2193
2194         err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
2195 out:    return err;
2196
2197 brd_input:
2198         if (skb->protocol != htons(ETH_P_IP))
2199                 goto e_inval;
2200
2201         if (ipv4_is_zeronet(saddr))
2202                 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2203         else {
2204                 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
2205                                           &itag);
2206                 if (err < 0)
2207                         goto martian_source_keep_err;
2208                 if (err)
2209                         flags |= RTCF_DIRECTSRC;
2210         }
2211         flags |= RTCF_BROADCAST;
2212         res.type = RTN_BROADCAST;
2213         RT_CACHE_STAT_INC(in_brd);
2214
2215 local_input:
2216         rth = rt_dst_alloc(net->loopback_dev,
2217                            IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
2218         if (!rth)
2219                 goto e_nobufs;
2220
2221         rth->dst.input= ip_local_deliver;
2222         rth->dst.output= ip_rt_bug;
2223 #ifdef CONFIG_IP_ROUTE_CLASSID
2224         rth->dst.tclassid = itag;
2225 #endif
2226
2227         rth->rt_key_dst = daddr;
2228         rth->rt_key_src = saddr;
2229         rth->rt_genid = rt_genid(net);
2230         rth->rt_flags   = flags|RTCF_LOCAL;
2231         rth->rt_type    = res.type;
2232         rth->rt_key_tos = tos;
2233         rth->rt_dst     = daddr;
2234         rth->rt_src     = saddr;
2235 #ifdef CONFIG_IP_ROUTE_CLASSID
2236         rth->dst.tclassid = itag;
2237 #endif
2238         rth->rt_route_iif = dev->ifindex;
2239         rth->rt_iif     = dev->ifindex;
2240         rth->rt_oif     = 0;
2241         rth->rt_mark    = skb->mark;
2242         rth->rt_gateway = daddr;
2243         rth->rt_spec_dst= spec_dst;
2244         rth->rt_peer_genid = 0;
2245         rth->peer = NULL;
2246         rth->fi = NULL;
2247         if (res.type == RTN_UNREACHABLE) {
2248                 rth->dst.input= ip_error;
2249                 rth->dst.error= -err;
2250                 rth->rt_flags   &= ~RTCF_LOCAL;
2251         }
2252         hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
2253         rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
2254         err = 0;
2255         if (IS_ERR(rth))
2256                 err = PTR_ERR(rth);
2257         goto out;
2258
2259 no_route:
2260         RT_CACHE_STAT_INC(in_no_route);
2261         spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2262         res.type = RTN_UNREACHABLE;
2263         if (err == -ESRCH)
2264                 err = -ENETUNREACH;
2265         goto local_input;
2266
2267         /*
2268          *      Do not cache martian addresses: they should be logged (RFC1812)
2269          */
2270 martian_destination:
2271         RT_CACHE_STAT_INC(in_martian_dst);
2272 #ifdef CONFIG_IP_ROUTE_VERBOSE
2273         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
2274                 printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
2275                         &daddr, &saddr, dev->name);
2276 #endif
2277
2278 e_hostunreach:
2279         err = -EHOSTUNREACH;
2280         goto out;
2281
2282 e_inval:
2283         err = -EINVAL;
2284         goto out;
2285
2286 e_nobufs:
2287         err = -ENOBUFS;
2288         goto out;
2289
2290 martian_source:
2291         err = -EINVAL;
2292 martian_source_keep_err:
2293         ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2294         goto out;
2295 }
2296
2297 int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2298                            u8 tos, struct net_device *dev, bool noref)
2299 {
2300         struct rtable * rth;
2301         unsigned        hash;
2302         int iif = dev->ifindex;
2303         struct net *net;
2304         int res;
2305
2306         net = dev_net(dev);
2307
2308         rcu_read_lock();
2309
2310         if (!rt_caching(net))
2311                 goto skip_cache;
2312
2313         tos &= IPTOS_RT_MASK;
2314         hash = rt_hash(daddr, saddr, iif, rt_genid(net));
2315
2316         for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2317              rth = rcu_dereference(rth->dst.rt_next)) {
2318                 if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
2319                      ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
2320                      (rth->rt_iif ^ iif) |
2321                      rth->rt_oif |
2322                      (rth->rt_key_tos ^ tos)) == 0 &&
2323                     rth->rt_mark == skb->mark &&
2324                     net_eq(dev_net(rth->dst.dev), net) &&
2325                     !rt_is_expired(rth)) {
2326                         if (noref) {
2327                                 dst_use_noref(&rth->dst, jiffies);
2328                                 skb_dst_set_noref(skb, &rth->dst);
2329                         } else {
2330                                 dst_use(&rth->dst, jiffies);
2331                                 skb_dst_set(skb, &rth->dst);
2332                         }
2333                         RT_CACHE_STAT_INC(in_hit);
2334                         rcu_read_unlock();
2335                         return 0;
2336                 }
2337                 RT_CACHE_STAT_INC(in_hlist_search);
2338         }
2339
2340 skip_cache:
2341         /* Multicast recognition logic is moved from the route cache to here.
2342            The problem was that too many Ethernet cards have broken/missing
2343            hardware multicast filters :-( As a result, a host on a multicast
2344            network acquires a lot of useless route cache entries, such as
2345            SDR messages from all over the world. Now we try to get rid of them.
2346            Really, provided the software IP multicast filter is organized
2347            reasonably (at least, hashed), this does not result in a slowdown
2348            compared with route cache reject entries.
2349            Note that multicast routers are not affected, because a
2350            route cache entry is created eventually.
2351          */
2352         if (ipv4_is_multicast(daddr)) {
2353                 struct in_device *in_dev = __in_dev_get_rcu(dev);
2354
2355                 if (in_dev) {
2356                         int our = ip_check_mc_rcu(in_dev, daddr, saddr,
2357                                                   ip_hdr(skb)->protocol);
2358                         if (our
2359 #ifdef CONFIG_IP_MROUTE
2360                                 ||
2361                             (!ipv4_is_local_multicast(daddr) &&
2362                              IN_DEV_MFORWARD(in_dev))
2363 #endif
2364                            ) {
2365                                 int res = ip_route_input_mc(skb, daddr, saddr,
2366                                                             tos, dev, our);
2367                                 rcu_read_unlock();
2368                                 return res;
2369                         }
2370                 }
2371                 rcu_read_unlock();
2372                 return -EINVAL;
2373         }
2374         res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
2375         rcu_read_unlock();
2376         return res;
2377 }
2378 EXPORT_SYMBOL(ip_route_input_common);
2379
2380 /* called with rcu_read_lock() */
2381 static struct rtable *__mkroute_output(const struct fib_result *res,
2382                                        const struct flowi4 *fl4,
2383                                        __be32 orig_daddr, __be32 orig_saddr,
2384                                        int orig_oif, struct net_device *dev_out,
2385                                        unsigned int flags)
2386 {
2387         struct fib_info *fi = res->fi;
2388         u32 tos = RT_FL_TOS(fl4);
2389         struct in_device *in_dev;
2390         u16 type = res->type;
2391         struct rtable *rth;
2392
2393         if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
2394                 return ERR_PTR(-EINVAL);
2395
2396         if (ipv4_is_lbcast(fl4->daddr))
2397                 type = RTN_BROADCAST;
2398         else if (ipv4_is_multicast(fl4->daddr))
2399                 type = RTN_MULTICAST;
2400         else if (ipv4_is_zeronet(fl4->daddr))
2401                 return ERR_PTR(-EINVAL);
2402
2403         if (dev_out->flags & IFF_LOOPBACK)
2404                 flags |= RTCF_LOCAL;
2405
2406         in_dev = __in_dev_get_rcu(dev_out);
2407         if (!in_dev)
2408                 return ERR_PTR(-EINVAL);
2409
2410         if (type == RTN_BROADCAST) {
2411                 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2412                 fi = NULL;
2413         } else if (type == RTN_MULTICAST) {
2414                 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2415                 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2416                                      fl4->flowi4_proto))
2417                         flags &= ~RTCF_LOCAL;
2418                 /* If a multicast route does not exist, use the
2419                  * default one, but do not gateway in this case.
2420                  * Yes, it is a hack.
2421                  */
2422                 if (fi && res->prefixlen < 4)
2423                         fi = NULL;
2424         }
2425
2426         rth = rt_dst_alloc(dev_out,
2427                            IN_DEV_CONF_GET(in_dev, NOPOLICY),
2428                            IN_DEV_CONF_GET(in_dev, NOXFRM));
2429         if (!rth)
2430                 return ERR_PTR(-ENOBUFS);
2431
2432         rth->dst.output = ip_output;
2433
2434         rth->rt_key_dst = orig_daddr;
2435         rth->rt_key_src = orig_saddr;
2436         rth->rt_genid = rt_genid(dev_net(dev_out));
2437         rth->rt_flags   = flags;
2438         rth->rt_type    = type;
2439         rth->rt_key_tos = tos;
2440         rth->rt_dst     = fl4->daddr;
2441         rth->rt_src     = fl4->saddr;
2442         rth->rt_route_iif = 0;
2443         rth->rt_iif     = orig_oif ? : dev_out->ifindex;
2444         rth->rt_oif     = orig_oif;
2445         rth->rt_mark    = fl4->flowi4_mark;
2446         rth->rt_gateway = fl4->daddr;
2447         rth->rt_spec_dst= fl4->saddr;
2448         rth->rt_peer_genid = 0;
2449         rth->peer = NULL;
2450         rth->fi = NULL;
2451
2452         RT_CACHE_STAT_INC(out_slow_tot);
2453
2454         if (flags & RTCF_LOCAL) {
2455                 rth->dst.input = ip_local_deliver;
2456                 rth->rt_spec_dst = fl4->daddr;
2457         }
2458         if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2459                 rth->rt_spec_dst = fl4->saddr;
2460                 if (flags & RTCF_LOCAL &&
2461                     !(dev_out->flags & IFF_LOOPBACK)) {
2462                         rth->dst.output = ip_mc_output;
2463                         RT_CACHE_STAT_INC(out_slow_mc);
2464                 }
2465 #ifdef CONFIG_IP_MROUTE
2466                 if (type == RTN_MULTICAST) {
2467                         if (IN_DEV_MFORWARD(in_dev) &&
2468                             !ipv4_is_local_multicast(fl4->daddr)) {
2469                                 rth->dst.input = ip_mr_input;
2470                                 rth->dst.output = ip_mc_output;
2471                         }
2472                 }
2473 #endif
2474         }
2475
2476         rt_set_nexthop(rth, fl4, res, fi, type, 0);
2477
2478         return rth;
2479 }
2480
2481 /*
2482  * Major route resolver routine.
2483  * called with rcu_read_lock();
2484  */
2485
2486 static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
2487 {
2488         struct net_device *dev_out = NULL;
2489         u32 tos = RT_FL_TOS(fl4);
2490         unsigned int flags = 0;
2491         struct fib_result res;
2492         struct rtable *rth;
2493         __be32 orig_daddr;
2494         __be32 orig_saddr;
2495         int orig_oif;
2496
2497         res.fi          = NULL;
2498 #ifdef CONFIG_IP_MULTIPLE_TABLES
2499         res.r           = NULL;
2500 #endif
2501
2502         orig_daddr = fl4->daddr;
2503         orig_saddr = fl4->saddr;
2504         orig_oif = fl4->flowi4_oif;
2505
2506         fl4->flowi4_iif = net->loopback_dev->ifindex;
2507         fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2508         fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2509                          RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2510
2511         rcu_read_lock();
2512         if (fl4->saddr) {
2513                 rth = ERR_PTR(-EINVAL);
2514                 if (ipv4_is_multicast(fl4->saddr) ||
2515                     ipv4_is_lbcast(fl4->saddr) ||
2516                     ipv4_is_zeronet(fl4->saddr))
2517                         goto out;
2518
2519                 /* I removed the check for oif == dev_out->oif here.
2520                    It was wrong for two reasons:
2521                    1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2522                       is assigned to multiple interfaces.
2523                    2. Moreover, we are allowed to send packets with the saddr
2524                       of another iface. --ANK
2525                  */
2526
2527                 if (fl4->flowi4_oif == 0 &&
2528                     (ipv4_is_multicast(fl4->daddr) ||
2529                      ipv4_is_lbcast(fl4->daddr))) {
2530                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2531                         dev_out = __ip_dev_find(net, fl4->saddr, false);
2532                         if (dev_out == NULL)
2533                                 goto out;
2534
2535                         /* Special hack: the user can direct multicasts
2536                            and limited broadcast via the necessary interface
2537                            without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2538                            This hack is not just for fun, it allows
2539                            vic, vat and friends to work.
2540                            They bind a socket to loopback, set the ttl to zero
2541                            and expect that it will work.
2542                            From the viewpoint of the routing cache they are broken,
2543                            because we are not allowed to build a multicast path
2544                            with a loopback source address (look, the routing cache
2545                            cannot know that the ttl is zero, so the packet
2546                            will not leave this host and the route is valid).
2547                            Luckily, this hack is a good workaround.
2548                          */
2549
2550                         fl4->flowi4_oif = dev_out->ifindex;
2551                         goto make_route;
2552                 }
2553
2554                 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2555                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2556                         if (!__ip_dev_find(net, fl4->saddr, false))
2557                                 goto out;
2558                 }
2559         }
2560
2561
2562         if (fl4->flowi4_oif) {
2563                 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2564                 rth = ERR_PTR(-ENODEV);
2565                 if (dev_out == NULL)
2566                         goto out;
2567
2568                 /* RACE: Check return value of inet_select_addr instead. */
2569                 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2570                         rth = ERR_PTR(-ENETUNREACH);
2571                         goto out;
2572                 }
2573                 if (ipv4_is_local_multicast(fl4->daddr) ||
2574                     ipv4_is_lbcast(fl4->daddr)) {
2575                         if (!fl4->saddr)
2576                                 fl4->saddr = inet_select_addr(dev_out, 0,
2577                                                               RT_SCOPE_LINK);
2578                         goto make_route;
2579                 }
2580                 if (fl4->saddr) {
2581                         if (ipv4_is_multicast(fl4->daddr))
2582                                 fl4->saddr = inet_select_addr(dev_out, 0,
2583                                                               fl4->flowi4_scope);
2584                         else if (!fl4->daddr)
2585                                 fl4->saddr = inet_select_addr(dev_out, 0,
2586                                                               RT_SCOPE_HOST);
2587                 }
2588         }
2589
2590         if (!fl4->daddr) {
2591                 fl4->daddr = fl4->saddr;
2592                 if (!fl4->daddr)
2593                         fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2594                 dev_out = net->loopback_dev;
2595                 fl4->flowi4_oif = net->loopback_dev->ifindex;
2596                 res.type = RTN_LOCAL;
2597                 flags |= RTCF_LOCAL;
2598                 goto make_route;
2599         }
2600
2601         if (fib_lookup(net, fl4, &res)) {
2602                 res.fi = NULL;
2603                 if (fl4->flowi4_oif) {
2604                         /* Apparently, the routing tables are wrong. Assume
2605                            that the destination is on link.
2606
2607                            WHY? DW.
2608                            Because we are allowed to send to an iface
2609                            even if it has NO routes and NO assigned
2610                            addresses. When oif is specified, the routing
2611                            tables are looked up with only one purpose:
2612                            to catch whether the destination is gatewayed, rather
2613                            than direct. Moreover, if MSG_DONTROUTE is set,
2614                            we send the packet, ignoring both routing tables
2615                            and ifaddr state. --ANK
2616
2617
2618                            We could make it even if oif is unknown,
2619                            likely IPv6, but we do not.
2620                          */
2621
2622                         if (fl4->saddr == 0)
2623                                 fl4->saddr = inet_select_addr(dev_out, 0,
2624                                                               RT_SCOPE_LINK);
2625                         res.type = RTN_UNICAST;
2626                         goto make_route;
2627                 }
2628                 rth = ERR_PTR(-ENETUNREACH);
2629                 goto out;
2630         }
2631
2632         if (res.type == RTN_LOCAL) {
2633                 if (!fl4->saddr) {
2634                         if (res.fi->fib_prefsrc)
2635                                 fl4->saddr = res.fi->fib_prefsrc;
2636                         else
2637                                 fl4->saddr = fl4->daddr;
2638                 }
2639                 dev_out = net->loopback_dev;
2640                 fl4->flowi4_oif = dev_out->ifindex;
2641                 res.fi = NULL;
2642                 flags |= RTCF_LOCAL;
2643                 goto make_route;
2644         }
2645
2646 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2647         if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
2648                 fib_select_multipath(&res);
2649         else
2650 #endif
2651         if (!res.prefixlen &&
2652             res.table->tb_num_default > 1 &&
2653             res.type == RTN_UNICAST && !fl4->flowi4_oif)
2654                 fib_select_default(&res);
2655
2656         if (!fl4->saddr)
2657                 fl4->saddr = FIB_RES_PREFSRC(net, res);
2658
2659         dev_out = FIB_RES_DEV(res);
2660         fl4->flowi4_oif = dev_out->ifindex;
2661
2662
2663 make_route:
2664         rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
2665                                dev_out, flags);
2666         if (!IS_ERR(rth)) {
2667                 unsigned int hash;
2668
2669                 hash = rt_hash(orig_daddr, orig_saddr, orig_oif,
2670                                rt_genid(dev_net(dev_out)));
2671                 rth = rt_intern_hash(hash, rth, NULL, orig_oif);
2672         }
2673
2674 out:
2675         rcu_read_unlock();
2676         return rth;
2677 }
2678
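/* __ip_route_output_key() - look the flow up in the output route cache
 * under rcu_read_lock_bh(); fall back to ip_route_output_slow() on a miss
 * or when caching is disabled.
 */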
2679 struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
2680 {
2681         struct rtable *rth;
2682         unsigned int hash;
2683
2684         if (!rt_caching(net))
2685                 goto slow_output;
2686
2687         hash = rt_hash(flp4->daddr, flp4->saddr, flp4->flowi4_oif, rt_genid(net));
2688
2689         rcu_read_lock_bh();
2690         for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
2691                 rth = rcu_dereference_bh(rth->dst.rt_next)) {
2692                 if (rth->rt_key_dst == flp4->daddr &&
2693                     rth->rt_key_src == flp4->saddr &&
2694                     rt_is_output_route(rth) &&
2695                     rth->rt_oif == flp4->flowi4_oif &&
2696                     rth->rt_mark == flp4->flowi4_mark &&
2697                     !((rth->rt_key_tos ^ flp4->flowi4_tos) &
2698                             (IPTOS_RT_MASK | RTO_ONLINK)) &&
2699                     net_eq(dev_net(rth->dst.dev), net) &&
2700                     !rt_is_expired(rth)) {
2701                         dst_use(&rth->dst, jiffies);
2702                         RT_CACHE_STAT_INC(out_hit);
2703                         rcu_read_unlock_bh();
2704                         if (!flp4->saddr)
2705                                 flp4->saddr = rth->rt_src;
2706                         if (!flp4->daddr)
2707                                 flp4->daddr = rth->rt_dst;
2708                         return rth;
2709                 }
2710                 RT_CACHE_STAT_INC(out_hlist_search);
2711         }
2712         rcu_read_unlock_bh();
2713
2714 slow_output:
2715         return ip_route_output_slow(net, flp4);
2716 }
2717 EXPORT_SYMBOL_GPL(__ip_route_output_key);
2718
2719 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2720 {
2721         return NULL;
2722 }
2723
2724 static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
2725 {
2726         return 0;
2727 }
2728
2729 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2730 {
2731 }
2732
2733 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2734                                           unsigned long old)
2735 {
2736         return NULL;
2737 }
2738
2739 static struct dst_ops ipv4_dst_blackhole_ops = {
2740         .family                 =       AF_INET,
2741         .protocol               =       cpu_to_be16(ETH_P_IP),
2742         .destroy                =       ipv4_dst_destroy,
2743         .check                  =       ipv4_blackhole_dst_check,
2744         .default_mtu            =       ipv4_blackhole_default_mtu,
2745         .default_advmss         =       ipv4_default_advmss,
2746         .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
2747         .cow_metrics            =       ipv4_rt_blackhole_cow_metrics,
2748         .neigh_lookup           =       ipv4_neigh_lookup,
2749 };
2750
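/* ipv4_blackhole_route() - clone an existing route into a dst backed by
 * ipv4_dst_blackhole_ops whose input and output handlers simply discard
 * packets; the reference on the original dst is released.
 */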
2751 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2752 {
2753         struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, 0, 0);
2754         struct rtable *ort = (struct rtable *) dst_orig;
2755
2756         if (rt) {
2757                 struct dst_entry *new = &rt->dst;
2758
2759                 new->__use = 1;
2760                 new->input = dst_discard;
2761                 new->output = dst_discard;
2762                 dst_copy_metrics(new, &ort->dst);
2763
2764                 new->dev = ort->dst.dev;
2765                 if (new->dev)
2766                         dev_hold(new->dev);
2767
2768                 rt->rt_key_dst = ort->rt_key_dst;
2769                 rt->rt_key_src = ort->rt_key_src;
2770                 rt->rt_key_tos = ort->rt_key_tos;
2771                 rt->rt_route_iif = ort->rt_route_iif;
2772                 rt->rt_iif = ort->rt_iif;
2773                 rt->rt_oif = ort->rt_oif;
2774                 rt->rt_mark = ort->rt_mark;
2775
2776                 rt->rt_genid = rt_genid(net);
2777                 rt->rt_flags = ort->rt_flags;
2778                 rt->rt_type = ort->rt_type;
2779                 rt->rt_dst = ort->rt_dst;
2780                 rt->rt_src = ort->rt_src;
2781                 rt->rt_gateway = ort->rt_gateway;
2782                 rt->rt_spec_dst = ort->rt_spec_dst;
2783                 rt->peer = ort->peer;
2784                 if (rt->peer)
2785                         atomic_inc(&rt->peer->refcnt);
2786                 rt->fi = ort->fi;
2787                 if (rt->fi)
2788                         atomic_inc(&rt->fi->fib_clntref);
2789
2790                 dst_free(new);
2791         }
2792
2793         dst_release(dst_orig);
2794
2795         return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2796 }
2797
2798 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2799                                     struct sock *sk)
2800 {
2801         struct rtable *rt = __ip_route_output_key(net, flp4);
2802
2803         if (IS_ERR(rt))
2804                 return rt;
2805
2806         if (flp4->flowi4_proto)
2807                 rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2808                                                    flowi4_to_flowi(flp4),
2809                                                    sk, 0);
2810
2811         return rt;
2812 }
2813 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2814
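/* rt_fill_info() - dump one cached route into a routing netlink message,
 * including peer-derived id/timestamp/expiry data and, for multicast
 * input routes, an ipmr_get_route() lookup.
 */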
2815 static int rt_fill_info(struct net *net,
2816                         struct sk_buff *skb, u32 pid, u32 seq, int event,
2817                         int nowait, unsigned int flags)
2818 {
2819         struct rtable *rt = skb_rtable(skb);
2820         struct rtmsg *r;
2821         struct nlmsghdr *nlh;
2822         long expires = 0;
2823         const struct inet_peer *peer = rt->peer;
2824         u32 id = 0, ts = 0, tsage = 0, error;
2825
2826         nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2827         if (nlh == NULL)
2828                 return -EMSGSIZE;
2829
2830         r = nlmsg_data(nlh);
2831         r->rtm_family    = AF_INET;
2832         r->rtm_dst_len  = 32;
2833         r->rtm_src_len  = 0;
2834         r->rtm_tos      = rt->rt_key_tos;
2835         r->rtm_table    = RT_TABLE_MAIN;
2836         NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
2837         r->rtm_type     = rt->rt_type;
2838         r->rtm_scope    = RT_SCOPE_UNIVERSE;
2839         r->rtm_protocol = RTPROT_UNSPEC;
2840         r->rtm_flags    = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2841         if (rt->rt_flags & RTCF_NOTIFY)
2842                 r->rtm_flags |= RTM_F_NOTIFY;
2843
2844         NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
2845
2846         if (rt->rt_key_src) {
2847                 r->rtm_src_len = 32;
2848                 NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src);
2849         }
2850         if (rt->dst.dev)
2851                 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
2852 #ifdef CONFIG_IP_ROUTE_CLASSID
2853         if (rt->dst.tclassid)
2854                 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
2855 #endif
2856         if (rt_is_input_route(rt))
2857                 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
2858         else if (rt->rt_src != rt->rt_key_src)
2859                 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
2860
2861         if (rt->rt_dst != rt->rt_gateway)
2862                 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
2863
2864         if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2865                 goto nla_put_failure;
2866
2867         if (rt->rt_mark)
2868                 NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
2869
2870         error = rt->dst.error;
2871         if (peer) {
2872                 inet_peer_refcheck(rt->peer);
2873                 id = atomic_read(&peer->ip_id_count) & 0xffff;
2874                 if (peer->tcp_ts_stamp) {
2875                         ts = peer->tcp_ts;
2876                         tsage = get_seconds() - peer->tcp_ts_stamp;
2877                 }
2878                 expires = ACCESS_ONCE(peer->pmtu_expires);
2879                 if (expires)
2880                         expires -= jiffies;
2881         }
2882
2883         if (rt_is_input_route(rt)) {
2884 #ifdef CONFIG_IP_MROUTE
2885                 __be32 dst = rt->rt_dst;
2886
2887                 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2888                     IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2889                         int err = ipmr_get_route(net, skb,
2890                                                  rt->rt_src, rt->rt_dst,
2891                                                  r, nowait);
2892                         if (err <= 0) {
2893                                 if (!nowait) {
2894                                         if (err == 0)
2895                                                 return 0;
2896                                         goto nla_put_failure;
2897                                 } else {
2898                                         if (err == -EMSGSIZE)
2899                                                 goto nla_put_failure;
2900                                         error = err;
2901                                 }
2902                         }
2903                 } else
2904 #endif
2905                         NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif);
2906         }
2907
2908         if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
2909                                expires, error) < 0)
2910                 goto nla_put_failure;
2911
2912         return nlmsg_end(skb, nlh);
2913
2914 nla_put_failure:
2915         nlmsg_cancel(skb, nlh);
2916         return -EMSGSIZE;
2917 }
2918
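/* inet_rtm_getroute() - RTM_GETROUTE handler: build a dummy skb, resolve
 * the requested flow via ip_route_input() (when an input interface is
 * given) or ip_route_output_key(), and unicast the result back through
 * rt_fill_info().
 */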
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
        struct net *net = sock_net(in_skb->sk);
        struct rtmsg *rtm;
        struct nlattr *tb[RTA_MAX+1];
        struct rtable *rt = NULL;
        __be32 dst = 0;
        __be32 src = 0;
        u32 iif;
        int err;
        int mark;
        struct sk_buff *skb;

        err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
        if (err < 0)
                goto errout;

        rtm = nlmsg_data(nlh);

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (skb == NULL) {
                err = -ENOBUFS;
                goto errout;
        }

        /* Reserve room for dummy headers; this skb can pass
         * through a good chunk of the routing engine.
         */
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);

        /* Bugfix: need to give ip_route_input() enough of an IP header so it does not gag. */
        ip_hdr(skb)->protocol = IPPROTO_ICMP;
        skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

        src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
        dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
        iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
        mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;

        if (iif) {
                struct net_device *dev;

                dev = __dev_get_by_index(net, iif);
                if (dev == NULL) {
                        err = -ENODEV;
                        goto errout_free;
                }

                skb->protocol   = htons(ETH_P_IP);
                skb->dev        = dev;
                skb->mark       = mark;
                local_bh_disable();
                err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
                local_bh_enable();

                rt = skb_rtable(skb);
                if (err == 0 && rt->dst.error)
                        err = -rt->dst.error;
        } else {
                struct flowi4 fl4 = {
                        .daddr = dst,
                        .saddr = src,
                        .flowi4_tos = rtm->rtm_tos,
                        .flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
                        .flowi4_mark = mark,
                };
                rt = ip_route_output_key(net, &fl4);

                err = 0;
                if (IS_ERR(rt))
                        err = PTR_ERR(rt);
        }

        if (err)
                goto errout_free;

        skb_dst_set(skb, &rt->dst);
        if (rtm->rtm_flags & RTM_F_NOTIFY)
                rt->rt_flags |= RTCF_NOTIFY;

        err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
                           RTM_NEWROUTE, 0, 0);
        if (err <= 0)
                goto errout_free;

        err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
errout:
        return err;

errout_free:
        kfree_skb(skb);
        goto errout;
}

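/*
 * Dump the routing cache to netlink: walk every hash chain under
 * rcu_read_lock_bh() and emit one RTM_NEWROUTE message per entry,
 * resuming from the bucket/index saved in cb->args[] on each call.
 */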
int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct rtable *rt;
        int h, s_h;
        int idx, s_idx;
        struct net *net;

        net = sock_net(skb->sk);

        s_h = cb->args[0];
        if (s_h < 0)
                s_h = 0;
        s_idx = idx = cb->args[1];
        for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
                if (!rt_hash_table[h].chain)
                        continue;
                rcu_read_lock_bh();
                for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
                     rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
                        if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
                                continue;
                        if (rt_is_expired(rt))
                                continue;
                        skb_dst_set_noref(skb, &rt->dst);
                        if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
                                         cb->nlh->nlmsg_seq, RTM_NEWROUTE,
                                         1, NLM_F_MULTI) <= 0) {
                                skb_dst_drop(skb);
                                rcu_read_unlock_bh();
                                goto done;
                        }
                        skb_dst_drop(skb);
                }
                rcu_read_unlock_bh();
        }

done:
        cb->args[0] = h;
        cb->args[1] = idx;
        return skb->len;
}

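/* A device's IP multicast configuration changed; cached routes may be stale. */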
void ip_rt_multicast_event(struct in_device *in_dev)
{
        rt_cache_flush(dev_net(in_dev->dev), 0);
}

#ifdef CONFIG_SYSCTL
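/*
 * Write-only handler for /proc/sys/net/ipv4/route/flush: the integer written
 * (the flush delay) is handed to rt_cache_flush() for the owning namespace,
 * e.g. "echo 0 > /proc/sys/net/ipv4/route/flush".
 */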
static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
                                     void __user *buffer,
                                     size_t *lenp, loff_t *ppos)
{
        if (write) {
                int flush_delay;
                ctl_table ctl;
                struct net *net;

                memcpy(&ctl, __ctl, sizeof(ctl));
                ctl.data = &flush_delay;
                proc_dointvec(&ctl, write, buffer, lenp, ppos);

                net = (struct net *)__ctl->extra1;
                rt_cache_flush(net, flush_delay);
                return 0;
        }

        return -EINVAL;
}

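/* Route cache tunables exported under /proc/sys/net/ipv4/route/ (see ipv4_skeleton below). */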
static ctl_table ipv4_route_table[] = {
        {
                .procname       = "gc_thresh",
                .data           = &ipv4_dst_ops.gc_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "max_size",
                .data           = &ip_rt_max_size,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                /* Deprecated. Use gc_min_interval_ms */
                .procname       = "gc_min_interval",
                .data           = &ip_rt_gc_min_interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "gc_min_interval_ms",
                .data           = &ip_rt_gc_min_interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_ms_jiffies,
        },
        {
                .procname       = "gc_timeout",
                .data           = &ip_rt_gc_timeout,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "gc_interval",
                .data           = &ip_rt_gc_interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "redirect_load",
                .data           = &ip_rt_redirect_load,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "redirect_number",
                .data           = &ip_rt_redirect_number,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "redirect_silence",
                .data           = &ip_rt_redirect_silence,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "error_cost",
                .data           = &ip_rt_error_cost,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "error_burst",
                .data           = &ip_rt_error_burst,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "gc_elasticity",
                .data           = &ip_rt_gc_elasticity,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "mtu_expires",
                .data           = &ip_rt_mtu_expires,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "min_pmtu",
                .data           = &ip_rt_min_pmtu,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "min_adv_mss",
                .data           = &ip_rt_min_advmss,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        { }
};

static struct ctl_table empty[1];

static struct ctl_table ipv4_skeleton[] =
{
        { .procname = "route",
          .mode = 0555, .child = ipv4_route_table},
        { .procname = "neigh",
          .mode = 0555, .child = empty},
        { }
};

static __net_initdata struct ctl_path ipv4_path[] = {
        { .procname = "net", },
        { .procname = "ipv4", },
        { },
};

static struct ctl_table ipv4_route_flush_table[] = {
        {
                .procname       = "flush",
                .maxlen         = sizeof(int),
                .mode           = 0200,
                .proc_handler   = ipv4_sysctl_rtcache_flush,
        },
        { },
};

static __net_initdata struct ctl_path ipv4_route_path[] = {
        { .procname = "net", },
        { .procname = "ipv4", },
        { .procname = "route", },
        { },
};

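/*
 * Per-namespace registration of the "flush" sysctl.  init_net uses the
 * static table directly; other namespaces get their own copy so that
 * extra1 can point at the right struct net.
 */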
static __net_init int sysctl_route_net_init(struct net *net)
{
        struct ctl_table *tbl;

        tbl = ipv4_route_flush_table;
        if (!net_eq(net, &init_net)) {
                tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
                if (tbl == NULL)
                        goto err_dup;
        }
        tbl[0].extra1 = net;

        net->ipv4.route_hdr =
                register_net_sysctl_table(net, ipv4_route_path, tbl);
        if (net->ipv4.route_hdr == NULL)
                goto err_reg;
        return 0;

err_reg:
        if (tbl != ipv4_route_flush_table)
                kfree(tbl);
err_dup:
        return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
        struct ctl_table *tbl;

        tbl = net->ipv4.route_hdr->ctl_table_arg;
        unregister_net_sysctl_table(net->ipv4.route_hdr);
        BUG_ON(tbl == ipv4_route_flush_table);
        kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
        .init = sysctl_route_net_init,
        .exit = sysctl_route_net_exit,
};
#endif

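/*
 * Seed the per-namespace generation ids with random values.  rt_genid is
 * bumped whenever the routing cache has to be invalidated; dev_addr_genid
 * is its counterpart for device address changes.
 */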
static __net_init int rt_genid_init(struct net *net)
{
        get_random_bytes(&net->ipv4.rt_genid,
                         sizeof(net->ipv4.rt_genid));
        get_random_bytes(&net->ipv4.dev_addr_genid,
                         sizeof(net->ipv4.dev_addr_genid));
        return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
        .init = rt_genid_init,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */

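/*
 * "rhash_entries=" kernel boot parameter: overrides the number of entries
 * in the route cache hash table allocated in ip_rt_init().
 */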
static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
{
        if (!str)
                return 0;
        rhash_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("rhash_entries=", set_rhash_entries);

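/*
 * Boot-time initialisation of the IPv4 routing layer: allocate the dst slab
 * caches and the route cache hash table, derive the GC thresholds from its
 * size, bring up devinet and the FIB, and register the proc, netlink and
 * sysctl interfaces.
 */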
int __init ip_rt_init(void)
{
        int rc = 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
        ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
        if (!ip_rt_acct)
                panic("IP: failed to allocate ip_rt_acct\n");
#endif

        ipv4_dst_ops.kmem_cachep =
                kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
                                  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

        if (dst_entries_init(&ipv4_dst_ops) < 0)
                panic("IP: failed to allocate ipv4_dst_ops counter\n");

        if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
                panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

        rt_hash_table = (struct rt_hash_bucket *)
                alloc_large_system_hash("IP route cache",
                                        sizeof(struct rt_hash_bucket),
                                        rhash_entries,
                                        (totalram_pages >= 128 * 1024) ?
                                        15 : 17,
                                        0,
                                        &rt_hash_log,
                                        &rt_hash_mask,
                                        rhash_entries ? 0 : 512 * 1024);
        memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
        rt_hash_lock_init();

        ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
        ip_rt_max_size = (rt_hash_mask + 1) * 16;

        devinet_init();
        ip_fib_init();

        if (ip_rt_proc_init())
                printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
        xfrm_init();
        xfrm4_init(ip_rt_max_size);
#endif
        rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);

#ifdef CONFIG_SYSCTL
        register_pernet_subsys(&sysctl_route_ops);
#endif
        register_pernet_subsys(&rt_genid_ops);
        return rc;
}

#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
        register_sysctl_paths(ipv4_path, ipv4_skeleton);
}
#endif