/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#include <linux/sysctl.h>

#define RT_FL_TOS(oldflp) \
	((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;
static int rt_chain_length_max __read_mostly	= 20;

static struct delayed_work expires_work;
static unsigned long expires_ljiffies;
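
/*
 * Note on the values above: these are only the boot-time defaults.  Most
 * of them are exported as runtime sysctls under /proc/sys/net/ipv4/route/
 * (the variable names map onto the file names, e.g. ip_rt_gc_timeout is
 * "gc_timeout"), so, for instance (illustrative):
 *
 *	echo 300 > /proc/sys/net/ipv4/route/gc_timeout
 *
 * may change ip_rt_gc_timeout at any time after boot.
 */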
/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how);

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.local_out =		__ip_local_out,
};

#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(FILLER),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};

/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    lock held.
 */
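
/*
 * Illustrative reader-side pattern under this scheme (a simplified
 * sketch, not a function from this file; see ip_route_input_common()
 * below for the real thing):
 *
 *	rcu_read_lock_bh();
 *	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
 *	     rth = rcu_dereference(rth->dst.rt_next))
 *		if (keys_match(rth))		// hypothetical predicate
 *			dst_use(&rth->dst, jiffies);
 *	rcu_read_unlock_bh();
 *
 * Writers take rt_hash_lock_addr(hash) instead, unlink entries with
 * rcu_assign_pointer() and defer the actual free via rt_free().
 */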
204 struct rt_hash_bucket {
205 struct rtable __rcu *chain;
208 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
209 defined(CONFIG_PROVE_LOCKING)
211 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
212 * The size of this table is a power of two and depends on the number of CPUS.
213 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
215 #ifdef CONFIG_LOCKDEP
216 # define RT_HASH_LOCK_SZ 256
219 # define RT_HASH_LOCK_SZ 4096
221 # define RT_HASH_LOCK_SZ 2048
223 # define RT_HASH_LOCK_SZ 1024
225 # define RT_HASH_LOCK_SZ 512
227 # define RT_HASH_LOCK_SZ 256
231 static spinlock_t *rt_hash_locks;
232 # define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
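
/*
 * Worked example of the lock striping above: with RT_HASH_LOCK_SZ == 256,
 * hash buckets 5, 261, 517, ... all share rt_hash_locks[5], so writer
 * contention on any one lock is bounded by (number of buckets) /
 * RT_HASH_LOCK_SZ while up to RT_HASH_LOCK_SZ chains can still be
 * modified in parallel.
 */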

static __init void rt_hash_lock_init(void)
{
	int i;

	rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
				GFP_KERNEL);
	if (!rt_hash_locks)
		panic("IP: failed to allocate rt_hash_locks\n");

	for (i = 0; i < RT_HASH_LOCK_SZ; i++)
		spin_lock_init(&rt_hash_locks[i]);
}
#else
# define rt_hash_lock_addr(slot) NULL

static inline void rt_hash_lock_init(void)
{
}
#endif

static struct rt_hash_bucket	*rt_hash_table __read_mostly;
static unsigned			rt_hash_mask __read_mostly;
static unsigned int		rt_hash_log  __read_mostly;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)

static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
				   int genid)
{
	return jhash_3words((__force u32)daddr, (__force u32)saddr,
			    idx, genid)
		& rt_hash_mask;
}

static inline int rt_genid(struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}

#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
	struct seq_net_private p;
	int bucket;
	int genid;
};

static struct rtable *rt_cache_get_first(struct seq_file *seq)
{
	struct rt_cache_iter_state *st = seq->private;
	struct rtable *r = NULL;

	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
		if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
			continue;
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
		while (r) {
			if (dev_net(r->dst.dev) == seq_file_net(seq) &&
			    r->rt_genid == st->genid)
				return r;
			r = rcu_dereference_bh(r->dst.rt_next);
		}
		rcu_read_unlock_bh();
	}
	return r;
}

static struct rtable *__rt_cache_get_next(struct seq_file *seq,
					  struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;

	r = rcu_dereference_bh(r->dst.rt_next);
	while (!r) {
		rcu_read_unlock_bh();
		do {
			if (--st->bucket < 0)
				return NULL;
		} while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
	}
	return r;
}

static struct rtable *rt_cache_get_next(struct seq_file *seq,
					struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;
	while ((r = __rt_cache_get_next(seq, r)) != NULL) {
		if (dev_net(r->dst.dev) != seq_file_net(seq))
			continue;
		if (r->rt_genid == st->genid)
			break;
	}
	return r;
}

static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
{
	struct rtable *r = rt_cache_get_first(seq);

	if (r)
		while (pos && (r = rt_cache_get_next(seq, r)))
			--pos;
	return pos ? NULL : r;
}

static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct rt_cache_iter_state *st = seq->private;
	if (*pos)
		return rt_cache_get_idx(seq, *pos - 1);
	st->genid = rt_genid(seq_file_net(seq));
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rtable *r;

	if (v == SEQ_START_TOKEN)
		r = rt_cache_get_first(seq);
	else
		r = rt_cache_get_next(seq, v);
	++*pos;
	return r;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		rcu_read_unlock_bh();
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	else {
		struct rtable *r = v;
		int len;

		seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
			   "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
			   r->dst.dev ? r->dst.dev->name : "*",
			   (__force u32)r->rt_dst,
			   (__force u32)r->rt_gateway,
			   r->rt_flags, atomic_read(&r->dst.__refcnt),
			   r->dst.__use, 0, (__force u32)r->rt_src,
			   dst_metric_advmss(&r->dst) + 40,
			   dst_metric(&r->dst, RTAX_WINDOW),
			   (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
				 dst_metric(&r->dst, RTAX_RTTVAR)),
			   r->fl.fl4_tos,
			   r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
			   r->dst.hh ? (r->dst.hh->hh_output ==
					dev_queue_xmit) : 0,
			   r->rt_spec_dst, &len);

		seq_printf(seq, "%*s\n", 127 - len, "");
	}
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &rt_cache_seq_ops,
			sizeof(struct rt_cache_iter_state));
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   st->in_hit,
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,
		   st->out_hit,
		   st->out_slow_tot,
		   st->out_slow_mc,
		   st->gc_total,
		   st->gc_ignored,
		   st->gc_goal_miss,
		   st->gc_dst_overflow,
		   st->in_hlist_search,
		   st->out_hlist_search
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};

static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_NET_CLS_ROUTE
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}

static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rt_acct_proc_show, NULL);
}

static const struct file_operations rt_acct_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_acct_proc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
			&rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_NET_CLS_ROUTE
	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_NET_CLS_ROUTE
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_NET_CLS_ROUTE
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline void rt_free(struct rtable *rt)
{
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline void rt_drop(struct rtable *rt)
{
	ip_rt_put(rt);
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline int rt_fast_clean(struct rtable *rth)
{
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in hash table with more useful entries */
	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
		rt_is_input_route(rth) && rth->dst.rt_next;
}

static inline int rt_valuable(struct rtable *rth)
{
	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
		rth->dst.expires;
}

static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
	unsigned long age;
	int ret = 0;

	if (atomic_read(&rth->dst.__refcnt))
		goto out;

	ret = 1;
	if (rth->dst.expires &&
	    time_after_eq(jiffies, rth->dst.expires))
		goto out;

	age = jiffies - rth->dst.lastuse;
	ret = 0;
	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
	    (age <= tmo2 && rt_valuable(rth)))
		goto out;
	ret = 1;
out:	return ret;
}

/* Bits of score are:
 * 31: already scheduled.
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
	u32 score = jiffies - rt->dst.lastuse;

	score = ~score & ~(3<<30);

	if (rt_valuable(rt))
		score |= (1<<31);

	if (rt_is_output_route(rt) ||
	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
		score |= (1<<30);

	return score;
}
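
/*
 * Worked example of the scoring above: an unreferenced input-path
 * broadcast entry that has been idle for a long time gets a small
 * usage counter and neither the 1<<31 nor the 1<<30 bit, so it scores
 * lowest and becomes the preferred eviction candidate in
 * rt_intern_hash(); a fresh, "valuable" output route keeps both high
 * bits set and is evicted last.
 */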

static inline bool rt_caching(const struct net *net)
{
	return net->ipv4.current_rt_cache_rebuild_count <=
		net->ipv4.sysctl_rt_cache_rebuild_count;
}

static inline bool compare_hash_inputs(const struct flowi *fl1,
					const struct flowi *fl2)
{
	return ((((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
		((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
		(fl1->iif ^ fl2->iif)) == 0);
}

static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
	return (((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
		((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
		(fl1->mark ^ fl2->mark) |
		(*(u16 *)&fl1->fl4_tos ^ *(u16 *)&fl2->fl4_tos) |
		(fl1->oif ^ fl2->oif) |
		(fl1->iif ^ fl2->iif)) == 0;
}
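
/*
 * The XOR/OR folding in the two helpers above is a branchless
 * multi-field compare: each XOR is zero only when the corresponding
 * field matches, and OR-ing the results makes the whole expression
 * zero iff every field matched.  For two flows differing only in oif,
 * (fl1->oif ^ fl2->oif) is nonzero and forces a nonzero result without
 * any conditional branches in this hot path.
 */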

static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
	return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
}

static inline int rt_is_expired(struct rtable *rth)
{
	return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}

/*
 * Perform a full scan of hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to reschedule if necessary.
 */
static void rt_do_flush(int process_context)
{
	unsigned int i;
	struct rtable *rth, *next;
	struct rtable * tail;

	for (i = 0; i <= rt_hash_mask; i++) {
		if (process_context && need_resched())
			cond_resched();
		rth = rcu_dereference_raw(rt_hash_table[i].chain);
		if (!rth)
			continue;

		spin_lock_bh(rt_hash_lock_addr(i));
#ifdef CONFIG_NET_NS
		{
		struct rtable __rcu **prev;
		struct rtable *p;

		rth = rcu_dereference_protected(rt_hash_table[i].chain,
			lockdep_is_held(rt_hash_lock_addr(i)));

		/* defer releasing the head of the list after spin_unlock */
		for (tail = rth; tail;
		     tail = rcu_dereference_protected(tail->dst.rt_next,
				lockdep_is_held(rt_hash_lock_addr(i))))
			if (!rt_is_expired(tail))
				break;
		if (rth != tail)
			rt_hash_table[i].chain = tail;

		/* call rt_free on entries after the tail requiring flush */
		prev = &rt_hash_table[i].chain;
		for (p = rcu_dereference_protected(*prev,
				lockdep_is_held(rt_hash_lock_addr(i)));
		     p != NULL;
		     p = next) {
			next = rcu_dereference_protected(p->dst.rt_next,
				lockdep_is_held(rt_hash_lock_addr(i)));
			if (!rt_is_expired(p)) {
				prev = &p->dst.rt_next;
			} else {
				*prev = next;
				rt_free(p);
			}
		}
		}
#else
		rth = rcu_dereference_protected(rt_hash_table[i].chain,
			lockdep_is_held(rt_hash_lock_addr(i)));
		rcu_assign_pointer(rt_hash_table[i].chain, NULL);
		tail = NULL;
#endif
		spin_unlock_bh(rt_hash_lock_addr(i));

		for (; rth != tail; rth = next) {
			next = rcu_dereference_protected(rth->dst.rt_next, 1);
			rt_free(rth);
		}
	}
}

/*
 * While freeing expired entries, we compute average chain length
 * and standard deviation, using fixed-point arithmetic.
 * This gives an estimate for rt_chain_length_max:
 * rt_chain_length_max = max(elasticity, AVG + 4*SD)
 * We use 3 bits for the fractional part, and 29 (or 61) bits for the
 * magnitude.
 */
#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)
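
/*
 * Worked example with FRACT_BITS == 3 (so ONE == 8): a fixed-point
 * average of 20 means 20/8 = 2.5 entries per chain, and a fixed-point
 * sd of 4 means 0.5; then (avg + 4*sd) >> FRACT_BITS =
 * (20 + 16) >> 3 = 4, and with the default elasticity of 8 we get
 * rt_chain_length_max = max(8, 4) = 8.
 */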

/*
 * Given a hash chain and an item in this hash chain,
 * find if a previous entry has the same hash_inputs
 * (but differs on tos, mark or oif)
 * Returns 0 if an alias is found.
 * Returns ONE if rth has no alias before itself.
 */
static int has_noalias(const struct rtable *head, const struct rtable *rth)
{
	const struct rtable *aux = head;

	while (aux != rth) {
		if (compare_hash_inputs(&aux->fl, &rth->fl))
			return 0;
		aux = rcu_dereference_protected(aux->dst.rt_next, 1);
	}
	return ONE;
}

static void rt_check_expire(void)
{
	static unsigned int rover;
	unsigned int i = rover, goal;
	struct rtable *rth;
	struct rtable __rcu **rthp;
	unsigned long samples = 0;
	unsigned long sum = 0, sum2 = 0;
	unsigned long delta;
	u64 mult;

	delta = jiffies - expires_ljiffies;
	expires_ljiffies = jiffies;
	mult = ((u64)delta) << rt_hash_log;
	if (ip_rt_gc_timeout > 1)
		do_div(mult, ip_rt_gc_timeout);
	goal = (unsigned int)mult;
	if (goal > rt_hash_mask)
		goal = rt_hash_mask + 1;
	for (; goal > 0; goal--) {
		unsigned long tmo = ip_rt_gc_timeout;
		unsigned long length;

		i = (i + 1) & rt_hash_mask;
		rthp = &rt_hash_table[i].chain;

		if (need_resched())
			cond_resched();

		samples++;

		if (rcu_dereference_raw(*rthp) == NULL)
			continue;
		length = 0;
		spin_lock_bh(rt_hash_lock_addr(i));
		while ((rth = rcu_dereference_protected(*rthp,
					lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
			prefetch(rth->dst.rt_next);
			if (rt_is_expired(rth)) {
				*rthp = rth->dst.rt_next;
				rt_free(rth);
				continue;
			}
			if (rth->dst.expires) {
				/* Entry is expired even if it is in use */
				if (time_before_eq(jiffies, rth->dst.expires)) {
nofree:
					tmo >>= 1;
					rthp = &rth->dst.rt_next;
					/*
					 * We only count entries on
					 * a chain with equal hash inputs once
					 * so that entries for different QOS
					 * levels, and other non-hash input
					 * attributes don't unfairly skew
					 * the length computation
					 */
					length += has_noalias(rt_hash_table[i].chain, rth);
					continue;
				}
			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
				goto nofree;

			/* Cleanup aged off entries. */
			*rthp = rth->dst.rt_next;
			rt_free(rth);
		}
		spin_unlock_bh(rt_hash_lock_addr(i));
		sum += length;
		sum2 += length*length;
	}
	if (samples) {
		unsigned long avg = sum / samples;
		unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
		rt_chain_length_max = max_t(unsigned long,
					    ip_rt_gc_elasticity,
					    (avg + 4*sd) >> FRACT_BITS);
	}
	rover = i;
}

/*
 * rt_worker_func() is run in process context.
 * we call rt_check_expire() to scan part of the hash table
 */
static void rt_worker_func(struct work_struct *work)
{
	rt_check_expire();
	schedule_delayed_work(&expires_work, ip_rt_gc_interval);
}

/*
 * Perturbation of rt_genid by a small quantity [1..256]
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without repeating a recent rt_genid.
 * The Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
	unsigned char shuffle;

	get_random_bytes(&shuffle, sizeof(shuffle));
	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}

/*
 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
void rt_cache_flush(struct net *net, int delay)
{
	rt_cache_invalidate(net);
	if (delay >= 0)
		rt_do_flush(!in_softirq());
}

/* Flush previous cache invalidated entries from the cache */
void rt_cache_flush_batch(void)
{
	rt_do_flush(!in_softirq());
}

static void rt_emergency_hash_rebuild(struct net *net)
{
	if (net_ratelimit())
		printk(KERN_WARNING "Route hash chain too long!\n");
	rt_cache_invalidate(net);
}

/*
   Short description of GC goals.

   We want to build an algorithm which will keep the routing cache
   at some equilibrium point, where the number of aged-off entries
   is kept approximately equal to the number of newly generated ones.

   The current expiration strength is the variable "expire".
   We try to adjust it dynamically, so that if networking
   is idle expire is large enough to keep enough warm entries,
   and when load increases it reduces to limit cache size.
 */
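
/*
 * Example of the feedback loop implemented below: every pass through
 * rt_garbage_collect() that misses its goal halves "expire" (more
 * aggressive aging), while every pass that reaches work_done grows it
 * again by ip_rt_gc_min_interval (capped at ip_rt_gc_timeout), so
 * under steady load "expire" settles where expired and newly created
 * entries roughly balance.
 */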

static int rt_garbage_collect(struct dst_ops *ops)
{
	static unsigned long expire = RT_GC_TIMEOUT;
	static unsigned long last_gc;
	static int rover;
	static int equilibrium;
	struct rtable *rth;
	struct rtable __rcu **rthp;
	unsigned long now = jiffies;
	int goal;
	int entries = dst_entries_get_fast(&ipv4_dst_ops);

	/*
	 * Garbage collection is pretty expensive,
	 * do not make it too frequently.
	 */

	RT_CACHE_STAT_INC(gc_total);

	if (now - last_gc < ip_rt_gc_min_interval &&
	    entries < ip_rt_max_size) {
		RT_CACHE_STAT_INC(gc_ignored);
		goto out;
	}

	entries = dst_entries_get_slow(&ipv4_dst_ops);
	/* Calculate number of entries, which we want to expire now. */
	goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
	if (goal <= 0) {
		if (equilibrium < ipv4_dst_ops.gc_thresh)
			equilibrium = ipv4_dst_ops.gc_thresh;
		goal = entries - equilibrium;
		if (goal > 0) {
			equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
			goal = entries - equilibrium;
		}
	} else {
		/* We are in a dangerous area. Try to reduce the cache really
		 * aggressively.
		 */
		goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
		equilibrium = entries - goal;
	}

	if (now - last_gc >= ip_rt_gc_min_interval)
		last_gc = now;

	if (goal <= 0) {
		equilibrium += goal;
		goto work_done;
	}

	do {
		int i, k;

		for (i = rt_hash_mask, k = rover; i >= 0; i--) {
			unsigned long tmo = expire;

			k = (k + 1) & rt_hash_mask;
			rthp = &rt_hash_table[k].chain;
			spin_lock_bh(rt_hash_lock_addr(k));
			while ((rth = rcu_dereference_protected(*rthp,
					lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
				if (!rt_is_expired(rth) &&
					!rt_may_expire(rth, tmo, expire)) {
					tmo >>= 1;
					rthp = &rth->dst.rt_next;
					continue;
				}
				*rthp = rth->dst.rt_next;
				rt_free(rth);
				goal--;
			}
			spin_unlock_bh(rt_hash_lock_addr(k));
			if (goal <= 0)
				break;
		}
		rover = k;

		if (goal <= 0)
			goto work_done;

		/* Goal is not achieved. We stop the process if:

		   - expire has been reduced to zero, otherwise expire is halved;
		   - the table is not full;
		   - we are called from interrupt;
		   - the jiffies check is just a fallback/debug loop breaker.
		     We will not spin here for a long time in any case.
		 */

		RT_CACHE_STAT_INC(gc_goal_miss);

		if (expire == 0)
			break;

		expire >>= 1;
#if RT_CACHE_DEBUG >= 2
		printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
				dst_entries_get_fast(&ipv4_dst_ops), goal, i);
#endif

		if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
			goto out;
	} while (!in_softirq() && time_before_eq(jiffies, now));

	if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
		goto out;
	if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
		goto out;
	if (net_ratelimit())
		printk(KERN_WARNING "dst cache overflow\n");
	RT_CACHE_STAT_INC(gc_dst_overflow);
	return 1;

work_done:
	expire += ip_rt_gc_min_interval;
	if (expire > ip_rt_gc_timeout ||
	    dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
	    dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
		expire = ip_rt_gc_timeout;
#if RT_CACHE_DEBUG >= 2
	printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
			dst_entries_get_fast(&ipv4_dst_ops), goal, rover);
#endif
out:	return 0;
}

/*
 * Returns number of entries in a hash chain that have different hash_inputs
 */
static int slow_chain_length(const struct rtable *head)
{
	int length = 0;
	const struct rtable *rth = head;

	while (rth) {
		length += has_noalias(head, rth);
		rth = rcu_dereference_protected(rth->dst.rt_next, 1);
	}
	return length >> FRACT_BITS;
}

static int rt_intern_hash(unsigned hash, struct rtable *rt,
			  struct rtable **rp, struct sk_buff *skb, int ifindex)
{
	struct rtable	*rth, *cand;
	struct rtable __rcu **rthp, **candp;
	unsigned long	now;
	u32		min_score;
	int		chain_length;
	int attempts = !in_softirq();

restart:
	chain_length = 0;
	min_score = ~(u32)0;
	cand = NULL;
	candp = NULL;
	now = jiffies;

	if (!rt_caching(dev_net(rt->dst.dev))) {
		/*
		 * If we're not caching, just tell the caller we
		 * were successful and don't touch the route.  The
		 * caller holds the sole reference to the cache entry, and
		 * it will be released when the caller is done with it.
		 * If we drop it here, the callers have no way to resolve routes
		 * when we're not caching.  Instead, just point *rp at rt, so
		 * the caller gets a single use out of the route.
		 * Note that we do rt_free on this new route entry, so that
		 * once its refcount hits zero, we are still able to reap it.
		 * Note: To avoid expensive rcu stuff for this uncached dst,
		 * we set DST_NOCACHE so that dst_release() can free dst without
		 * waiting a grace period.
		 */

		rt->dst.flags |= DST_NOCACHE;
		if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
			int err = arp_bind_neighbour(&rt->dst);
			if (err) {
				if (net_ratelimit())
					printk(KERN_WARNING
					    "Neighbour table failure & not caching routes.\n");
				ip_rt_put(rt);
				return err;
			}
		}

		goto skip_hashing;
	}

	rthp = &rt_hash_table[hash].chain;

	spin_lock_bh(rt_hash_lock_addr(hash));
	while ((rth = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
		if (rt_is_expired(rth)) {
			*rthp = rth->dst.rt_next;
			rt_free(rth);
			continue;
		}
		if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
			/* Put it first */
			*rthp = rth->dst.rt_next;
			/*
			 * Since lookup is lockfree, the deletion
			 * must be visible to another weakly ordered CPU before
			 * the insertion at the start of the hash chain.
			 */
			rcu_assign_pointer(rth->dst.rt_next,
					   rt_hash_table[hash].chain);
			/*
			 * Since lookup is lockfree, the update writes
			 * must be ordered for consistency on SMP.
			 */
			rcu_assign_pointer(rt_hash_table[hash].chain, rth);

			dst_use(&rth->dst, now);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			rt_drop(rt);
			if (rp)
				*rp = rth;
			else
				skb_dst_set(skb, &rth->dst);
			return 0;
		}

		if (!atomic_read(&rth->dst.__refcnt)) {
			u32 score = rt_score(rth);

			if (score <= min_score) {
				cand = rth;
				candp = rthp;
				min_score = score;
			}
		}

		chain_length++;

		rthp = &rth->dst.rt_next;
	}

	if (cand) {
		/* ip_rt_gc_elasticity used to be average length of chain
		 * length, when exceeded gc becomes really aggressive.
		 *
		 * The second limit is less certain. At the moment it allows
		 * only 2 entries per bucket. We will see.
		 */
		if (chain_length > ip_rt_gc_elasticity) {
			*candp = cand->dst.rt_next;
			rt_free(cand);
		}
	} else {
		if (chain_length > rt_chain_length_max &&
		    slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
			struct net *net = dev_net(rt->dst.dev);
			int num = ++net->ipv4.current_rt_cache_rebuild_count;
			if (!rt_caching(net)) {
				printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
					rt->dst.dev->name, num);
			}
			rt_emergency_hash_rebuild(net);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
					ifindex, rt_genid(net));
			goto restart;
		}
	}

	/* Try to bind route to arp only if it is output
	   route or unicast forwarding path.
	 */
	if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
		int err = arp_bind_neighbour(&rt->dst);
		if (err) {
			spin_unlock_bh(rt_hash_lock_addr(hash));

			if (err != -ENOBUFS) {
				rt_drop(rt);
				return err;
			}

			/* Neighbour tables are full and nothing
			   can be released. Try to shrink route cache,
			   it is most likely it holds some neighbour records.
			 */
			if (attempts-- > 0) {
				int saved_elasticity = ip_rt_gc_elasticity;
				int saved_int = ip_rt_gc_min_interval;
				ip_rt_gc_elasticity	= 1;
				ip_rt_gc_min_interval	= 0;
				rt_garbage_collect(&ipv4_dst_ops);
				ip_rt_gc_min_interval	= saved_int;
				ip_rt_gc_elasticity	= saved_elasticity;
				goto restart;
			}

			if (net_ratelimit())
				printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
			rt_drop(rt);
			return -ENOBUFS;
		}
	}

	rt->dst.rt_next = rt_hash_table[hash].chain;

#if RT_CACHE_DEBUG >= 2
	if (rt->dst.rt_next) {
		struct rtable *trt;
		printk(KERN_DEBUG "rt_cache @%02x: %pI4",
		       hash, &rt->rt_dst);
		for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
			printk(" . %pI4", &trt->rt_dst);
		printk("\n");
	}
#endif
	/*
	 * Since lookup is lockfree, we must make sure
	 * previous writes to rt are committed to memory
	 * before making rt visible to other CPUS.
	 */
	rcu_assign_pointer(rt_hash_table[hash].chain, rt);

	spin_unlock_bh(rt_hash_lock_addr(hash));

skip_hashing:
	if (rp)
		*rp = rt;
	else
		skb_dst_set(skb, &rt->dst);
	return 0;
}

void rt_bind_peer(struct rtable *rt, int create)
{
	struct inet_peer *peer;

	peer = inet_getpeer_v4(rt->rt_dst, create);

	if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
		inet_putpeer(peer);
}

/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we still can generate some output.
 * Random ID selection looks a bit dangerous because we have no chance of
 * selecting an ID that is unique over a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static DEFINE_SPINLOCK(ip_fb_id_lock);
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
	iph->id = htons(salt & 0xFFFF);
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}

void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt) {
		if (rt->peer == NULL)
			rt_bind_peer(rt, 1);

		/* If peer is attached to destination, it is never detached,
		   so we need not grab a lock to dereference it.
		 */
		if (rt->peer) {
			iph->id = htons(inet_getid(rt->peer, more));
			return;
		}
	} else
		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
		       __builtin_return_address(0));

	ip_select_fb_ident(iph);
}
EXPORT_SYMBOL(__ip_select_ident);

static void rt_del(unsigned hash, struct rtable *rt)
{
	struct rtable __rcu **rthp;
	struct rtable *aux;

	rthp = &rt_hash_table[hash].chain;
	spin_lock_bh(rt_hash_lock_addr(hash));
	ip_rt_put(rt);
	while ((aux = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
		if (aux == rt || rt_is_expired(aux)) {
			*rthp = aux->dst.rt_next;
			rt_free(aux);
			continue;
		}
		rthp = &aux->dst.rt_next;
	}
	spin_unlock_bh(rt_hash_lock_addr(hash));
}

/* called in rcu_read_lock() section */
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
		    __be32 saddr, struct net_device *dev)
{
	int i, k;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct rtable *rth;
	struct rtable __rcu **rthp;
	__be32  skeys[2] = { saddr, 0 };
	int  ikeys[2] = { dev->ifindex, 0 };
	struct netevent_redirect netevent;
	struct net *net;

	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!rt_caching(net))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	for (i = 0; i < 2; i++) {
		for (k = 0; k < 2; k++) {
			unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
						rt_genid(net));

			rthp = &rt_hash_table[hash].chain;

			while ((rth = rcu_dereference(*rthp)) != NULL) {
				struct rtable *rt;

				if (rth->fl.fl4_dst != daddr ||
				    rth->fl.fl4_src != skeys[i] ||
				    rth->fl.oif != ikeys[k] ||
				    rt_is_input_route(rth) ||
				    rt_is_expired(rth) ||
				    !net_eq(dev_net(rth->dst.dev), net)) {
					rthp = &rth->dst.rt_next;
					continue;
				}

				if (rth->rt_dst != daddr ||
				    rth->rt_src != saddr ||
				    rth->dst.error ||
				    rth->rt_gateway != old_gw ||
				    rth->dst.dev != dev)
					break;

				dst_hold(&rth->dst);

				rt = dst_alloc(&ipv4_dst_ops);
				if (rt == NULL) {
					ip_rt_put(rth);
					return;
				}

				/* Copy all the information. */
				*rt = *rth;
				rt->dst.__use		= 1;
				atomic_set(&rt->dst.__refcnt, 1);
				rt->dst.child		= NULL;
				if (rt->dst.dev)
					dev_hold(rt->dst.dev);
				rt->dst.obsolete	= -1;
				rt->dst.lastuse		= jiffies;
				rt->dst.path		= &rt->dst;
				rt->dst.neighbour	= NULL;
				rt->dst.hh		= NULL;
#ifdef CONFIG_XFRM
				rt->dst.xfrm		= NULL;
#endif
				rt->rt_genid		= rt_genid(net);
				rt->rt_flags		|= RTCF_REDIRECTED;

				/* Gateway is different ... */
				rt->rt_gateway		= new_gw;

				/* Redirect received -> path was valid */
				dst_confirm(&rth->dst);

				if (rt->peer)
					atomic_inc(&rt->peer->refcnt);

				if (arp_bind_neighbour(&rt->dst) ||
				    !(rt->dst.neighbour->nud_state &
					    NUD_VALID)) {
					if (rt->dst.neighbour)
						neigh_event_send(rt->dst.neighbour, NULL);
					ip_rt_put(rth);
					rt_drop(rt);
					goto do_next;
				}

				netevent.old = &rth->dst;
				netevent.new = &rt->dst;
				call_netevent_notifiers(NETEVENT_REDIRECT,
							&netevent);

				rt_del(hash, rth);
				if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif))
					ip_rt_put(rt);
				goto do_next;
			}
		do_next:
			;
		}
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
			"  Advised path = %pI4 -> %pI4\n",
		       &old_gw, dev->name, &new_gw,
		       &saddr, &daddr);
#endif
	;
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   (rt->dst.expires &&
			    time_after_eq(jiffies, rt->dst.expires))) {
			unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
						rt->fl.oif,
						rt_genid(dev_net(dst->dev)));
#if RT_CACHE_DEBUG >= 1
			printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
				&rt->rt_dst, rt->fl.fl4_tos);
#endif
			rt_del(hash, rt);
			ret = NULL;
		}
	}
	return ret;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
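
/*
 * Worked example of the backoff (assuming HZ == 1000 and the defaults
 * above): ip_rt_redirect_load == HZ/50 == 20ms, so the k-th redirect
 * is sent no earlier than rate_last + (20ms << k); after
 * ip_rt_redirect_number == 9 redirects we go silent, and resume only
 * once ip_rt_redirect_silence == (20ms << 10), roughly 20 seconds,
 * has passed without seeing packets that need redirecting.
 */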

void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	int log_martians;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	rcu_read_unlock();

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, rt->dst.rate_last + ip_rt_redirect_silence))
		rt->dst.rate_tokens = 0;

	/* Too many ignored redirects; do not send anything,
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (rt->dst.rate_tokens >= ip_rt_redirect_number) {
		rt->dst.rate_last = jiffies;
		return;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (rt->dst.rate_tokens == 0 ||
	    time_after(jiffies,
		       (rt->dst.rate_last +
			(ip_rt_redirect_load << rt->dst.rate_tokens)))) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		rt->dst.rate_last = jiffies;
		++rt->dst.rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    rt->dst.rate_tokens == ip_rt_redirect_number &&
		    net_ratelimit())
			printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
				&rt->rt_src, rt->rt_iif,
				&rt->rt_dst, &rt->rt_gateway);
#endif
	}
}

static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	unsigned long now;
	int code;

	switch (rt->dst.error) {
		case EINVAL:
		default:
			goto out;
		case EHOSTUNREACH:
			code = ICMP_HOST_UNREACH;
			break;
		case ENETUNREACH:
			code = ICMP_NET_UNREACH;
			IP_INC_STATS_BH(dev_net(rt->dst.dev),
					IPSTATS_MIB_INNOROUTES);
			break;
		case EACCES:
			code = ICMP_PKT_FILTERED;
			break;
	}

	now = jiffies;
	rt->dst.rate_tokens += now - rt->dst.rate_last;
	if (rt->dst.rate_tokens > ip_rt_error_burst)
		rt->dst.rate_tokens = ip_rt_error_burst;
	rt->dst.rate_last = now;
	if (rt->dst.rate_tokens >= ip_rt_error_cost) {
		rt->dst.rate_tokens -= ip_rt_error_cost;
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
	}

out:	kfree_skb(skb);
	return 0;
}

/*
 *	The last two values are not from the RFC but
 *	are needed for AMPRnet AX.25 paths.
 */

static const unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static inline unsigned short guess_mtu(unsigned short old_mtu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];
	return 68;
}
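
/*
 * Example: guess_mtu(1500) returns 1492, the first plateau strictly
 * below the old MTU, and guess_mtu(576) returns 296; anything at or
 * below the smallest plateau falls through to the 68-byte IPv4
 * minimum.
 */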

unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
				 unsigned short new_mtu,
				 struct net_device *dev)
{
	int i, k;
	unsigned short old_mtu = ntohs(iph->tot_len);
	struct rtable *rth;
	int  ikeys[2] = { dev->ifindex, 0 };
	__be32  skeys[2] = { iph->saddr, 0, };
	__be32  daddr = iph->daddr;
	unsigned short est_mtu = 0;

	for (k = 0; k < 2; k++) {
		for (i = 0; i < 2; i++) {
			unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
						rt_genid(net));

			rcu_read_lock();
			for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
			     rth = rcu_dereference(rth->dst.rt_next)) {
				unsigned short mtu = new_mtu;

				if (rth->fl.fl4_dst != daddr ||
				    rth->fl.fl4_src != skeys[i] ||
				    rth->rt_dst != daddr ||
				    rth->rt_src != iph->saddr ||
				    rth->fl.oif != ikeys[k] ||
				    rt_is_input_route(rth) ||
				    dst_metric_locked(&rth->dst, RTAX_MTU) ||
				    !net_eq(dev_net(rth->dst.dev), net) ||
				    rt_is_expired(rth))
					continue;

				if (new_mtu < 68 || new_mtu >= old_mtu) {

					/* BSD 4.2 compatibility hack :-( */
					if (mtu == 0 &&
					    old_mtu >= dst_mtu(&rth->dst) &&
					    old_mtu >= 68 + (iph->ihl << 2))
						old_mtu -= iph->ihl << 2;

					mtu = guess_mtu(old_mtu);
				}
				if (mtu <= dst_mtu(&rth->dst)) {
					if (mtu < dst_mtu(&rth->dst)) {
						dst_confirm(&rth->dst);
						if (mtu < ip_rt_min_pmtu) {
							u32 lock = dst_metric(&rth->dst,
									      RTAX_LOCK);
							mtu = ip_rt_min_pmtu;
							lock |= (1 << RTAX_MTU);
							dst_metric_set(&rth->dst, RTAX_LOCK,
								       lock);
						}
						dst_metric_set(&rth->dst, RTAX_MTU, mtu);
						dst_set_expires(&rth->dst,
							ip_rt_mtu_expires);
					}
					est_mtu = mtu;
				}
			}
			rcu_read_unlock();
		}
	}
	return est_mtu ? : new_mtu;
}

static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	if (dst_mtu(dst) > mtu && mtu >= 68 &&
	    !(dst_metric_locked(dst, RTAX_MTU))) {
		if (mtu < ip_rt_min_pmtu) {
			u32 lock = dst_metric(dst, RTAX_LOCK);
			mtu = ip_rt_min_pmtu;
			dst_metric_set(dst, RTAX_LOCK, lock | (1 << RTAX_MTU));
		}
		dst_metric_set(dst, RTAX_MTU, mtu);
		dst_set_expires(dst, ip_rt_mtu_expires);
		call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
	}
}

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	if (rt_is_expired((struct rtable *)dst))
		return NULL;
	return dst;
}

static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer = rt->peer;

	if (peer) {
		rt->peer = NULL;
		inet_putpeer(peer);
	}
}

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct sk_buff *skb)
{
	printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
		&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	return 0;
}

/*
   We do not cache the source address of the outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so it is out of the fast path.

   BTW remember: "addr" is allowed to be unaligned
   in IP options!
 */

void ip_rt_get_source(u8 *addr, struct rtable *rt)
{
	__be32 src;
	struct fib_result res;

	if (rt_is_output_route(rt))
		src = rt->rt_src;
	else {
		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0)
			src = FIB_RES_PREFSRC(res);
		else
			src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
					RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_NET_CLS_ROUTE
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif
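
/*
 * Example of the tag layout (a sketch of the routing-realm convention):
 * tclassid packs the source realm in the upper 16 bits and the
 * destination realm in the lower 16, so set_class_tag(rt, 0x00120034)
 * would fill in dst realm 0x0034 and src realm 0x0012, each only if
 * that half is still unset.
 */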

static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (advmss == 0) {
		advmss = max_t(unsigned int, dst->dev->mtu - 40,
			       ip_rt_min_advmss);
		if (advmss > 65535 - 40)
			advmss = 65535 - 40;
	}
	return advmss;
}
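
/*
 * Worked example: on a device with a 1500-byte MTU and no RTAX_ADVMSS
 * metric set, the advertised MSS defaults to 1500 - 40 = 1460 bytes
 * (MTU less 20 bytes each of IPv4 and TCP header), with
 * ip_rt_min_advmss acting as the floor.
 */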

static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
{
	struct dst_entry *dst = &rt->dst;
	struct fib_info *fi = res->fi;

	if (fi) {
		if (FIB_RES_GW(*res) &&
		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = FIB_RES_GW(*res);
		dst_import_metrics(dst, fi->fib_metrics);
		if (fi->fib_mtu == 0) {
			dst_metric_set(dst, RTAX_MTU, dst->dev->mtu);
			if (dst_metric_locked(dst, RTAX_MTU) &&
			    rt->rt_gateway != rt->rt_dst &&
			    dst->dev->mtu > 576)
				dst_metric_set(dst, RTAX_MTU, 576);
		}
#ifdef CONFIG_NET_CLS_ROUTE
		dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
	} else
		dst_metric_set(dst, RTAX_MTU, dst->dev->mtu);

	if (dst_mtu(dst) > IP_MAX_MTU)
		dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
	if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
		dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);

#ifdef CONFIG_NET_CLS_ROUTE
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, fib_rules_tclass(res));
#endif
	set_class_tag(rt, itag);
#endif
	rt->rt_type = res->type;
}

/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
				u8 tos, struct net_device *dev, int our)
{
	unsigned int hash;
	struct rtable *rth;
	__be32 spec_dst;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	u32 itag = 0;
	int err;

	/* Primary sanity checks. */

	if (in_dev == NULL)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr))
			goto e_inval;
		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
	} else {
		err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
					  &itag, 0);
		if (err < 0)
			goto e_err;
	}
	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth)
		goto e_nobufs;

	rth->dst.output = ip_rt_bug;
	rth->dst.obsolete = -1;

	atomic_set(&rth->dst.__refcnt, 1);
	rth->dst.flags= DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->dst.flags |= DST_NOPOLICY;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
	rth->fl.mark    = skb->mark;
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
#ifdef CONFIG_NET_CLS_ROUTE
	rth->dst.tclassid = itag;
#endif
	rth->rt_iif	=
	rth->fl.iif	= dev->ifindex;
	rth->dst.dev	= init_net.loopback_dev;
	dev_hold(rth->dst.dev);
	rth->fl.oif	= 0;
	rth->rt_gateway	= daddr;
	rth->rt_spec_dst= spec_dst;
	rth->rt_genid	= rt_genid(dev_net(dev));
	rth->rt_flags	= RTCF_MULTICAST;
	rth->rt_type	= RTN_MULTICAST;
	if (our) {
		rth->dst.input= ip_local_deliver;
		rth->rt_flags |= RTCF_LOCAL;
	}

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
	return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex);

e_nobufs:
	return -ENOBUFS;
e_inval:
	return -EINVAL;
e_err:
	return err;
}

static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation: if the source is martian,
		 *	the only hint is the MAC header.
		 */
		printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			int i;
			const unsigned char *p = skb_mac_header(skb);
			printk(KERN_WARNING "ll header: ");
			for (i = 0; i < dev->hard_header_len; i++, p++) {
				printk("%02x", *p);
				if (i < (dev->hard_header_len - 1))
					printk(":");
			}
			printk("\n");
		}
	}
#endif
}

/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos,
			   struct rtable **result)
{
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	unsigned int flags = 0;
	__be32 spec_dst;
	u32 itag;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
	if (out_dev == NULL) {
		if (net_ratelimit())
			printk(KERN_CRIT "Bug in ip_route_input" \
			       "_slow(). Please, report\n");
		return -EINVAL;
	}

	err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, &spec_dst, &itag, skb->mark);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	if (err)
		flags |= RTCF_DIRECTSRC;

	if (out_dev == in_dev && err &&
	    (IN_DEV_SHARED_MEDIA(out_dev) ||
	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
		flags |= RTCF_DOREDIRECT;

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * The proxy arp feature has been extended to allow ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	atomic_set(&rth->dst.__refcnt, 1);
	rth->dst.flags= DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->dst.flags |= DST_NOPOLICY;
	if (IN_DEV_CONF_GET(out_dev, NOXFRM))
		rth->dst.flags |= DST_NOXFRM;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
	rth->fl.mark    = skb->mark;
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
	rth->rt_gateway	= daddr;
	rth->rt_iif	=
	rth->fl.iif	= in_dev->dev->ifindex;
	rth->dst.dev	= (out_dev)->dev;
	dev_hold(rth->dst.dev);
	rth->fl.oif	= 0;
	rth->rt_spec_dst= spec_dst;

	rth->dst.obsolete = -1;
	rth->dst.input = ip_forward;
	rth->dst.output = ip_output;
	rth->rt_genid = rt_genid(dev_net(rth->dst.dev));

	rt_set_nexthop(rth, res, itag);

	rth->rt_flags = flags;

	*result = rth;
	err = 0;
 cleanup:
	return err;
}

static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    const struct flowi *fl,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos)
{
	struct rtable* rth = NULL;
	int err;
	unsigned hash;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
		fib_select_multipath(fl, res);
#endif

	/* create a routing cache entry */
	err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
	if (err)
		return err;

	/* put it into the cache */
	hash = rt_hash(daddr, saddr, fl->iif,
		       rt_genid(dev_net(rth->dst.dev)));
	return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
}

/*
 *	NOTE. We drop all the packets that have local source
 *	addresses, because every properly looped back packet
 *	must have the correct destination already attached by the output routine.
 *
 *	Such an approach solves two big problems:
 *	1. Non-simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with a 100% guarantee.
 *
 *	called with rcu_read_lock()
 */

static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev)
{
	struct fib_result res;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct flowi fl = { .fl4_dst	= daddr,
			    .fl4_src	= saddr,
			    .fl4_tos	= tos,
			    .fl4_scope	= RT_SCOPE_UNIVERSE,
			    .mark = skb->mark,
			    .iif = dev->ifindex };
	unsigned	flags = 0;
	u32		itag = 0;
	struct rtable * rth;
	unsigned	hash;
	__be32		spec_dst;
	int		err = -EINVAL;
	struct net    * net = dev_net(dev);

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which cannot be detected
	   by fib_lookup.
	 */

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_loopback(saddr))
		goto martian_source;

	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I am not sure whether to fix this or not. Waiting for complaints :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
		goto martian_destination;

	/*
	 *	Now we are ready to route packet.
	 */
	err = fib_lookup(net, &fl, &res);
	if (err != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			goto e_hostunreach;
		goto no_route;
	}

	RT_CACHE_STAT_INC(in_slow_tot);

	if (res.type == RTN_BROADCAST)
		goto brd_input;

	if (res.type == RTN_LOCAL) {
		err = fib_validate_source(saddr, daddr, tos,
					  net->loopback_dev->ifindex,
					  dev, &spec_dst, &itag, skb->mark);
		if (err < 0)
			goto martian_source_keep_err;
		if (err)
			flags |= RTCF_DIRECTSRC;
		spec_dst = daddr;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev))
		goto e_hostunreach;
	if (res.type != RTN_UNICAST)
		goto martian_destination;

	err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ipv4_is_zeronet(saddr))
		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
	else {
		err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
					  &itag, skb->mark);
		if (err < 0)
			goto martian_source_keep_err;
		if (err)
			flags |= RTCF_DIRECTSRC;
	}
	flags |= RTCF_BROADCAST;
	res.type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth)
		goto e_nobufs;

	rth->dst.output= ip_rt_bug;
	rth->dst.obsolete = -1;
	rth->rt_genid = rt_genid(net);

	atomic_set(&rth->dst.__refcnt, 1);
	rth->dst.flags= DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->dst.flags |= DST_NOPOLICY;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
	rth->fl.mark    = skb->mark;
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
#ifdef CONFIG_NET_CLS_ROUTE
	rth->dst.tclassid = itag;
#endif
	rth->rt_iif	=
	rth->fl.iif	= dev->ifindex;
	rth->dst.dev	= net->loopback_dev;
	dev_hold(rth->dst.dev);
	rth->rt_gateway	= daddr;
	rth->rt_spec_dst= spec_dst;
	rth->dst.input= ip_local_deliver;
	rth->rt_flags	= flags|RTCF_LOCAL;
	if (res.type == RTN_UNREACHABLE) {
		rth->dst.input= ip_error;
		rth->dst.error= -err;
		rth->rt_flags	&= ~RTCF_LOCAL;
	}
	rth->rt_type	= res.type;
	hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
	err = rt_intern_hash(hash, rth, NULL, skb, fl.iif);
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
	res.type = RTN_UNREACHABLE;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
			&daddr, &saddr, dev->name);
#endif

e_hostunreach:
	err = -EHOSTUNREACH;
	goto out;

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	err = -EINVAL;
martian_source_keep_err:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}

int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			   u8 tos, struct net_device *dev, bool noref)
{
	struct rtable * rth;
	unsigned	hash;
	int iif = dev->ifindex;
	struct net *net;
	int res;

	net = dev_net(dev);

	rcu_read_lock();

	if (!rt_caching(net))
		goto skip_cache;

	tos &= IPTOS_RT_MASK;
	hash = rt_hash(daddr, saddr, iif, rt_genid(net));

	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
	     rth = rcu_dereference(rth->dst.rt_next)) {
		if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
		     ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
		     (rth->fl.iif ^ iif) |
		     rth->fl.oif |
		     (rth->fl.fl4_tos ^ tos)) == 0 &&
		    rth->fl.mark == skb->mark &&
		    net_eq(dev_net(rth->dst.dev), net) &&
		    !rt_is_expired(rth)) {
			if (noref) {
				dst_use_noref(&rth->dst, jiffies);
				skb_dst_set_noref(skb, &rth->dst);
			} else {
				dst_use(&rth->dst, jiffies);
				skb_dst_set(skb, &rth->dst);
			}
			RT_CACHE_STAT_INC(in_hit);
			rcu_read_unlock();
			return 0;
		}
		RT_CACHE_STAT_INC(in_hlist_search);
	}

skip_cache:
	/* Multicast recognition logic is moved from route cache to here.
	   The problem was that too many Ethernet cards have broken/missing
	   hardware multicast filters :-( As a result the host on a multicasting
	   network acquires a lot of useless route cache entries, sort of
	   SDR messages from all over the world. Now we try to get rid of them.
	   Really, provided the software IP multicast filter is organized
	   reasonably (at least, hashed), it does not result in a slowdown
	   compared with route cache reject entries.
	   Note that multicast routers are not affected, because a
	   route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			int our = ip_check_mc(in_dev, daddr, saddr,
					      ip_hdr(skb)->protocol);
			if (our
#ifdef CONFIG_IP_MROUTE
				||
			    (!ipv4_is_local_multicast(daddr) &&
			     IN_DEV_MFORWARD(in_dev))
#endif
			   ) {
				int res = ip_route_input_mc(skb, daddr, saddr,
							    tos, dev, our);
				rcu_read_unlock();
				return res;
			}
		}
		rcu_read_unlock();
		return -EINVAL;
	}
	res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL(ip_route_input_common);

/* called with rcu_read_lock() */
static int __mkroute_output(struct rtable **result,
			    struct fib_result *res,
			    const struct flowi *fl,
			    const struct flowi *oldflp,
			    struct net_device *dev_out,
			    unsigned flags)
{
	struct rtable *rth;
	struct in_device *in_dev;
	u32 tos = RT_FL_TOS(oldflp);

	if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags & IFF_LOOPBACK))
		return -EINVAL;

	if (ipv4_is_lbcast(fl->fl4_dst))
		res->type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl->fl4_dst))
		res->type = RTN_MULTICAST;
	else if (ipv4_is_zeronet(fl->fl4_dst))
		return -EINVAL;

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	in_dev = __in_dev_get_rcu(dev_out);
	if (!in_dev)
		return -EINVAL;

	if (res->type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		res->fi = NULL;
	} else if (res->type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
				 oldflp->proto))
			flags &= ~RTCF_LOCAL;
		/* If a multicast route does not exist, use
		 * the default one, but do not gateway in this case.
		 * Yes, it is a hack.
		 */
		if (res->fi && res->prefixlen < 4)
			res->fi = NULL;
	}

	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth)
		return -ENOBUFS;

	atomic_set(&rth->dst.__refcnt, 1);
	rth->dst.flags= DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOXFRM))
		rth->dst.flags |= DST_NOXFRM;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->dst.flags |= DST_NOPOLICY;

	rth->fl.fl4_dst	= oldflp->fl4_dst;
	rth->fl.fl4_tos	= tos;
	rth->fl.fl4_src	= oldflp->fl4_src;
	rth->fl.oif	= oldflp->oif;
	rth->fl.mark    = oldflp->mark;
	rth->rt_dst	= fl->fl4_dst;
	rth->rt_src	= fl->fl4_src;
	rth->rt_iif	= oldflp->oif ? : dev_out->ifindex;
	/* get references to the devices that are to be held by the routing
	   cache entry */
	rth->dst.dev	= dev_out;
	dev_hold(dev_out);
	rth->rt_gateway = fl->fl4_dst;
	rth->rt_spec_dst= fl->fl4_src;

	rth->dst.output=ip_output;
	rth->dst.obsolete = -1;
	rth->rt_genid = rt_genid(dev_net(dev_out));

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & RTCF_LOCAL) {
		rth->dst.input = ip_local_deliver;
		rth->rt_spec_dst = fl->fl4_dst;
	}
	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		rth->rt_spec_dst = fl->fl4_src;
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (res->type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(oldflp->fl4_dst)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, res, 0);

	rth->rt_flags = flags;
	*result = rth;
	return 0;
}

/* called with rcu_read_lock() */
static int ip_mkroute_output(struct rtable **rp,
			     struct fib_result *res,
			     const struct flowi *fl,
			     const struct flowi *oldflp,
			     struct net_device *dev_out,
			     unsigned flags)
{
	struct rtable *rth = NULL;
	int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
	unsigned hash;
	if (err == 0) {
		hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
			       rt_genid(dev_net(dev_out)));
		err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
	}

	return err;
}

/*
 * Major route resolver routine.
 * called with rcu_read_lock();
 */

static int ip_route_output_slow(struct net *net, struct rtable **rp,
				const struct flowi *oldflp)
{
	u32 tos	= RT_FL_TOS(oldflp);
	struct flowi fl = { .fl4_dst = oldflp->fl4_dst,
			    .fl4_src = oldflp->fl4_src,
			    .fl4_tos = tos & IPTOS_RT_MASK,
			    .fl4_scope = ((tos & RTO_ONLINK) ?
					  RT_SCOPE_LINK : RT_SCOPE_UNIVERSE),
			    .mark = oldflp->mark,
			    .iif = net->loopback_dev->ifindex,
			    .oif = oldflp->oif };
	struct fib_result res;
	unsigned int flags = 0;
	struct net_device *dev_out = NULL;
	int err;

	res.fi = NULL;
#ifdef CONFIG_IP_MULTIPLE_TABLES
	res.r = NULL;
#endif

	if (oldflp->fl4_src) {
		err = -EINVAL;
		if (ipv4_is_multicast(oldflp->fl4_src) ||
		    ipv4_is_lbcast(oldflp->fl4_src) ||
		    ipv4_is_zeronet(oldflp->fl4_src))
			goto out;

		/* I removed the check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return the wrong iface, if
		      saddr is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with saddr
		      of another iface. --ANK
		 */

		if (oldflp->oif == 0 &&
		    (ipv4_is_multicast(oldflp->fl4_dst) ||
		     ipv4_is_lbcast(oldflp->fl4_dst))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, oldflp->fl4_src, false);
			if (dev_out == NULL)
				goto out;

			/* Special hack: the user can direct multicasts
			   and limited broadcasts via the necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic, vat and friends to work.
			   They bind the socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of the routing cache they are
			   broken, because we are not allowed to build a
			   multicast path with a loopback source addr (the
			   routing cache cannot know that ttl is zero, so
			   that the packet will not leave this host and the
			   route is valid).
			   Luckily, this hack is a good workaround.
			 */

			fl.oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, oldflp->fl4_src, false))
				goto out;
		}
	}


	if (oldflp->oif) {
		dev_out = dev_get_by_index_rcu(net, oldflp->oif);
		err = -ENODEV;
		if (dev_out == NULL)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (rcu_dereference(dev_out->ip_ptr) == NULL)
			goto out;	/* Wrong error code */

		if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
		    ipv4_is_lbcast(oldflp->fl4_dst)) {
			if (!fl.fl4_src)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl.fl4_src) {
			if (ipv4_is_multicast(oldflp->fl4_dst))
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      fl.fl4_scope);
			else if (!oldflp->fl4_dst)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl.fl4_dst) {
		fl.fl4_dst = fl.fl4_src;
		if (!fl.fl4_dst)
			fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl.oif = net->loopback_dev->ifindex;
		res.type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	if (fib_lookup(net, &fl, &res)) {
		res.fi = NULL;
		if (oldflp->oif) {
			/* Apparently, the routing tables are wrong.  Assume
			   that the destination is on-link.

			   WHY? DW.
			   Because we are allowed to send to an iface
			   even if it has NO routes and NO assigned
			   addresses.  When oif is specified, the routing
			   tables are looked up with only one purpose:
			   to catch if the destination is gatewayed, rather
			   than direct.  Moreover, if MSG_DONTROUTE is set,
			   we send the packet, ignoring both the routing
			   tables and the ifaddr state. --ANK

			   We could make it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl.fl4_src == 0)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res.type = RTN_UNICAST;
			goto make_route;
		}
		err = -ENETUNREACH;
		goto out;
	}

	if (res.type == RTN_LOCAL) {
		if (!fl.fl4_src)
			fl.fl4_src = fl.fl4_dst;
		dev_out = net->loopback_dev;
		fl.oif = dev_out->ifindex;
		res.fi = NULL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res.fi->fib_nhs > 1 && fl.oif == 0)
		fib_select_multipath(&fl, &res);
	else
#endif
	if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
		fib_select_default(net, &fl, &res);

	if (!fl.fl4_src)
		fl.fl4_src = FIB_RES_PREFSRC(res);

	dev_out = FIB_RES_DEV(res);
	fl.oif = dev_out->ifindex;

make_route:
	err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);

out:
	return err;
}
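
/*
 * Output route lookup: try the route cache first under
 * rcu_read_lock_bh(), comparing the full flow key (daddr, saddr, oif,
 * mark, TOS bits and the ONLINK flag); fall back to
 * ip_route_output_slow() on a miss or when caching is disabled.
 */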
int __ip_route_output_key(struct net *net, struct rtable **rp,
			  const struct flowi *flp)
{
	unsigned int hash;
	int res;
	struct rtable *rth;

	if (!rt_caching(net))
		goto slow_output;

	hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));

	rcu_read_lock_bh();
	for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
		rth = rcu_dereference_bh(rth->dst.rt_next)) {
		if (rth->fl.fl4_dst == flp->fl4_dst &&
		    rth->fl.fl4_src == flp->fl4_src &&
		    rt_is_output_route(rth) &&
		    rth->fl.oif == flp->oif &&
		    rth->fl.mark == flp->mark &&
		    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
			    (IPTOS_RT_MASK | RTO_ONLINK)) &&
		    net_eq(dev_net(rth->dst.dev), net) &&
		    !rt_is_expired(rth)) {
			dst_use(&rth->dst, jiffies);
			RT_CACHE_STAT_INC(out_hit);
			rcu_read_unlock_bh();
			*rp = rth;
			return 0;
		}
		RT_CACHE_STAT_INC(out_hlist_search);
	}
	rcu_read_unlock_bh();

slow_output:
	rcu_read_lock();
	res = ip_route_output_slow(net, rp, flp);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
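
/*
 * Blackhole dst_ops: a stub destination whose input/output handlers
 * discard packets.  Used below when __xfrm_lookup() returns -EREMOTE,
 * i.e. IPsec state is not yet resolved and the caller must not block;
 * traffic is silently dropped until negotiation completes.
 */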
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			=	AF_INET,
	.protocol		=	cpu_to_be16(ETH_P_IP),
	.destroy		=	ipv4_dst_destroy,
	.check			=	ipv4_blackhole_dst_check,
	.update_pmtu		=	ipv4_rt_blackhole_update_pmtu,
};
static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
{
	struct rtable *ort = *rp;
	struct rtable *rt = (struct rtable *)
		dst_alloc(&ipv4_dst_blackhole_ops);

	if (rt) {
		struct dst_entry *new = &rt->dst;

		atomic_set(&new->__refcnt, 1);
		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard;
		dst_copy_metrics(new, &ort->dst);

		new->dev = ort->dst.dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->fl = ort->fl;

		rt->rt_genid = rt_genid(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_dst = ort->rt_dst;
		rt->rt_src = ort->rt_src;
		rt->rt_iif = ort->rt_iif;
		rt->rt_gateway = ort->rt_gateway;
		rt->rt_spec_dst = ort->rt_spec_dst;
		rt->peer = ort->peer;
		if (rt->peer)
			atomic_inc(&rt->peer->refcnt);

		dst_free(new);
	}

	dst_release(&(*rp)->dst);
	*rp = rt;
	return rt ? 0 : -ENOMEM;
}
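
/*
 * Public output-route entry point with IPsec handling: resolve the
 * plain route, then, if a transport protocol is given in the flow,
 * pass the result through xfrm so the dst may be replaced by a
 * transformation bundle (or a blackhole route while SAs are still
 * being negotiated).
 */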
int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
			 struct sock *sk, int flags)
{
	int err;

	if ((err = __ip_route_output_key(net, rp, flp)) != 0)
		return err;

	if (flp->proto) {
		if (!flp->fl4_src)
			flp->fl4_src = (*rp)->rt_src;
		if (!flp->fl4_dst)
			flp->fl4_dst = (*rp)->rt_dst;
		err = __xfrm_lookup(net, (struct dst_entry **)rp, flp, sk,
				    flags ? XFRM_LOOKUP_WAIT : 0);
		if (err == -EREMOTE)
			err = ipv4_dst_blackhole(net, rp, flp);

		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);

int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
{
	return ip_route_output_flow(net, rp, flp, NULL, 0);
}
EXPORT_SYMBOL(ip_route_output_key);
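
/*
 * Fill one RTM_NEWROUTE netlink message describing a route cache
 * entry: dst/src keys, device, preferred source, gateway, metrics,
 * mark and cache info (id, timestamps, expiry, error).
 */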
static int rt_fill_info(struct net *net,
			struct sk_buff *skb, u32 pid, u32 seq, int event,
			int nowait, unsigned int flags)
{
	struct rtable *rt = skb_rtable(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	long expires;
	u32 id = 0, ts = 0, tsage = 0, error;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family	 = AF_INET;
	r->rtm_dst_len	= 32;
	r->rtm_src_len	= 0;
	r->rtm_tos	= rt->fl.fl4_tos;
	r->rtm_table	= RT_TABLE_MAIN;
	NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
	r->rtm_type	= rt->rt_type;
	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);

	if (rt->fl.fl4_src) {
		r->rtm_src_len = 32;
		NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
	}
	if (rt->dst.dev)
		NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
#ifdef CONFIG_NET_CLS_ROUTE
	if (rt->dst.tclassid)
		NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
#endif
	if (rt_is_input_route(rt))
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
	else if (rt->rt_src != rt->fl.fl4_src)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);

	if (rt->rt_dst != rt->rt_gateway)
		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);

	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
		goto nla_put_failure;

	if (rt->fl.mark)
		NLA_PUT_BE32(skb, RTA_MARK, rt->fl.mark);

	error = rt->dst.error;
	expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
	if (rt->peer) {
		inet_peer_refcheck(rt->peer);
		id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
		if (rt->peer->tcp_ts_stamp) {
			ts = rt->peer->tcp_ts;
			tsage = get_seconds() - rt->peer->tcp_ts_stamp;
		}
	}

	if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
		__be32 dst = rt->rt_dst;

		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
			int err = ipmr_get_route(net, skb, r, nowait);
			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
					error = err;
				}
			}
		} else
#endif
			NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
	}

	if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
			       expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
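
/*
 * RTM_GETROUTE handler: build a dummy skb around the queried
 * addresses, run it through the input path (if an iif was given) or
 * the output path, and report the resulting route back to user space.
 */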
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	int mark;
	struct sk_buff *skb;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers, this skb can pass
	   through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;

	if (iif) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol	= htons(ETH_P_IP);
		skb->dev	= dev;
		skb->mark	= mark;
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		struct flowi fl = {
			.fl4_dst = dst,
			.fl4_src = src,
			.fl4_tos = rtm->rtm_tos,
			.oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
			.mark = mark,
		};
		err = ip_route_output_key(net, &rt, &fl);
	}

	if (err)
		goto errout_free;

	skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}
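
/*
 * Netlink dump of the whole route cache.  cb->args[0]/[1] record the
 * hash bucket and chain position so an interrupted dump can resume.
 */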
int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtable *rt;
	int h, s_h;
	int idx, s_idx;
	struct net *net;

	net = sock_net(skb->sk);

	s_h = cb->args[0];
	if (s_h < 0)
		s_h = 0;
	s_idx = idx = cb->args[1];
	for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
		if (!rt_hash_table[h].chain)
			continue;
		rcu_read_lock_bh();
		for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
		     rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
			if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
				continue;
			if (rt_is_expired(rt))
				continue;
			skb_dst_set_noref(skb, &rt->dst);
			if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					 1, NLM_F_MULTI) <= 0) {
				skb_dst_drop(skb);
				rcu_read_unlock_bh();
				goto done;
			}
			skb_dst_drop(skb);
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}
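
/*
 * Device multicast state changed: flush the route cache for this
 * namespace immediately so stale multicast routes are dropped.
 */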
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev), 0);
}
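
/*
 * sysctl interface.  Writing an integer to
 * /proc/sys/net/ipv4/route/flush flushes the per-namespace route
 * cache, using the written value as the flush delay.
 */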
#ifdef CONFIG_SYSCTL
static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
					void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	if (write) {
		int flush_delay;
		ctl_table ctl;
		struct net *net;

		memcpy(&ctl, __ctl, sizeof(ctl));
		ctl.data = &flush_delay;
		proc_dointvec(&ctl, write, buffer, lenp, ppos);

		net = (struct net *)__ctl->extra1;
		rt_cache_flush(net, flush_delay);
		return 0;
	}

	return -EINVAL;
}
static ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
static struct ctl_table empty[1];

static struct ctl_table ipv4_skeleton[] =
{
	{ .procname = "route",
	  .mode = 0555, .child = ipv4_route_table},
	{ .procname = "neigh",
	  .mode = 0555, .child = empty},
	{ }
};

static __net_initdata struct ctl_path ipv4_path[] = {
	{ .procname = "net", },
	{ .procname = "ipv4", },
	{ },
};

static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },
};

static __net_initdata struct ctl_path ipv4_route_path[] = {
	{ .procname = "net", },
	{ .procname = "ipv4", },
	{ .procname = "route", },
	{ },
};
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (tbl == NULL)
			goto err_dup;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr =
		register_net_sysctl_table(net, ipv4_route_path, tbl);
	if (net->ipv4.route_hdr == NULL)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif
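
/*
 * Each namespace gets a random route-cache generation id at init
 * time; bumping the genid later is how rt_cache_flush() invalidates
 * cached routes without walking the hash table.
 */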
static __net_init int rt_genid_init(struct net *net)
{
	get_random_bytes(&net->ipv4.rt_genid,
			 sizeof(net->ipv4.rt_genid));
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};


#ifdef CONFIG_NET_CLS_ROUTE
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_NET_CLS_ROUTE */

static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
{
	if (!str)
		return 0;
	rhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("rhash_entries=", set_rhash_entries);
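
/*
 * Boot-time setup: allocate the dst slab and the route cache hash
 * table (sized from available memory or the "rhash_entries="
 * parameter), start the periodic garbage-collection work, and
 * register the /proc, netlink and sysctl interfaces.
 */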
int __init ip_rt_init(void)
{
	int rc = 0;

#ifdef CONFIG_NET_CLS_ROUTE
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	rt_hash_table = (struct rt_hash_bucket *)
		alloc_large_system_hash("IP route cache",
					sizeof(struct rt_hash_bucket),
					rhash_entries,
					(totalram_pages >= 128 * 1024) ?
					15 : 17,
					0,
					&rt_hash_log,
					&rt_hash_mask,
					rhash_entries ? 0 : 512 * 1024);
	memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
	rt_hash_lock_init();

	ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
	ip_rt_max_size = (rt_hash_mask + 1) * 16;

	devinet_init();
	ip_fib_init();

	/* All the timers, started at system startup, tend
	   to synchronize.  Perturb it a bit.
	 */
	INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
	expires_ljiffies = jiffies;
	schedule_delayed_work(&expires_work,
		net_random() % ip_rt_gc_interval + ip_rt_gc_interval);

	if (ip_rt_proc_init())
		printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init(ip_rt_max_size);
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	return rc;
}
#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_sysctl_paths(ipv4_path, ipv4_skeleton);
}
#endif