net/ipv4/ipmr.c
/*
 *      IP multicast routing support for mrouted 3.6/3.8
 *
 *              (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *        Linux Consultancy and Custom Driver Development
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Fixes:
 *      Michael Chastain        :       Incorrect size of copying.
 *      Alan Cox                :       Added the cache manager code
 *      Alan Cox                :       Fixed the clone/copy bug and device race.
 *      Mike McLagan            :       Routing by source
 *      Malcolm Beattie         :       Buffer handling fixes.
 *      Alexey Kuznetsov        :       Double buffer free and other fixes.
 *      SVR Anand               :       Fixed several multicast bugs and problems.
 *      Alexey Kuznetsov        :       Status, optimisations and more.
 *      Brad Parker             :       Better behaviour on mrouted upcall
 *                                      overflow.
 *      Carlos Picoto           :       PIMv1 Support
 *      Pavlin Ivanov Radoslavov:       PIMv2 Registers must checksum only PIM header
 *                                      Relax this requirement to work with older peers.
 *
 */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM 1
#endif

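/*
 * Per-table multicast routing state. One instance exists per routing
 * table (only the default table unless CONFIG_IP_MROUTE_MULTIPLE_TABLES
 * is set): the VIF array, the hashed cache of resolved (S,G) entries,
 * the queue of entries still waiting on an mrouted upcall, and the
 * control socket that owns the table.
 */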
struct mr_table {
        struct list_head        list;
#ifdef CONFIG_NET_NS
        struct net              *net;   /* namespace this table lives in */
#endif
        u32                     id;
        struct sock             *mroute_sk;
        struct timer_list       ipmr_expire_timer;
        struct list_head        mfc_unres_queue;
        struct list_head        mfc_cache_array[MFC_LINES];
        struct vif_device       vif_table[MAXVIFS];
        int                     maxvif;
        atomic_t                cache_resolve_queue_len;
        int                     mroute_do_assert;
        int                     mroute_do_pim;
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
        int                     mroute_reg_vif_num;
#endif
};

struct ipmr_rule {
        struct fib_rule         common;
};

struct ipmr_result {
        struct mr_table         *mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that all changes are serialized via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *      Multicast router control variables
 */

#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
   entries is changed only in process context and protected
   by the weak lock mrt_lock. The queue of unresolved entries is
   protected by the strong spinlock mfc_unres_lock.

   This leaves the data path entirely free of exclusive locks.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static int ip_mr_forward(struct net *net, struct mr_table *mrt,
                         struct sk_buff *skb, struct mfc_cache *cache,
                         int local);
static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert);
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                            struct mfc_cache *c, struct rtmsg *rtm);
static void ipmr_expire_process(unsigned long arg);

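/*
 * With CONFIG_IP_MROUTE_MULTIPLE_TABLES the mr_table to use is chosen
 * through the fib_rules framework: forwarded packets are matched against
 * the IPMR rule set, while a daemon socket reaches a specific table by
 * binding to its id with MRT_TABLE. Without it there is exactly one
 * table per namespace, net->ipv4.mrt, and the helpers below collapse
 * into trivial accessors.
 */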
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
        list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
        struct mr_table *mrt;

        ipmr_for_each_table(mrt, net) {
                if (mrt->id == id)
                        return mrt;
        }
        return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
                           struct mr_table **mrt)
{
        struct ipmr_result res;
        struct fib_lookup_arg arg = { .result = &res, };
        int err;

        err = fib_rules_lookup(net->ipv4.mr_rules_ops, flp, 0, &arg);
        if (err < 0)
                return err;
        *mrt = res.mrt;
        return 0;
}

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
                            int flags, struct fib_lookup_arg *arg)
{
        struct ipmr_result *res = arg->result;
        struct mr_table *mrt;

        switch (rule->action) {
        case FR_ACT_TO_TBL:
                break;
        case FR_ACT_UNREACHABLE:
                return -ENETUNREACH;
        case FR_ACT_PROHIBIT:
                return -EACCES;
        case FR_ACT_BLACKHOLE:
        default:
                return -EINVAL;
        }

        mrt = ipmr_get_table(rule->fr_net, rule->table);
        if (mrt == NULL)
                return -EAGAIN;
        res->mrt = mrt;
        return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
        return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
        FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
                               struct fib_rule_hdr *frh, struct nlattr **tb)
{
        return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
                             struct nlattr **tb)
{
        return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
                          struct fib_rule_hdr *frh)
{
        frh->dst_len = 0;
        frh->src_len = 0;
        frh->tos     = 0;
        return 0;
}

static struct fib_rules_ops ipmr_rules_ops_template = {
        .family         = FIB_RULES_IPMR,
        .rule_size      = sizeof(struct ipmr_rule),
        .addr_size      = sizeof(u32),
        .action         = ipmr_rule_action,
        .match          = ipmr_rule_match,
        .configure      = ipmr_rule_configure,
        .compare        = ipmr_rule_compare,
        .default_pref   = fib_default_rule_pref,
        .fill           = ipmr_rule_fill,
        .nlgroup        = RTNLGRP_IPV4_RULE,
        .policy         = ipmr_rule_policy,
        .owner          = THIS_MODULE,
};

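/*
 * Namespace setup for the multi-table case: register the IPMR rule ops,
 * create the default table and install a catch-all rule pointing at it,
 * so the single-table behaviour is preserved until the administrator
 * adds rules of his own.
 */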
static int __net_init ipmr_rules_init(struct net *net)
{
        struct fib_rules_ops *ops;
        struct mr_table *mrt;
        int err;

        ops = fib_rules_register(&ipmr_rules_ops_template, net);
        if (IS_ERR(ops))
                return PTR_ERR(ops);

        INIT_LIST_HEAD(&net->ipv4.mr_tables);

        mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
        if (mrt == NULL) {
                err = -ENOMEM;
                goto err1;
        }

        err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
        if (err < 0)
                goto err2;

        net->ipv4.mr_rules_ops = ops;
        return 0;

err2:
        kfree(mrt);
err1:
        fib_rules_unregister(ops);
        return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
        struct mr_table *mrt, *next;

        list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list)
                kfree(mrt);
        fib_rules_unregister(net->ipv4.mr_rules_ops);
}
#else
#define ipmr_for_each_table(mrt, net) \
        for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
        return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
                           struct mr_table **mrt)
{
        *mrt = net->ipv4.mrt;
        return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
        net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
        return net->ipv4.mrt ? 0 : -ENOMEM;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
        kfree(net->ipv4.mrt);
}
#endif

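/*
 * Look up a table by id, creating it on demand. The forwarding cache
 * hash chains and the unresolved queue start out empty, and the expiry
 * timer is armed lazily, only while unresolved entries exist.
 */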
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
        struct mr_table *mrt;
        unsigned int i;

        mrt = ipmr_get_table(net, id);
        if (mrt != NULL)
                return mrt;

        mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
        if (mrt == NULL)
                return NULL;
        write_pnet(&mrt->net, net);
        mrt->id = id;

        /* Forwarding cache */
        for (i = 0; i < MFC_LINES; i++)
                INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

        INIT_LIST_HEAD(&mrt->mfc_unres_queue);

        setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
                    (unsigned long)mrt);

#ifdef CONFIG_IP_PIMSM
        mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
        list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
        return mrt;
}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

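/*
 * DVMRP tunnel vifs are ordinary IPIP tunnels named dvmrp%d. They are
 * created and destroyed by driving the "tunl0" master device's
 * SIOCADDTUNNEL/SIOCDELTUNNEL ioctls from kernel context, which is why
 * the helpers below temporarily lift the address limit with set_fs().
 */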
static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
        struct net *net = dev_net(dev);

        dev_close(dev);

        dev = __dev_get_by_name(net, "tunl0");
        if (dev) {
                const struct net_device_ops *ops = dev->netdev_ops;
                struct ifreq ifr;
                struct ip_tunnel_parm p;

                memset(&p, 0, sizeof(p));
                p.iph.daddr = v->vifc_rmt_addr.s_addr;
                p.iph.saddr = v->vifc_lcl_addr.s_addr;
                p.iph.version = 4;
                p.iph.ihl = 5;
                p.iph.protocol = IPPROTO_IPIP;
                sprintf(p.name, "dvmrp%d", v->vifc_vifi);
                ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

                if (ops->ndo_do_ioctl) {
                        mm_segment_t oldfs = get_fs();

                        /* the ioctl expects a user pointer; we pass a kernel one */
                        set_fs(KERNEL_DS);
                        ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
                        set_fs(oldfs);
                }
        }
}

static
struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
        struct net_device  *dev;

        dev = __dev_get_by_name(net, "tunl0");

        if (dev) {
                const struct net_device_ops *ops = dev->netdev_ops;
                int err;
                struct ifreq ifr;
                struct ip_tunnel_parm p;
                struct in_device  *in_dev;

                memset(&p, 0, sizeof(p));
                p.iph.daddr = v->vifc_rmt_addr.s_addr;
                p.iph.saddr = v->vifc_lcl_addr.s_addr;
                p.iph.version = 4;
                p.iph.ihl = 5;
                p.iph.protocol = IPPROTO_IPIP;
                sprintf(p.name, "dvmrp%d", v->vifc_vifi);
                ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

                if (ops->ndo_do_ioctl) {
                        mm_segment_t oldfs = get_fs();

                        set_fs(KERNEL_DS);
                        err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
                        set_fs(oldfs);
                } else
                        err = -EOPNOTSUPP;

                dev = NULL;

                if (err == 0 &&
                    (dev = __dev_get_by_name(net, p.name)) != NULL) {
                        dev->flags |= IFF_MULTICAST;

                        in_dev = __in_dev_get_rtnl(dev);
                        if (in_dev == NULL)
                                goto failure;

                        ipv4_devconf_setall(in_dev);
                        IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

                        if (dev_open(dev))
                                goto failure;
                        dev_hold(dev);
                }
        }
        return dev;

failure:
        /* allow the register to be completed before unregistering. */
        rtnl_unlock();
        rtnl_lock();

        unregister_netdevice(dev);
        return NULL;
}

#ifdef CONFIG_IP_PIMSM

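/*
 * The PIM register vif is a pseudo device: everything transmitted on it
 * is not sent to the wire but bounced whole to the daemon as an
 * IGMPMSG_WHOLEPKT upcall, which is how PIM-SM register encapsulation
 * is done in userspace.
 */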
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct net *net = dev_net(dev);
        struct mr_table *mrt;
        struct flowi fl = {
                .oif            = dev->ifindex,
                .iif            = skb->skb_iif,
                .mark           = skb->mark,
        };
        int err;

        err = ipmr_fib_lookup(net, &fl, &mrt);
        if (err < 0) {
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        read_lock(&mrt_lock);
        dev->stats.tx_bytes += skb->len;
        dev->stats.tx_packets++;
        ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
        read_unlock(&mrt_lock);
        kfree_skb(skb);
        return NETDEV_TX_OK;
}

static const struct net_device_ops reg_vif_netdev_ops = {
        .ndo_start_xmit = reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
{
        dev->type               = ARPHRD_PIMREG;
        dev->mtu                = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
        dev->flags              = IFF_NOARP;
        dev->netdev_ops         = &reg_vif_netdev_ops;
        dev->destructor         = free_netdev;
        dev->features           |= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
        struct net_device *dev;
        struct in_device *in_dev;
        char name[IFNAMSIZ];

        if (mrt->id == RT_TABLE_DEFAULT)
                sprintf(name, "pimreg");
        else
                sprintf(name, "pimreg%u", mrt->id);

        dev = alloc_netdev(0, name, reg_vif_setup);

        if (dev == NULL)
                return NULL;

        dev_net_set(dev, net);

        if (register_netdevice(dev)) {
                free_netdev(dev);
                return NULL;
        }
        dev->iflink = 0;

        rcu_read_lock();
        if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
                rcu_read_unlock();
                goto failure;
        }

        ipv4_devconf_setall(in_dev);
        IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
        rcu_read_unlock();

        if (dev_open(dev))
                goto failure;

        dev_hold(dev);

        return dev;

failure:
        /* allow the register to be completed before unregistering. */
        rtnl_unlock();
        rtnl_lock();

        unregister_netdevice(dev);
        return NULL;
}
#endif

/*
 *      Delete a VIF entry
 *      @notify: Set to 1 if the caller is a notifier_call
 */

static int vif_delete(struct mr_table *mrt, int vifi, int notify,
                      struct list_head *head)
{
        struct vif_device *v;
        struct net_device *dev;
        struct in_device *in_dev;

        if (vifi < 0 || vifi >= mrt->maxvif)
                return -EADDRNOTAVAIL;

        v = &mrt->vif_table[vifi];

        write_lock_bh(&mrt_lock);
        dev = v->dev;
        v->dev = NULL;

        if (!dev) {
                write_unlock_bh(&mrt_lock);
                return -EADDRNOTAVAIL;
        }

#ifdef CONFIG_IP_PIMSM
        if (vifi == mrt->mroute_reg_vif_num)
                mrt->mroute_reg_vif_num = -1;
#endif

        if (vifi + 1 == mrt->maxvif) {
                int tmp;
                for (tmp = vifi - 1; tmp >= 0; tmp--) {
                        if (VIF_EXISTS(mrt, tmp))
                                break;
                }
                mrt->maxvif = tmp + 1;
        }

        write_unlock_bh(&mrt_lock);

        dev_set_allmulti(dev, -1);

        if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
                IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
                ip_rt_multicast_event(in_dev);
        }

        if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
                unregister_netdevice_queue(dev, head);

        dev_put(dev);
        return 0;
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
        kmem_cache_free(mrt_cachep, c);
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
        struct net *net = read_pnet(&mrt->net);
        struct sk_buff *skb;
        struct nlmsgerr *e;

        atomic_dec(&mrt->cache_resolve_queue_len);

        while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
                        nlh->nlmsg_type = NLMSG_ERROR;
                        nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
                        skb_trim(skb, nlh->nlmsg_len);
                        e = NLMSG_DATA(nlh);
                        e->error = -ETIMEDOUT;
                        memset(&e->msg, 0, sizeof(e->msg));

                        rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
                } else
                        kfree_skb(skb);
        }

        ipmr_cache_free(c);
}


/* Timer process for the unresolved queue. */

static void ipmr_expire_process(unsigned long arg)
{
        struct mr_table *mrt = (struct mr_table *)arg;
        unsigned long now;
        unsigned long expires;
        struct mfc_cache *c, *next;

        if (!spin_trylock(&mfc_unres_lock)) {
                mod_timer(&mrt->ipmr_expire_timer, jiffies + HZ/10);
                return;
        }

        if (list_empty(&mrt->mfc_unres_queue))
                goto out;

        now = jiffies;
        expires = 10*HZ;

        list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
                if (time_after(c->mfc_un.unres.expires, now)) {
                        unsigned long interval = c->mfc_un.unres.expires - now;
                        if (interval < expires)
                                expires = interval;
                        continue;
                }

                list_del(&c->list);
                ipmr_destroy_unres(mrt, c);
        }

        if (!list_empty(&mrt->mfc_unres_queue))
                mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
        spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
                                   unsigned char *ttls)
{
        int vifi;

        cache->mfc_un.res.minvif = MAXVIFS;
        cache->mfc_un.res.maxvif = 0;
        memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

        for (vifi = 0; vifi < mrt->maxvif; vifi++) {
                if (VIF_EXISTS(mrt, vifi) &&
                    ttls[vifi] && ttls[vifi] < 255) {
                        cache->mfc_un.res.ttls[vifi] = ttls[vifi];
                        if (cache->mfc_un.res.minvif > vifi)
                                cache->mfc_un.res.minvif = vifi;
                        if (cache->mfc_un.res.maxvif <= vifi)
                                cache->mfc_un.res.maxvif = vifi + 1;
                }
        }
}

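/*
 * Bring a vif into service: resolve the vifctl to a device (creating a
 * pimreg or dvmrp tunnel device if asked to), switch it to allmulti,
 * bump MC_FORWARDING on it, and only then, under mrt_lock, publish the
 * vif_device entry so the data path sees a fully initialised vif.
 */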
static int vif_add(struct net *net, struct mr_table *mrt,
                   struct vifctl *vifc, int mrtsock)
{
        int vifi = vifc->vifc_vifi;
        struct vif_device *v = &mrt->vif_table[vifi];
        struct net_device *dev;
        struct in_device *in_dev;
        int err;

        /* Is vif busy? */
        if (VIF_EXISTS(mrt, vifi))
                return -EADDRINUSE;

        switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
        case VIFF_REGISTER:
                /*
                 * Special Purpose VIF in PIM
                 * All the packets will be sent to the daemon
                 */
                if (mrt->mroute_reg_vif_num >= 0)
                        return -EADDRINUSE;
                dev = ipmr_reg_vif(net, mrt);
                if (!dev)
                        return -ENOBUFS;
                err = dev_set_allmulti(dev, 1);
                if (err) {
                        unregister_netdevice(dev);
                        dev_put(dev);
                        return err;
                }
                break;
#endif
        case VIFF_TUNNEL:
                dev = ipmr_new_tunnel(net, vifc);
                if (!dev)
                        return -ENOBUFS;
                err = dev_set_allmulti(dev, 1);
                if (err) {
                        ipmr_del_tunnel(dev, vifc);
                        dev_put(dev);
                        return err;
                }
                break;

        case VIFF_USE_IFINDEX:
        case 0:
                if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
                        dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
                        if (dev && dev->ip_ptr == NULL) {
                                dev_put(dev);
                                return -EADDRNOTAVAIL;
                        }
                } else
                        dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);

                if (!dev)
                        return -EADDRNOTAVAIL;
                err = dev_set_allmulti(dev, 1);
                if (err) {
                        dev_put(dev);
                        return err;
                }
                break;
        default:
                return -EINVAL;
        }

        if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
                dev_put(dev);
                return -EADDRNOTAVAIL;
        }
        IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
        ip_rt_multicast_event(in_dev);

        /*
         *      Fill in the VIF structures
         */
        v->rate_limit = vifc->vifc_rate_limit;
        v->local = vifc->vifc_lcl_addr.s_addr;
        v->remote = vifc->vifc_rmt_addr.s_addr;
        v->flags = vifc->vifc_flags;
        if (!mrtsock)
                v->flags |= VIFF_STATIC;
        v->threshold = vifc->vifc_threshold;
        v->bytes_in = 0;
        v->bytes_out = 0;
        v->pkt_in = 0;
        v->pkt_out = 0;
        v->link = dev->ifindex;
        if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
                v->link = dev->iflink;

        /* And finish update writing critical data */
        write_lock_bh(&mrt_lock);
        v->dev = dev;
#ifdef CONFIG_IP_PIMSM
        if (v->flags & VIFF_REGISTER)
                mrt->mroute_reg_vif_num = vifi;
#endif
        if (vifi + 1 > mrt->maxvif)
                mrt->maxvif = vifi + 1;
        write_unlock_bh(&mrt_lock);
        return 0;
}

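/*
 * Resolved entries live on one of MFC_LINES hash chains, keyed by
 * MFC_HASH(group, origin); lookups here run under mrt_lock from the
 * data path, so the chains are only ever edited in process context.
 */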
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
                                         __be32 origin,
                                         __be32 mcastgrp)
{
        int line = MFC_HASH(mcastgrp, origin);
        struct mfc_cache *c;

        list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
                if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
                        return c;
        }
        return NULL;
}

/*
 *      Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
{
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
        if (c == NULL)
                return NULL;
        c->mfc_un.res.minvif = MAXVIFS;
        return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
        if (c == NULL)
                return NULL;
        skb_queue_head_init(&c->mfc_un.unres.unresolved);
        c->mfc_un.unres.expires = jiffies + 10*HZ;
        return c;
}

/*
 *      A cache entry has gone from queued into resolved state:
 *      play its pending packets through the router.
 */

static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
                               struct mfc_cache *uc, struct mfc_cache *c)
{
        struct sk_buff *skb;
        struct nlmsgerr *e;

        /*
         *      Play the pending entries through our router
         */

        while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

                        if (ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
                                nlh->nlmsg_len = (skb_tail_pointer(skb) -
                                                  (u8 *)nlh);
                        } else {
                                nlh->nlmsg_type = NLMSG_ERROR;
                                nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
                                skb_trim(skb, nlh->nlmsg_len);
                                e = NLMSG_DATA(nlh);
                                e->error = -EMSGSIZE;
                                memset(&e->msg, 0, sizeof(e->msg));
                        }

                        rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
                } else
                        ip_mr_forward(net, mrt, skb, c, 0);
        }
}

/*
 *      Bounce a cache query up to mrouted. We could use netlink for this
 *      but mrouted expects the following bizarre scheme.
 *
 *      Called under mrt_lock.
 */

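/*
 * The NOCACHE/WRONGVIF upcall is a fake IP packet queued on the daemon
 * socket: the copied IP header doubles as struct igmpmsg (im_msgtype
 * and im_vif are patched in, and the protocol slot - im_mbz - is
 * cleared to zero), followed by a dummy IGMP header whose type repeats
 * the message type. IGMPMSG_WHOLEPKT upcalls instead carry the entire
 * original packet behind a duplicated IP header.
 */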
static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert)
{
        struct sk_buff *skb;
        const int ihl = ip_hdrlen(pkt);
        struct igmphdr *igmp;
        struct igmpmsg *msg;
        int ret;

#ifdef CONFIG_IP_PIMSM
        if (assert == IGMPMSG_WHOLEPKT)
                skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
        else
#endif
                skb = alloc_skb(128, GFP_ATOMIC);

        if (!skb)
                return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
        if (assert == IGMPMSG_WHOLEPKT) {
                /* Ugly, but we have no choice with this interface.
                   Duplicate old header, fix ihl, length etc.
                   And all this only to mangle msg->im_msgtype and
                   to set msg->im_mbz to "mbz" :-)
                 */
                skb_push(skb, sizeof(struct iphdr));
                skb_reset_network_header(skb);
                skb_reset_transport_header(skb);
                msg = (struct igmpmsg *)skb_network_header(skb);
                memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
                msg->im_msgtype = IGMPMSG_WHOLEPKT;
                msg->im_mbz = 0;
                msg->im_vif = mrt->mroute_reg_vif_num;
                ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
                ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
                                             sizeof(struct iphdr));
        } else
#endif
        {

        /*
         *      Copy the IP header
         */

        skb->network_header = skb->tail;
        skb_put(skb, ihl);
        skb_copy_to_linear_data(skb, pkt->data, ihl);
        ip_hdr(skb)->protocol = 0;              /* Flag to the kernel this is a route add */
        msg = (struct igmpmsg *)skb_network_header(skb);
        msg->im_vif = vifi;
        skb_dst_set(skb, dst_clone(skb_dst(pkt)));

        /*
         *      Add our header
         */

        igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
        igmp->type      =
        msg->im_msgtype = assert;
        igmp->code      = 0;
        ip_hdr(skb)->tot_len = htons(skb->len);         /* Fix the length */
        skb->transport_header = skb->network_header;
        }

        if (mrt->mroute_sk == NULL) {
                kfree_skb(skb);
                return -EINVAL;
        }

        /*
         *      Deliver to mrouted
         */
        ret = sock_queue_rcv_skb(mrt->mroute_sk, skb);
        if (ret < 0) {
                if (net_ratelimit())
                        printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
                kfree_skb(skb);
        }

        return ret;
}


/*
 *      Queue a packet for resolution and kick a NOCACHE upcall to
 *      mrouted; the unresolved entry is handled under mfc_unres_lock.
 */

static int
ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
{
        bool found = false;
        int err;
        struct mfc_cache *c;
        const struct iphdr *iph = ip_hdr(skb);

        spin_lock_bh(&mfc_unres_lock);
        list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
                if (c->mfc_mcastgrp == iph->daddr &&
                    c->mfc_origin == iph->saddr) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                /*
                 *      Create a new entry if allowable
                 */

                if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
                    (c = ipmr_cache_alloc_unres()) == NULL) {
                        spin_unlock_bh(&mfc_unres_lock);

                        kfree_skb(skb);
                        return -ENOBUFS;
                }

                /*
                 *      Fill in the new cache entry
                 */
                c->mfc_parent   = -1;
                c->mfc_origin   = iph->saddr;
                c->mfc_mcastgrp = iph->daddr;

                /*
                 *      Reflect first query at mrouted.
                 */
                err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
                if (err < 0) {
                        /* If the report failed throw the cache entry
                           out - Brad Parker
                         */
                        spin_unlock_bh(&mfc_unres_lock);

                        ipmr_cache_free(c);
                        kfree_skb(skb);
                        return err;
                }

                atomic_inc(&mrt->cache_resolve_queue_len);
                list_add(&c->list, &mrt->mfc_unres_queue);

                mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
        }

        /*
         *      See if we can append the packet
         */
        if (c->mfc_un.unres.unresolved.qlen > 3) {
                kfree_skb(skb);
                err = -ENOBUFS;
        } else {
                skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
                err = 0;
        }

        spin_unlock_bh(&mfc_unres_lock);
        return err;
}

/*
 *      MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
{
        int line;
        struct mfc_cache *c, *next;

        line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

        list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
                if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
                    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
                        write_lock_bh(&mrt_lock);
                        list_del(&c->list);
                        write_unlock_bh(&mrt_lock);

                        ipmr_cache_free(c);
                        return 0;
                }
        }
        return -ENOENT;
}

static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
                        struct mfcctl *mfc, int mrtsock)
{
        bool found = false;
        int line;
        struct mfc_cache *uc, *c;

        if (mfc->mfcc_parent >= MAXVIFS)
                return -ENFILE;

        line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

        list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
                if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
                    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
                        found = true;
                        break;
                }
        }

        if (found) {
                write_lock_bh(&mrt_lock);
                c->mfc_parent = mfc->mfcc_parent;
                ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
                if (!mrtsock)
                        c->mfc_flags |= MFC_STATIC;
                write_unlock_bh(&mrt_lock);
                return 0;
        }

        if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
                return -EINVAL;

        c = ipmr_cache_alloc();
        if (c == NULL)
                return -ENOMEM;

        c->mfc_origin = mfc->mfcc_origin.s_addr;
        c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
        c->mfc_parent = mfc->mfcc_parent;
        ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
        if (!mrtsock)
                c->mfc_flags |= MFC_STATIC;

        write_lock_bh(&mrt_lock);
        list_add(&c->list, &mrt->mfc_cache_array[line]);
        write_unlock_bh(&mrt_lock);

        /*
         *      Check to see if we resolved a queued list. If so we
         *      need to send on the frames and tidy up.
         */
        found = false;
        spin_lock_bh(&mfc_unres_lock);
        list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
                if (uc->mfc_origin == c->mfc_origin &&
                    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
                        list_del(&uc->list);
                        atomic_dec(&mrt->cache_resolve_queue_len);
                        found = true;
                        break;
                }
        }
        if (list_empty(&mrt->mfc_unres_queue))
                del_timer(&mrt->ipmr_expire_timer);
        spin_unlock_bh(&mfc_unres_lock);

        if (found) {
                ipmr_cache_resolve(net, mrt, uc, c);
                ipmr_cache_free(uc);
        }
        return 0;
}

/*
 *      Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr_table *mrt)
{
        int i;
        LIST_HEAD(list);
        struct mfc_cache *c, *next;

        /*
         *      Shut down all active vif entries
         */
        for (i = 0; i < mrt->maxvif; i++) {
                if (!(mrt->vif_table[i].flags & VIFF_STATIC))
                        vif_delete(mrt, i, 0, &list);
        }
        unregister_netdevice_many(&list);

        /*
         *      Wipe the cache
         */
        for (i = 0; i < MFC_LINES; i++) {
                list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
                        if (c->mfc_flags & MFC_STATIC)
                                continue;
                        write_lock_bh(&mrt_lock);
                        list_del(&c->list);
                        write_unlock_bh(&mrt_lock);

                        ipmr_cache_free(c);
                }
        }

        if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
                spin_lock_bh(&mfc_unres_lock);
                list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
                        list_del(&c->list);
                        ipmr_destroy_unres(mrt, c);
                }
                spin_unlock_bh(&mfc_unres_lock);
        }
}

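/*
 * Destructor for the daemon's raw IGMP socket, run via ip_ra_control()
 * when the socket is closed or MRT_DONE is issued: detach the socket
 * from any table it controls and tear down all non-static state.
 */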
static void mrtsock_destruct(struct sock *sk)
{
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        rtnl_lock();
        ipmr_for_each_table(mrt, net) {
                if (sk == mrt->mroute_sk) {
                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;

                        write_lock_bh(&mrt_lock);
                        mrt->mroute_sk = NULL;
                        write_unlock_bh(&mrt_lock);

                        mroute_clean_tables(mrt);
                }
        }
        rtnl_unlock();
}

/*
 *      Socket options and virtual interface manipulation. The whole
 *      virtual interface system is a complete heap, but unfortunately
 *      that's how BSD mrouted happens to think. Maybe one day with a proper
 *      MOSPF/PIM router set up we can clean this up.
 */

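/*
 * A minimal daemon session, sketched from userspace (error handling
 * omitted; the address is a placeholder): the MRT_* options live at the
 * IP level of a raw IGMP socket.
 *
 *      int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *      int one = 1;
 *
 *      setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *
 *      struct vifctl vc = { .vifc_vifi = 0, .vifc_threshold = 1 };
 *      vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
 *      setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *
 * IGMPMSG_NOCACHE upcalls then arrive as packets read from s, and the
 * daemon answers each one with an MRT_ADD_MFC setsockopt.
 */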
int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
        int ret;
        struct vifctl vif;
        struct mfcctl mfc;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (mrt == NULL)
                return -ENOENT;

        if (optname != MRT_INIT) {
                if (sk != mrt->mroute_sk && !capable(CAP_NET_ADMIN))
                        return -EACCES;
        }

        switch (optname) {
        case MRT_INIT:
                if (sk->sk_type != SOCK_RAW ||
                    inet_sk(sk)->inet_num != IPPROTO_IGMP)
                        return -EOPNOTSUPP;
                if (optlen != sizeof(int))
                        return -ENOPROTOOPT;

                rtnl_lock();
                if (mrt->mroute_sk) {
                        rtnl_unlock();
                        return -EADDRINUSE;
                }

                ret = ip_ra_control(sk, 1, mrtsock_destruct);
                if (ret == 0) {
                        write_lock_bh(&mrt_lock);
                        mrt->mroute_sk = sk;
                        write_unlock_bh(&mrt_lock);

                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
                }
                rtnl_unlock();
                return ret;
        case MRT_DONE:
                if (sk != mrt->mroute_sk)
                        return -EACCES;
                return ip_ra_control(sk, 0, NULL);
        case MRT_ADD_VIF:
        case MRT_DEL_VIF:
                if (optlen != sizeof(vif))
                        return -EINVAL;
                if (copy_from_user(&vif, optval, sizeof(vif)))
                        return -EFAULT;
                if (vif.vifc_vifi >= MAXVIFS)
                        return -ENFILE;
                rtnl_lock();
                if (optname == MRT_ADD_VIF) {
                        ret = vif_add(net, mrt, &vif, sk == mrt->mroute_sk);
                } else {
                        ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
                }
                rtnl_unlock();
                return ret;

                /*
                 *      Manipulate the forwarding caches. These live
                 *      in a sort of kernel/user symbiosis.
                 */
        case MRT_ADD_MFC:
        case MRT_DEL_MFC:
                if (optlen != sizeof(mfc))
                        return -EINVAL;
                if (copy_from_user(&mfc, optval, sizeof(mfc)))
                        return -EFAULT;
                rtnl_lock();
                if (optname == MRT_DEL_MFC)
                        ret = ipmr_mfc_delete(mrt, &mfc);
                else
                        ret = ipmr_mfc_add(net, mrt, &mfc, sk == mrt->mroute_sk);
                rtnl_unlock();
                return ret;
                /*
                 *      Control PIM assert.
                 */
        case MRT_ASSERT:
        {
                int v;
                if (get_user(v, (int __user *)optval))
                        return -EFAULT;
                mrt->mroute_do_assert = (v) ? 1 : 0;
                return 0;
        }
#ifdef CONFIG_IP_PIMSM
        case MRT_PIM:
        {
                int v;

                if (get_user(v, (int __user *)optval))
                        return -EFAULT;
                v = (v) ? 1 : 0;

                rtnl_lock();
                ret = 0;
                if (v != mrt->mroute_do_pim) {
                        mrt->mroute_do_pim = v;
                        mrt->mroute_do_assert = v;
                }
                rtnl_unlock();
                return ret;
        }
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
        case MRT_TABLE:
        {
                u32 v;

                if (optlen != sizeof(u32))
                        return -EINVAL;
                if (get_user(v, (u32 __user *)optval))
                        return -EFAULT;
                if (sk == mrt->mroute_sk)
                        return -EBUSY;

                rtnl_lock();
                ret = 0;
                if (!ipmr_new_table(net, v))
                        ret = -ENOMEM;
                raw_sk(sk)->ipmr_table = v;
                rtnl_unlock();
                return ret;
        }
#endif
        /*
         *      Spurious command, or MRT_VERSION which you cannot
         *      set.
         */
        default:
                return -ENOPROTOOPT;
        }
}

/*
 *      Getsock opt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
        int olr;
        int val;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (mrt == NULL)
                return -ENOENT;

        if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
            optname != MRT_PIM &&
#endif
            optname != MRT_ASSERT)
                return -ENOPROTOOPT;

        if (get_user(olr, optlen))
                return -EFAULT;

        if (olr < 0)
                return -EINVAL;
        olr = min_t(unsigned int, olr, sizeof(int));

        if (put_user(olr, optlen))
                return -EFAULT;
        if (optname == MRT_VERSION)
                val = 0x0305;
#ifdef CONFIG_IP_PIMSM
        else if (optname == MRT_PIM)
                val = mrt->mroute_do_pim;
#endif
        else
                val = mrt->mroute_do_assert;
        if (copy_to_user(optval, &val, olr))
                return -EFAULT;
        return 0;
}

/*
 *      The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
        struct sioc_sg_req sr;
        struct sioc_vif_req vr;
        struct vif_device *vif;
        struct mfc_cache *c;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (mrt == NULL)
                return -ENOENT;

        switch (cmd) {
        case SIOCGETVIFCNT:
                if (copy_from_user(&vr, arg, sizeof(vr)))
                        return -EFAULT;
                if (vr.vifi >= mrt->maxvif)
                        return -EINVAL;
                read_lock(&mrt_lock);
                vif = &mrt->vif_table[vr.vifi];
                if (VIF_EXISTS(mrt, vr.vifi)) {
                        vr.icount = vif->pkt_in;
                        vr.ocount = vif->pkt_out;
                        vr.ibytes = vif->bytes_in;
                        vr.obytes = vif->bytes_out;
                        read_unlock(&mrt_lock);

                        if (copy_to_user(arg, &vr, sizeof(vr)))
                                return -EFAULT;
                        return 0;
                }
                read_unlock(&mrt_lock);
                return -EADDRNOTAVAIL;
        case SIOCGETSGCNT:
                if (copy_from_user(&sr, arg, sizeof(sr)))
                        return -EFAULT;

                read_lock(&mrt_lock);
                c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
                if (c) {
                        sr.pktcnt = c->mfc_un.res.pkt;
                        sr.bytecnt = c->mfc_un.res.bytes;
                        sr.wrong_if = c->mfc_un.res.wrong_if;
                        read_unlock(&mrt_lock);

                        if (copy_to_user(arg, &sr, sizeof(sr)))
                                return -EFAULT;
                        return 0;
                }
                read_unlock(&mrt_lock);
                return -EADDRNOTAVAIL;
        default:
                return -ENOIOCTLCMD;
        }
}


static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;
        struct net *net = dev_net(dev);
        struct mr_table *mrt;
        struct vif_device *v;
        int ct;
        LIST_HEAD(list);

        if (event != NETDEV_UNREGISTER)
                return NOTIFY_DONE;

        ipmr_for_each_table(mrt, net) {
                v = &mrt->vif_table[0];
                for (ct = 0; ct < mrt->maxvif; ct++, v++) {
                        if (v->dev == dev)
                                vif_delete(mrt, ct, 1, &list);
                }
        }
        unregister_netdevice_many(&list);
        return NOTIFY_DONE;
}


static struct notifier_block ip_mr_notifier = {
        .notifier_call = ipmr_device_event,
};

/*
 *      Encapsulate a packet by attaching a valid IPIP header to it.
 *      This avoids tunnel drivers and other mess and gives us the speed so
 *      important for multicast video.
 */

static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
        struct iphdr *iph;
        struct iphdr *old_iph = ip_hdr(skb);

        skb_push(skb, sizeof(struct iphdr));
        skb->transport_header = skb->network_header;
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);

        iph->version    =       4;
        iph->tos        =       old_iph->tos;
        iph->ttl        =       old_iph->ttl;
        iph->frag_off   =       0;
        iph->daddr      =       daddr;
        iph->saddr      =       saddr;
        iph->protocol   =       IPPROTO_IPIP;
        iph->ihl        =       5;
        iph->tot_len    =       htons(skb->len);
        ip_select_ident(iph, skb_dst(skb), NULL);
        ip_send_check(iph);

        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        nf_reset(skb);
}

static inline int ipmr_forward_finish(struct sk_buff *skb)
{
        struct ip_options *opt = &(IPCB(skb)->opt);

        IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);

        if (unlikely(opt->optlen))
                ip_forward_options(skb);

        return dst_output(skb);
}

/*
 *      Processing handlers for ipmr_forward
 */

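/*
 * Transmit one copy of the packet on the given vif: route the copy out
 * (towards the tunnel endpoint for VIFF_TUNNEL vifs, towards the group
 * otherwise), drop over-MTU packets carrying DF rather than fragment,
 * decrement the TTL, IPIP-encapsulate for tunnels, and hand the result
 * to the NF_INET_FORWARD hook.
 */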
static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
                            struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct vif_device *vif = &mrt->vif_table[vifi];
        struct net_device *dev;
        struct rtable *rt;
        int    encap = 0;

        if (vif->dev == NULL)
                goto out_free;

#ifdef CONFIG_IP_PIMSM
        if (vif->flags & VIFF_REGISTER) {
                vif->pkt_out++;
                vif->bytes_out += skb->len;
                vif->dev->stats.tx_bytes += skb->len;
                vif->dev->stats.tx_packets++;
                ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
                goto out_free;
        }
#endif

        if (vif->flags & VIFF_TUNNEL) {
                struct flowi fl = { .oif = vif->link,
                                    .nl_u = { .ip4_u =
                                              { .daddr = vif->remote,
                                                .saddr = vif->local,
                                                .tos = RT_TOS(iph->tos) } },
                                    .proto = IPPROTO_IPIP };
                if (ip_route_output_key(net, &rt, &fl))
                        goto out_free;
                encap = sizeof(struct iphdr);
        } else {
                struct flowi fl = { .oif = vif->link,
                                    .nl_u = { .ip4_u =
                                              { .daddr = iph->daddr,
                                                .tos = RT_TOS(iph->tos) } },
                                    .proto = IPPROTO_IPIP };
                if (ip_route_output_key(net, &rt, &fl))
                        goto out_free;
        }

        dev = rt->u.dst.dev;

        if (skb->len + encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
                /* Do not fragment multicasts. Alas, IPv4 does not
                   allow us to send ICMP here, so such packets will
                   simply disappear into a black hole.
                 */

                IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                ip_rt_put(rt);
                goto out_free;
        }

        encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;

        if (skb_cow(skb, encap)) {
                ip_rt_put(rt);
                goto out_free;
        }

        vif->pkt_out++;
        vif->bytes_out += skb->len;

        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->u.dst);
        ip_decrease_ttl(ip_hdr(skb));

        /* FIXME: forward and output firewalls used to be called here.
         * What do we do with netfilter? -- RR */
        if (vif->flags & VIFF_TUNNEL) {
                ip_encap(skb, vif->local, vif->remote);
                /* FIXME: extra output firewall step used to be here. --RR */
                vif->dev->stats.tx_packets++;
                vif->dev->stats.tx_bytes += skb->len;
        }

        IPCB(skb)->flags |= IPSKB_FORWARDED;

        /*
         * RFC1584 teaches that a DVMRP/PIM router must deliver packets
         * locally not only before forwarding, but also after forwarding on
         * all output interfaces. It is clear that, if the mrouter runs a
         * multicasting program, it should receive packets regardless of
         * which interface the program has joined on. Without this, the
         * program would have to join on all interfaces. On the other hand,
         * a multihoming host (or router, but not mrouter) cannot join on
         * more than one interface - that would result in receiving multiple
         * copies of each packet.
         */
        NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, dev,
                ipmr_forward_finish);
        return;

out_free:
        kfree_skb(skb);
}

static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
        int ct;

        for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
                if (mrt->vif_table[ct].dev == dev)
                        break;
        }
        return ct;
}
1615
1616 /* "local" means that we should preserve one skb (for local delivery) */
1617
1618 static int ip_mr_forward(struct net *net, struct mr_table *mrt,
1619                          struct sk_buff *skb, struct mfc_cache *cache,
1620                          int local)
1621 {
1622         int psend = -1;
1623         int vif, ct;
1624
1625         vif = cache->mfc_parent;
1626         cache->mfc_un.res.pkt++;
1627         cache->mfc_un.res.bytes += skb->len;
1628
1629         /*
1630          * Wrong interface: drop packet and (maybe) send PIM assert.
1631          */
1632         if (mrt->vif_table[vif].dev != skb->dev) {
1633                 int true_vifi;
1634
1635                 if (skb_rtable(skb)->fl.iif == 0) {
1636                         /* It is our own packet, looped back.
1637                            A very complicated situation...
1638
1639                            The best workaround, until the routing daemons
1640                            are fixed, is not to redistribute a packet that
1641                            was sent through the wrong interface. This means
1642                            that multicast applications WILL NOT work for
1643                            (S,G) entries whose default multicast route
1644                            points to the wrong oif. In any case, running
1645                            multicast applications on a router is a bad idea.
1646                          */
1647                         goto dont_forward;
1648                 }
1649
1650                 cache->mfc_un.res.wrong_if++;
1651                 true_vifi = ipmr_find_vif(mrt, skb->dev);
1652
1653                 if (true_vifi >= 0 && mrt->mroute_do_assert &&
1654                     /* PIM-SM uses asserts when switching from RPT to SPT,
1655                        so we cannot insist the packet arrived on an oif. It
1656                        is bad, but otherwise we would have to move a pretty
1657                        large chunk of pimd into the kernel. Ugh... --ANK
1658                      */
1659                     (mrt->mroute_do_pim ||
1660                      cache->mfc_un.res.ttls[true_vifi] < 255) &&
1661                     time_after(jiffies,
1662                                cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1663                         cache->mfc_un.res.last_assert = jiffies;
1664                         ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
1665                 }
1666                 goto dont_forward;
1667         }
1668
1669         mrt->vif_table[vif].pkt_in++;
1670         mrt->vif_table[vif].bytes_in += skb->len;
1671
1672         /*
1673          *      Forward the frame
1674          */
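        /*
         * Note: each ttls[] entry is a TTL threshold, not a TTL. A packet
         * is queued on vif ct only if its TTL is strictly greater than the
         * threshold; 255 therefore marks a vif that is not an output
         * interface for this (S,G) entry at all.
         */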
1675         for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
1676                 if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
1677                         if (psend != -1) {
1678                                 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1679                                 if (skb2)
1680                                         ipmr_queue_xmit(net, mrt, skb2, cache,
1681                                                         psend);
1682                         }
1683                         psend = ct;
1684                 }
1685         }
1686         if (psend != -1) {
1687                 if (local) {
1688                         struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1689                         if (skb2)
1690                                 ipmr_queue_xmit(net, mrt, skb2, cache, psend);
1691                 } else {
1692                         ipmr_queue_xmit(net, mrt, skb, cache, psend);
1693                         return 0;
1694                 }
1695         }
1696
1697 dont_forward:
1698         if (!local)
1699                 kfree_skb(skb);
1700         return 0;
1701 }
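
/*
 * A worked example of the 'psend' pattern above (illustrative only):
 * with eligible oifs A, B and C, the loop clones the skb for A and B
 * and merely remembers C in 'psend'; the tail of the function then
 * sends the original skb on C (or a clone of it, when one copy must
 * survive for local delivery). This saves one skb_clone() per packet.
 */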
1702
1703
1704 /*
1705  *      Multicast packets for forwarding arrive here
1706  */
1707
1708 int ip_mr_input(struct sk_buff *skb)
1709 {
1710         struct mfc_cache *cache;
1711         struct net *net = dev_net(skb->dev);
1712         int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
1713         struct mr_table *mrt;
1714         int err;
1715
1716         /* A packet looped back after forwarding must not be forwarded
1717            a second time, but it can still be delivered locally.
1718          */
1719         if (IPCB(skb)->flags & IPSKB_FORWARDED)
1720                 goto dont_forward;
1721
1722         err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
1723         if (err < 0)
1724                 return err;
1725
1726         if (!local) {
1727                 if (IPCB(skb)->opt.router_alert) {
1728                         if (ip_call_ra_chain(skb))
1729                                 return 0;
1730                 } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
1731                         /* IGMPv1 (and broken IGMPv2 implementations such as
1732                            Cisco IOS <= 11.2(8)) do not put the router alert
1733                            option into IGMP packets destined to routable
1734                            groups. That is very bad, because it means we
1735                            could forward NO IGMP messages at all.
1736                          */
1737                         read_lock(&mrt_lock);
1738                         if (mrt->mroute_sk) {
1739                                 nf_reset(skb);
1740                                 raw_rcv(mrt->mroute_sk, skb);
1741                                 read_unlock(&mrt_lock);
1742                                 return 0;
1743                         }
1744                         read_unlock(&mrt_lock);
1745                 }
1746         }
1747
1748         read_lock(&mrt_lock);
1749         cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
1750
1751         /*
1752          *      No usable cache entry
1753          */
1754         if (cache == NULL) {
1755                 int vif;
1756
1757                 if (local) {
1758                         struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1759                         ip_local_deliver(skb);
1760                         if (skb2 == NULL) {
1761                                 read_unlock(&mrt_lock);
1762                                 return -ENOBUFS;
1763                         }
1764                         skb = skb2;
1765                 }
1766
1767                 vif = ipmr_find_vif(mrt, skb->dev);
1768                 if (vif >= 0) {
1769                         int err = ipmr_cache_unresolved(mrt, vif, skb);
1770                         read_unlock(&mrt_lock);
1771
1772                         return err;
1773                 }
1774                 read_unlock(&mrt_lock);
1775                 kfree_skb(skb);
1776                 return -ENODEV;
1777         }
1778
1779         ip_mr_forward(net, mrt, skb, cache, local);
1780
1781         read_unlock(&mrt_lock);
1782
1783         if (local)
1784                 return ip_local_deliver(skb);
1785
1786         return 0;
1787
1788 dont_forward:
1789         if (local)
1790                 return ip_local_deliver(skb);
1791         kfree_skb(skb);
1792         return 0;
1793 }
1794
1795 #ifdef CONFIG_IP_PIMSM
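/*
 * Illustrative layout of a PIM REGISTER frame as __pim_rcv() sees it
 * (PIMv2 per RFC 4601; for PIMv1 the register is carried inside IGMP):
 *
 *     | outer iphdr | PIM register header | inner multicast iphdr ... |
 *                   ^ skb_transport_header()       ^ encap
 *
 * The function strips the outer headers and feeds the inner packet back
 * through netif_rx() as if it had arrived on the register vif, so it is
 * then routed like any natively received multicast packet.
 */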
1796 static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
1797                      unsigned int pimlen)
1798 {
1799         struct net_device *reg_dev = NULL;
1800         struct iphdr *encap;
1801
1802         encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
1803         /*
1804            Check that:
1805            a. the packet is really destined to a multicast group
1806            b. the packet is not a NULL-REGISTER
1807            c. the packet is not truncated
1808          */
1809         if (!ipv4_is_multicast(encap->daddr) ||
1810             encap->tot_len == 0 ||
1811             ntohs(encap->tot_len) + pimlen > skb->len)
1812                 return 1;
1813
1814         read_lock(&mrt_lock);
1815         if (mrt->mroute_reg_vif_num >= 0)
1816                 reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
1817         if (reg_dev)
1818                 dev_hold(reg_dev);
1819         read_unlock(&mrt_lock);
1820
1821         if (reg_dev == NULL)
1822                 return 1;
1823
1824         skb->mac_header = skb->network_header;
1825         skb_pull(skb, (u8 *)encap - skb->data);
1826         skb_reset_network_header(skb);
1827         skb->dev = reg_dev;
1828         skb->protocol = htons(ETH_P_IP);
1829         skb->ip_summed = CHECKSUM_NONE;
1830         skb->pkt_type = PACKET_HOST;
1831         skb_dst_drop(skb);
1832         reg_dev->stats.rx_bytes += skb->len;
1833         reg_dev->stats.rx_packets++;
1834         nf_reset(skb);
1835         netif_rx(skb);
1836         dev_put(reg_dev);
1837
1838         return 0;
1839 }
1840 #endif
1841
1842 #ifdef CONFIG_IP_PIMSM_V1
1843 /*
1844  * Handle IGMP messages of PIMv1
1845  */
1846
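/*
 * Note (illustrative): PIMv1 messages are carried inside IGMP, so the
 * header is read as a struct igmphdr with its fields reinterpreted:
 * 'group' holds the PIM version word and 'code' the PIM message type,
 * which is what the PIM_V1_VERSION / PIM_V1_REGISTER checks below test.
 */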
1847 int pim_rcv_v1(struct sk_buff *skb)
1848 {
1849         struct igmphdr *pim;
1850         struct net *net = dev_net(skb->dev);
1851         struct mr_table *mrt;
1852
1853         if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
1854                 goto drop;
1855
1856         pim = igmp_hdr(skb);
1857
1858         if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
1859                 goto drop;
1860
1861         if (!mrt->mroute_do_pim ||
1862             pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
1863                 goto drop;
1864
1865         if (__pim_rcv(mrt, skb, sizeof(*pim))) {
1866 drop:
1867                 kfree_skb(skb);
1868         }
1869         return 0;
1870 }
1871 #endif
1872
1873 #ifdef CONFIG_IP_PIMSM_V2
1874 static int pim_rcv(struct sk_buff *skb)
1875 {
1876         struct pimreghdr *pim;
1877         struct net *net = dev_net(skb->dev);
1878         struct mr_table *mrt;
1879
1880         if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
1881                 goto drop;
1882
1883         pim = (struct pimreghdr *)skb_transport_header(skb);
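        /*
         * Accept the register if the checksum covers just the PIM header,
         * as PIMv2 specifies for registers, or - failing that - if it
         * covers the whole packet, which is what some older
         * implementations compute.
         */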
1884         if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
1885             (pim->flags&PIM_NULL_REGISTER) ||
1886             (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
1887              csum_fold(skb_checksum(skb, 0, skb->len, 0))))
1888                 goto drop;
1889
1890         if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
1891                 goto drop;
1892
1893         if (__pim_rcv(mrt, skb, sizeof(*pim))) {
1894 drop:
1895                 kfree_skb(skb);
1896         }
1897         return 0;
1898 }
1899 #endif
1900
1901 static int
1902 ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c,
1903                  struct rtmsg *rtm)
1904 {
1905         int ct;
1906         struct rtnexthop *nhp;
1907         u8 *b = skb_tail_pointer(skb);
1908         struct rtattr *mp_head;
1909
1910         /* If cache is unresolved, don't try to parse IIF and OIF */
1911         if (c->mfc_parent >= MAXVIFS)
1912                 return -ENOENT;
1913
1914         if (VIF_EXISTS(mrt, c->mfc_parent))
1915                 RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex);
1916
1917         mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
1918
1919         for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1920                 if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
1921                         if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1922                                 goto rtattr_failure;
1923                         nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
1924                         nhp->rtnh_flags = 0;
1925                         nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
1926                         nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
1927                         nhp->rtnh_len = sizeof(*nhp);
1928                 }
1929         }
1930         mp_head->rta_type = RTA_MULTIPATH;
1931         mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
1932         rtm->rtm_type = RTN_MULTICAST;
1933         return 1;
1934
1935 rtattr_failure:
1936         nlmsg_trim(skb, b);
1937         return -EMSGSIZE;
1938 }
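
/*
 * Resulting attribute layout (illustrative): ipmr_fill_mroute() above
 * emits
 *
 *     RTA_IIF        ifindex of the parent (input) vif
 *     RTA_MULTIPATH  one struct rtnexthop per output vif, with
 *                    rtnh_ifindex = that vif's device and
 *                    rtnh_hops    = that vif's TTL threshold
 *
 * so userspace can recover the whole (iif, oifs, thresholds) tuple from
 * a single RTM_GETROUTE answer.
 */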
1939
1940 int ipmr_get_route(struct net *net,
1941                    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1942 {
1943         int err;
1944         struct mr_table *mrt;
1945         struct mfc_cache *cache;
1946         struct rtable *rt = skb_rtable(skb);
1947
1948         mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
1949         if (mrt == NULL)
1950                 return -ENOENT;
1951
1952         read_lock(&mrt_lock);
1953         cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst);
1954
1955         if (cache == NULL) {
1956                 struct sk_buff *skb2;
1957                 struct iphdr *iph;
1958                 struct net_device *dev;
1959                 int vif;
1960
1961                 if (nowait) {
1962                         read_unlock(&mrt_lock);
1963                         return -EAGAIN;
1964                 }
1965
1966                 dev = skb->dev;
1967                 if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) {
1968                         read_unlock(&mrt_lock);
1969                         return -ENODEV;
1970                 }
1971                 skb2 = skb_clone(skb, GFP_ATOMIC);
1972                 if (!skb2) {
1973                         read_unlock(&mrt_lock);
1974                         return -ENOMEM;
1975                 }
1976
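                /* Build a skeleton IP header carrying only saddr/daddr.
                 * iph->version = 0 below marks the skb as a fake, locally
                 * generated request rather than a real packet, so it can
                 * be told apart when the cache entry resolves.
                 */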
1977                 skb_push(skb2, sizeof(struct iphdr));
1978                 skb_reset_network_header(skb2);
1979                 iph = ip_hdr(skb2);
1980                 iph->ihl = sizeof(struct iphdr) >> 2;
1981                 iph->saddr = rt->rt_src;
1982                 iph->daddr = rt->rt_dst;
1983                 iph->version = 0;
1984                 err = ipmr_cache_unresolved(mrt, vif, skb2);
1985                 read_unlock(&mrt_lock);
1986                 return err;
1987         }
1988
1989         if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
1990                 cache->mfc_flags |= MFC_NOTIFY;
1991         err = ipmr_fill_mroute(mrt, skb, cache, rtm);
1992         read_unlock(&mrt_lock);
1993         return err;
1994 }
1995
1996 #ifdef CONFIG_PROC_FS
1997 /*
1998  *      The /proc interfaces to multicast routing: /proc/net/ip_mr_cache and /proc/net/ip_mr_vif
1999  */
2000 struct ipmr_vif_iter {
2001         struct seq_net_private p;
2002         struct mr_table *mrt;
2003         int ct;
2004 };
2005
2006 static struct vif_device *ipmr_vif_seq_idx(struct net *net,
2007                                            struct ipmr_vif_iter *iter,
2008                                            loff_t pos)
2009 {
2010         struct mr_table *mrt = iter->mrt;
2011
2012         for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
2013                 if (!VIF_EXISTS(mrt, iter->ct))
2014                         continue;
2015                 if (pos-- == 0)
2016                         return &mrt->vif_table[iter->ct];
2017         }
2018         return NULL;
2019 }
2020
2021 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
2022         __acquires(mrt_lock)
2023 {
2024         struct ipmr_vif_iter *iter = seq->private;
2025         struct net *net = seq_file_net(seq);
2026         struct mr_table *mrt;
2027
2028         mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2029         if (mrt == NULL)
2030                 return ERR_PTR(-ENOENT);
2031
2032         iter->mrt = mrt;
2033
2034         read_lock(&mrt_lock);
2035         return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
2036                 : SEQ_START_TOKEN;
2037 }
2038
2039 static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2040 {
2041         struct ipmr_vif_iter *iter = seq->private;
2042         struct net *net = seq_file_net(seq);
2043         struct mr_table *mrt = iter->mrt;
2044
2045         ++*pos;
2046         if (v == SEQ_START_TOKEN)
2047                 return ipmr_vif_seq_idx(net, iter, 0);
2048
2049         while (++iter->ct < mrt->maxvif) {
2050                 if (!VIF_EXISTS(mrt, iter->ct))
2051                         continue;
2052                 return &mrt->vif_table[iter->ct];
2053         }
2054         return NULL;
2055 }
2056
2057 static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
2058         __releases(mrt_lock)
2059 {
2060         read_unlock(&mrt_lock);
2061 }
2062
2063 static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
2064 {
2065         struct ipmr_vif_iter *iter = seq->private;
2066         struct mr_table *mrt = iter->mrt;
2067
2068         if (v == SEQ_START_TOKEN) {
2069                 seq_puts(seq,
2070                          "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
2071         } else {
2072                 const struct vif_device *vif = v;
2073                 const char *name =  vif->dev ? vif->dev->name : "none";
2074
2075                 seq_printf(seq,
2076                            "%2zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
2077                            vif - mrt->vif_table,
2078                            name, vif->bytes_in, vif->pkt_in,
2079                            vif->bytes_out, vif->pkt_out,
2080                            vif->flags, vif->local, vif->remote);
2081         }
2082         return 0;
2083 }
2084
2085 static const struct seq_operations ipmr_vif_seq_ops = {
2086         .start = ipmr_vif_seq_start,
2087         .next  = ipmr_vif_seq_next,
2088         .stop  = ipmr_vif_seq_stop,
2089         .show  = ipmr_vif_seq_show,
2090 };
2091
2092 static int ipmr_vif_open(struct inode *inode, struct file *file)
2093 {
2094         return seq_open_net(inode, file, &ipmr_vif_seq_ops,
2095                             sizeof(struct ipmr_vif_iter));
2096 }
2097
2098 static const struct file_operations ipmr_vif_fops = {
2099         .owner   = THIS_MODULE,
2100         .open    = ipmr_vif_open,
2101         .read    = seq_read,
2102         .llseek  = seq_lseek,
2103         .release = seq_release_net,
2104 };
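
/*
 * Example /proc/net/ip_mr_vif output (illustrative values only):
 *
 *   Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *    0 eth0          123456     789    654321     987 00000 0A000001 00000000
 *
 * Local/Remote are raw hex addresses in network byte order and Flags is
 * the VIFF_* bitmask.
 */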
2105
2106 struct ipmr_mfc_iter {
2107         struct seq_net_private p;
2108         struct mr_table *mrt;
2109         struct list_head *cache;
2110         int ct;
2111 };
2112
2113
2114 static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
2115                                           struct ipmr_mfc_iter *it, loff_t pos)
2116 {
2117         struct mr_table *mrt = it->mrt;
2118         struct mfc_cache *mfc;
2119
2120         read_lock(&mrt_lock);
2121         for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
2122                 it->cache = &mrt->mfc_cache_array[it->ct];
2123                 list_for_each_entry(mfc, it->cache, list)
2124                         if (pos-- == 0)
2125                                 return mfc;
2126         }
2127         read_unlock(&mrt_lock);
2128
2129         spin_lock_bh(&mfc_unres_lock);
2130         it->cache = &mrt->mfc_unres_queue;
2131         list_for_each_entry(mfc, it->cache, list)
2132                 if (pos-- == 0)
2133                         return mfc;
2134         spin_unlock_bh(&mfc_unres_lock);
2135
2136         it->cache = NULL;
2137         return NULL;
2138 }
2139
2140
2141 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
2142 {
2143         struct ipmr_mfc_iter *it = seq->private;
2144         struct net *net = seq_file_net(seq);
2145         struct mr_table *mrt;
2146
2147         mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2148         if (mrt == NULL)
2149                 return ERR_PTR(-ENOENT);
2150
2151         it->mrt = mrt;
2152         it->cache = NULL;
2153         it->ct = 0;
2154         return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
2155                 : SEQ_START_TOKEN;
2156 }
2157
2158 static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2159 {
2160         struct mfc_cache *mfc = v;
2161         struct ipmr_mfc_iter *it = seq->private;
2162         struct net *net = seq_file_net(seq);
2163         struct mr_table *mrt = it->mrt;
2164
2165         ++*pos;
2166
2167         if (v == SEQ_START_TOKEN)
2168                 return ipmr_mfc_seq_idx(net, seq->private, 0);
2169
2170         if (mfc->list.next != it->cache)
2171                 return list_entry(mfc->list.next, struct mfc_cache, list);
2172
2173         if (it->cache == &mrt->mfc_unres_queue)
2174                 goto end_of_list;
2175
2176         BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
2177
2178         while (++it->ct < MFC_LINES) {
2179                 it->cache = &mrt->mfc_cache_array[it->ct];
2180                 if (list_empty(it->cache))
2181                         continue;
2182                 return list_first_entry(it->cache, struct mfc_cache, list);
2183         }
2184
2185         /* exhausted cache_array, show unresolved */
2186         read_unlock(&mrt_lock);
2187         it->cache = &mrt->mfc_unres_queue;
2188         it->ct = 0;
2189
2190         spin_lock_bh(&mfc_unres_lock);
2191         if (!list_empty(it->cache))
2192                 return list_first_entry(it->cache, struct mfc_cache, list);
2193
2194  end_of_list:
2195         spin_unlock_bh(&mfc_unres_lock);
2196         it->cache = NULL;
2197
2198         return NULL;
2199 }
2200
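/*
 * ->stop must release whichever lock ->start/->next left held: mrt_lock
 * while we are still walking the resolved hash buckets, mfc_unres_lock
 * once the walk has crossed over to the unresolved queue. it->cache
 * records where the iterator stands, so it also tells us which lock to
 * drop.
 */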
2201 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
2202 {
2203         struct ipmr_mfc_iter *it = seq->private;
2204         struct mr_table *mrt = it->mrt;
2205
2206         if (it->cache == &mrt->mfc_unres_queue)
2207                 spin_unlock_bh(&mfc_unres_lock);
2208         else if (it->cache == &mrt->mfc_cache_array[it->ct])
2209                 read_unlock(&mrt_lock);
2210 }
2211
2212 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
2213 {
2214         int n;
2215
2216         if (v == SEQ_START_TOKEN) {
2217                 seq_puts(seq,
2218                  "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
2219         } else {
2220                 const struct mfc_cache *mfc = v;
2221                 const struct ipmr_mfc_iter *it = seq->private;
2222                 const struct mr_table *mrt = it->mrt;
2223
2224                 seq_printf(seq, "%08lX %08lX %-3hd",
2225                            (unsigned long) mfc->mfc_mcastgrp,
2226                            (unsigned long) mfc->mfc_origin,
2227                            mfc->mfc_parent);
2228
2229                 if (it->cache != &mrt->mfc_unres_queue) {
2230                         seq_printf(seq, " %8lu %8lu %8lu",
2231                                    mfc->mfc_un.res.pkt,
2232                                    mfc->mfc_un.res.bytes,
2233                                    mfc->mfc_un.res.wrong_if);
2234                         for (n = mfc->mfc_un.res.minvif;
2235                              n < mfc->mfc_un.res.maxvif; n++) {
2236                                 if (VIF_EXISTS(mrt, n) &&
2237                                     mfc->mfc_un.res.ttls[n] < 255)
2238                                         seq_printf(seq,
2239                                            " %2d:%-3d",
2240                                            n, mfc->mfc_un.res.ttls[n]);
2241                         }
2242                 } else {
2243                         /* unresolved mfc_caches don't contain
2244                          * pkt, bytes and wrong_if values
2245                          */
2246                         seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
2247                 }
2248                 seq_putc(seq, '\n');
2249         }
2250         return 0;
2251 }
2252
2253 static const struct seq_operations ipmr_mfc_seq_ops = {
2254         .start = ipmr_mfc_seq_start,
2255         .next  = ipmr_mfc_seq_next,
2256         .stop  = ipmr_mfc_seq_stop,
2257         .show  = ipmr_mfc_seq_show,
2258 };
2259
2260 static int ipmr_mfc_open(struct inode *inode, struct file *file)
2261 {
2262         return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
2263                             sizeof(struct ipmr_mfc_iter));
2264 }
2265
2266 static const struct file_operations ipmr_mfc_fops = {
2267         .owner   = THIS_MODULE,
2268         .open    = ipmr_mfc_open,
2269         .read    = seq_read,
2270         .llseek  = seq_lseek,
2271         .release = seq_release_net,
2272 };
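
/*
 * Example /proc/net/ip_mr_cache output (illustrative values only):
 *
 *   Group    Origin   Iif     Pkts    Bytes    Wrong Oifs
 *   E0010101 0A000001 1         42     6300        0  2:1    3:1
 *
 * Group/Origin are raw hex addresses, Iif is the parent vif and every
 * Oifs entry is printed as <vif>:<TTL threshold>; unresolved entries
 * show zeroed counters.
 */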
2273 #endif
2274
2275 #ifdef CONFIG_IP_PIMSM_V2
2276 static const struct net_protocol pim_protocol = {
2277         .handler        =       pim_rcv,
2278         .netns_ok       =       1,
2279 };
2280 #endif
2281
2282
2283 /*
2284  *      Setup for IP multicast routing
2285  */
2286 static int __net_init ipmr_net_init(struct net *net)
2287 {
2288         int err;
2289
2290         err = ipmr_rules_init(net);
2291         if (err < 0)
2292                 goto fail;
2293
2294 #ifdef CONFIG_PROC_FS
2295         err = -ENOMEM;
2296         if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
2297                 goto proc_vif_fail;
2298         if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
2299                 goto proc_cache_fail;
2300 #endif
2301         return 0;
2302
2303 #ifdef CONFIG_PROC_FS
2304 proc_cache_fail:
2305         proc_net_remove(net, "ip_mr_vif");
2306 proc_vif_fail:
2307         ipmr_rules_exit(net);
2308 #endif
2309 fail:
2310         return err;
2311 }
2312
2313 static void __net_exit ipmr_net_exit(struct net *net)
2314 {
2315 #ifdef CONFIG_PROC_FS
2316         proc_net_remove(net, "ip_mr_cache");
2317         proc_net_remove(net, "ip_mr_vif");
2318 #endif
2319         ipmr_rules_exit(net);
2320 }
2321
2322 static struct pernet_operations ipmr_net_ops = {
2323         .init = ipmr_net_init,
2324         .exit = ipmr_net_exit,
2325 };
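
/*
 * Registering ipmr_net_ops makes ipmr_net_init()/ipmr_net_exit() run
 * once per network namespace, so every namespace gets its own multicast
 * routing tables and its own /proc/net/ip_mr_* entries.
 */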
2326
2327 int __init ip_mr_init(void)
2328 {
2329         int err;
2330
2331         mrt_cachep = kmem_cache_create("ip_mrt_cache",
2332                                        sizeof(struct mfc_cache),
2333                                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2334                                        NULL);
2335         if (!mrt_cachep)
2336                 return -ENOMEM;
2337
2338         err = register_pernet_subsys(&ipmr_net_ops);
2339         if (err)
2340                 goto reg_pernet_fail;
2341
2342         err = register_netdevice_notifier(&ip_mr_notifier);
2343         if (err)
2344                 goto reg_notif_fail;
2345 #ifdef CONFIG_IP_PIMSM_V2
2346         if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
2347                 printk(KERN_ERR "ip_mr_init: can't add PIM protocol\n");
2348                 err = -EAGAIN;
2349                 goto add_proto_fail;
2350         }
2351 #endif
2352         return 0;
2353
2354 #ifdef CONFIG_IP_PIMSM_V2
2355 add_proto_fail:
2356         unregister_netdevice_notifier(&ip_mr_notifier);
2357 #endif
2358 reg_notif_fail:
2359         unregister_pernet_subsys(&ipmr_net_ops);
2360 reg_pernet_fail:
2361         kmem_cache_destroy(mrt_cachep);
2362         return err;
2363 }