 *	IP multicast routing support for mrouted 3.6/3.8
 *	(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	  Linux Consultancy and Custom Driver Development
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM	1
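/*
 * CONFIG_IP_PIMSM is a convenience symbol: it is defined whenever either
 * PIM-SM version is configured, so code common to PIMv1 and PIMv2 can be
 * guarded by a single #ifdef.
 */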
	struct list_head	list;
	struct sock		*mroute_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc_unres_queue;
	struct list_head	mfc_cache_array[MFC_LINES];
	struct vif_device	vif_table[MAXVIFS];
	atomic_t		cache_resolve_queue_len;
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
	int			mroute_reg_vif_num;
	struct fib_rule		common;
/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
static DEFINE_RWLOCK(mrt_lock);
 *	Multicast router control variables
#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
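/* A vif slot counts as in use iff its device pointer is set; vif_delete()
   clears the pointer, so freed slots fail this test. */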
/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to Alan's original scheme. The hash table of resolved
   entries is changed only in process context and protected
   with the weak lock mrt_lock. The queue of unresolved entries is
   protected with the strong spinlock mfc_unres_lock.
   As a result, the data path is entirely free of exclusive locks.
static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static int ip_mr_forward(struct net *net, struct mr_table *mrt,
			 struct sk_buff *skb, struct mfc_cache *cache,
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    struct mfc_cache *c, struct rtmsg *rtm);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)
static struct mr_table *ipmr_get_table(struct net *net, u32 id)
	struct mr_table *mrt;
	ipmr_for_each_table(mrt, net) {
static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
			   struct mr_table **mrt)
	struct ipmr_result res;
	struct fib_lookup_arg arg = { .result = &res, };
	err = fib_rules_lookup(net->ipv4.mr_rules_ops, flp, 0, &arg);
static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;
	switch (rule->action) {
	case FR_ACT_UNREACHABLE:
	case FR_ACT_PROHIBIT:
	case FR_ACT_BLACKHOLE:
	mrt = ipmr_get_table(rule->fr_net, rule->table);
static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)
static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
static const struct fib_rules_ops __net_initdata ipmr_rules_ops_template = {
	.family		= FIB_RULES_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.default_pref	= fib_default_rule_pref,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
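/*
 * The template above is instantiated per namespace by ipmr_rules_init();
 * ipmr_fib_lookup() then resolves a flow to an mr_table by walking these
 * rules via fib_rules_lookup(), with ipmr_rule_action() mapping the
 * matching rule to its table.
 */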
static int __net_init ipmr_rules_init(struct net *net)
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	INIT_LIST_HEAD(&net->ipv4.mr_tables);
	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	net->ipv4.mr_rules_ops = ops;
	fib_rules_unregister(ops);
static void __net_exit ipmr_rules_exit(struct net *net)
	struct mr_table *mrt, *next;
	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list)
	fib_rules_unregister(net->ipv4.mr_rules_ops);
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
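/* With CONFIG_IP_MROUTE_MULTIPLE_TABLES disabled there is exactly one
 * table per namespace, so this "loop" visits net->ipv4.mrt at most once.
 */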
static struct mr_table *ipmr_get_table(struct net *net, u32 id)
	return net->ipv4.mrt;
static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
			   struct mr_table **mrt)
	*mrt = net->ipv4.mrt;
static int __net_init ipmr_rules_init(struct net *net)
	net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	return net->ipv4.mrt ? 0 : -ENOMEM;
static void __net_exit ipmr_rules_exit(struct net *net)
	kfree(net->ipv4.mrt);
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
	struct mr_table *mrt;
	mrt = ipmr_get_table(net, id);
	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	write_pnet(&mrt->net, net);
	/* Forwarding cache */
	for (i = 0; i < MFC_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);
	INIT_LIST_HEAD(&mrt->mfc_unres_queue);
	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
#ifdef CONFIG_IP_PIMSM
	mrt->mroute_reg_vif_num = -1;
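	/* -1 means no register vif allocated yet; vif_add() fills this in
	 * when a VIFF_REGISTER vif is created. */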
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
	struct net *net = dev_net(dev);
	dev = __dev_get_by_name(net, "tunl0");
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ip_tunnel_parm p;
		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
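			/* ndo_do_ioctl() expects a user pointer, so the
			 * address limit is temporarily lifted with
			 * set_fs(KERNEL_DS) to pass the kernel-resident
			 * ip_tunnel_parm through this interface. */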
struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
	struct net_device *dev;
	dev = __dev_get_by_name(net, "tunl0");
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ip_tunnel_parm p;
		struct in_device *in_dev;
		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
	    (dev = __dev_get_by_name(net, p.name)) != NULL) {
		dev->flags |= IFF_MULTICAST;
		in_dev = __in_dev_get_rtnl(dev);
		ipv4_devconf_setall(in_dev);
		IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
	/* allow the register to be completed before unregistering. */
	unregister_netdevice(dev);
#ifdef CONFIG_IP_PIMSM
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	err = ipmr_fib_lookup(net, &fl, &mrt);
	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
static void reg_vif_setup(struct net_device *dev)
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
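	/* Leave room for the outer IP header plus the 8-byte PIM register
	 * header that encapsulation will prepend. */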
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
	struct net_device *dev;
	struct in_device *in_dev;
	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
		sprintf(name, "pimreg%u", mrt->id);
	dev = alloc_netdev(0, name, reg_vif_setup);
	dev_net_set(dev, net);
	if (register_netdevice(dev)) {
	if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
	ipv4_devconf_setall(in_dev);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
	/* allow the register to be completed before unregistering. */
	unregister_netdevice(dev);
 * @notify: Set to 1 if the caller is a notifier_call
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;
	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;
	v = &mrt->vif_table[vifi];
	write_lock_bh(&mrt_lock);
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
#ifdef CONFIG_IP_PIMSM
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
	if (vifi+1 == mrt->maxvif) {
		for (tmp = vifi-1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
	write_unlock_bh(&mrt_lock);
	dev_set_allmulti(dev, -1);
	if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		ip_rt_multicast_event(in_dev);
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);
static inline void ipmr_cache_free(struct mfc_cache *c)
	kmem_cache_free(mrt_cachep, c);
/* Destroy an unresolved cache entry, killing queued skbs
   and reporting an error to netlink readers.
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
	struct net *net = read_pnet(&mrt->net);
	atomic_dec(&mrt->cache_resolve_queue_len);
	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));
			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
/* Timer process for the unresolved queue. */
static void ipmr_expire_process(unsigned long arg)
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long expires;
	struct mfc_cache *c, *next;
	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + HZ/10);
	if (list_empty(&mrt->mfc_unres_queue))
	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
		ipmr_destroy_unres(mrt, c);
	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
	spin_unlock(&mfc_unres_lock);
/* Fill the oifs list. Called under write-locked mrt_lock. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
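	/* A ttl of 255 marks a vif as not forwarding for this entry; only
	 * vifs with a threshold below 255 become outgoing interfaces. */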
	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	if (VIF_EXISTS(mrt, vifi))
	switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		if (mrt->mroute_reg_vif_num >= 0)
		dev = ipmr_reg_vif(net, mrt);
		err = dev_set_allmulti(dev, 1);
			unregister_netdevice(dev);
		dev = ipmr_new_tunnel(net, vifc);
		err = dev_set_allmulti(dev, 1);
			ipmr_del_tunnel(dev, vifc);
	case VIFF_USE_IFINDEX:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && dev->ip_ptr == NULL) {
				return -EADDRNOTAVAIL;
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
	if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
		return -EADDRNOTAVAIL;
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	ip_rt_multicast_event(in_dev);
	 *	Fill in the VIF structures
	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev->iflink;
	/* And finish the update by writing the critical data */
	write_lock_bh(&mrt_lock);
#ifdef CONFIG_IP_PIMSM
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
	if (vifi+1 > mrt->maxvif)
		mrt->maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
	int line = MFC_HASH(mcastgrp, origin);
	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
 *	Allocate a multicast cache entry
static struct mfc_cache *ipmr_cache_alloc(void)
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	c->mfc_un.res.minvif = MAXVIFS;
static struct mfc_cache *ipmr_cache_alloc_unres(void)
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10*HZ;
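	/* An unresolved entry lives for 10 seconds at most; if the daemon
	 * has not installed a route by then, ipmr_expire_process() kills it. */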
 *	A cache entry has gone into a resolved state from queued
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
	 *	Play the pending entries through our router
	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
			if (ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = (skb_tail_pointer(skb) -
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
			ip_mr_forward(net, mrt, skb, c, 0);
 *	Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 *	expects the following bizarre scheme.
 *	Called under mrt_lock.
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
	const int ihl = ip_hdrlen(pkt);
	struct igmphdr *igmp;
#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
		skb = alloc_skb(128, GFP_ATOMIC);
#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix ihl, length etc.
		   And all this only to mangle msg->im_msgtype and
		   to set msg->im_mbz to "mbz" :-)
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	skb->network_header = skb->tail;
	skb_copy_to_linear_data(skb, pkt->data, ihl);
	ip_hdr(skb)->protocol = 0;	/* Flag to the kernel this is a route add */
	msg = (struct igmpmsg *)skb_network_header(skb);
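	/* struct igmpmsg deliberately overlays the IP header: im_msgtype
	 * shares the ttl byte, im_mbz the protocol byte (zeroed above, hence
	 * "must be zero"), and im_src/im_dst line up with saddr/daddr. */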
	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
	igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
	msg->im_msgtype = assert;
	ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
	skb->transport_header = skb->network_header;
	if (mrt->mroute_sk == NULL) {
	ret = sock_queue_rcv_skb(mrt->mroute_sk, skb);
		printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
 *	Queue a packet for resolution. It gets a locked cache entry!
ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
	const struct iphdr *iph = ip_hdr(skb);
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
		 *	Create a new entry if allowable
	if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
	    (c = ipmr_cache_alloc_unres()) == NULL) {
		spin_unlock_bh(&mfc_unres_lock);
		 *	Fill in the new cache entry
		c->mfc_origin = iph->saddr;
		c->mfc_mcastgrp = iph->daddr;
		 *	Reflect first query at mrouted.
		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
			/* If the report failed throw the cache entry
			spin_unlock_bh(&mfc_unres_lock);
		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc_unres_queue);
		mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
	 *	See if we can append the packet
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
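		/* The qlen check above bounds how many packets may wait on a
		 * single unresolved entry; anything over the limit is
		 * dropped, not queued. */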
	spin_unlock_bh(&mfc_unres_lock);
 *	MFC cache manipulation by user space mroute daemon
static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
	struct mfc_cache *c, *next;
	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
	list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			write_lock_bh(&mrt_lock);
			write_unlock_bh(&mrt_lock);
static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock)
	struct mfc_cache *uc, *c;
	if (mfc->mfcc_parent >= MAXVIFS)
	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
	if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
	c = ipmr_cache_alloc();
	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		c->mfc_flags |= MFC_STATIC;
	write_lock_bh(&mrt_lock);
	list_add(&c->list, &mrt->mfc_cache_array[line]);
	write_unlock_bh(&mrt_lock);
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
 *	Close the multicast socket, and clear the vif tables etc
static void mroute_clean_tables(struct mr_table *mrt)
	struct mfc_cache *c, *next;
	 *	Shut down all active vif entries
	for (i = 0; i < mrt->maxvif; i++) {
		if (!(mrt->vif_table[i].flags & VIFF_STATIC))
			vif_delete(mrt, i, 0, &list);
	unregister_netdevice_many(&list);
	for (i = 0; i < MFC_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
			if (c->mfc_flags & MFC_STATIC)
			write_lock_bh(&mrt_lock);
			write_unlock_bh(&mrt_lock);
	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
			ipmr_destroy_unres(mrt, c);
		spin_unlock_bh(&mfc_unres_lock);
static void mrtsock_destruct(struct sock *sk)
	struct net *net = sock_net(sk);
	struct mr_table *mrt;
	ipmr_for_each_table(mrt, net) {
		if (sk == mrt->mroute_sk) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			write_lock_bh(&mrt_lock);
			mrt->mroute_sk = NULL;
			write_unlock_bh(&mrt_lock);
			mroute_clean_tables(mrt);
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
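/*
 *	A minimal sketch of the expected userspace calling sequence, assuming
 *	the usual mrouted-style setup (MRT_INIT only succeeds on a raw IGMP
 *	socket, as checked below):
 *
 *		int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *		int one = 1;
 *		setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *		... MRT_ADD_VIF / MRT_ADD_MFC as interfaces and routes appear ...
 */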
int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
	struct net *net = sock_net(sk);
	struct mr_table *mrt;
	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (optname != MRT_INIT) {
		if (sk != mrt->mroute_sk && !capable(CAP_NET_ADMIN))
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		if (optlen != sizeof(int))
			return -ENOPROTOOPT;
		if (mrt->mroute_sk) {
		ret = ip_ra_control(sk, 1, mrtsock_destruct);
			write_lock_bh(&mrt_lock);
			mrt->mroute_sk = sk;
			write_unlock_bh(&mrt_lock);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
		if (sk != mrt->mroute_sk)
		return ip_ra_control(sk, 0, NULL);
		if (optlen != sizeof(vif))
		if (copy_from_user(&vif, optval, sizeof(vif)))
		if (vif.vifc_vifi >= MAXVIFS)
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif, sk == mrt->mroute_sk);
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		 *	Manipulate the forwarding caches. These live
		 *	in a sort of kernel/user symbiosis.
		if (optlen != sizeof(mfc))
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
		if (optname == MRT_DEL_MFC)
			ret = ipmr_mfc_delete(mrt, &mfc);
			ret = ipmr_mfc_add(net, mrt, &mfc, sk == mrt->mroute_sk);
	 *	Control PIM assert.
		if (get_user(v, (int __user *)optval))
		mrt->mroute_do_assert = (v) ? 1 : 0;
#ifdef CONFIG_IP_PIMSM
		if (get_user(v, (int __user *)optval))
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
		if (optlen != sizeof(u32))
		if (get_user(v, (u32 __user *)optval))
		if (sk == mrt->mroute_sk)
		if (!ipmr_new_table(net, v))
		raw_sk(sk)->ipmr_table = v;
		 *	Spurious command, or MRT_VERSION which you cannot
		return -ENOPROTOOPT;
 *	Getsockopt support for the multicast routing system.
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
	struct net *net = sock_net(sk);
	struct mr_table *mrt;
	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	    optname != MRT_ASSERT)
		return -ENOPROTOOPT;
	if (get_user(olr, optlen))
	olr = min_t(unsigned int, olr, sizeof(int));
	if (put_user(olr, optlen))
	if (optname == MRT_VERSION)
#ifdef CONFIG_IP_PIMSM
	else if (optname == MRT_PIM)
		val = mrt->mroute_do_pim;
		val = mrt->mroute_do_assert;
	if (copy_to_user(optval, &val, olr))
 *	The IP multicast ioctl support routines.
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;
	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
		if (copy_from_user(&vr, arg, sizeof(vr)))
		if (vr.vifi >= mrt->maxvif)
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);
			if (copy_to_user(arg, &vr, sizeof(vr)))
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
		if (copy_from_user(&sr, arg, sizeof(sr)))
		read_lock(&mrt_lock);
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);
			if (copy_to_user(arg, &sr, sizeof(sr)))
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
		return -ENOIOCTLCMD;
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	if (event != NETDEV_UNREGISTER)
	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
				vif_delete(mrt, ct, 1, &list);
	unregister_netdevice_many(&list);
static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
 *	Encapsulate a packet by attaching a valid IPIP header to it.
 *	This avoids tunnel drivers and other mess and gives us the speed so
 *	important for multicast video.
static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
	struct iphdr *old_iph = ip_hdr(skb);
	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph->tos = old_iph->tos;
	iph->ttl = old_iph->ttl;
	iph->protocol = IPPROTO_IPIP;
	iph->tot_len = htons(skb->len);
	ip_select_ident(iph, skb_dst(skb), NULL);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
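	/* At this point the original header has become the inner (transport)
	 * header and a fresh outer IPIP header sits in front of it. */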
static inline int ipmr_forward_finish(struct sk_buff *skb)
	struct ip_options *opt = &(IPCB(skb)->opt);
	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
	if (unlikely(opt->optlen))
		ip_forward_options(skb);
	return dst_output(skb);
 *	Processing handlers for ipmr_forward
static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	if (vif->dev == NULL)
#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
	if (vif->flags & VIFF_TUNNEL) {
		struct flowi fl = { .oif = vif->link,
					      { .daddr = vif->remote,
						.saddr = vif->local,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(net, &rt, &fl))
		encap = sizeof(struct iphdr);
		struct flowi fl = { .oif = vif->link,
					      { .daddr = iph->daddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(net, &rt, &fl))
	dev = rt->u.dst.dev;
	if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		   allow us to send ICMP, so the packets will disappear
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
	encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;
	if (skb_cow(skb, encap)) {
	vif->bytes_out += skb->len;
	skb_dst_set(skb, &rt->u.dst);
	ip_decrease_ttl(ip_hdr(skb));
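	/* The TTL is decremented once per forwarded copy here; the per-vif
	 * threshold test already happened in ip_mr_forward(). */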
	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	IPCB(skb)->flags |= IPSKB_FORWARDED;
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but also after forwarding on all output
	 * interfaces. Clearly, if an mrouter runs a multicasting
	 * program, that program should receive packets regardless of the
	 * interface it has joined on.
	 * If we did not do this, the program would have to join on all
	 * interfaces. On the other hand, a multihomed host (or router, but
	 * not mrouter) cannot join on more than one interface, since that
	 * would result in receiving multiple packets.
	NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
	for (ct = mrt->maxvif-1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
/* "local" means that we should preserve one skb (for local delivery) */
static int ip_mr_forward(struct net *net, struct mr_table *mrt,
			 struct sk_buff *skb, struct mfc_cache *cache,
	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	if (mrt->vif_table[vif].dev != skb->dev) {
		if (skb_rtable(skb)->fl.iif == 0) {
			/* It is our own packet, looped back.
			   Very complicated situation...
			   The best workaround until routing daemons are
			   fixed is to not redistribute a packet if it was
			   sent through the wrong interface. This means that
			   multicast applications WILL NOT work for
			   (S,G) entries whose default multicast route points
			   to the wrong oif. In any case, it is not a good
			   idea to run multicasting applications on a router.
		cache->mfc_un.res.wrong_if++;
		true_vifi = ipmr_find_vif(mrt, skb->dev);
		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* PIM-SM uses asserts when switching from the RPT to the SPT,
		       so we cannot check that a packet arrived on an oif.
		       That is bad, but otherwise we would need to move a pretty
		       large chunk of pimd into the kernel. Ough... --ANK
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
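			/* WRONGVIF upcalls are rate-limited to one per
			 * MFC_ASSERT_THRESH per cache entry, using the
			 * last_assert timestamp refreshed above. */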
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;
	for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
					ipmr_queue_xmit(net, mrt, skb2, cache,
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
			ipmr_queue_xmit(net, mrt, skb, cache, psend);
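	/* Each outgoing vif except the last gets a clone; the final transmit
	 * consumes the original skb unless local delivery still needs it, in
	 * which case the last copy is cloned too. */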
 *	Multicast packets for forwarding arrive here
int ip_mr_input(struct sk_buff *skb)
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;
	/* A packet is looped back after forwarding; it should not be
	   forwarded a second time, but it can still be delivered locally.
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
	err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
	if (IPCB(skb)->opt.router_alert) {
		if (ip_call_ra_chain(skb))
	} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
		/* IGMPv1 (and broken IGMPv2 implementations such as
		   Cisco IOS <= 11.2(8)) do not put the router alert
		   option in IGMP packets destined to routable
		   groups. This is very bad, because it means
		   that we can forward NO IGMP messages.
		read_lock(&mrt_lock);
		if (mrt->mroute_sk) {
			raw_rcv(mrt->mroute_sk, skb);
			read_unlock(&mrt_lock);
		read_unlock(&mrt_lock);
	read_lock(&mrt_lock);
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
	 *	No usable cache entry
	if (cache == NULL) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
				read_unlock(&mrt_lock);
		vif = ipmr_find_vif(mrt, skb->dev);
			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);
		read_unlock(&mrt_lock);
	ip_mr_forward(net, mrt, skb, cache, local);
	read_unlock(&mrt_lock);
		return ip_local_deliver(skb);
		return ip_local_deliver(skb);
#ifdef CONFIG_IP_PIMSM
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;
	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	   a. packet is really destined to a multicast group
	   b. packet is not a NULL-REGISTER
	   c. packet is not truncated
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);
	if (reg_dev == NULL)
	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->pkt_type = PACKET_HOST;
	reg_dev->stats.rx_bytes += skb->len;
	reg_dev->stats.rx_packets++;
#ifdef CONFIG_IP_PIMSM_V1
 * Handle IGMP messages of PIMv1
int pim_rcv_v1(struct sk_buff *skb)
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;
	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
	pim = igmp_hdr(skb);
	if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;
	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
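	/* As noted in the changelog above: accept a register whose checksum
	 * covers only the 8-byte PIM header (per the spec), and fall back to
	 * a whole-packet checksum for older peers. */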
	if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c,
	struct rtnexthop *nhp;
	u8 *b = skb_tail_pointer(skb);
	struct rtattr *mp_head;
	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent > MAXVIFS)
	if (VIF_EXISTS(mrt, c->mfc_parent))
		RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex);
	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
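			/* Each forwarding oif is reported as an RTA_MULTIPATH
			 * nexthop, with the vif's TTL threshold carried in
			 * rtnh_hops. */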
	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
int ipmr_get_route(struct net *net,
		   struct sk_buff *skb, struct rtmsg *rtm, int nowait)
	struct mr_table *mrt;
	struct mfc_cache *cache;
	struct rtable *rt = skb_rtable(skb);
	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	read_lock(&mrt_lock);
	cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst);
	if (cache == NULL) {
		struct sk_buff *skb2;
		struct net_device *dev;
			read_unlock(&mrt_lock);
		if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
		skb2 = skb_clone(skb, GFP_ATOMIC);
			read_unlock(&mrt_lock);
		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = rt->rt_src;
		iph->daddr = rt->rt_dst;
		err = ipmr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);
	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;
	err = ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
#ifdef CONFIG_PROC_FS
 *	The /proc interfaces to multicast routing: /proc/ip_mr_cache and /proc/ip_mr_vif
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					   struct ipmr_vif_iter *iter,
	struct mr_table *mrt = iter->mrt;
	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			return &mrt->vif_table[iter->ct];
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;
	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
		return ERR_PTR(-ENOENT);
	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);
	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
		return &mrt->vif_table[iter->ct];
static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
	read_unlock(&mrt_lock);
static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;
	if (v == SEQ_START_TOKEN) {
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";
			   "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
static int ipmr_vif_open(struct inode *inode, struct file *file)
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_vif_open,
	.llseek  = seq_lseek,
	.release = seq_release_net,
struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;
	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		list_for_each_entry(mfc, it->cache, list)
	read_unlock(&mrt_lock);
	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
	spin_unlock_bh(&mfc_unres_lock);
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;
	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
		return ERR_PTR(-ENOENT);
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;
	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);
	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);
	if (it->cache == &mrt->mfc_unres_queue)
	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
	while (++it->ct < MFC_LINES) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		if (list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);
	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mrt->mfc_unres_queue;
	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);
	spin_unlock_bh(&mfc_unres_lock);
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;
	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_array[it->ct])
		read_unlock(&mrt_lock);
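/* Note the lock handoff during iteration: the resolved hash is walked under
 * mrt_lock, which ipmr_mfc_seq_next() drops before taking mfc_unres_lock for
 * the unresolved queue; ipmr_mfc_seq_stop() releases whichever is still held.
 */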
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
	if (v == SEQ_START_TOKEN) {
		 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;
		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					   n, mfc->mfc_un.res.ttls[n]);
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		seq_putc(seq, '\n');
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
static int ipmr_mfc_open(struct inode *inode, struct file *file)
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.llseek  = seq_lseek,
	.release = seq_release_net,
#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
 *	Setup for IP multicast routing
static int __net_init ipmr_net_init(struct net *net)
	err = ipmr_rules_init(net);
#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
	if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
		goto proc_cache_fail;
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "ip_mr_vif");
	ipmr_rules_exit(net);
static void __net_exit ipmr_net_exit(struct net *net)
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "ip_mr_cache");
	proc_net_remove(net, "ip_mr_vif");
	ipmr_rules_exit(net);
static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
int __init ip_mr_init(void)
	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
	err = register_pernet_subsys(&ipmr_net_ops);
		goto reg_pernet_fail;
	err = register_netdevice_notifier(&ip_mr_notifier);
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		printk(KERN_ERR "ip_mr_init: can't add PIM protocol\n");
		goto add_proto_fail;
#ifdef CONFIG_IP_PIMSM_V2
	unregister_netdevice_notifier(&ip_mr_notifier);
	unregister_pernet_subsys(&ipmr_net_ops);
	kmem_cache_destroy(mrt_cachep);