2 * Linux IPv6 multicast routing support for BSD pim6sd
3 * Based on net/ipv4/ipmr.c.
5 * (c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
6 * LSIIT Laboratory, Strasbourg, France
7 * (c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
9 * Copyright (C)2007,2008 USAGI/WIDE Project
10 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
19 #include <asm/system.h>
20 #include <asm/uaccess.h>
21 #include <linux/types.h>
22 #include <linux/sched.h>
23 #include <linux/errno.h>
24 #include <linux/timer.h>
26 #include <linux/kernel.h>
27 #include <linux/fcntl.h>
28 #include <linux/stat.h>
29 #include <linux/socket.h>
30 #include <linux/inet.h>
31 #include <linux/netdevice.h>
32 #include <linux/inetdevice.h>
33 #include <linux/proc_fs.h>
34 #include <linux/seq_file.h>
35 #include <linux/init.h>
36 #include <linux/slab.h>
37 #include <linux/compat.h>
38 #include <net/protocol.h>
39 #include <linux/skbuff.h>
42 #include <linux/notifier.h>
43 #include <linux/if_arp.h>
44 #include <net/checksum.h>
45 #include <net/netlink.h>
46 #include <net/fib_rules.h>
49 #include <net/ip6_route.h>
50 #include <linux/mroute6.h>
51 #include <linux/pim.h>
52 #include <net/addrconf.h>
53 #include <linux/netfilter_ipv6.h>
54 #include <linux/export.h>
55 #include <net/ip6_checksum.h>
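/*
 * Per-table multicast routing state (struct mr6_table): the controlling
 * mroute6 socket, the expiry timer and queue for unresolved entries, the
 * hash array of resolved (S,G) entries, the MIF table and, with PIM-SM
 * support, the index of the register VIF.  The small fib_rule wrappers
 * that follow are used when multiple tables are selected by policy rules.
 */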
58 struct list_head list;
63 struct sock *mroute6_sk;
64 struct timer_list ipmr_expire_timer;
65 struct list_head mfc6_unres_queue;
66 struct list_head mfc6_cache_array[MFC6_LINES];
67 struct mif_device vif6_table[MAXMIFS];
69 atomic_t cache_resolve_queue_len;
72 #ifdef CONFIG_IPV6_PIMSM_V2
73 int mroute_reg_vif_num;
78 struct fib_rule common;
82 struct mr6_table *mrt;
85 /* Big lock, protecting vif table, mrt cache and mroute socket state.
86 Note that the changes are semaphored via rtnl_lock.
89 static DEFINE_RWLOCK(mrt_lock);
92 * Multicast router control variables
95 #define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
97 /* Special spinlock for queue of unresolved entries */
98 static DEFINE_SPINLOCK(mfc_unres_lock);
100 /* We return to Alan's original scheme. The hash table of resolved
101 entries is changed only in process context and is protected by the
102 weak lock mrt_lock. The queue of unresolved entries is protected by
103 the strong spinlock mfc_unres_lock.
105 Thus the data path is entirely free of exclusive locks.
108 static struct kmem_cache *mrt_cachep __read_mostly;
110 static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
111 static void ip6mr_free_table(struct mr6_table *mrt);
113 static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
114 struct sk_buff *skb, struct mfc6_cache *cache);
115 static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
116 mifi_t mifi, int assert);
117 static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
118 struct mfc6_cache *c, struct rtmsg *rtm);
119 static int ip6mr_rtm_dumproute(struct sk_buff *skb,
120 struct netlink_callback *cb);
121 static void mroute_clean_tables(struct mr6_table *mrt, bool all);
122 static void ipmr_expire_process(unsigned long arg);
124 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
125 #define ip6mr_for_each_table(mrt, net) \
126 list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
128 static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
130 struct mr6_table *mrt;
132 ip6mr_for_each_table(mrt, net) {
139 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
140 struct mr6_table **mrt)
143 struct ip6mr_result res;
144 struct fib_lookup_arg arg = {
146 .flags = FIB_LOOKUP_NOREF,
149 err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
150 flowi6_to_flowi(flp6), 0, &arg);
157 static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
158 int flags, struct fib_lookup_arg *arg)
160 struct ip6mr_result *res = arg->result;
161 struct mr6_table *mrt;
163 switch (rule->action) {
166 case FR_ACT_UNREACHABLE:
168 case FR_ACT_PROHIBIT:
170 case FR_ACT_BLACKHOLE:
175 mrt = ip6mr_get_table(rule->fr_net, rule->table);
182 static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
187 static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
191 static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
192 struct fib_rule_hdr *frh, struct nlattr **tb)
197 static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
203 static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
204 struct fib_rule_hdr *frh)
212 static const struct fib_rules_ops __net_initdata ip6mr_rules_ops_template = {
213 .family = RTNL_FAMILY_IP6MR,
214 .rule_size = sizeof(struct ip6mr_rule),
215 .addr_size = sizeof(struct in6_addr),
216 .action = ip6mr_rule_action,
217 .match = ip6mr_rule_match,
218 .configure = ip6mr_rule_configure,
219 .compare = ip6mr_rule_compare,
220 .default_pref = fib_default_rule_pref,
221 .fill = ip6mr_rule_fill,
222 .nlgroup = RTNLGRP_IPV6_RULE,
223 .policy = ip6mr_rule_policy,
224 .owner = THIS_MODULE,
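/*
 * Per-namespace setup for the multiple-tables case: register the fib-rules
 * ops, create the default table and install a default (lowest-priority)
 * rule pointing at it; on failure the ops are unregistered again.
 */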
227 static int __net_init ip6mr_rules_init(struct net *net)
229 struct fib_rules_ops *ops;
230 struct mr6_table *mrt;
233 ops = fib_rules_register(&ip6mr_rules_ops_template, net);
237 INIT_LIST_HEAD(&net->ipv6.mr6_tables);
239 mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
245 err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
249 net->ipv6.mr6_rules_ops = ops;
255 fib_rules_unregister(ops);
259 static void __net_exit ip6mr_rules_exit(struct net *net)
261 struct mr6_table *mrt, *next;
264 list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
265 list_del(&mrt->list);
266 ip6mr_free_table(mrt);
269 fib_rules_unregister(net->ipv6.mr6_rules_ops);
272 #define ip6mr_for_each_table(mrt, net) \
273 for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
275 static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
277 return net->ipv6.mrt6;
280 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
281 struct mr6_table **mrt)
283 *mrt = net->ipv6.mrt6;
287 static int __net_init ip6mr_rules_init(struct net *net)
289 net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
290 return net->ipv6.mrt6 ? 0 : -ENOMEM;
293 static void __net_exit ip6mr_rules_exit(struct net *net)
296 ip6mr_free_table(net->ipv6.mrt6);
297 net->ipv6.mrt6 = NULL;
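/*
 * Return the table with the given id, creating it if necessary: a new
 * table gets empty cache lists, an initialised (but not yet running)
 * expiry timer and, under CONFIG_IPV6_PIMSM_V2, no register VIF yet
 * (mroute_reg_vif_num = -1).
 */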
302 static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
304 struct mr6_table *mrt;
307 mrt = ip6mr_get_table(net, id);
311 mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
315 write_pnet(&mrt->net, net);
317 /* Forwarding cache */
318 for (i = 0; i < MFC6_LINES; i++)
319 INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);
321 INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
323 setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
326 #ifdef CONFIG_IPV6_PIMSM_V2
327 mrt->mroute_reg_vif_num = -1;
329 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
330 list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
335 static void ip6mr_free_table(struct mr6_table *mrt)
337 del_timer_sync(&mrt->ipmr_expire_timer);
338 mroute_clean_tables(mrt, true);
342 #ifdef CONFIG_PROC_FS
344 struct ipmr_mfc_iter {
345 struct seq_net_private p;
346 struct mr6_table *mrt;
347 struct list_head *cache;
352 static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
353 struct ipmr_mfc_iter *it, loff_t pos)
355 struct mr6_table *mrt = it->mrt;
356 struct mfc6_cache *mfc;
358 read_lock(&mrt_lock);
359 for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
360 it->cache = &mrt->mfc6_cache_array[it->ct];
361 list_for_each_entry(mfc, it->cache, list)
365 read_unlock(&mrt_lock);
367 spin_lock_bh(&mfc_unres_lock);
368 it->cache = &mrt->mfc6_unres_queue;
369 list_for_each_entry(mfc, it->cache, list)
372 spin_unlock_bh(&mfc_unres_lock);
379 * The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
382 struct ipmr_vif_iter {
383 struct seq_net_private p;
384 struct mr6_table *mrt;
388 static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
389 struct ipmr_vif_iter *iter,
392 struct mr6_table *mrt = iter->mrt;
394 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
395 if (!MIF_EXISTS(mrt, iter->ct))
398 return &mrt->vif6_table[iter->ct];
403 static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
406 struct ipmr_vif_iter *iter = seq->private;
407 struct net *net = seq_file_net(seq);
408 struct mr6_table *mrt;
410 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
412 return ERR_PTR(-ENOENT);
416 read_lock(&mrt_lock);
417 return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
421 static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
423 struct ipmr_vif_iter *iter = seq->private;
424 struct net *net = seq_file_net(seq);
425 struct mr6_table *mrt = iter->mrt;
428 if (v == SEQ_START_TOKEN)
429 return ip6mr_vif_seq_idx(net, iter, 0);
431 while (++iter->ct < mrt->maxvif) {
432 if (!MIF_EXISTS(mrt, iter->ct))
434 return &mrt->vif6_table[iter->ct];
439 static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
442 read_unlock(&mrt_lock);
445 static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
447 struct ipmr_vif_iter *iter = seq->private;
448 struct mr6_table *mrt = iter->mrt;
450 if (v == SEQ_START_TOKEN) {
452 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
454 const struct mif_device *vif = v;
455 const char *name = vif->dev ? vif->dev->name : "none";
458 "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
459 vif - mrt->vif6_table,
460 name, vif->bytes_in, vif->pkt_in,
461 vif->bytes_out, vif->pkt_out,
467 static const struct seq_operations ip6mr_vif_seq_ops = {
468 .start = ip6mr_vif_seq_start,
469 .next = ip6mr_vif_seq_next,
470 .stop = ip6mr_vif_seq_stop,
471 .show = ip6mr_vif_seq_show,
474 static int ip6mr_vif_open(struct inode *inode, struct file *file)
476 return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
477 sizeof(struct ipmr_vif_iter));
480 static const struct file_operations ip6mr_vif_fops = {
481 .owner = THIS_MODULE,
482 .open = ip6mr_vif_open,
485 .release = seq_release_net,
488 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
490 struct ipmr_mfc_iter *it = seq->private;
491 struct net *net = seq_file_net(seq);
492 struct mr6_table *mrt;
494 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
496 return ERR_PTR(-ENOENT);
499 return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
503 static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
505 struct mfc6_cache *mfc = v;
506 struct ipmr_mfc_iter *it = seq->private;
507 struct net *net = seq_file_net(seq);
508 struct mr6_table *mrt = it->mrt;
512 if (v == SEQ_START_TOKEN)
513 return ipmr_mfc_seq_idx(net, seq->private, 0);
515 if (mfc->list.next != it->cache)
516 return list_entry(mfc->list.next, struct mfc6_cache, list);
518 if (it->cache == &mrt->mfc6_unres_queue)
521 BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);
523 while (++it->ct < MFC6_LINES) {
524 it->cache = &mrt->mfc6_cache_array[it->ct];
525 if (list_empty(it->cache))
527 return list_first_entry(it->cache, struct mfc6_cache, list);
530 /* exhausted cache_array, show unresolved */
531 read_unlock(&mrt_lock);
532 it->cache = &mrt->mfc6_unres_queue;
535 spin_lock_bh(&mfc_unres_lock);
536 if (!list_empty(it->cache))
537 return list_first_entry(it->cache, struct mfc6_cache, list);
540 spin_unlock_bh(&mfc_unres_lock);
546 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
548 struct ipmr_mfc_iter *it = seq->private;
549 struct mr6_table *mrt = it->mrt;
551 if (it->cache == &mrt->mfc6_unres_queue)
552 spin_unlock_bh(&mfc_unres_lock);
553 else if (it->cache == &mrt->mfc6_cache_array[it->ct])
554 read_unlock(&mrt_lock);
557 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
561 if (v == SEQ_START_TOKEN) {
565 "Iif Pkts Bytes Wrong Oifs\n");
567 const struct mfc6_cache *mfc = v;
568 const struct ipmr_mfc_iter *it = seq->private;
569 struct mr6_table *mrt = it->mrt;
571 seq_printf(seq, "%pI6 %pI6 %-3hd",
572 &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
575 if (it->cache != &mrt->mfc6_unres_queue) {
576 seq_printf(seq, " %8lu %8lu %8lu",
578 mfc->mfc_un.res.bytes,
579 mfc->mfc_un.res.wrong_if);
580 for (n = mfc->mfc_un.res.minvif;
581 n < mfc->mfc_un.res.maxvif; n++) {
582 if (MIF_EXISTS(mrt, n) &&
583 mfc->mfc_un.res.ttls[n] < 255)
586 n, mfc->mfc_un.res.ttls[n]);
589 /* unresolved mfc_caches don't contain
590 * pkt, bytes and wrong_if values
592 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
599 static const struct seq_operations ipmr_mfc_seq_ops = {
600 .start = ipmr_mfc_seq_start,
601 .next = ipmr_mfc_seq_next,
602 .stop = ipmr_mfc_seq_stop,
603 .show = ipmr_mfc_seq_show,
606 static int ipmr_mfc_open(struct inode *inode, struct file *file)
608 return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
609 sizeof(struct ipmr_mfc_iter));
612 static const struct file_operations ip6mr_mfc_fops = {
613 .owner = THIS_MODULE,
614 .open = ipmr_mfc_open,
617 .release = seq_release_net,
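/*
 * pim6_rcv() handles PIM Register messages received via the IPPROTO_PIM
 * protocol handler: validate the register header and checksum, make sure
 * the inner packet really is IPv6 multicast, then strip the outer headers
 * and feed the decapsulated packet back through the pim6reg device.
 */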
621 #ifdef CONFIG_IPV6_PIMSM_V2
623 static int pim6_rcv(struct sk_buff *skb)
625 struct pimreghdr *pim;
626 struct ipv6hdr *encap;
627 struct net_device *reg_dev = NULL;
628 struct net *net = dev_net(skb->dev);
629 struct mr6_table *mrt;
630 struct flowi6 fl6 = {
631 .flowi6_iif = skb->dev->ifindex,
632 .flowi6_mark = skb->mark,
636 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
639 pim = (struct pimreghdr *)skb_transport_header(skb);
640 if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
641 (pim->flags & PIM_NULL_REGISTER) ||
642 (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
643 sizeof(*pim), IPPROTO_PIM,
644 csum_partial((void *)pim, sizeof(*pim), 0)) &&
645 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
648 /* check if the inner packet is destined to mcast group */
649 encap = (struct ipv6hdr *)(skb_transport_header(skb) +
652 if (!ipv6_addr_is_multicast(&encap->daddr) ||
653 encap->payload_len == 0 ||
654 ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
657 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
659 reg_vif_num = mrt->mroute_reg_vif_num;
661 read_lock(&mrt_lock);
662 if (reg_vif_num >= 0)
663 reg_dev = mrt->vif6_table[reg_vif_num].dev;
666 read_unlock(&mrt_lock);
671 skb->mac_header = skb->network_header;
672 skb_pull(skb, (u8 *)encap - skb->data);
673 skb_reset_network_header(skb);
674 skb->protocol = htons(ETH_P_IPV6);
675 skb->ip_summed = CHECKSUM_NONE;
676 skb->pkt_type = PACKET_HOST;
678 skb_tunnel_rx(skb, reg_dev);
689 static const struct inet6_protocol pim6_protocol = {
693 /* Service routines creating virtual interfaces: PIMREG */
695 static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
696 struct net_device *dev)
698 struct net *net = dev_net(dev);
699 struct mr6_table *mrt;
700 struct flowi6 fl6 = {
701 .flowi6_oif = dev->ifindex,
702 .flowi6_iif = skb->skb_iif,
703 .flowi6_mark = skb->mark,
707 err = ip6mr_fib_lookup(net, &fl6, &mrt);
713 read_lock(&mrt_lock);
714 dev->stats.tx_bytes += skb->len;
715 dev->stats.tx_packets++;
716 ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
717 read_unlock(&mrt_lock);
722 static const struct net_device_ops reg_vif_netdev_ops = {
723 .ndo_start_xmit = reg_vif_xmit,
726 static void reg_vif_setup(struct net_device *dev)
728 dev->type = ARPHRD_PIMREG;
729 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
730 dev->flags = IFF_NOARP;
731 dev->netdev_ops = &reg_vif_netdev_ops;
732 dev->destructor = free_netdev;
733 dev->features |= NETIF_F_NETNS_LOCAL;
736 static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
738 struct net_device *dev;
741 if (mrt->id == RT6_TABLE_DFLT)
742 sprintf(name, "pim6reg");
744 sprintf(name, "pim6reg%u", mrt->id);
746 dev = alloc_netdev(0, name, reg_vif_setup);
750 dev_net_set(dev, net);
752 if (register_netdevice(dev)) {
765 /* allow the register to be completed before unregistering. */
769 unregister_netdevice(dev);
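/*
 * mif6_delete() tears down one MIF: clear the slot under mrt_lock, shrink
 * mrt->maxvif if the last slot went away, drop the allmulti reference and
 * the mc_forwarding count, and (for register VIFs) queue the pim6reg
 * device for unregistration.
 */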
778 static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
780 struct mif_device *v;
781 struct net_device *dev;
782 struct inet6_dev *in6_dev;
784 if (vifi < 0 || vifi >= mrt->maxvif)
785 return -EADDRNOTAVAIL;
787 v = &mrt->vif6_table[vifi];
789 write_lock_bh(&mrt_lock);
794 write_unlock_bh(&mrt_lock);
795 return -EADDRNOTAVAIL;
798 #ifdef CONFIG_IPV6_PIMSM_V2
799 if (vifi == mrt->mroute_reg_vif_num)
800 mrt->mroute_reg_vif_num = -1;
803 if (vifi + 1 == mrt->maxvif) {
805 for (tmp = vifi - 1; tmp >= 0; tmp--) {
806 if (MIF_EXISTS(mrt, tmp))
809 mrt->maxvif = tmp + 1;
812 write_unlock_bh(&mrt_lock);
814 dev_set_allmulti(dev, -1);
816 in6_dev = __in6_dev_get(dev);
818 in6_dev->cnf.mc_forwarding--;
820 if (v->flags & MIFF_REGISTER)
821 unregister_netdevice_queue(dev, head);
827 static inline void ip6mr_cache_free(struct mfc6_cache *c)
829 kmem_cache_free(mrt_cachep, c);
832 /* Destroy an unresolved cache entry, killing queued skbs
833 and reporting error to netlink readers.
836 static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
838 struct net *net = read_pnet(&mrt->net);
841 atomic_dec(&mrt->cache_resolve_queue_len);
843 while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
844 if (ipv6_hdr(skb)->version == 0) {
845 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
846 nlh->nlmsg_type = NLMSG_ERROR;
847 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
848 skb_trim(skb, nlh->nlmsg_len);
849 ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
850 rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
859 /* Timer process for all the unresolved queue. */
861 static void ipmr_do_expire_process(struct mr6_table *mrt)
863 unsigned long now = jiffies;
864 unsigned long expires = 10 * HZ;
865 struct mfc6_cache *c, *next;
867 list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
868 if (time_after(c->mfc_un.unres.expires, now)) {
870 unsigned long interval = c->mfc_un.unres.expires - now;
871 if (interval < expires)
877 ip6mr_destroy_unres(mrt, c);
880 if (!list_empty(&mrt->mfc6_unres_queue))
881 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
884 static void ipmr_expire_process(unsigned long arg)
886 struct mr6_table *mrt = (struct mr6_table *)arg;
888 if (!spin_trylock(&mfc_unres_lock)) {
889 mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
893 if (!list_empty(&mrt->mfc6_unres_queue))
894 ipmr_do_expire_process(mrt);
896 spin_unlock(&mfc_unres_lock);
899 /* Fill oifs list. It is called under write locked mrt_lock. */
901 static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
906 cache->mfc_un.res.minvif = MAXMIFS;
907 cache->mfc_un.res.maxvif = 0;
908 memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
910 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
911 if (MIF_EXISTS(mrt, vifi) &&
912 ttls[vifi] && ttls[vifi] < 255) {
913 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
914 if (cache->mfc_un.res.minvif > vifi)
915 cache->mfc_un.res.minvif = vifi;
916 if (cache->mfc_un.res.maxvif <= vifi)
917 cache->mfc_un.res.maxvif = vifi + 1;
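/*
 * mif6_add() wires a new MIF into the table: for MIFF_REGISTER it creates
 * the pim6reg device, otherwise it looks the device up by ifindex, puts it
 * into allmulti mode, bumps mc_forwarding and fills in the mif_device
 * slot, publishing it under mrt_lock.
 */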
922 static int mif6_add(struct net *net, struct mr6_table *mrt,
923 struct mif6ctl *vifc, int mrtsock)
925 int vifi = vifc->mif6c_mifi;
926 struct mif_device *v = &mrt->vif6_table[vifi];
927 struct net_device *dev;
928 struct inet6_dev *in6_dev;
932 if (MIF_EXISTS(mrt, vifi))
935 switch (vifc->mif6c_flags) {
936 #ifdef CONFIG_IPV6_PIMSM_V2
939 * Special Purpose VIF in PIM
940 * All the packets will be sent to the daemon
942 if (mrt->mroute_reg_vif_num >= 0)
944 dev = ip6mr_reg_vif(net, mrt);
947 err = dev_set_allmulti(dev, 1);
949 unregister_netdevice(dev);
956 dev = dev_get_by_index(net, vifc->mif6c_pifi);
958 return -EADDRNOTAVAIL;
959 err = dev_set_allmulti(dev, 1);
969 in6_dev = __in6_dev_get(dev);
971 in6_dev->cnf.mc_forwarding++;
974 * Fill in the VIF structures
976 v->rate_limit = vifc->vifc_rate_limit;
977 v->flags = vifc->mif6c_flags;
979 v->flags |= VIFF_STATIC;
980 v->threshold = vifc->vifc_threshold;
985 v->link = dev->ifindex;
986 if (v->flags & MIFF_REGISTER)
987 v->link = dev->iflink;
989 /* And finish update writing critical data */
990 write_lock_bh(&mrt_lock);
992 #ifdef CONFIG_IPV6_PIMSM_V2
993 if (v->flags & MIFF_REGISTER)
994 mrt->mroute_reg_vif_num = vifi;
996 if (vifi + 1 > mrt->maxvif)
997 mrt->maxvif = vifi + 1;
998 write_unlock_bh(&mrt_lock);
1002 static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
1003 const struct in6_addr *origin,
1004 const struct in6_addr *mcastgrp)
1006 int line = MFC6_HASH(mcastgrp, origin);
1007 struct mfc6_cache *c;
1009 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
1010 if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
1011 ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
1018 * Allocate a multicast cache entry
1020 static struct mfc6_cache *ip6mr_cache_alloc(void)
1022 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
1025 c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
1026 c->mfc_un.res.minvif = MAXMIFS;
1030 static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
1032 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
1035 skb_queue_head_init(&c->mfc_un.unres.unresolved);
1036 c->mfc_un.unres.expires = jiffies + 10 * HZ;
1041 * A cache entry has gone into a resolved state from queued
1044 static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
1045 struct mfc6_cache *uc, struct mfc6_cache *c)
1047 struct sk_buff *skb;
1050 * Play the pending entries through our router
1053 while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
1054 if (ipv6_hdr(skb)->version == 0) {
1055 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
1057 if (__ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
1058 nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
1060 nlh->nlmsg_type = NLMSG_ERROR;
1061 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
1062 skb_trim(skb, nlh->nlmsg_len);
1063 ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
1065 rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
1067 ip6_mr_forward(net, mrt, skb, c);
1072 * Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
1073 * expects the following bizarre scheme.
1075 * Called under mrt_lock.
1078 static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
1079 mifi_t mifi, int assert)
1081 struct sk_buff *skb;
1082 struct mrt6msg *msg;
1085 #ifdef CONFIG_IPV6_PIMSM_V2
1086 if (assert == MRT6MSG_WHOLEPKT)
1087 skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
1091 skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
1096 /* I suppose that internal messages
1097 * do not require checksums */
1099 skb->ip_summed = CHECKSUM_UNNECESSARY;
1101 #ifdef CONFIG_IPV6_PIMSM_V2
1102 if (assert == MRT6MSG_WHOLEPKT) {
1103 /* Ugly, but we have no choice with this interface.
1104 Duplicate old header, fix length etc.
1105 And all this only to mangle msg->im6_msgtype and
1106 to set msg->im6_mbz to "mbz" :-)
1108 skb_push(skb, -skb_network_offset(pkt));
1110 skb_push(skb, sizeof(*msg));
1111 skb_reset_transport_header(skb);
1112 msg = (struct mrt6msg *)skb_transport_header(skb);
1114 msg->im6_msgtype = MRT6MSG_WHOLEPKT;
1115 msg->im6_mif = mrt->mroute_reg_vif_num;
1117 ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
1118 ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
1120 skb->ip_summed = CHECKSUM_UNNECESSARY;
1125 * Copy the IP header
1128 skb_put(skb, sizeof(struct ipv6hdr));
1129 skb_reset_network_header(skb);
1130 skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1135 skb_put(skb, sizeof(*msg));
1136 skb_reset_transport_header(skb);
1137 msg = (struct mrt6msg *)skb_transport_header(skb);
1140 msg->im6_msgtype = assert;
1141 msg->im6_mif = mifi;
1143 ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
1144 ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
1146 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
1147 skb->ip_summed = CHECKSUM_UNNECESSARY;
1150 if (mrt->mroute6_sk == NULL) {
1156 * Deliver to user space multicast routing algorithms
1158 ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
1160 if (net_ratelimit())
1161 printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
1169 * Queue a packet for resolution. It gets locked cache entry!
1173 ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
1177 struct mfc6_cache *c;
1179 spin_lock_bh(&mfc_unres_lock);
1180 list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
1181 if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
1182 ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
1190 * Create a new entry if allowable
1193 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
1194 (c = ip6mr_cache_alloc_unres()) == NULL) {
1195 spin_unlock_bh(&mfc_unres_lock);
1202 * Fill in the new cache entry
1204 c->mf6c_parent = -1;
1205 c->mf6c_origin = ipv6_hdr(skb)->saddr;
1206 c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
1209 * Reflect first query at pim6sd
1211 err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
1213 /* If the report failed throw the cache entry
1216 spin_unlock_bh(&mfc_unres_lock);
1218 ip6mr_cache_free(c);
1223 atomic_inc(&mrt->cache_resolve_queue_len);
1224 list_add(&c->list, &mrt->mfc6_unres_queue);
1226 ipmr_do_expire_process(mrt);
1230 * See if we can append the packet
1232 if (c->mfc_un.unres.unresolved.qlen > 3) {
1236 skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
1240 spin_unlock_bh(&mfc_unres_lock);
1245 * MFC6 cache manipulation by user space
1248 static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc)
1251 struct mfc6_cache *c, *next;
1253 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1255 list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
1256 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1257 ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
1258 write_lock_bh(&mrt_lock);
1260 write_unlock_bh(&mrt_lock);
1262 ip6mr_cache_free(c);
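/*
 * Netdevice notifier: on NETDEV_UNREGISTER, walk every table and delete
 * any MIF that still points at the vanishing device.
 */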
1269 static int ip6mr_device_event(struct notifier_block *this,
1270 unsigned long event, void *ptr)
1272 struct net_device *dev = ptr;
1273 struct net *net = dev_net(dev);
1274 struct mr6_table *mrt;
1275 struct mif_device *v;
1279 if (event != NETDEV_UNREGISTER)
1282 ip6mr_for_each_table(mrt, net) {
1283 v = &mrt->vif6_table[0];
1284 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1286 mif6_delete(mrt, ct, &list);
1289 unregister_netdevice_many(&list);
1294 static struct notifier_block ip6_mr_notifier = {
1295 .notifier_call = ip6mr_device_event
1299 * Setup for IP multicast routing
1302 static int __net_init ip6mr_net_init(struct net *net)
1306 err = ip6mr_rules_init(net);
1310 #ifdef CONFIG_PROC_FS
1312 if (!proc_net_fops_create(net, "ip6_mr_vif", 0, &ip6mr_vif_fops))
1314 if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops))
1315 goto proc_cache_fail;
1320 #ifdef CONFIG_PROC_FS
1322 proc_net_remove(net, "ip6_mr_vif");
1324 ip6mr_rules_exit(net);
1330 static void __net_exit ip6mr_net_exit(struct net *net)
1332 #ifdef CONFIG_PROC_FS
1333 proc_net_remove(net, "ip6_mr_cache");
1334 proc_net_remove(net, "ip6_mr_vif");
1336 ip6mr_rules_exit(net);
1339 static struct pernet_operations ip6mr_net_ops = {
1340 .init = ip6mr_net_init,
1341 .exit = ip6mr_net_exit,
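/*
 * Module initialisation: create the mfc6_cache slab, register the pernet
 * operations and the netdevice notifier, hook up the PIM protocol handler
 * when PIM-SM support is enabled, and register the RTM_GETROUTE dump
 * handler for RTNL_FAMILY_IP6MR.
 */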
1344 int __init ip6_mr_init(void)
1348 mrt_cachep = kmem_cache_create("ip6_mrt_cache",
1349 sizeof(struct mfc6_cache),
1350 0, SLAB_HWCACHE_ALIGN,
1355 err = register_pernet_subsys(&ip6mr_net_ops);
1357 goto reg_pernet_fail;
1359 err = register_netdevice_notifier(&ip6_mr_notifier);
1361 goto reg_notif_fail;
1362 #ifdef CONFIG_IPV6_PIMSM_V2
1363 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1364 printk(KERN_ERR "ip6_mr_init: can't add PIM protocol\n");
1366 goto add_proto_fail;
1369 rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
1370 ip6mr_rtm_dumproute, NULL);
1372 #ifdef CONFIG_IPV6_PIMSM_V2
1374 unregister_netdevice_notifier(&ip6_mr_notifier);
1377 unregister_pernet_subsys(&ip6mr_net_ops);
1379 kmem_cache_destroy(mrt_cachep);
1383 void ip6_mr_cleanup(void)
1385 unregister_netdevice_notifier(&ip6_mr_notifier);
1386 unregister_pernet_subsys(&ip6mr_net_ops);
1387 kmem_cache_destroy(mrt_cachep);
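/*
 * ip6mr_mfc_add() installs or updates a resolved (S,G) entry on behalf of
 * user space.  If an unresolved entry for the same flow is queued, it is
 * taken off the queue and its pending packets are replayed through the
 * freshly resolved route.
 */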
1390 static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1391 struct mf6cctl *mfc, int mrtsock)
1395 struct mfc6_cache *uc, *c;
1396 unsigned char ttls[MAXMIFS];
1399 if (mfc->mf6cc_parent >= MAXMIFS)
1402 memset(ttls, 255, MAXMIFS);
1403 for (i = 0; i < MAXMIFS; i++) {
1404 if (IF_ISSET(i, &mfc->mf6cc_ifset))
1409 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1411 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
1412 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1413 ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
1420 write_lock_bh(&mrt_lock);
1421 c->mf6c_parent = mfc->mf6cc_parent;
1422 ip6mr_update_thresholds(mrt, c, ttls);
1424 c->mfc_flags |= MFC_STATIC;
1425 write_unlock_bh(&mrt_lock);
1429 if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
1432 c = ip6mr_cache_alloc();
1436 c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1437 c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
1438 c->mf6c_parent = mfc->mf6cc_parent;
1439 ip6mr_update_thresholds(mrt, c, ttls);
1441 c->mfc_flags |= MFC_STATIC;
1443 write_lock_bh(&mrt_lock);
1444 list_add(&c->list, &mrt->mfc6_cache_array[line]);
1445 write_unlock_bh(&mrt_lock);
1448 * Check to see if we resolved a queued list. If so we
1449 * need to send on the frames and tidy up.
1452 spin_lock_bh(&mfc_unres_lock);
1453 list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
1454 if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
1455 ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
1456 list_del(&uc->list);
1457 atomic_dec(&mrt->cache_resolve_queue_len);
1462 if (list_empty(&mrt->mfc6_unres_queue))
1463 del_timer(&mrt->ipmr_expire_timer);
1464 spin_unlock_bh(&mfc_unres_lock);
1467 ip6mr_cache_resolve(net, mrt, uc, c);
1468 ip6mr_cache_free(uc);
1474 * Close the multicast socket, and clear the vif tables etc
1477 static void mroute_clean_tables(struct mr6_table *mrt, bool all)
1481 struct mfc6_cache *c, *next;
1484 * Shut down all active vif entries
1486 for (i = 0; i < mrt->maxvif; i++) {
1487 if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
1489 mif6_delete(mrt, i, &list);
1491 unregister_netdevice_many(&list);
1496 for (i = 0; i < MFC6_LINES; i++) {
1497 list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
1498 if (!all && (c->mfc_flags & MFC_STATIC))
1500 write_lock_bh(&mrt_lock);
1502 write_unlock_bh(&mrt_lock);
1504 ip6mr_cache_free(c);
1508 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1509 spin_lock_bh(&mfc_unres_lock);
1510 list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
1512 ip6mr_destroy_unres(mrt, c);
1514 spin_unlock_bh(&mfc_unres_lock);
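/*
 * ip6mr_sk_init()/ip6mr_sk_done() attach and detach the user-space routing
 * daemon's socket: they record it in mrt->mroute6_sk, adjust the global
 * mc_forwarding counter and, on shutdown, flush the non-static state.
 */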
1518 static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
1521 struct net *net = sock_net(sk);
1524 write_lock_bh(&mrt_lock);
1525 if (likely(mrt->mroute6_sk == NULL)) {
1526 mrt->mroute6_sk = sk;
1527 net->ipv6.devconf_all->mc_forwarding++;
1531 write_unlock_bh(&mrt_lock);
1538 int ip6mr_sk_done(struct sock *sk)
1541 struct net *net = sock_net(sk);
1542 struct mr6_table *mrt;
1545 ip6mr_for_each_table(mrt, net) {
1546 if (sk == mrt->mroute6_sk) {
1547 write_lock_bh(&mrt_lock);
1548 mrt->mroute6_sk = NULL;
1549 net->ipv6.devconf_all->mc_forwarding--;
1550 write_unlock_bh(&mrt_lock);
1552 mroute_clean_tables(mrt, false);
1562 struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
1564 struct mr6_table *mrt;
1565 struct flowi6 fl6 = {
1566 .flowi6_iif = skb->skb_iif,
1567 .flowi6_oif = skb->dev->ifindex,
1568 .flowi6_mark = skb->mark,
1571 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
1574 return mrt->mroute6_sk;
1578 * Socket options and virtual interface manipulation. The whole
1579 * virtual interface system is a complete heap, but unfortunately
1580 * that's how BSD mrouted happens to think. Maybe one day with a proper
1581 * MOSPF/PIM router set up we can clean this up.
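/*
 * For orientation only (not part of this file): a daemon such as pim6sd is
 * expected to drive this interface roughly as follows - open an ICMPv6 raw
 * socket and issue the MRT6_* options at the IPPROTO_IPV6 level:
 *
 *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int on = 1;
 *
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_INIT, &on, sizeof(on));
 *	... MRT6_ADD_MIF / MRT6_ADD_MFC as interfaces and routes appear ...
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_DONE, NULL, 0);
 */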
1584 int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
1590 struct net *net = sock_net(sk);
1591 struct mr6_table *mrt;
1593 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1597 if (optname != MRT6_INIT) {
1598 if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN))
1604 if (sk->sk_type != SOCK_RAW ||
1605 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1607 if (optlen < sizeof(int))
1610 return ip6mr_sk_init(mrt, sk);
1613 return ip6mr_sk_done(sk);
1616 if (optlen < sizeof(vif))
1618 if (copy_from_user(&vif, optval, sizeof(vif)))
1620 if (vif.mif6c_mifi >= MAXMIFS)
1623 ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
1628 if (optlen < sizeof(mifi_t))
1630 if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1633 ret = mif6_delete(mrt, mifi, NULL);
1638 * Manipulate the forwarding caches. These live
1639 * in a sort of kernel/user symbiosis.
1643 if (optlen < sizeof(mfc))
1645 if (copy_from_user(&mfc, optval, sizeof(mfc)))
1648 if (optname == MRT6_DEL_MFC)
1649 ret = ip6mr_mfc_delete(mrt, &mfc);
1651 ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk);
1656 * Control PIM assert (activating PIM also activates assert)
1661 if (get_user(v, (int __user *)optval))
1663 mrt->mroute_do_assert = !!v;
1667 #ifdef CONFIG_IPV6_PIMSM_V2
1671 if (get_user(v, (int __user *)optval))
1676 if (v != mrt->mroute_do_pim) {
1677 mrt->mroute_do_pim = v;
1678 mrt->mroute_do_assert = v;
1685 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1690 if (optlen != sizeof(u32))
1692 if (get_user(v, (u32 __user *)optval))
1694 if (sk == mrt->mroute6_sk)
1699 if (!ip6mr_new_table(net, v))
1701 raw6_sk(sk)->ip6mr_table = v;
1707 * Spurious command, or MRT6_VERSION which you cannot
1711 return -ENOPROTOOPT;
1716 * Getsockopt support for the multicast routing system.
1719 int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1724 struct net *net = sock_net(sk);
1725 struct mr6_table *mrt;
1727 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1735 #ifdef CONFIG_IPV6_PIMSM_V2
1737 val = mrt->mroute_do_pim;
1741 val = mrt->mroute_do_assert;
1744 return -ENOPROTOOPT;
1747 if (get_user(olr, optlen))
1750 olr = min_t(int, olr, sizeof(int));
1754 if (put_user(olr, optlen))
1756 if (copy_to_user(optval, &val, olr))
1762 * The IP multicast ioctl support routines.
1765 int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1767 struct sioc_sg_req6 sr;
1768 struct sioc_mif_req6 vr;
1769 struct mif_device *vif;
1770 struct mfc6_cache *c;
1771 struct net *net = sock_net(sk);
1772 struct mr6_table *mrt;
1774 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1779 case SIOCGETMIFCNT_IN6:
1780 if (copy_from_user(&vr, arg, sizeof(vr)))
1782 if (vr.mifi >= mrt->maxvif)
1784 read_lock(&mrt_lock);
1785 vif = &mrt->vif6_table[vr.mifi];
1786 if (MIF_EXISTS(mrt, vr.mifi)) {
1787 vr.icount = vif->pkt_in;
1788 vr.ocount = vif->pkt_out;
1789 vr.ibytes = vif->bytes_in;
1790 vr.obytes = vif->bytes_out;
1791 read_unlock(&mrt_lock);
1793 if (copy_to_user(arg, &vr, sizeof(vr)))
1797 read_unlock(&mrt_lock);
1798 return -EADDRNOTAVAIL;
1799 case SIOCGETSGCNT_IN6:
1800 if (copy_from_user(&sr, arg, sizeof(sr)))
1803 read_lock(&mrt_lock);
1804 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1806 sr.pktcnt = c->mfc_un.res.pkt;
1807 sr.bytecnt = c->mfc_un.res.bytes;
1808 sr.wrong_if = c->mfc_un.res.wrong_if;
1809 read_unlock(&mrt_lock);
1811 if (copy_to_user(arg, &sr, sizeof(sr)))
1815 read_unlock(&mrt_lock);
1816 return -EADDRNOTAVAIL;
1818 return -ENOIOCTLCMD;
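/*
 * 32-bit compat versions of SIOCGETMIFCNT_IN6 / SIOCGETSGCNT_IN6: same
 * semantics as above, but the counters are compat_ulong_t instead of
 * unsigned long.
 */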
1822 #ifdef CONFIG_COMPAT
1823 struct compat_sioc_sg_req6 {
1824 struct sockaddr_in6 src;
1825 struct sockaddr_in6 grp;
1826 compat_ulong_t pktcnt;
1827 compat_ulong_t bytecnt;
1828 compat_ulong_t wrong_if;
1831 struct compat_sioc_mif_req6 {
1833 compat_ulong_t icount;
1834 compat_ulong_t ocount;
1835 compat_ulong_t ibytes;
1836 compat_ulong_t obytes;
1839 int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1841 struct compat_sioc_sg_req6 sr;
1842 struct compat_sioc_mif_req6 vr;
1843 struct mif_device *vif;
1844 struct mfc6_cache *c;
1845 struct net *net = sock_net(sk);
1846 struct mr6_table *mrt;
1848 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1853 case SIOCGETMIFCNT_IN6:
1854 if (copy_from_user(&vr, arg, sizeof(vr)))
1856 if (vr.mifi >= mrt->maxvif)
1858 read_lock(&mrt_lock);
1859 vif = &mrt->vif6_table[vr.mifi];
1860 if (MIF_EXISTS(mrt, vr.mifi)) {
1861 vr.icount = vif->pkt_in;
1862 vr.ocount = vif->pkt_out;
1863 vr.ibytes = vif->bytes_in;
1864 vr.obytes = vif->bytes_out;
1865 read_unlock(&mrt_lock);
1867 if (copy_to_user(arg, &vr, sizeof(vr)))
1871 read_unlock(&mrt_lock);
1872 return -EADDRNOTAVAIL;
1873 case SIOCGETSGCNT_IN6:
1874 if (copy_from_user(&sr, arg, sizeof(sr)))
1877 read_lock(&mrt_lock);
1878 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1880 sr.pktcnt = c->mfc_un.res.pkt;
1881 sr.bytecnt = c->mfc_un.res.bytes;
1882 sr.wrong_if = c->mfc_un.res.wrong_if;
1883 read_unlock(&mrt_lock);
1885 if (copy_to_user(arg, &sr, sizeof(sr)))
1889 read_unlock(&mrt_lock);
1890 return -EADDRNOTAVAIL;
1892 return -ENOIOCTLCMD;
1897 static inline int ip6mr_forward2_finish(struct sk_buff *skb)
1899 IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
1900 IPSTATS_MIB_OUTFORWDATAGRAMS);
1901 return dst_output(skb);
1905 * Processing handlers for ip6mr_forward
1908 static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
1909 struct sk_buff *skb, struct mfc6_cache *c, int vifi)
1911 struct ipv6hdr *ipv6h;
1912 struct mif_device *vif = &mrt->vif6_table[vifi];
1913 struct net_device *dev;
1914 struct dst_entry *dst;
1917 if (vif->dev == NULL)
1920 #ifdef CONFIG_IPV6_PIMSM_V2
1921 if (vif->flags & MIFF_REGISTER) {
1923 vif->bytes_out += skb->len;
1924 vif->dev->stats.tx_bytes += skb->len;
1925 vif->dev->stats.tx_packets++;
1926 ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
1931 ipv6h = ipv6_hdr(skb);
1933 fl6 = (struct flowi6) {
1934 .flowi6_oif = vif->link,
1935 .daddr = ipv6h->daddr,
1938 dst = ip6_route_output(net, NULL, &fl6);
1943 skb_dst_set(skb, dst);
1946 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets locally
1947 * not only before forwarding, but also after forwarding on all output
1948 * interfaces. Clearly, if the mrouter runs a multicasting
1949 * program, it should receive packets regardless of which interface
1950 * the program joined on.
1951 * If we do not do this, the program would have to join on all
1952 * interfaces. On the other hand, a multihomed host (or router, but
1953 * not an mrouter) cannot join on more than one interface - doing so would
1954 * result in receiving duplicate packets.
1959 vif->bytes_out += skb->len;
1961 /* We are about to write */
1962 /* XXX: extension headers? */
1963 if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
1966 ipv6h = ipv6_hdr(skb);
1969 IP6CB(skb)->flags |= IP6SKB_FORWARDED;
1971 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
1972 ip6mr_forward2_finish);
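/*
 * ip6mr_find_vif() maps a net_device back to its MIF index in vif6_table,
 * scanning from the highest configured index downwards.
 */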
1979 static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
1983 for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
1984 if (mrt->vif6_table[ct].dev == dev)
1990 static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
1991 struct sk_buff *skb, struct mfc6_cache *cache)
1996 vif = cache->mf6c_parent;
1997 cache->mfc_un.res.pkt++;
1998 cache->mfc_un.res.bytes += skb->len;
2001 * Wrong interface: drop packet and (maybe) send PIM assert.
2003 if (mrt->vif6_table[vif].dev != skb->dev) {
2006 cache->mfc_un.res.wrong_if++;
2007 true_vifi = ip6mr_find_vif(mrt, skb->dev);
2009 if (true_vifi >= 0 && mrt->mroute_do_assert &&
2010 /* pimsm uses asserts when switching from the RPT to the SPT,
2011 so we cannot insist that the packet arrived on an oif.
2012 That is unfortunate, but otherwise we would need to move a pretty
2013 large chunk of pimd into the kernel. Ough... --ANK
2015 (mrt->mroute_do_pim ||
2016 cache->mfc_un.res.ttls[true_vifi] < 255) &&
2018 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
2019 cache->mfc_un.res.last_assert = jiffies;
2020 ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
2025 mrt->vif6_table[vif].pkt_in++;
2026 mrt->vif6_table[vif].bytes_in += skb->len;
2031 for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
2032 if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
2034 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2036 ip6mr_forward2(net, mrt, skb2, cache, psend);
2042 ip6mr_forward2(net, mrt, skb, cache, psend);
2053 * Multicast packets for forwarding arrive here
2056 int ip6_mr_input(struct sk_buff *skb)
2058 struct mfc6_cache *cache;
2059 struct net *net = dev_net(skb->dev);
2060 struct mr6_table *mrt;
2061 struct flowi6 fl6 = {
2062 .flowi6_iif = skb->dev->ifindex,
2063 .flowi6_mark = skb->mark,
2067 err = ip6mr_fib_lookup(net, &fl6, &mrt);
2073 read_lock(&mrt_lock);
2074 cache = ip6mr_cache_find(mrt,
2075 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2078 * No usable cache entry
2080 if (cache == NULL) {
2083 vif = ip6mr_find_vif(mrt, skb->dev);
2085 int err = ip6mr_cache_unresolved(mrt, vif, skb);
2086 read_unlock(&mrt_lock);
2090 read_unlock(&mrt_lock);
2095 ip6_mr_forward(net, mrt, skb, cache);
2097 read_unlock(&mrt_lock);
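/*
 * __ip6mr_fill_mroute() encodes one cache entry for netlink: RTA_IIF for
 * the parent MIF plus an RTA_MULTIPATH nexthop list whose rtnh_hops carry
 * the TTL thresholds of the forwarding MIFs.  For unresolved entries the
 * IIF/OIF attributes are skipped.
 */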
2103 static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2104 struct mfc6_cache *c, struct rtmsg *rtm)
2107 struct rtnexthop *nhp;
2108 u8 *b = skb_tail_pointer(skb);
2109 struct rtattr *mp_head;
2111 /* If cache is unresolved, don't try to parse IIF and OIF */
2112 if (c->mf6c_parent >= MAXMIFS)
2115 if (MIF_EXISTS(mrt, c->mf6c_parent))
2116 RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex);
2118 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
2120 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2121 if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2122 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
2123 goto rtattr_failure;
2124 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
2125 nhp->rtnh_flags = 0;
2126 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2127 nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
2128 nhp->rtnh_len = sizeof(*nhp);
2131 mp_head->rta_type = RTA_MULTIPATH;
2132 mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
2133 rtm->rtm_type = RTN_MULTICAST;
2141 int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
2142 int nowait, u32 portid)
2145 struct mr6_table *mrt;
2146 struct mfc6_cache *cache;
2147 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
2149 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2153 read_lock(&mrt_lock);
2154 cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2157 struct sk_buff *skb2;
2158 struct ipv6hdr *iph;
2159 struct net_device *dev;
2163 read_unlock(&mrt_lock);
2168 if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2169 read_unlock(&mrt_lock);
2173 /* really correct? */
2174 skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2176 read_unlock(&mrt_lock);
2180 NETLINK_CB(skb2).pid = portid;
2181 skb_reset_transport_header(skb2);
2183 skb_put(skb2, sizeof(struct ipv6hdr));
2184 skb_reset_network_header(skb2);
2186 iph = ipv6_hdr(skb2);
2189 iph->flow_lbl[0] = 0;
2190 iph->flow_lbl[1] = 0;
2191 iph->flow_lbl[2] = 0;
2192 iph->payload_len = 0;
2193 iph->nexthdr = IPPROTO_NONE;
2195 ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
2196 ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);
2198 err = ip6mr_cache_unresolved(mrt, vif, skb2);
2199 read_unlock(&mrt_lock);
2204 if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
2205 cache->mfc_flags |= MFC_NOTIFY;
2207 err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
2208 read_unlock(&mrt_lock);
2212 static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2213 u32 pid, u32 seq, struct mfc6_cache *c)
2215 struct nlmsghdr *nlh;
2218 nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
2222 rtm = nlmsg_data(nlh);
2223 rtm->rtm_family = RTNL_FAMILY_IP6MR;
2224 rtm->rtm_dst_len = 128;
2225 rtm->rtm_src_len = 128;
2227 rtm->rtm_table = mrt->id;
2228 NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
2229 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2230 rtm->rtm_protocol = RTPROT_UNSPEC;
2233 NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin);
2234 NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp);
2236 if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0)
2237 goto nla_put_failure;
2239 return nlmsg_end(skb, nlh);
2242 nlmsg_cancel(skb, nlh);
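/*
 * Netlink dump callback: iterate over all tables and all hash chains,
 * resuming from the table / hash chain / entry cursor kept in the
 * callback state.
 */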
2246 static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2248 struct net *net = sock_net(skb->sk);
2249 struct mr6_table *mrt;
2250 struct mfc6_cache *mfc;
2251 unsigned int t = 0, s_t;
2252 unsigned int h = 0, s_h;
2253 unsigned int e = 0, s_e;
2259 read_lock(&mrt_lock);
2260 ip6mr_for_each_table(mrt, net) {
2265 for (h = s_h; h < MFC6_LINES; h++) {
2266 list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
2269 if (ip6mr_fill_mroute(mrt, skb,
2270 NETLINK_CB(cb->skb).pid,
2284 read_unlock(&mrt_lock);