/*
 *	Extension Header handling for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Andi Kleen		<ak@muc.de>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/* Changes:
 *	yoshfuji		: ensure not to overrun while parsing
 *				  tlv options.
 *	Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
 *	YOSHIFUJI Hideaki @USAGI  Register inbound extension header
 *				  handlers as inet6_protocol{}.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <linux/slab.h>

#include <net/dst.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
#include <net/xfrm.h>
#endif

#include <asm/uaccess.h>
int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
	const unsigned char *nh = skb_network_header(skb);
	int packet_len = skb->tail - skb->network_header;
	struct ipv6_opt_hdr *hdr;

	if (offset + 2 > packet_len)
	hdr = (struct ipv6_opt_hdr *)(nh + offset);
	len = ((hdr->hdrlen + 1) << 3);

	if (offset + len > packet_len)

		int opttype = nh[offset];

			optlen = nh[offset + 1] + 2;

EXPORT_SYMBOL_GPL(ipv6_find_tlv);
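
/*
 * Reminder on the layout parsed above: every extension header begins
 * with a next-header byte and a length byte, and the length counts
 * 8-octet units beyond the first.  So hdrlen == 0 means an 8-byte
 * header and hdrlen == 1 means 16 bytes, i.e. (hdrlen + 1) << 3 bytes
 * in total, with the TLV-encoded options starting right after those
 * first two bytes.
 */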
/*
 * Parsing tlv encoded headers.
 *
 * Parsing function "func" returns 1 if parsing succeeded
 * and 0 if it failed.
 * It MUST NOT touch skb->h.
 */

struct tlvtype_proc {
	int	type;
	int	(*func)(struct sk_buff *skb, int offset);
};

/*********************
  Generic functions
 *********************/

/* An unknown option is detected, decide what to do */
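
/*
 * The two high-order bits of an unrecognized option type encode the
 * required action (RFC 2460, section 4.2):
 *   00 - skip over the option and continue,
 *   01 - discard the packet silently,
 *   10 - discard and send an ICMPv6 Parameter Problem,
 *   11 - discard and send the ICMPv6 error only if the destination
 *        was not a multicast address.
 */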
static int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
{
	switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
	case 0: /* ignore */
		return 1;

	case 1: /* drop packet */
		break;

	case 3: /* Send ICMP if not a multicast address and drop packet */
		/* Actually, this check is redundant; icmpv6_send()
		 * will recheck it in any case.
		 */
		if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
			break;
	case 2: /* send ICMP PARM PROB regardless and drop packet */
		icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
		return 0;
/* Parse tlv encoded option header (hop-by-hop or destination) */

static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
{
	struct tlvtype_proc *curr;
	const unsigned char *nh = skb_network_header(skb);
	int off = skb_network_header_len(skb);
	int len = (skb_transport_header(skb)[1] + 1) << 3;

	if (skb_transport_offset(skb) + len > skb_headlen(skb))
		goto bad;

		int optlen = nh[off + 1] + 2;

		default: /* Other TLV code so scan list */
			for (curr=procs; curr->type >= 0; curr++) {
				if (curr->type == nh[off]) {
					/* type specific length/alignment
					   checks will be performed in the
					   func(). */
					if (curr->func(skb, off) == 0)
						return 0;
					break;
				}
			}
			if (curr->type < 0) {
				if (ip6_tlvopt_unknown(skb, off) == 0)
					return 0;
			}
/*****************************
  Destination options header.
 *****************************/

#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
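/*
 * Home Address destination option (Mobile IPv6, RFC 3775).  The option
 * carries the mobile node's home address while the packet travels from
 * its current care-of address.  After validating the option,
 * ipv6_dest_hao() swaps the IPv6 source address with the home address,
 * so the rest of the stack sees the home address as the packet's source.
 */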
static int ipv6_dest_hao(struct sk_buff *skb, int optoff)
{
	struct ipv6_destopt_hao *hao;
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct in6_addr tmp_addr;
	int ret;

	if (opt->dsthao) {
		LIMIT_NETDEBUG(KERN_DEBUG "hao duplicated\n");
		goto discard;
	}
	opt->dsthao = opt->dst1;

	hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff);

	if (hao->length != 16) {
		LIMIT_NETDEBUG(
			KERN_DEBUG "hao invalid option length = %d\n", hao->length);
		goto discard;
	}

	if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) {
		LIMIT_NETDEBUG(
			KERN_DEBUG "hao is not an unicast addr: %pI6\n", &hao->addr);
		goto discard;
	}

	ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr,
			       (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS);
	if (unlikely(ret < 0))
		goto discard;

	if (skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto discard;

		/* update all variables used below to point into the copied skbuff */
		hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) +
						  optoff);
		ipv6h = ipv6_hdr(skb);
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	ipv6_addr_copy(&tmp_addr, &ipv6h->saddr);
	ipv6_addr_copy(&ipv6h->saddr, &hao->addr);
	ipv6_addr_copy(&hao->addr, &tmp_addr);

	if (skb->tstamp.tv64 == 0)
		__net_timestamp(skb);
static struct tlvtype_proc tlvprocdestopt_lst[] = {
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
	{
		.type	= IPV6_TLV_HAO,
		.func	= ipv6_dest_hao,
	},
#endif
	{ -1, NULL }
};
static int ipv6_destopt_rcv(struct sk_buff *skb)
	struct inet6_skb_parm *opt = IP6CB(skb);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
	struct dst_entry *dst = skb_dst(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst),
				 IPSTATS_MIB_INHDRERRORS);

	opt->lastopt = opt->dst1 = skb_network_header_len(skb);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)

	if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
		skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
		opt->nhoff = opt->dst1;

	IP6_INC_STATS_BH(dev_net(dst->dev),
			 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
/********************************
  Routing header.
 ********************************/

/* called with rcu_read_lock() */
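/*
 * Only routing header type 2 (Mobile IPv6, RFC 3775) is handled here,
 * and only when the accept_source_route settings allow it; type 0
 * source routing is no longer honoured (cf. RFC 5095).  Any other type
 * that still has segments_left != 0 is answered with an ICMPv6
 * Parameter Problem pointing at the routing header's type field.
 */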
static int ipv6_rthdr_rcv(struct sk_buff *skb)
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct in6_addr *addr = NULL;
	struct in6_addr daddr;
	struct inet6_dev *idev;
	struct ipv6_rt_hdr *hdr;
	struct rt0_hdr *rthdr;
	struct net *net = dev_net(skb->dev);
	int accept_source_route = net->ipv6.devconf_all->accept_source_route;

	idev = __in6_dev_get(skb->dev);
	if (idev && accept_source_route > idev->cnf.accept_source_route)
		accept_source_route = idev->cnf.accept_source_route;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);

	hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
	    skb->pkt_type != PACKET_HOST) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INADDRERRORS);

	if (hdr->segments_left == 0) {
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
		case IPV6_SRCRT_TYPE_2:
			/* Silently discard type 2 header unless it was
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INADDRERRORS);

		opt->lastopt = opt->srcrt = skb_network_header_len(skb);
		skb->transport_header += (hdr->hdrlen + 1) << 3;
		opt->dst0 = opt->dst1;
		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);

#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
	case IPV6_SRCRT_TYPE_2:
		if (accept_source_route < 0)
		/* Silently discard invalid RTH type 2 */
		if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);

	 * This is the routing header forwarding algorithm from

	n = hdr->hdrlen >> 1;

	if (hdr->segments_left > n) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((&hdr->segments_left) -
				   skb_network_header(skb)));

	/* We are about to mangle packet header. Be careful!
	   Do not damage packets queued somewhere.

	if (skb_cloned(skb)) {
		/* the copy is a forwarded packet */
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_OUTDISCARDS);

		hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	i = n - --hdr->segments_left;

	rthdr = (struct rt0_hdr *) hdr;

#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
	case IPV6_SRCRT_TYPE_2:
		if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
				     (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
				     IPPROTO_ROUTING) < 0) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INADDRERRORS);
		if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INADDRERRORS);

	if (ipv6_addr_is_multicast(addr)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INADDRERRORS);

	ipv6_addr_copy(&daddr, addr);
	ipv6_addr_copy(addr, &ipv6_hdr(skb)->daddr);
	ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &daddr);

	ip6_route_input(skb);
	if (skb_dst(skb)->error) {
		skb_push(skb, skb->data - skb_network_header(skb));

	if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) {
		if (ipv6_hdr(skb)->hop_limit <= 1) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,

		ipv6_hdr(skb)->hop_limit--;

	skb_push(skb, skb->data - skb_network_header(skb));

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
			  (&hdr->type) - skb_network_header(skb));
static const struct inet6_protocol rthdr_protocol = {
	.handler	= ipv6_rthdr_rcv,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR,
};

static const struct inet6_protocol destopt_protocol = {
	.handler	= ipv6_destopt_rcv,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR,
};

static const struct inet6_protocol nodata_protocol = {
	.handler	= dst_discard,
	.flags		= INET6_PROTO_NOPOLICY,
};

int __init ipv6_exthdrs_init(void)
{
	int ret;

	ret = inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING);

	ret = inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS);

	ret = inet6_add_protocol(&nodata_protocol, IPPROTO_NONE);

	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);

	inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);

void ipv6_exthdrs_exit(void)
{
	inet6_del_protocol(&nodata_protocol, IPPROTO_NONE);
	inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
}
/**********************************
  Hop-by-hop options.
 **********************************/

/*
 * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input().
 */
static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
{
	return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
}

static inline struct net *ipv6_skb_net(struct sk_buff *skb)
{
	return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
}

/* Router Alert as of RFC 2711 */
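/*
 * The Router Alert option carries two bytes of option data (RFC 2711):
 * a 16-bit value in which 0 means "datagram contains an MLD message".
 * Only the option length is validated here; the option's offset is
 * recorded in the skb control block so that interested code (e.g.
 * multicast routing) can inspect it later.
 */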
static int ipv6_hop_ra(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);

	if (nh[optoff + 1] == 2) {
		IP6CB(skb)->ra = optoff;
		return 1;
	}
	LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n",
		       nh[optoff + 1]);
	kfree_skb(skb);
	return 0;
}

/* Jumbo payload */

static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);
	struct net *net = ipv6_skb_net(skb);
	u32 pkt_len;

	if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
		LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
			       nh[optoff + 1]);
		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
				 IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
	if (pkt_len <= IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff + 2);
		return 0;
	}
	if (ipv6_hdr(skb)->payload_len) {
		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
		return 0;
	}

	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
				 IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	}

	if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
		goto drop;
static struct tlvtype_proc tlvprochopopt_lst[] = {
	{
		.type	= IPV6_TLV_ROUTERALERT,
		.func	= ipv6_hop_ra,
	},
	{
		.type	= IPV6_TLV_JUMBO,
		.func	= ipv6_hop_jumbo,
	},
	{ -1, NULL }
};

int ipv6_parse_hopopts(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);

	/*
	 * skb_network_header(skb) is equal to skb->data, and
	 * skb_network_header_len(skb) is always equal to
	 * sizeof(struct ipv6hdr) by definition of
	 * hop-by-hop options.
	 */
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
	    !pskb_may_pull(skb, (sizeof(struct ipv6hdr) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		kfree_skb(skb);
		return -1;
	}

	opt->hop = sizeof(struct ipv6hdr);
	if (ip6_parse_tlv(tlvprochopopt_lst, skb)) {
		skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
		opt = IP6CB(skb);
		opt->nhoff = sizeof(struct ipv6hdr);
		return 1;
	}
	return -1;
}
/*
 * Creating outbound headers.
 *
 * "build" functions work when skb is filled from head to tail (datagram)
 * "push"  functions work when headers are added from tail to head (tcp)
 *
 * In both cases we assume that the caller has reserved enough room
 * for the headers.
 */
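
/*
 * Because skb_push() prepends data, ipv6_push_nfrag_opts() adds the
 * non-fragmentable extension headers in reverse of their on-wire order:
 * the routing header is pushed first, then the destination options that
 * precede it, then the hop-by-hop header, so the finished packet carries
 * Hop-by-Hop, Destination, Routing in front of the upper-layer data.
 */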
static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
			    struct ipv6_rt_hdr *opt,
			    struct in6_addr **addr_p)
{
	struct rt0_hdr *phdr, *ihdr;
	int hops;

	ihdr = (struct rt0_hdr *) opt;

	phdr = (struct rt0_hdr *) skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
	memcpy(phdr, ihdr, sizeof(struct rt0_hdr));

	hops = ihdr->rt_hdr.hdrlen >> 1;

	if (hops > 1)
		memcpy(phdr->addr, ihdr->addr + 1,
		       (hops - 1) * sizeof(struct in6_addr));

	ipv6_addr_copy(phdr->addr + (hops - 1), *addr_p);
	*addr_p = ihdr->addr;

	phdr->rt_hdr.nexthdr = *proto;
	*proto = NEXTHDR_ROUTING;
}

static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt)
{
	struct ipv6_opt_hdr *h = (struct ipv6_opt_hdr *)skb_push(skb, ipv6_optlen(opt));

	memcpy(h, opt, ipv6_optlen(opt));
	h->nexthdr = *proto;
	*proto = type;
}

void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
			  u8 *proto,
			  struct in6_addr **daddr)
{
	if (opt->srcrt) {
		ipv6_push_rthdr(skb, proto, opt->srcrt, daddr);
		/*
		 * IPV6_RTHDRDSTOPTS is ignored
		 * unless IPV6_RTHDR is set (RFC3542).
		 */
		if (opt->dst0opt)
			ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt);
	}
	if (opt->hopopt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
}
EXPORT_SYMBOL(ipv6_push_nfrag_opts);
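
/*
 * Only dst1opt - the Destination Options header examined by the final
 * destination, carried after any routing header - belongs to the
 * fragmentable part of the packet.  ipv6_push_frag_opts() below emits
 * just that header, mirroring the opt_flen/opt_nflen split kept in
 * struct ipv6_txoptions (see ipv6_renew_options()).
 */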
void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
{
	if (opt->dst1opt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
}

struct ipv6_txoptions *
ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
{
	struct ipv6_txoptions *opt2;

	opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
	if (opt2) {
		long dif = (char*)opt2 - (char*)opt;
		memcpy(opt2, opt, opt->tot_len);
		if (opt2->hopopt)
			*((char**)&opt2->hopopt) += dif;
		if (opt2->dst0opt)
			*((char**)&opt2->dst0opt) += dif;
		if (opt2->dst1opt)
			*((char**)&opt2->dst1opt) += dif;
		if (opt2->srcrt)
			*((char**)&opt2->srcrt) += dif;
	}
	return opt2;
}
EXPORT_SYMBOL_GPL(ipv6_dup_options);
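
/*
 * ipv6_renew_option() copies a single extension header into the flat
 * buffer at *p: the existing header is kept as-is unless it is the type
 * being replaced, in which case the user-supplied option (if any) is
 * copied in from userspace instead.  Either way *p is advanced by the
 * CMSG-aligned length and the per-header pointer in the new
 * ipv6_txoptions is left pointing into the buffer.
 */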
static int ipv6_renew_option(void *ohdr,
			     struct ipv6_opt_hdr __user *newopt, int newoptlen,
			     int inherit,
			     struct ipv6_opt_hdr **hdr,
			     char **p)
{
		memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
		*hdr = (struct ipv6_opt_hdr *)*p;
		*p += CMSG_ALIGN(ipv6_optlen(*(struct ipv6_opt_hdr **)hdr));

		if (copy_from_user(*p, newopt, newoptlen))
			return -EFAULT;
		*hdr = (struct ipv6_opt_hdr *)*p;
		if (ipv6_optlen(*(struct ipv6_opt_hdr **)hdr) > newoptlen)
			return -EINVAL;
		*p += CMSG_ALIGN(newoptlen);
struct ipv6_txoptions *
ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
		   int newtype,
		   struct ipv6_opt_hdr __user *newopt, int newoptlen)
{
	int tot_len = 0;
	char *p;
	struct ipv6_txoptions *opt2;
	int err;

	if (opt) {
		if (newtype != IPV6_HOPOPTS && opt->hopopt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
		if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
		if (newtype != IPV6_RTHDR && opt->srcrt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
		if (newtype != IPV6_DSTOPTS && opt->dst1opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
	}

	if (newopt && newoptlen)
		tot_len += CMSG_ALIGN(newoptlen);

	tot_len += sizeof(*opt2);
	opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
	if (!opt2)
		return ERR_PTR(-ENOBUFS);

	memset(opt2, 0, tot_len);

	opt2->tot_len = tot_len;
	p = (char *)(opt2 + 1);

	err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
				newtype != IPV6_HOPOPTS,
				&opt2->hopopt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
				newtype != IPV6_RTHDRDSTOPTS,
				&opt2->dst0opt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
				newtype != IPV6_RTHDR,
				(struct ipv6_opt_hdr **)&opt2->srcrt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
				newtype != IPV6_DSTOPTS,
				&opt2->dst1opt, &p);
	if (err)
		goto out;

	opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
			  (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
			  (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0);
	opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);

	return opt2;
out:
	sock_kfree_s(sk, opt2, opt2->tot_len);
	return ERR_PTR(err);
}
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
					  struct ipv6_txoptions *opt)
{
	/*
	 * ignore the dest before srcrt unless srcrt is being included.
	 */
	if (opt && opt->dst0opt && !opt->srcrt) {
		if (opt_space != opt) {
			memcpy(opt_space, opt, sizeof(*opt_space));
			opt = opt_space;
		}
		opt->opt_nflen -= ipv6_optlen(opt->dst0opt);
		opt->dst0opt = NULL;
	}

	return opt;
}
/**
 * fl6_update_dst - update flowi destination address with info given
 *                  by srcrt option, if any.
 *
 * @fl6: flowi6 for which daddr is to be updated
 * @opt: struct ipv6_txoptions in which to look for srcrt opt
 * @orig: copy of original daddr address if modified
 *
 * Returns NULL if no txoptions or no srcrt; otherwise returns orig,
 * with the initial value of fl6->daddr stored in *orig.
 */
struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
				const struct ipv6_txoptions *opt,
				struct in6_addr *orig)
{
	if (!opt || !opt->srcrt)
		return NULL;

	ipv6_addr_copy(orig, &fl6->daddr);
	ipv6_addr_copy(&fl6->daddr, ((struct rt0_hdr *)opt->srcrt)->addr);
	return orig;
}
EXPORT_SYMBOL_GPL(fl6_update_dst);
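
/*
 * Typical use (an illustrative sketch, not copied from any one caller):
 * a sender whose tx options carry a routing header routes towards the
 * first listed hop while remembering the real final destination:
 *
 *	struct in6_addr final, *final_p;
 *
 *	final_p = fl6_update_dst(&fl6, np->opt, &final);
 *	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
 *
 * Afterwards fl6.daddr names the first intermediate hop, while *final_p
 * (when the return value is non-NULL) still holds the address the
 * packet must ultimately reach.
 */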