2 * ip_vs_xmit.c: various packet transmitters for IPVS
4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
5 * Julian Anastasov <ja@ssi.bg>
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
16 #define KMSG_COMPONENT "IPVS"
17 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
19 #include <linux/kernel.h>
20 #include <linux/slab.h>
21 #include <linux/tcp.h> /* for tcphdr */
23 #include <net/tcp.h> /* for csum_tcpudp_magic */
25 #include <net/icmp.h> /* for icmp_send */
26 #include <net/route.h> /* for ip_route_output */
28 #include <net/ip6_route.h>
29 #include <linux/icmpv6.h>
30 #include <linux/netfilter.h>
31 #include <net/netfilter/nf_conntrack.h>
32 #include <linux/netfilter_ipv4.h>
34 #include <net/ip_vs.h>
38 * Destination cache to speed up outgoing route lookup
/*
 * __ip_vs_dst_set - install a new cached route for a destination.
 * Saves the previous cache entry, then stores @dst and its
 * type-of-service key @rtos in @dest.  Callers visible below take
 * dest->dst_lock around this (see __ip_vs_get_out_rt*()).
 * NOTE(review): sampled listing — the function prologue and the
 * release of old_dst are not visible here; confirm against full source.
 */
41 __ip_vs_dst_set(struct ip_vs_dest *dest, u32 rtos, struct dst_entry *dst)
43 struct dst_entry *old_dst;
/* remember the previous entry so it can be dropped after the swap */
45 old_dst = dest->dst_cache;
46 dest->dst_cache = dst;
47 dest->dst_rtos = rtos;
/*
 * __ip_vs_dst_check - validate the destination's cached route.
 * Returns the cached dst_entry when still usable; on failure the stale
 * cache pointer is cleared (dest->dst_cache = NULL).
 * NOTE(review): part of the condition is elided in this sampled
 * listing.  Visibly, an AF_INET entry whose stored rtos differs from
 * the requested one is re-validated via dst->ops->check(dst, cookie)
 * and dropped when that returns NULL — confirm the full predicate.
 */
51 static inline struct dst_entry *
52 __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos, u32 cookie)
54 struct dst_entry *dst = dest->dst_cache;
59 || (dest->af == AF_INET && rtos != dest->dst_rtos)) &&
60 dst->ops->check(dst, cookie) == NULL) {
61 dest->dst_cache = NULL;
/*
 * __ip_vs_get_out_rt - get the IPv4 route to the other end of @cp.
 * When the connection has a real-server dest, the route is taken from
 * (or stored into) dest->dst_cache under dest->dst_lock; otherwise a
 * one-off ip_route_output_key() lookup on cp->daddr.ip is done.
 * NOTE(review): sampled listing — the flowi initializers, error
 * returns and the function tail are partially elided here.
 */
69 static struct rtable *
70 __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos)
72 struct rtable *rt; /* Route to the other host */
73 struct ip_vs_dest *dest = cp->dest;
/* dst cache path: lock, revalidate, refill on miss */
76 spin_lock(&dest->dst_lock);
77 if (!(rt = (struct rtable *)
78 __ip_vs_dst_check(dest, rtos, 0))) {
83 .daddr = dest->addr.ip,
88 if (ip_route_output_key(&init_net, &rt, &fl)) {
89 spin_unlock(&dest->dst_lock);
90 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
/* cache the fresh route; dst_clone() keeps our own reference */
94 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst));
95 IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n",
97 atomic_read(&rt->dst.__refcnt), rtos);
99 spin_unlock(&dest->dst_lock);
/* no dest: uncached lookup keyed on the connection's daddr */
105 .daddr = cp->daddr.ip,
110 if (ip_route_output_key(&init_net, &rt, &fl)) {
111 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
120 #ifdef CONFIG_IP_VS_IPV6
/*
 * __ip_vs_get_out_rt_v6 - IPv6 counterpart of __ip_vs_get_out_rt().
 * Same dst-cache scheme under dest->dst_lock (rtos is unused for v6 —
 * both __ip_vs_dst_check() and __ip_vs_dst_set() are called with 0),
 * falling back to an uncached ip6_route_output() on cp->daddr.in6.
 * NOTE(review): sampled listing — the route-error test between
 * ip6_route_output() and the unlock/debug lines is elided here.
 */
121 static struct rt6_info *
122 __ip_vs_get_out_rt_v6(struct ip_vs_conn *cp)
124 struct rt6_info *rt; /* Route to the other host */
125 struct ip_vs_dest *dest = cp->dest;
128 spin_lock(&dest->dst_lock);
129 rt = (struct rt6_info *)__ip_vs_dst_check(dest, 0, 0);
135 .daddr = dest->addr.in6,
144 rt = (struct rt6_info *)ip6_route_output(&init_net,
147 spin_unlock(&dest->dst_lock);
148 IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n",
/* cache the fresh route with our own reference */
152 __ip_vs_dst_set(dest, 0, dst_clone(&rt->dst));
153 IP_VS_DBG(10, "new dst %pI6, refcnt=%d\n",
155 atomic_read(&rt->dst.__refcnt));
157 spin_unlock(&dest->dst_lock);
163 .daddr = cp->daddr.in6,
165 .s6_addr32 = { 0, 0, 0, 0 },
171 rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
173 IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n",
185 * Release dest->dst_cache before a dest is removed
/*
 * Detach the cached route first, then drop the reference, so no
 * stale pointer remains reachable through @dest.
 */
188 ip_vs_dst_reset(struct ip_vs_dest *dest)
190 struct dst_entry *old_dst;
192 old_dst = dest->dst_cache;
193 dest->dst_cache = NULL;
/* dst_release(NULL) is a no-op, so an empty cache is fine here */
194 dst_release(old_dst);
/*
 * IP_VS_XMIT - hand a rewritten skb to the stack for transmission.
 * Marks the skb as IPVS property (so IPVS hooks skip it on re-entry),
 * fixes up forwarded checksum state, and re-injects it at the
 * NF_INET_LOCAL_OUT hook with dst_output as the okfn.
 * NOTE(review): sampled listing — the surrounding do/while(0) wrapper
 * usual for multi-statement macros is not visible here.
 */
197 #define IP_VS_XMIT(pf, skb, rt) \
199 (skb)->ipvs_property = 1; \
200 skb_forward_csum(skb); \
201 NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \
202 (rt)->dst.dev, dst_output); \
207 * NULL transmitter (do nothing except return NF_ACCEPT)
/*
 * Used for the LOCALNODE forwarding method: the packet needs no
 * rewriting or rerouting, so the skb is left untouched.
 */
210 ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
211 struct ip_vs_protocol *pp)
213 /* we do not touch skb and do not need pskb ptr */
220 * Let packets bypass the destination when the destination is not
221 * available, it may be only used in transparent cache cluster.
/*
 * ip_vs_bypass_xmit - route the IPv4 packet by its own daddr and send
 * it out unmodified (no NAT), used when no real server is available.
 * NOTE(review): sampled listing — the flowi setup, several error
 * returns and dst_release() calls on the failure paths are elided.
 */
224 ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
225 struct ip_vs_protocol *pp)
227 struct rtable *rt; /* Route to the other host */
228 struct iphdr *iph = ip_hdr(skb);
237 .tos = RT_TOS(tos), } },
242 if (ip_route_output_key(&init_net, &rt, &fl)) {
243 IP_VS_DBG_RL("%s(): ip_route_output error, dest: %pI4\n",
244 __func__, &iph->daddr);
/* PMTU check: DF-flagged packets larger than the route MTU bounce
 * back an ICMP_FRAG_NEEDED to the sender */
249 mtu = dst_mtu(&rt->dst);
250 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
252 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
253 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
258 * Call ip_send_check because we are not sure it is called
259 * after ip_defrag. Is copy-on-write needed?
261 if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
265 ip_send_check(ip_hdr(skb));
/* attach the new route (replaces any old dst on the skb) */
269 skb_dst_set(skb, &rt->dst);
271 /* Another hack: avoid icmp_send in ip_fragment */
274 IP_VS_XMIT(NFPROTO_IPV4, skb, rt);
280 dst_link_failure(skb);
287 #ifdef CONFIG_IP_VS_IPV6
/*
 * ip_vs_bypass_xmit_v6 - IPv6 counterpart of ip_vs_bypass_xmit():
 * route by the packet's own daddr and send it out unmodified.
 * Failure paths visible here drop the route reference before ICMP.
 * NOTE(review): sampled listing — the route-error check after
 * ip6_route_output() and the function tail are elided.
 */
289 ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
290 struct ip_vs_protocol *pp)
292 struct rt6_info *rt; /* Route to the other host */
293 struct ipv6hdr *iph = ipv6_hdr(skb);
300 .saddr = { .s6_addr32 = {0, 0, 0, 0} }, } },
305 rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
307 IP_VS_DBG_RL("%s(): ip6_route_output error, dest: %pI6\n",
308 __func__, &iph->daddr);
/* IPv6 never fragments in transit: oversized packets always get
 * ICMPV6_PKT_TOOBIG (no DF test as in the IPv4 path) */
313 mtu = dst_mtu(&rt->dst);
314 if (skb->len > mtu) {
315 dst_release(&rt->dst);
316 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
317 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
322 * Call ip_send_check because we are not sure it is called
323 * after ip_defrag. Is copy-on-write needed?
/* NOTE(review): comment above mirrors the IPv4 path; IPv6 headers
 * carry no checksum, so no ip_send_check() equivalent follows here */
325 skb = skb_share_check(skb, GFP_ATOMIC);
326 if (unlikely(skb == NULL)) {
327 dst_release(&rt->dst);
333 skb_dst_set(skb, &rt->dst);
335 /* Another hack: avoid icmp_send in ip_fragment */
338 IP_VS_XMIT(NFPROTO_IPV6, skb, rt);
344 dst_link_failure(skb);
/*
 * ip_vs_update_conntrack - make conntrack's reply tuple match the
 * DNAT just applied, so return traffic RIP->DIP is recognized.
 * Bails out for untracked or already-confirmed connections (the
 * tuple can only be altered before the ct is hashed).
 */
353 ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp)
355 struct nf_conn *ct = (struct nf_conn *)skb->nfct;
356 struct nf_conntrack_tuple new_tuple;
358 if (ct == NULL || nf_ct_is_untracked(ct) || nf_ct_is_confirmed(ct))
362 * The connection is not yet in the hashtable, so we update it.
363 * CIP->VIP will remain the same, so leave the tuple in
364 * IP_CT_DIR_ORIGINAL untouched. When the reply comes back from the
365 * real-server we will see RIP->DIP.
367 new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
/* reply source becomes the real server's address/port */
368 new_tuple.src.u3 = cp->daddr;
370 * This will also take care of UDP and other protocols.
372 new_tuple.src.u.tcp.port = cp->dport;
373 nf_conntrack_alter_reply(ct, &new_tuple);
377 * NAT transmitter (only for outside-to-inside nat forwarding)
378 * Not used for related ICMP
/*
 * ip_vs_nat_xmit - DNAT the IPv4 packet to the real server and send.
 * Fills in a missing client port, routes via the cached dest route,
 * enforces PMTU, makes the header writable, runs the protocol's
 * dnat_handler, rewrites daddr, syncs conntrack and transmits.
 * NOTE(review): sampled listing — error-path labels, dst_release()
 * calls and several returns are elided here.
 */
381 ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
382 struct ip_vs_protocol *pp)
384 struct rtable *rt; /* Route to the other host */
386 struct iphdr *iph = ip_hdr(skb);
390 /* check if it is a connection of no-client-port */
391 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
393 p = skb_header_pointer(skb, iph->ihl*4, sizeof(_pt), &_pt);
396 ip_vs_conn_fill_cport(cp, *p);
397 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
400 if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(iph->tos))))
/* PMTU check against the DF bit, as in the bypass path */
404 mtu = dst_mtu(&rt->dst);
405 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
407 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
408 IP_VS_DBG_RL_PKT(0, pp, skb, 0, "ip_vs_nat_xmit(): frag needed for");
412 /* copy-on-write the packet before mangling it */
413 if (!skb_make_writable(skb, sizeof(struct iphdr)))
416 if (skb_cow(skb, rt->dst.dev->hard_header_len))
421 skb_dst_set(skb, &rt->dst);
423 /* mangle the packet */
424 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
426 ip_hdr(skb)->daddr = cp->daddr.ip;
/* daddr changed above, so the IP header checksum must be redone */
427 ip_send_check(ip_hdr(skb));
429 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
431 ip_vs_update_conntrack(skb, cp);
433 /* FIXME: when application helper enlarges the packet and the length
434 is larger than the MTU of outgoing device, there will be still
437 /* Another hack: avoid icmp_send in ip_fragment */
440 IP_VS_XMIT(NFPROTO_IPV4, skb, rt);
446 dst_link_failure(skb);
456 #ifdef CONFIG_IP_VS_IPV6
/*
 * ip_vs_nat_xmit_v6 - IPv6 counterpart of ip_vs_nat_xmit().
 * Same sequence: fill client port, route via cached dest route,
 * PMTU check, COW, dnat_handler, daddr rewrite, conntrack update,
 * transmit.  No header-checksum fixup — IPv6 headers carry none.
 * NOTE(review): sampled listing — error-path labels and several
 * returns are elided; the trailing dst_release() visible at the end
 * belongs to an error path.
 */
458 ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
459 struct ip_vs_protocol *pp)
461 struct rt6_info *rt; /* Route to the other host */
466 /* check if it is a connection of no-client-port */
467 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
469 p = skb_header_pointer(skb, sizeof(struct ipv6hdr),
473 ip_vs_conn_fill_cport(cp, *p);
474 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
477 rt = __ip_vs_get_out_rt_v6(cp);
/* oversized packets always bounce ICMPV6_PKT_TOOBIG (no DF in v6) */
482 mtu = dst_mtu(&rt->dst);
483 if (skb->len > mtu) {
484 dst_release(&rt->dst);
485 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
486 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
487 "ip_vs_nat_xmit_v6(): frag needed for");
491 /* copy-on-write the packet before mangling it */
492 if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
495 if (skb_cow(skb, rt->dst.dev->hard_header_len))
500 skb_dst_set(skb, &rt->dst);
502 /* mangle the packet */
503 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
505 ipv6_hdr(skb)->daddr = cp->daddr.in6;
507 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
509 ip_vs_update_conntrack(skb, cp);
511 /* FIXME: when application helper enlarges the packet and the length
512 is larger than the MTU of outgoing device, there will be still
515 /* Another hack: avoid icmp_send in ip_fragment */
518 IP_VS_XMIT(NFPROTO_IPV6, skb, rt);
524 dst_link_failure(skb);
530 dst_release(&rt->dst);
537 * IP Tunneling transmitter
539 * This function encapsulates the packet in a new IP packet, its
540 * destination will be set to cp->daddr. Most code of this function
541 * is taken from ipip.c.
543 * It is used in VS/TUN cluster. The load balancer selects a real
544 * server from a cluster based on a scheduling algorithm,
545 * encapsulates the request packet and forwards it to the selected
546 * server. For example, all real servers are configured with
547 * "ifconfig tunl0 <Virtual IP Address> up". When the server receives
548 * the encapsulated packet, it will decapsulate the packet, process
549 * the request and return the response packets directly to the client
550 * without passing the load balancer. This can greatly increase the
551 * scalability of virtual server.
553 * Used for ANY protocol
/*
 * ip_vs_tunnel_xmit - encapsulate the IPv4 packet in IPIP and send it
 * to the real server (VS/TUN).  Builds an outer iphdr with
 * protocol=IPPROTO_IPIP, daddr/saddr from the route, and the inner
 * packet's tos/ttl/DF carried over.
 * NOTE(review): sampled listing — mtu/ttl sanity branches, the
 * headroom-realloc consume path, outer-header field assignments
 * (version/tos/frag_off/tot_len) and the checksum/transmit tail are
 * partially elided here.
 */
556 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
557 struct ip_vs_protocol *pp)
559 struct rtable *rt; /* Route to the other host */
560 struct net_device *tdev; /* Device to other host */
561 struct iphdr *old_iph = ip_hdr(skb);
562 u8 tos = old_iph->tos;
563 __be16 df = old_iph->frag_off;
564 sk_buff_data_t old_transport_header = skb->transport_header;
565 struct iphdr *iph; /* Our new IP header */
566 unsigned int max_headroom; /* The extra header space needed */
/* only plain IPv4 payloads can be IPIP-encapsulated */
571 if (skb->protocol != htons(ETH_P_IP)) {
572 IP_VS_DBG_RL("%s(): protocol error, "
573 "ETH_P_IP: %d, skb protocol: %d\n",
574 __func__, htons(ETH_P_IP), skb->protocol);
578 if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(tos))))
/* effective MTU shrinks by the outer IP header we are adding */
583 mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
586 IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
590 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
592 df |= (old_iph->frag_off & htons(IP_DF));
594 if ((old_iph->frag_off & htons(IP_DF))
595 && mtu < ntohs(old_iph->tot_len)) {
596 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
598 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
603 * Okay, now see if we can stuff it in the buffer as-is.
605 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);
607 if (skb_headroom(skb) < max_headroom
608 || skb_cloned(skb) || skb_shared(skb)) {
609 struct sk_buff *new_skb =
610 skb_realloc_headroom(skb, max_headroom);
614 IP_VS_ERR_RL("%s(): no memory\n", __func__);
/* skb may have been replaced above; re-read the inner header */
619 old_iph = ip_hdr(skb);
622 skb->transport_header = old_transport_header;
624 /* fix old IP header checksum */
625 ip_send_check(old_iph);
627 skb_push(skb, sizeof(struct iphdr));
628 skb_reset_network_header(skb);
629 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
633 skb_dst_set(skb, &rt->dst);
636 * Push down and install the IPIP header.
640 iph->ihl = sizeof(struct iphdr)>>2;
642 iph->protocol = IPPROTO_IPIP;
644 iph->daddr = rt->rt_dst;
645 iph->saddr = rt->rt_src;
646 iph->ttl = old_iph->ttl;
647 ip_select_ident(iph, &rt->dst, NULL);
649 /* Another hack: avoid icmp_send in ip_fragment */
659 dst_link_failure(skb);
666 #ifdef CONFIG_IP_VS_IPV6
/*
 * ip_vs_tunnel_xmit_v6 - IPv6-in-IPv6 encapsulating transmitter
 * (VS/TUN).  Builds an outer ipv6hdr with nexthdr=IPPROTO_IPV6,
 * daddr from the route and saddr from the virtual address, payload
 * length grown by the inner header size.
 * NOTE(review): sampled listing — the mtu<1280 condition, the
 * headroom-realloc consume path, version/flow setup and the transmit
 * tail are partially elided here.
 */
668 ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
669 struct ip_vs_protocol *pp)
671 struct rt6_info *rt; /* Route to the other host */
672 struct net_device *tdev; /* Device to other host */
673 struct ipv6hdr *old_iph = ipv6_hdr(skb);
674 sk_buff_data_t old_transport_header = skb->transport_header;
675 struct ipv6hdr *iph; /* Our new IP header */
676 unsigned int max_headroom; /* The extra header space needed */
/* only plain IPv6 payloads can be encapsulated here */
681 if (skb->protocol != htons(ETH_P_IPV6)) {
682 IP_VS_DBG_RL("%s(): protocol error, "
683 "ETH_P_IPV6: %d, skb protocol: %d\n",
684 __func__, htons(ETH_P_IPV6), skb->protocol);
688 rt = __ip_vs_get_out_rt_v6(cp);
/* effective MTU shrinks by the outer IPv6 header we are adding */
694 mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
695 /* TODO IPv6: do we need this check in IPv6? */
697 dst_release(&rt->dst);
698 IP_VS_DBG_RL("%s(): mtu less than 1280\n", __func__);
702 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
704 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
705 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
706 dst_release(&rt->dst);
707 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
712 * Okay, now see if we can stuff it in the buffer as-is.
714 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
716 if (skb_headroom(skb) < max_headroom
717 || skb_cloned(skb) || skb_shared(skb)) {
718 struct sk_buff *new_skb =
719 skb_realloc_headroom(skb, max_headroom);
721 dst_release(&rt->dst);
723 IP_VS_ERR_RL("%s(): no memory\n", __func__);
/* skb may have been replaced above; re-read the inner header */
728 old_iph = ipv6_hdr(skb);
731 skb->transport_header = old_transport_header;
733 skb_push(skb, sizeof(struct ipv6hdr));
734 skb_reset_network_header(skb);
735 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
739 skb_dst_set(skb, &rt->dst);
742 * Push down and install the IPIP header.
746 iph->nexthdr = IPPROTO_IPV6;
/* outer payload length = inner payload + inner header size */
747 iph->payload_len = old_iph->payload_len;
748 be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
749 iph->priority = old_iph->priority;
750 memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
751 iph->daddr = rt->rt6i_dst.addr;
752 iph->saddr = cp->vaddr.in6; /* rt->rt6i_src.addr; */
753 iph->hop_limit = old_iph->hop_limit;
755 /* Another hack: avoid icmp_send in ip_fragment */
765 dst_link_failure(skb);
775 * Direct Routing transmitter
776 * Used for ANY protocol
/*
 * ip_vs_dr_xmit - direct-routing transmitter (VS/DR) for IPv4.
 * The packet is not rewritten at all: it is rerouted to the real
 * server (which owns the VIP on a non-ARP interface) and sent out.
 * NOTE(review): sampled listing — error-path labels, dst_release()
 * calls and the function tail are elided here.
 */
779 ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
780 struct ip_vs_protocol *pp)
782 struct rtable *rt; /* Route to the other host */
783 struct iphdr *iph = ip_hdr(skb);
788 if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(iph->tos))))
/* PMTU check against the DF bit before re-injecting */
792 mtu = dst_mtu(&rt->dst);
793 if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
794 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
796 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
801 * Call ip_send_check because we are not sure it is called
802 * after ip_defrag. Is copy-on-write needed?
804 if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
808 ip_send_check(ip_hdr(skb));
812 skb_dst_set(skb, &rt->dst);
814 /* Another hack: avoid icmp_send in ip_fragment */
817 IP_VS_XMIT(NFPROTO_IPV4, skb, rt);
823 dst_link_failure(skb);
830 #ifdef CONFIG_IP_VS_IPV6
/*
 * ip_vs_dr_xmit_v6 - IPv6 counterpart of ip_vs_dr_xmit(): reroute the
 * unmodified packet to the real server over the cached dest route.
 * NOTE(review): sampled listing — error-path labels and the function
 * tail are elided here.
 */
832 ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
833 struct ip_vs_protocol *pp)
835 struct rt6_info *rt; /* Route to the other host */
840 rt = __ip_vs_get_out_rt_v6(cp);
/* oversized packets always bounce ICMPV6_PKT_TOOBIG (no DF in v6) */
845 mtu = dst_mtu(&rt->dst);
846 if (skb->len > mtu) {
847 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
848 dst_release(&rt->dst);
849 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
854 * Call ip_send_check because we are not sure it is called
855 * after ip_defrag. Is copy-on-write needed?
/* NOTE(review): comment above mirrors the IPv4 path; no checksum
 * fixup applies to the IPv6 header */
857 skb = skb_share_check(skb, GFP_ATOMIC);
858 if (unlikely(skb == NULL)) {
859 dst_release(&rt->dst);
865 skb_dst_set(skb, &rt->dst);
867 /* Another hack: avoid icmp_send in ip_fragment */
870 IP_VS_XMIT(NFPROTO_IPV6, skb, rt);
876 dst_link_failure(skb);
886 * ICMP packet transmitter
887 * called by the ip_vs_in_icmp
/*
 * ip_vs_icmp_xmit - forward a related ICMP packet for an existing
 * connection.  Non-masquerade methods (TUN/DR/LOCALNODE) delegate to
 * cp->packet_xmit unchanged; the NAT case mangles the embedded
 * header via ip_vs_nat_icmp() and re-injects.  @offset is the depth
 * that must be writable before mangling.
 * NOTE(review): sampled listing — error-path labels, dst_release()
 * calls and several returns are elided here.
 */
890 ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
891 struct ip_vs_protocol *pp, int offset)
893 struct rtable *rt; /* Route to the other host */
899 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
900 forwarded directly here, because there is no need to
901 translate address/port back */
902 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
904 rc = cp->packet_xmit(skb, cp, pp);
907 /* do not touch skb anymore */
908 atomic_inc(&cp->in_pkts);
913 * mangle and send the packet here (only for VS/NAT)
916 if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(ip_hdr(skb)->tos))))
/* PMTU check against the DF bit, as in the other IPv4 paths */
920 mtu = dst_mtu(&rt->dst);
921 if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
923 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
924 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
928 /* copy-on-write the packet before mangling it */
929 if (!skb_make_writable(skb, offset))
932 if (skb_cow(skb, rt->dst.dev->hard_header_len))
935 /* drop the old route when skb is not shared */
937 skb_dst_set(skb, &rt->dst);
939 ip_vs_nat_icmp(skb, pp, cp, 0);
941 /* Another hack: avoid icmp_send in ip_fragment */
944 IP_VS_XMIT(NFPROTO_IPV4, skb, rt);
950 dst_link_failure(skb);
962 #ifdef CONFIG_IP_VS_IPV6
/*
 * ip_vs_icmp_xmit_v6 - IPv6 counterpart of ip_vs_icmp_xmit().
 * Non-masquerade methods delegate to cp->packet_xmit; the NAT case
 * mangles the embedded header via ip_vs_nat_icmp_v6() and re-injects.
 * NOTE(review): sampled listing — error-path labels and several
 * returns are elided; the trailing dst_release() visible at the end
 * belongs to an error path.
 */
964 ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
965 struct ip_vs_protocol *pp, int offset)
967 struct rt6_info *rt; /* Route to the other host */
973 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
974 forwarded directly here, because there is no need to
975 translate address/port back */
976 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
978 rc = cp->packet_xmit(skb, cp, pp);
981 /* do not touch skb anymore */
982 atomic_inc(&cp->in_pkts);
987 * mangle and send the packet here (only for VS/NAT)
990 rt = __ip_vs_get_out_rt_v6(cp);
/* oversized packets always bounce ICMPV6_PKT_TOOBIG (no DF in v6) */
995 mtu = dst_mtu(&rt->dst);
996 if (skb->len > mtu) {
997 dst_release(&rt->dst);
998 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
999 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1003 /* copy-on-write the packet before mangling it */
1004 if (!skb_make_writable(skb, offset))
1007 if (skb_cow(skb, rt->dst.dev->hard_header_len))
1010 /* drop the old route when skb is not shared */
1012 skb_dst_set(skb, &rt->dst);
1014 ip_vs_nat_icmp_v6(skb, pp, cp, 0);
1016 /* Another hack: avoid icmp_send in ip_fragment */
1019 IP_VS_XMIT(NFPROTO_IPV6, skb, rt);
1025 dst_link_failure(skb);
1033 dst_release(&rt->dst);