2 * IPv6 output functions
3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * Based on linux/net/ipv4/ip_output.c
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
16 * A.N.Kuznetsov : airthmetics in fragmentation.
17 * extension headers are implemented.
18 * route changes now work.
19 * ip6_forward does not confuse sniffers.
22 * H. von Brand : Added missing #include <linux/string.h>
23 * Imran Patel : frag id should be in NBO
24 * Kazunori MIYAZAWA @USAGI
25 * : add ip6_append_data and related functions
29 #include <linux/errno.h>
30 #include <linux/kernel.h>
31 #include <linux/string.h>
32 #include <linux/socket.h>
33 #include <linux/net.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/in6.h>
37 #include <linux/tcp.h>
38 #include <linux/route.h>
39 #include <linux/module.h>
40 #include <linux/slab.h>
42 #include <linux/netfilter.h>
43 #include <linux/netfilter_ipv6.h>
49 #include <net/ndisc.h>
50 #include <net/protocol.h>
51 #include <net/ip6_route.h>
52 #include <net/addrconf.h>
53 #include <net/rawv6.h>
56 #include <net/checksum.h>
57 #include <linux/mroute6.h>
/* Forward declaration: ip6_fragment() is defined later in this file. */
59 int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
/*
 * __ip6_local_out - finalize a locally generated IPv6 packet and run the
 * LOCAL_OUT netfilter hook.
 *
 * Computes the payload length (skb->len minus the fixed IPv6 header),
 * checks it against IPV6_MAXPLEN, stores it in network byte order in the
 * header, and passes the skb through NF_INET_LOCAL_OUT with dst_output()
 * as the continuation.
 *
 * NOTE(review): this chunk elides some lines of the function (braces,
 * the local declaration of 'len', the over-MAXPLEN branch); comments
 * describe only the visible statements.
 */
61 int __ip6_local_out(struct sk_buff *skb)
65 len = skb->len - sizeof(struct ipv6hdr);
66 if (len > IPV6_MAXPLEN)
68 ipv6_hdr(skb)->payload_len = htons(len);
70 return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
71 skb_dst(skb)->dev, dst_output);
/*
 * ip6_local_out - send a locally generated packet: run __ip6_local_out()
 * (header finalization + LOCAL_OUT hook) and, on the visible success path,
 * continue with dst_output().
 *
 * NOTE(review): the condition guarding the dst_output() call and the
 * final return are elided in this chunk.
 */
74 int ip6_local_out(struct sk_buff *skb)
78 err = __ip6_local_out(skb);
80 err = dst_output(skb);
84 EXPORT_SYMBOL_GPL(ip6_local_out);
86 /* dev_loopback_xmit for use with netfilter. */
/*
 * Prepare a cloned skb for loopback delivery: reset the MAC header, pull
 * to the network header, and mark it PACKET_LOOPBACK with checksum
 * verification skipped. Warns if the skb has no dst attached.
 *
 * NOTE(review): the actual injection (netif_rx or similar) and return
 * statement are elided in this chunk.
 */
87 static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
89 skb_reset_mac_header(newskb);
90 __skb_pull(newskb, skb_network_offset(newskb));
91 newskb->pkt_type = PACKET_LOOPBACK;
92 newskb->ip_summed = CHECKSUM_UNNECESSARY;
93 WARN_ON(!skb_dst(newskb));
/*
 * ip6_finish_output2 - last step of the output path: multicast loopback
 * handling, OUTMCAST accounting, then neighbour transmission.
 *
 * For multicast destinations: if the packet should be looped back to
 * local listeners (non-loopback device, socket has mcast loop enabled,
 * and either an mroute socket without IP6SKB_FORWARDED or a locally
 * joined group), a clone is sent back through POST_ROUTING to
 * ip6_dev_loopback_xmit(). A hop limit of 0 on a multicast packet is
 * counted as an OUTDISCARD.
 *
 * Finally the dst's neighbour entry transmits the skb via neigh_output();
 * with no neighbour the packet is charged to OUTNOROUTES.
 *
 * NOTE(review): several lines (braces, the hop_limit==0 kfree_skb path,
 * the no-neighbour error return) are elided in this chunk.
 */
99 static int ip6_finish_output2(struct sk_buff *skb)
101 struct dst_entry *dst = skb_dst(skb);
102 struct net_device *dev = dst->dev;
103 struct neighbour *neigh;
105 skb->protocol = htons(ETH_P_IPV6);
108 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
109 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
111 if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
112 ((mroute6_socket(dev_net(dev), skb) &&
113 !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
114 ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
115 &ipv6_hdr(skb)->saddr))) {
116 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
118 /* Do not check for IFF_ALLMULTI; multicast routing
119 is not supported in any case.
122 NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
123 newskb, NULL, newskb->dev,
124 ip6_dev_loopback_xmit);
126 if (ipv6_hdr(skb)->hop_limit == 0) {
127 IP6_INC_STATS(dev_net(dev), idev,
128 IPSTATS_MIB_OUTDISCARDS);
134 IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
139 neigh = dst_get_neighbour(dst);
141 int res = neigh_output(neigh, skb);
147 IP6_INC_STATS(dev_net(dst->dev),
148 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
/*
 * ip6_finish_output - fragment if the packet exceeds the path MTU (and is
 * not GSO) or if the route demands all-fragment treatment; otherwise hand
 * straight to ip6_finish_output2().
 */
153 static int ip6_finish_output(struct sk_buff *skb)
155 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
156 dst_allfrag(skb_dst(skb)))
157 return ip6_fragment(skb, ip6_finish_output2);
159 return ip6_finish_output2(skb);
/*
 * ip6_output - entry point used as the dst output method. Drops the packet
 * (counted as OUTDISCARDS) when IPv6 is administratively disabled on the
 * egress device, otherwise runs the POST_ROUTING hook conditionally
 * (skipped when IP6SKB_REROUTED is set) before ip6_finish_output.
 *
 * NOTE(review): the kfree_skb on the disabled-IPv6 path and the
 * ip6_finish_output argument to NF_HOOK_COND are elided in this chunk.
 */
162 int ip6_output(struct sk_buff *skb)
164 struct net_device *dev = skb_dst(skb)->dev;
165 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
166 if (unlikely(idev->cnf.disable_ipv6)) {
167 IP6_INC_STATS(dev_net(dev), idev,
168 IPSTATS_MIB_OUTDISCARDS);
173 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
175 !(IP6CB(skb)->flags & IP6SKB_REROUTED));
179 * xmit an sk_buff (used by TCP, SCTP and DCCP)
/*
 * ip6_xmit - build the IPv6 header for a connection-oriented socket's skb
 * and send it.
 *
 * Steps visible in this chunk:
 *  - reserve headroom for extension headers (opt->opt_nflen + opt->opt_flen)
 *    plus the IPv6 header and link-layer space, reallocating the skb head
 *    (skb_realloc_headroom) if the current headroom is insufficient;
 *  - push fragmentable and non-fragmentable extension headers, which may
 *    rewrite 'proto' and 'first_hop' (routing header case);
 *  - fill in version/tclass/flowlabel, payload_len, nexthdr, hop_limit and
 *    the source/destination addresses;
 *  - if the packet fits the MTU (or local_df/GSO applies), account
 *    IPSTATS_MIB_OUT and run the LOCAL_OUT netfilter hook toward
 *    dst_output(); otherwise report ICMPV6_PKT_TOOBIG to the sender and
 *    count FRAGFAILS.
 *
 * NOTE(review): declarations of 'hdr' and 'mtu', the skb2 swap after
 * skb_realloc_headroom, and several braces are elided in this chunk.
 */
182 int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
183 struct ipv6_txoptions *opt, int tclass)
185 struct net *net = sock_net(sk);
186 struct ipv6_pinfo *np = inet6_sk(sk);
187 struct in6_addr *first_hop = &fl6->daddr;
188 struct dst_entry *dst = skb_dst(skb);
190 u8 proto = fl6->flowi6_proto;
191 int seg_len = skb->len;
196 unsigned int head_room;
198 /* First: exthdrs may take lots of space (~8K for now)
199 MAX_HEADER is not enough.
201 head_room = opt->opt_nflen + opt->opt_flen;
202 seg_len += head_room;
203 head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
205 if (skb_headroom(skb) < head_room) {
206 struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
208 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
209 IPSTATS_MIB_OUTDISCARDS);
215 skb_set_owner_w(skb, sk);
218 ipv6_push_frag_opts(skb, opt, &proto);
220 ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
223 skb_push(skb, sizeof(struct ipv6hdr));
224 skb_reset_network_header(skb);
228 * Fill in the IPv6 header
/* Socket hop limit wins; fall back to the route's default. */
231 hlimit = np->hop_limit;
233 hlimit = ip6_dst_hoplimit(dst);
235 *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl6->flowlabel;
237 hdr->payload_len = htons(seg_len);
238 hdr->nexthdr = proto;
239 hdr->hop_limit = hlimit;
241 ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
242 ipv6_addr_copy(&hdr->daddr, first_hop);
244 skb->priority = sk->sk_priority;
245 skb->mark = sk->sk_mark;
248 if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
249 IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
250 IPSTATS_MIB_OUT, skb->len);
251 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
252 dst->dev, dst_output);
256 printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
258 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
259 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
264 EXPORT_SYMBOL(ip6_xmit);
267 * To avoid extra problems ND packets are send through this
268 * routine. It's code duplication but I really want to avoid
269 * extra checks since ipv6_build_header is used by TCP (which
270 * is for us performance critical)
/*
 * ip6_nd_hdr - build a minimal IPv6 header for neighbour discovery
 * packets: version 6, zero tclass/flowlabel, explicit saddr/daddr, and
 * the socket's configured hop limit.
 *
 * NOTE(review): the trailing parameters (len, proto), the 'hdr'
 * assignment and the return are elided in this chunk.
 */
273 int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
274 const struct in6_addr *saddr, const struct in6_addr *daddr,
277 struct ipv6_pinfo *np = inet6_sk(sk);
280 skb->protocol = htons(ETH_P_IPV6);
283 skb_reset_network_header(skb);
284 skb_put(skb, sizeof(struct ipv6hdr));
287 *(__be32*)hdr = htonl(0x60000000);
289 hdr->payload_len = htons(len);
290 hdr->nexthdr = proto;
291 hdr->hop_limit = np->hop_limit;
293 ipv6_addr_copy(&hdr->saddr, saddr);
294 ipv6_addr_copy(&hdr->daddr, daddr);
/*
 * ip6_call_ra_chain - deliver a Router Alert packet to every raw socket
 * registered on ip6_ra_chain with a matching 'sel' value, honouring
 * sk_bound_dev_if. Each matching socket except the last receives a
 * clone; the last receives the original skb.
 *
 * Runs under read_lock(&ip6_ra_lock) for the whole traversal.
 *
 * NOTE(review): the 'last = sk' bookkeeping, the NULL-clone check and
 * the return statements are elided in this chunk.
 */
299 static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
301 struct ip6_ra_chain *ra;
302 struct sock *last = NULL;
304 read_lock(&ip6_ra_lock);
305 for (ra = ip6_ra_chain; ra; ra = ra->next) {
306 struct sock *sk = ra->sk;
307 if (sk && ra->sel == sel &&
308 (!sk->sk_bound_dev_if ||
309 sk->sk_bound_dev_if == skb->dev->ifindex)) {
311 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
313 rawv6_rcv(last, skb2);
320 rawv6_rcv(last, skb);
321 read_unlock(&ip6_ra_lock);
324 read_unlock(&ip6_ra_lock);
/*
 * ip6_forward_proxy_check - decide how a packet destined to a proxied
 * address must be handled.
 *
 * Skips extension headers to find the transport protocol; for ICMPv6,
 * ND messages (RS/RA/NS/NA) are identified so they can be passed to the
 * local input path on behalf of the proxied address. Packets to
 * link-local destinations cannot be proxied and trigger
 * dst_link_failure().
 *
 * NOTE(review): the return values (0 / 1 / negative) and some branch
 * bodies are elided in this chunk; ip6_forward() interprets them as
 * "forward" / "deliver locally" / "drop".
 */
328 static int ip6_forward_proxy_check(struct sk_buff *skb)
330 struct ipv6hdr *hdr = ipv6_hdr(skb);
331 u8 nexthdr = hdr->nexthdr;
334 if (ipv6_ext_hdr(nexthdr)) {
335 offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
339 offset = sizeof(struct ipv6hdr);
341 if (nexthdr == IPPROTO_ICMPV6) {
342 struct icmp6hdr *icmp6;
/* Ensure the ICMPv6 type byte is in the linear area before reading. */
344 if (!pskb_may_pull(skb, (skb_network_header(skb) +
345 offset + 1 - skb->data)))
348 icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
350 switch (icmp6->icmp6_type) {
351 case NDISC_ROUTER_SOLICITATION:
352 case NDISC_ROUTER_ADVERTISEMENT:
353 case NDISC_NEIGHBOUR_SOLICITATION:
354 case NDISC_NEIGHBOUR_ADVERTISEMENT:
356 /* For reaction involving unicast neighbor discovery
357 * message destined to the proxied address, pass it to
367 * The proxying router can't forward traffic sent to a link-local
368 * address, so signal the sender and discard the packet. This
369 * behavior is clarified by the MIPv6 specification.
371 if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
372 dst_link_failure(skb);
/* Continuation for the FORWARD netfilter hook: just emit via dst_output(). */
379 static inline int ip6_forward_finish(struct sk_buff *skb)
381 return dst_output(skb);
/*
 * ip6_pkt_too_big - true when the packet exceeds 'mtu' and may not be
 * forwarded as-is. Packets within the MTU or with local_df set pass; GSO
 * packets pass when each resulting segment fits the MTU.
 *
 * NOTE(review): the return statements of the three branches are elided
 * in this chunk.
 */
384 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
386 if (skb->len <= mtu || skb->local_df)
389 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
/*
 * ip6_forward - forward a received IPv6 packet.
 *
 * Visible pipeline in this chunk:
 *  1. Policy gates: forwarding enabled, no LRO skb, XFRM forward policy,
 *     packet addressed to this host (PACKET_HOST).
 *  2. Router Alert packets are handed to ip6_call_ra_chain() untouched.
 *  3. Hop limit <= 1 => ICMPv6 Time Exceeded + INHDRERRORS.
 *  4. Proxy-ND: if the destination is a proxied neighbour, consult
 *     ip6_forward_proxy_check() and possibly deliver via ip6_input().
 *  5. XFRM route check, then redirect generation: when the packet leaves
 *     on the interface it arrived on (no source routing, no IPsec path),
 *     a rate-limited ICMPv6 redirect targets either the gateway or the
 *     final destination. Otherwise a security-critical source-address
 *     check rejects ANY/multicast/loopback sources and answers
 *     link-local sources with ICMPV6_NOT_NEIGHBOUR.
 *  6. MTU check via ip6_pkt_too_big() => Packet Too Big + stats.
 *  7. skb_cow() for header writability, hop limit decrement (elided
 *     here), then the FORWARD netfilter hook toward ip6_forward_finish.
 *
 * NOTE(review): many lines (local declarations of 'n', 'rt', 'mtu',
 * gotos, label names, kfree_skb calls) are elided in this chunk; the
 * comments describe only what the visible statements establish.
 */
395 int ip6_forward(struct sk_buff *skb)
397 struct dst_entry *dst = skb_dst(skb);
398 struct ipv6hdr *hdr = ipv6_hdr(skb);
399 struct inet6_skb_parm *opt = IP6CB(skb);
400 struct net *net = dev_net(dst->dev);
404 if (net->ipv6.devconf_all->forwarding == 0)
407 if (skb_warn_if_lro(skb))
410 if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
411 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
415 if (skb->pkt_type != PACKET_HOST)
418 skb_forward_csum(skb);
421 * We DO NOT make any processing on
422 * RA packets, pushing them to user level AS IS
423 * without ane WARRANTY that application will be able
424 * to interpret them. The reason is that we
425 * cannot make anything clever here.
427 * We are not end-node, so that if packet contains
428 * AH/ESP, we cannot make anything.
429 * Defragmentation also would be mistake, RA packets
430 * cannot be fragmented, because there is no warranty
431 * that different fragments will go along one path. --ANK
434 u8 *ptr = skb_network_header(skb) + opt->ra;
435 if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
440 * check and decrement ttl
442 if (hdr->hop_limit <= 1) {
443 /* Force OUTPUT device used as source address */
445 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
446 IP6_INC_STATS_BH(net,
447 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
453 /* XXX: idev->cnf.proxy_ndp? */
454 if (net->ipv6.devconf_all->proxy_ndp &&
455 pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
456 int proxied = ip6_forward_proxy_check(skb);
458 return ip6_input(skb);
459 else if (proxied < 0) {
460 IP6_INC_STATS(net, ip6_dst_idev(dst),
461 IPSTATS_MIB_INDISCARDS);
466 if (!xfrm6_route_forward(skb)) {
467 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
472 /* IPv6 specs say nothing about it, but it is clear that we cannot
473 send redirects to source routed frames.
474 We don't send redirects to frames decapsulated from IPsec.
476 n = dst_get_neighbour(dst);
477 if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
478 struct in6_addr *target = NULL;
482 * incoming and outgoing devices are the same
486 rt = (struct rt6_info *) dst;
487 if ((rt->rt6i_flags & RTF_GATEWAY))
488 target = (struct in6_addr*)&n->primary_key;
490 target = &hdr->daddr;
493 rt6_bind_peer(rt, 1);
495 /* Limit redirects both by destination (here)
496 and by source (inside ndisc_send_redirect)
498 if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
499 ndisc_send_redirect(skb, n, target);
501 int addrtype = ipv6_addr_type(&hdr->saddr);
503 /* This check is security critical. */
504 if (addrtype == IPV6_ADDR_ANY ||
505 addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
507 if (addrtype & IPV6_ADDR_LINKLOCAL) {
508 icmpv6_send(skb, ICMPV6_DEST_UNREACH,
509 ICMPV6_NOT_NEIGHBOUR, 0);
515 if (mtu < IPV6_MIN_MTU)
518 if (ip6_pkt_too_big(skb, mtu)) {
519 /* Again, force OUTPUT device used as source address */
521 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
522 IP6_INC_STATS_BH(net,
523 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
524 IP6_INC_STATS_BH(net,
525 ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
530 if (skb_cow(skb, dst->dev->hard_header_len)) {
531 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
537 /* Mangling hops number delayed to point after skb COW */
541 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
542 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
546 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
/*
 * ip6_copy_metadata - copy per-packet metadata from the original skb to
 * a fragment: packet type, priority, protocol, a cloned dst reference,
 * mark, tc_index (when scheduling is built in), netfilter trace flag,
 * and the security mark.
 *
 * NOTE(review): #endif lines and any further copied fields are elided in
 * this chunk.
 */
552 static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
554 to->pkt_type = from->pkt_type;
555 to->priority = from->priority;
556 to->protocol = from->protocol;
558 skb_dst_set(to, dst_clone(skb_dst(from)));
560 to->mark = from->mark;
562 #ifdef CONFIG_NET_SCHED
563 to->tc_index = from->tc_index;
566 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
567 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
568 to->nf_trace = from->nf_trace;
570 skb_copy_secmark(to, from);
/*
 * ip6_find_1stfragopt - walk the extension-header chain to find where a
 * Fragment header must be inserted, returning that byte offset from the
 * network header and leaving *nexthdr pointing at the nexthdr field to
 * be overwritten with NEXTHDR_FRAGMENT.
 *
 * Routing headers (and, with MIPv6, a Home Address option found via
 * ipv6_find_tlv) influence the stopping point.
 *
 * NOTE(review): the switch's other cases, the loop-exit returns and the
 * end of the exthdr recomputation are elided in this chunk.
 */
573 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
575 u16 offset = sizeof(struct ipv6hdr);
576 struct ipv6_opt_hdr *exthdr =
577 (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
578 unsigned int packet_len = skb->tail - skb->network_header;
580 *nexthdr = &ipv6_hdr(skb)->nexthdr;
582 while (offset + 1 <= packet_len) {
588 case NEXTHDR_ROUTING:
592 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
593 if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
603 offset += ipv6_optlen(exthdr);
604 *nexthdr = &exthdr->nexthdr;
605 exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
/*
 * ipv6_select_ident - choose the fragment identification value.
 *
 * With a usable route (and DST_NOPEER not set) the id comes from the
 * route's inet_peer via inet_getid(), binding a peer on demand.
 * Otherwise a global atomic counter supplies the id through a cmpxchg
 * retry loop.
 *
 * NOTE(review): the declarations of 'old'/'new', the peer-NULL fallback
 * and the increment of 'new' are elided in this chunk.
 */
612 void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
614 static atomic_t ipv6_fragmentation_id;
617 if (rt && !(rt->dst.flags & DST_NOPEER)) {
618 struct inet_peer *peer;
621 rt6_bind_peer(rt, 1);
622 peer = rt->rt6i_peer;
624 fhdr->identification = htonl(inet_getid(peer, 0));
629 old = atomic_read(&ipv6_fragmentation_id);
633 } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
634 fhdr->identification = htonl(new);
/*
 * ip6_fragment - split an oversized IPv6 packet and emit each fragment
 * through 'output'.
 *
 * First, packets that exceed the MTU but must not be fragmented locally
 * (!skb->local_df) are answered with ICMPV6_PKT_TOOBIG and dropped
 * (FRAGFAILS). The socket's frag_size, when smaller than the path MTU,
 * caps the fragment size; the usable payload per fragment is then
 * mtu - (unfragmentable header length + sizeof(struct frag_hdr)).
 *
 * Fast path (skb_has_frag_list): if every skb in the frag list already
 * has correct geometry (fits mtu, 8-byte aligned except the tail,
 * enough headroom, not shared), the chain is converted in place — the
 * unfragmentable header is duplicated via kmemdup, a Fragment header is
 * pushed into each piece, offsets/IP6_MF and the shared identification
 * are filled in, and each fragment is emitted in turn. Socket-owned
 * fragments are re-charged via sock_wfree/truesize adjustments, undone
 * under slow_path_clean on failure.
 *
 * Slow path: allocate a fresh skb per fragment
 * (len + hlen + frag_hdr + link-layer space), copy metadata and the
 * unfragmentable header, build the Fragment header (id selected once,
 * reused for the rest), copy a block of payload with skb_copy_bits,
 * and emit. Stats: FRAGCREATES per fragment, FRAGOKS on success,
 * FRAGFAILS on error.
 *
 * NOTE(review): numerous lines (label names, goto targets, 'err'
 * checks, loop headers of the slow path, len/offset arithmetic) are
 * elided in this chunk; comments track only the visible statements.
 */
637 int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
639 struct sk_buff *frag;
640 struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
641 struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
642 struct ipv6hdr *tmp_hdr;
644 unsigned int mtu, hlen, left, len;
646 int ptr, offset = 0, err=0;
647 u8 *prevhdr, nexthdr = 0;
648 struct net *net = dev_net(skb_dst(skb)->dev);
650 hlen = ip6_find_1stfragopt(skb, &prevhdr);
653 mtu = ip6_skb_dst_mtu(skb);
655 /* We must not fragment if the socket is set to force MTU discovery
656 * or if the skb it not generated by a local socket.
658 if (!skb->local_df && skb->len > mtu) {
659 skb->dev = skb_dst(skb)->dev;
660 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
661 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
662 IPSTATS_MIB_FRAGFAILS);
667 if (np && np->frag_size < mtu) {
671 mtu -= hlen + sizeof(struct frag_hdr);
673 if (skb_has_frag_list(skb)) {
674 int first_len = skb_pagelen(skb);
675 struct sk_buff *frag2;
677 if (first_len - hlen > mtu ||
678 ((first_len - hlen) & 7) ||
682 skb_walk_frags(skb, frag) {
683 /* Correct geometry. */
684 if (frag->len > mtu ||
685 ((frag->len & 7) && frag->next) ||
686 skb_headroom(frag) < hlen)
687 goto slow_path_clean;
689 /* Partially cloned skb? */
690 if (skb_shared(frag))
691 goto slow_path_clean;
696 frag->destructor = sock_wfree;
698 skb->truesize -= frag->truesize;
703 frag = skb_shinfo(skb)->frag_list;
704 skb_frag_list_init(skb);
707 *prevhdr = NEXTHDR_FRAGMENT;
708 tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
710 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
711 IPSTATS_MIB_FRAGFAILS);
715 __skb_pull(skb, hlen);
716 fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
717 __skb_push(skb, hlen);
718 skb_reset_network_header(skb);
719 memcpy(skb_network_header(skb), tmp_hdr, hlen);
721 ipv6_select_ident(fh, rt);
722 fh->nexthdr = nexthdr;
724 fh->frag_off = htons(IP6_MF);
725 frag_id = fh->identification;
727 first_len = skb_pagelen(skb);
728 skb->data_len = first_len - skb_headlen(skb);
729 skb->len = first_len;
730 ipv6_hdr(skb)->payload_len = htons(first_len -
731 sizeof(struct ipv6hdr));
736 /* Prepare header of the next frame,
737 * before previous one went down. */
739 frag->ip_summed = CHECKSUM_NONE;
740 skb_reset_transport_header(frag);
741 fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
742 __skb_push(frag, hlen);
743 skb_reset_network_header(frag);
744 memcpy(skb_network_header(frag), tmp_hdr,
746 offset += skb->len - hlen - sizeof(struct frag_hdr);
747 fh->nexthdr = nexthdr;
749 fh->frag_off = htons(offset);
750 if (frag->next != NULL)
751 fh->frag_off |= htons(IP6_MF);
752 fh->identification = frag_id;
753 ipv6_hdr(frag)->payload_len =
755 sizeof(struct ipv6hdr));
756 ip6_copy_metadata(frag, skb);
761 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
762 IPSTATS_MIB_FRAGCREATES);
775 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
776 IPSTATS_MIB_FRAGOKS);
777 dst_release(&rt->dst);
787 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
788 IPSTATS_MIB_FRAGFAILS);
789 dst_release(&rt->dst);
/* slow_path_clean: undo the ownership transfer done above. */
793 skb_walk_frags(skb, frag2) {
797 frag2->destructor = NULL;
798 skb->truesize += frag2->truesize;
803 left = skb->len - hlen; /* Space per frame */
804 ptr = hlen; /* Where to start from */
807 * Fragment the datagram.
810 *prevhdr = NEXTHDR_FRAGMENT;
813 * Keep copying data until we run out.
817 /* IF: it doesn't fit, use 'mtu' - the data space left */
820 /* IF: we are not sending up to and including the packet end
821 then align the next start on an eight byte boundary */
829 if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) {
830 NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
831 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
832 IPSTATS_MIB_FRAGFAILS);
838 * Set up data on packet
841 ip6_copy_metadata(frag, skb);
842 skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev));
843 skb_put(frag, len + hlen + sizeof(struct frag_hdr));
844 skb_reset_network_header(frag);
845 fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
846 frag->transport_header = (frag->network_header + hlen +
847 sizeof(struct frag_hdr));
850 * Charge the memory for the fragment to any owner
854 skb_set_owner_w(frag, skb->sk);
857 * Copy the packet header into the new buffer.
859 skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
862 * Build fragment header.
864 fh->nexthdr = nexthdr;
867 ipv6_select_ident(fh, rt);
868 frag_id = fh->identification;
870 fh->identification = frag_id;
873 * Copy a block of the IP datagram.
875 if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
879 fh->frag_off = htons(offset);
881 fh->frag_off |= htons(IP6_MF);
882 ipv6_hdr(frag)->payload_len = htons(frag->len -
883 sizeof(struct ipv6hdr));
889 * Put this fragment into the sending queue.
895 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
896 IPSTATS_MIB_FRAGCREATES);
898 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
899 IPSTATS_MIB_FRAGOKS);
904 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
905 IPSTATS_MIB_FRAGFAILS);
/*
 * ip6_rt_check - true when a cached route key does NOT match the flow
 * address: either it is not an exact (/128) match for fl_addr, and the
 * cached address (if any) differs from fl_addr as well. Used to decide
 * whether a socket's cached dst must be discarded.
 */
910 static inline int ip6_rt_check(const struct rt6key *rt_key,
911 const struct in6_addr *fl_addr,
912 const struct in6_addr *addr_cache)
914 return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
915 (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
/*
 * ip6_sk_dst_check - validate a socket's cached dst against a flow.
 *
 * A dst of the wrong family, a destination/source (with subtrees) that
 * fails ip6_rt_check() against the cached daddr/saddr, or a mismatched
 * output interface invalidates the cache; the visible branch handles the
 * invalidation, and the matching dst is returned otherwise.
 *
 * NOTE(review): the dst_release/NULL-return on mismatch and the final
 * return are elided in this chunk.
 */
918 static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
919 struct dst_entry *dst,
920 const struct flowi6 *fl6)
922 struct ipv6_pinfo *np = inet6_sk(sk);
928 if (dst->ops->family != AF_INET6) {
933 rt = (struct rt6_info *)dst;
934 /* Yes, checking route validity in not connected
935 * case is not very simple. Take into account,
936 * that we do not support routing by source, TOS,
937 * and MSG_DONTROUTE --ANK (980726)
939 * 1. ip6_rt_check(): If route was host route,
940 * check that cached destination is current.
941 * If it is network route, we still may
942 * check its validity using saved pointer
943 * to the last used address: daddr_cache.
944 * We do not want to save whole address now,
945 * (because main consumer of this service
946 * is tcp, which has not this problem),
947 * so that the last trick works only on connected
949 * 2. oif also should be the same.
951 if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
952 #ifdef CONFIG_IPV6_SUBTREES
953 ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
955 (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
/*
 * ip6_dst_lookup_tail - core route lookup shared by the public lookup
 * helpers.
 *
 * Performs ip6_route_output() for the flow, propagating any dst error;
 * resolves an unspecified source address via ip6_route_get_saddr()
 * honouring the socket's srcprefs. With optimistic DAD enabled, if the
 * chosen source address is optimistic and the next hop's neighbour entry
 * is not yet VALID, the lookup is redone toward the default router
 * (daddr zeroed in a copied flow) so DAD traffic is not blocked.
 *
 * On -ENETUNREACH the OUTNOROUTES counter is bumped before the error
 * path releases the dst.
 *
 * NOTE(review): several lines (declarations of 'n'/'err', ifp release,
 * the dst_release in out_err_release) are elided in this chunk.
 */
964 static int ip6_dst_lookup_tail(struct sock *sk,
965 struct dst_entry **dst, struct flowi6 *fl6)
967 struct net *net = sock_net(sk);
968 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
974 *dst = ip6_route_output(net, sk, fl6);
976 if ((err = (*dst)->error))
977 goto out_err_release;
979 if (ipv6_addr_any(&fl6->saddr)) {
980 struct rt6_info *rt = (struct rt6_info *) *dst;
981 err = ip6_route_get_saddr(net, rt, &fl6->daddr,
982 sk ? inet6_sk(sk)->srcprefs : 0,
985 goto out_err_release;
988 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
990 * Here if the dst entry we've looked up
991 * has a neighbour entry that is in the INCOMPLETE
992 * state and the src address from the flow is
993 * marked as OPTIMISTIC, we release the found
994 * dst entry and replace it instead with the
995 * dst entry of the nexthop router
998 n = dst_get_neighbour(*dst);
999 if (n && !(n->nud_state & NUD_VALID)) {
1000 struct inet6_ifaddr *ifp;
1001 struct flowi6 fl_gw6;
1005 ifp = ipv6_get_ifaddr(net, &fl6->saddr,
1008 redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
1014 * We need to get the dst entry for the
1015 * default router instead
1018 memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
1019 memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
1020 *dst = ip6_route_output(net, sk, &fl_gw6);
1021 if ((err = (*dst)->error))
1022 goto out_err_release;
1032 if (err == -ENETUNREACH)
1033 IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES);
1040 * ip6_dst_lookup - perform route lookup on flow
1041 * @sk: socket which provides route info
1042 * @dst: pointer to dst_entry * for result
1043 * @fl6: flow to lookup
1045 * This function performs a route lookup on the given flow.
1047 * It returns zero on success, or a standard errno code on error.
/* Thin public wrapper around ip6_dst_lookup_tail(). */
1049 int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
1052 return ip6_dst_lookup_tail(sk, dst, fl6);
1054 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
1057 * ip6_dst_lookup_flow - perform route lookup on flow with ipsec
1058 * @sk: socket which provides route info
1059 * @fl6: flow to lookup
1060 * @final_dst: final destination address for ipsec lookup
1061 * @can_sleep: we are in a sleepable context
1063 * This function performs a route lookup on the given flow.
1065 * It returns a valid dst pointer on success, or a pointer encoded
/*
 * Route lookup followed by an XFRM (IPsec) transformation lookup. The
 * flow's daddr is rewritten to @final_dst before xfrm_lookup(), and the
 * CAN_SLEEP flag is set when the caller allows blocking.
 *
 * NOTE(review): the guards around the daddr copy and the flag set (the
 * final_dst NULL check, the can_sleep test) are elided in this chunk.
 */
1068 struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1069 const struct in6_addr *final_dst,
1072 struct dst_entry *dst = NULL;
1075 err = ip6_dst_lookup_tail(sk, &dst, fl6);
1077 return ERR_PTR(err);
1079 ipv6_addr_copy(&fl6->daddr, final_dst);
1081 fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
1083 return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1085 EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
1088 * ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
1089 * @sk: socket which provides the dst cache and route info
1090 * @fl6: flow to lookup
1091 * @final_dst: final destination address for ipsec lookup
1092 * @can_sleep: we are in a sleepable context
1094 * This function performs a route lookup on the given flow with the
1095 * possibility of using the cached route in the socket if it is valid.
1096 * It will take the socket dst lock when operating on the dst cache.
1097 * As a result, this function can only be used in process context.
1099 * It returns a valid dst pointer on success, or a pointer encoded
/*
 * Like ip6_dst_lookup_flow(), but first tries the socket's cached dst
 * (validated with ip6_sk_dst_check()); a full lookup via
 * ip6_dst_lookup_tail() runs only when the cache misses.
 */
1102 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1103 const struct in6_addr *final_dst,
1106 struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1109 dst = ip6_sk_dst_check(sk, dst, fl6);
1111 err = ip6_dst_lookup_tail(sk, &dst, fl6);
1113 return ERR_PTR(err);
1115 ipv6_addr_copy(&fl6->daddr, final_dst);
1117 fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
1119 return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1121 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
/*
 * ip6_ufo_append_data - append data for UDP Fragmentation Offload.
 *
 * When the write queue is empty, allocates one skb sized for the
 * link-layer, fragment and transport headers, sets up header pointers,
 * marks it CHECKSUM_PARTIAL, and programs gso_size (largest 8-byte
 * multiple fitting the MTU after the fragment header), SKB_GSO_UDP and
 * the fragment id chosen by ipv6_select_ident(). The payload itself is
 * then attached page-wise via skb_append_datato_frags(), leaving actual
 * segmentation to the device.
 *
 * NOTE(review): the error check after sock_alloc_send_skb and the tail
 * of the empty-queue branch are elided in this chunk.
 */
1123 static inline int ip6_ufo_append_data(struct sock *sk,
1124 int getfrag(void *from, char *to, int offset, int len,
1125 int odd, struct sk_buff *skb),
1126 void *from, int length, int hh_len, int fragheaderlen,
1127 int transhdrlen, int mtu,unsigned int flags,
1128 struct rt6_info *rt)
1131 struct sk_buff *skb;
1134 /* There is support for UDP large send offload by network
1135 * device, so create one single skb packet containing complete
1138 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
1139 struct frag_hdr fhdr;
1141 skb = sock_alloc_send_skb(sk,
1142 hh_len + fragheaderlen + transhdrlen + 20,
1143 (flags & MSG_DONTWAIT), &err);
1147 /* reserve space for Hardware header */
1148 skb_reserve(skb, hh_len);
1150 /* create space for UDP/IP header */
1151 skb_put(skb,fragheaderlen + transhdrlen);
1153 /* initialize network header pointer */
1154 skb_reset_network_header(skb);
1156 /* initialize protocol header pointer */
1157 skb->transport_header = skb->network_header + fragheaderlen;
1159 skb->ip_summed = CHECKSUM_PARTIAL;
1162 /* Specify the length of each IPv6 datagram fragment.
1163 * It has to be a multiple of 8.
1165 skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1166 sizeof(struct frag_hdr)) & ~7;
1167 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1168 ipv6_select_ident(&fhdr, rt);
1169 skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
1170 __skb_queue_tail(&sk->sk_write_queue, skb);
1173 return skb_append_datato_frags(sk, skb, getfrag, from,
1174 (length - transhdrlen));
/*
 * ip6_opt_dup - duplicate an IPv6 option header (hdrlen is in units of
 * 8 octets, excluding the first 8). Returns NULL for a NULL source or
 * on allocation failure.
 */
1177 static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1180 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
/*
 * ip6_rthdr_dup - duplicate an IPv6 routing header; same length
 * convention and NULL semantics as ip6_opt_dup().
 */
1183 static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1186 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
/*
 * ip6_append_data_mtu - recompute *mtu and *maxfraglen mid-append.
 *
 * For a non-XFRM-tunnel route: the first fragment keeps header_len
 * reserved out of the original MTU, while subsequent fragments treat
 * that header space as data. maxfraglen is then the largest 8-byte
 * aligned payload boundary minus the fragment header.
 *
 * NOTE(review): the *maxfraglen parameter declaration, the first/non-
 * first branch structure and the tunnel-route branch are elided in this
 * chunk.
 */
1189 static void ip6_append_data_mtu(unsigned int *mtu,
1191 unsigned int fragheaderlen,
1192 struct sk_buff *skb,
1193 struct rt6_info *rt,
1194 unsigned int orig_mtu)
1196 if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1198 /* first fragment, reserve header_len */
1199 *mtu = orig_mtu - rt->dst.header_len;
1203 * this fragment is not first, the headers
1204 * space is regarded as data space.
1208 *maxfraglen = ((*mtu - fragheaderlen) & ~7)
1209 + fragheaderlen - sizeof(struct frag_hdr);
/*
 * ip6_append_data - append user data to the socket's pending send queue,
 * building correctly sized skbs that ip6_push_pending_frames() can later
 * turn into IPv6 packets.
 *
 * Setup phase (first call, write queue empty): duplicate the caller's
 * ipv6_txoptions into np->cork.opt (each sub-option via
 * ip6_opt_dup/ip6_rthdr_dup, failing if any duplication fails), record
 * dst, flow, hop limit, tclass; compute the corked MTU from pmtudisc
 * policy and rt->dst (DST_XFRM_TUNNEL routes measured at the dst rather
 * than dst.path), optionally capped by np->frag_size, and set
 * IPCORK_ALLFRAG for all-fragment paths. Later calls reload rt/fl6/mtu
 * from the cork.
 *
 * Size checks: enforce the IPV6_MAXPLEN ceiling (EMSGSIZE via
 * ipv6_local_error), enable TX timestamps for SOCK_DGRAM, and honour
 * IPV6_DONTFRAG for UDP/RAW by reporting the MTU through
 * ipv6_local_rxpmtu instead of fragmenting.
 *
 * Fast path: oversized UDP on a NETIF_F_UFO device goes through
 * ip6_ufo_append_data().
 *
 * Main loop: while data remains, either (a) start a new skb — computing
 * datalen/fraglen/fraggap/alloclen, allocating with sock_alloc_send_skb
 * (or sock_wmalloc under memory pressure), reserving fragment+IPsec
 * headroom, migrating 'fraggap' tail bytes from the previous skb with
 * skb_copy_and_csum_bits + pskb_trim_unique, copying the next chunk via
 * getfrag(), and queueing the skb — or (b) extend the current skb,
 * either linearly (no NETIF_F_SG) or into page frags using the socket's
 * sk_sndmsg_page/sk_sndmsg_off cursor, allocating fresh pages as needed
 * and charging sk_wmem_alloc.
 *
 * On error the pending length is rolled back and OUTDISCARDS counted.
 *
 * NOTE(review): a large number of lines (several local declarations,
 * 'goto error' statements, else-arms, loop/brace structure, the final
 * return) are elided in this chunk; comments annotate only visible
 * statements.
 */
1213 int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1214 int offset, int len, int odd, struct sk_buff *skb),
1215 void *from, int length, int transhdrlen,
1216 int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
1217 struct rt6_info *rt, unsigned int flags, int dontfrag)
1219 struct inet_sock *inet = inet_sk(sk);
1220 struct ipv6_pinfo *np = inet6_sk(sk);
1221 struct inet_cork *cork;
1222 struct sk_buff *skb, *skb_prev = NULL;
1223 unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
1230 int csummode = CHECKSUM_NONE;
1233 if (flags&MSG_PROBE)
1235 cork = &inet->cork.base;
1236 if (skb_queue_empty(&sk->sk_write_queue)) {
1241 if (WARN_ON(np->cork.opt))
1244 np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
1245 if (unlikely(np->cork.opt == NULL))
1248 np->cork.opt->tot_len = opt->tot_len;
1249 np->cork.opt->opt_flen = opt->opt_flen;
1250 np->cork.opt->opt_nflen = opt->opt_nflen;
1252 np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1254 if (opt->dst0opt && !np->cork.opt->dst0opt)
1257 np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1259 if (opt->dst1opt && !np->cork.opt->dst1opt)
1262 np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
1264 if (opt->hopopt && !np->cork.opt->hopopt)
1267 np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1269 if (opt->srcrt && !np->cork.opt->srcrt)
1272 /* need source address above miyazawa*/
1275 cork->dst = &rt->dst;
1276 inet->cork.fl.u.ip6 = *fl6;
1277 np->cork.hop_limit = hlimit;
1278 np->cork.tclass = tclass;
1279 if (rt->dst.flags & DST_XFRM_TUNNEL)
1280 mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
1281 rt->dst.dev->mtu : dst_mtu(&rt->dst);
1283 mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
1284 rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1285 if (np->frag_size < mtu) {
1287 mtu = np->frag_size;
1289 cork->fragsize = mtu;
1290 if (dst_allfrag(rt->dst.path))
1291 cork->flags |= IPCORK_ALLFRAG;
1293 sk->sk_sndmsg_page = NULL;
1294 sk->sk_sndmsg_off = 0;
1295 exthdrlen = (opt ? opt->opt_flen : 0);
1296 length += exthdrlen;
1297 transhdrlen += exthdrlen;
1298 dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1300 rt = (struct rt6_info *)cork->dst;
1301 fl6 = &inet->cork.fl.u.ip6;
1306 mtu = cork->fragsize;
1310 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1312 fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1313 (opt ? opt->opt_nflen : 0);
1314 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
1316 if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
1317 if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
1318 ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
1323 /* For UDP, check if TX timestamp is enabled */
1324 if (sk->sk_type == SOCK_DGRAM) {
1325 err = sock_tx_timestamp(sk, &tx_flags);
1331 * Let's try using as much space as possible.
1332 * Use MTU if total length of the message fits into the MTU.
1333 * Otherwise, we need to reserve fragment header and
1334 * fragment alignment (= 8-15 octects, in total).
1336 * Note that we may need to "move" the data from the tail of
1337 * of the buffer to the new fragment when we split
1340 * FIXME: It may be fragmented into multiple chunks
1341 * at once if non-fragmentable extension headers
1346 if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
1347 sk->sk_protocol == IPPROTO_RAW)) {
1348 ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
1352 skb = skb_peek_tail(&sk->sk_write_queue);
1353 cork->length += length;
1354 if (((length > mtu) ||
1355 (skb && skb_has_frags(skb))) &&
1356 (sk->sk_protocol == IPPROTO_UDP) &&
1357 (rt->dst.dev->features & NETIF_F_UFO)) {
1358 err = ip6_ufo_append_data(sk, getfrag, from, length,
1359 hh_len, fragheaderlen,
1360 transhdrlen, mtu, flags, rt);
1369 while (length > 0) {
1370 /* Check if the remaining data fits into current packet. */
1371 copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1373 copy = maxfraglen - skb->len;
1377 unsigned int datalen;
1378 unsigned int fraglen;
1379 unsigned int fraggap;
1380 unsigned int alloclen;
1382 /* There's no room in the current skb */
1384 fraggap = skb->len - maxfraglen;
1387 /* update mtu and maxfraglen if necessary */
1388 if (skb == NULL || skb_prev == NULL)
1389 ip6_append_data_mtu(&mtu, &maxfraglen,
1390 fragheaderlen, skb, rt,
1396 * If remaining data exceeds the mtu,
1397 * we know we need more fragment(s).
1399 datalen = length + fraggap;
1401 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1402 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1403 if ((flags & MSG_MORE) &&
1404 !(rt->dst.dev->features&NETIF_F_SG))
1407 alloclen = datalen + fragheaderlen;
1409 alloclen += dst_exthdrlen;
1411 if (datalen != length + fraggap) {
1413 * this is not the last fragment, the trailer
1414 * space is regarded as data space.
1416 datalen += rt->dst.trailer_len;
1419 alloclen += rt->dst.trailer_len;
1420 fraglen = datalen + fragheaderlen;
1423 * We just reserve space for fragment header.
1424 * Note: this may be overallocation if the message
1425 * (without MSG_MORE) fits into the MTU.
1427 alloclen += sizeof(struct frag_hdr);
1430 skb = sock_alloc_send_skb(sk,
1432 (flags & MSG_DONTWAIT), &err);
1435 if (atomic_read(&sk->sk_wmem_alloc) <=
1437 skb = sock_wmalloc(sk,
1438 alloclen + hh_len, 1,
1440 if (unlikely(skb == NULL))
1443 /* Only the initial fragment
1452 * Fill in the control structures
1454 skb->ip_summed = csummode;
1456 /* reserve for fragmentation and ipsec header */
1457 skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
1460 if (sk->sk_type == SOCK_DGRAM)
1461 skb_shinfo(skb)->tx_flags = tx_flags;
1464 * Find where to start putting bytes
1466 data = skb_put(skb, fraglen);
1467 skb_set_network_header(skb, exthdrlen);
1468 data += fragheaderlen;
1469 skb->transport_header = (skb->network_header +
/* Move the tail 'fraggap' bytes of the previous skb into this one,
 * keeping the checksums of both skbs consistent. */
1472 skb->csum = skb_copy_and_csum_bits(
1473 skb_prev, maxfraglen,
1474 data + transhdrlen, fraggap, 0);
1475 skb_prev->csum = csum_sub(skb_prev->csum,
1478 pskb_trim_unique(skb_prev, maxfraglen);
1480 copy = datalen - transhdrlen - fraggap;
1486 } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1493 length -= datalen - fraggap;
/* Only the first skb may carry a partial checksum. */
1497 csummode = CHECKSUM_NONE;
1500 * Put the packet on the pending queue
1502 __skb_queue_tail(&sk->sk_write_queue, skb);
1509 if (!(rt->dst.dev->features&NETIF_F_SG)) {
1513 if (getfrag(from, skb_put(skb, copy),
1514 offset, copy, off, skb) < 0) {
1515 __skb_trim(skb, off);
1520 int i = skb_shinfo(skb)->nr_frags;
1521 skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
1522 struct page *page = sk->sk_sndmsg_page;
1523 int off = sk->sk_sndmsg_off;
1526 if (page && (left = PAGE_SIZE - off) > 0) {
1529 if (page != skb_frag_page(frag)) {
1530 if (i == MAX_SKB_FRAGS) {
1534 skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
1535 skb_frag_ref(skb, i);
1536 frag = &skb_shinfo(skb)->frags[i];
1538 } else if(i < MAX_SKB_FRAGS) {
1539 if (copy > PAGE_SIZE)
1541 page = alloc_pages(sk->sk_allocation, 0);
1546 sk->sk_sndmsg_page = page;
1547 sk->sk_sndmsg_off = 0;
1549 skb_fill_page_desc(skb, i, page, 0, 0);
1550 frag = &skb_shinfo(skb)->frags[i];
1556 skb_frag_address(frag) + skb_frag_size(frag),
1557 offset, copy, skb->len, skb) < 0) {
1561 sk->sk_sndmsg_off += copy;
1562 skb_frag_size_add(frag, copy);
1564 skb->data_len += copy;
1565 skb->truesize += copy;
1566 atomic_add(copy, &sk->sk_wmem_alloc);
/* error path: roll back the optimistic cork->length update. */
1573 cork->length -= length;
1574 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
/*
 * ip6_cork_release - free the per-socket cork state after the pending
 * queue has been pushed or flushed.
 *
 * Frees the cached IPv6 tx options, drops the cached route, and wipes
 * the stored flow so stale addressing cannot leak into the next corked
 * send.
 *
 * NOTE(review): lossy listing -- the guard around the option kfree()s
 * (presumably "if (np->cork.opt)") and the closing braces are not
 * visible here.
 */
1578 static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
/* Release each cached extension-header buffer, then the option block
 * itself; clearing the pointer makes a repeated release a no-op.
 * kfree(NULL) is a no-op, so absent sub-options are harmless. */
1581 kfree(np->cork.opt->dst0opt);
1582 kfree(np->cork.opt->dst1opt);
1583 kfree(np->cork.opt->hopopt);
1584 kfree(np->cork.opt->srcrt);
1585 kfree(np->cork.opt);
1586 np->cork.opt = NULL;
/* Drop the route reference taken when the cork was set up and clear
 * the force-fragmentation flag for the next use. */
1589 if (inet->cork.base.dst) {
1590 dst_release(inet->cork.base.dst);
1591 inet->cork.base.dst = NULL;
1592 inet->cork.base.flags &= ~IPCORK_ALLFRAG;
/* Zero the saved flow (addresses, labels, proto) unconditionally. */
1594 memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
/*
 * ip6_push_pending_frames - transmit everything queued on
 * sk->sk_write_queue by ip6_append_data().
 *
 * Coalesces the queued skbs into a single packet (tail skbs chained on
 * the head's frag_list), prepends the corked extension headers and the
 * IPv6 header, updates MIB counters, and hands the packet to
 * ip6_local_out().  Releases the cork state before returning.
 *
 * NOTE(review): lossy listing -- error labels, several returns, and the
 * closing braces are not visible; comments describe the visible lines.
 */
1597 int ip6_push_pending_frames(struct sock *sk)
1599 struct sk_buff *skb, *tmp_skb;
1600 struct sk_buff **tail_skb;
1601 struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
1602 struct inet_sock *inet = inet_sk(sk);
1603 struct ipv6_pinfo *np = inet6_sk(sk);
1604 struct net *net = sock_net(sk);
1605 struct ipv6hdr *hdr;
1606 struct ipv6_txoptions *opt = np->cork.opt;
/* The route and flow were stashed in the cork when the send started. */
1607 struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
1608 struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
1609 unsigned char proto = fl6->flowi6_proto;
/* Nothing queued: nothing to push. */
1612 if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
1614 tail_skb = &(skb_shinfo(skb)->frag_list);
1616 /* move skb->data to ip header from ext header */
1617 if (skb->data < skb_network_header(skb))
1618 __skb_pull(skb, skb_network_offset(skb));
/* Chain every remaining skb onto the head's frag_list, stripping each
 * one's duplicate network header and folding its length/truesize into
 * the head.  Clearing destructor keeps the write-memory charge from
 * being released per-piece (the head now owns the accounting). */
1619 while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
1620 __skb_pull(tmp_skb, skb_network_header_len(skb));
1621 *tail_skb = tmp_skb;
1622 tail_skb = &(tmp_skb->next);
1623 skb->len += tmp_skb->len;
1624 skb->data_len += tmp_skb->len;
1625 skb->truesize += tmp_skb->truesize;
1626 tmp_skb->destructor = NULL;
1630 /* Allow local fragmentation. */
/* Branch body not visible; presumably sets skb->local_df when PMTU
 * discovery is not mandated -- confirm against the full source. */
1631 if (np->pmtudisc < IPV6_PMTUDISC_DO)
/* final_dst starts as the flow daddr; a routing header pushed below
 * (ipv6_push_nfrag_opts) may rewrite it. */
1634 ipv6_addr_copy(final_dst, &fl6->daddr);
1635 __skb_pull(skb, skb_network_header_len(skb));
/* Push fragmentable options first, then non-fragmentable ones, each
 * updating proto to chain the next-header values correctly. */
1636 if (opt && opt->opt_flen)
1637 ipv6_push_frag_opts(skb, opt, &proto);
1638 if (opt && opt->opt_nflen)
1639 ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);
1641 skb_push(skb, sizeof(struct ipv6hdr));
1642 skb_reset_network_header(skb);
1643 hdr = ipv6_hdr(skb);
/* First 32 bits of the IPv6 header: version 6, traffic class from the
 * cork, flow label from the flow (flowlabel is already big-endian). */
1645 *(__be32*)hdr = fl6->flowlabel |
1646 htonl(0x60000000 | ((int)np->cork.tclass << 20));
1648 hdr->hop_limit = np->cork.hop_limit;
1649 hdr->nexthdr = proto;
1650 ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
1651 ipv6_addr_copy(&hdr->daddr, final_dst);
1653 skb->priority = sk->sk_priority;
1654 skb->mark = sk->sk_mark;
/* Attach the cached route (extra ref; cork still holds its own until
 * ip6_cork_release below). */
1656 skb_dst_set(skb, dst_clone(&rt->dst));
1657 IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
/* ICMPv6 gets additional per-message-type output counters. */
1658 if (proto == IPPROTO_ICMPV6) {
1659 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1661 ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
1662 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
/* Transmit; net_xmit_errno() maps congestion codes to 0/-errno. */
1665 err = ip6_local_out(skb);
1668 err = net_xmit_errno(err);
1674 ip6_cork_release(inet, np);
/* Error path (label not visible): count the dropped packet. */
1677 IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
/*
 * ip6_flush_pending_frames - abort a corked send without transmitting.
 *
 * Drains sk->sk_write_queue, counting each dropped skb as an output
 * discard, then releases the cork state (options, route, flow).
 *
 * NOTE(review): lossy listing -- the loop body's kfree_skb() and the
 * closing braces are not visible here.
 */
1681 void ip6_flush_pending_frames(struct sock *sk)
1683 struct sk_buff *skb;
/* Pop from the tail until the pending queue is empty. */
1685 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
/* Each dropped skb is accounted against the egress device's MIB;
 * NOTE(review): the guard checking skb_dst(skb) before dereferencing
 * it is presumably on the elided line above this counter. */
1687 IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
1688 IPSTATS_MIB_OUTDISCARDS);
/* Free the cork state exactly as the successful push path does. */
1692 ip6_cork_release(inet_sk(sk), inet6_sk(sk));