inet: add IPv6 support to sk_ehashfn()
[pandora-kernel.git] / net / ipv6 / tcp_ipv6.c
1 /*
2  *      TCP over IPv6
3  *      Linux INET6 implementation
4  *
5  *      Authors:
6  *      Pedro Roque             <roque@di.fc.ul.pt>
7  *
8  *      Based on:
9  *      linux/net/ipv4/tcp.c
10  *      linux/net/ipv4/tcp_input.c
11  *      linux/net/ipv4/tcp_output.c
12  *
13  *      Fixes:
14  *      Hideaki YOSHIFUJI       :       sin6_scope_id support
15  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
16  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
17  *                                      a single port at the same time.
18  *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
19  *
20  *      This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/tcp_memcontrol.h>
65 #include <net/busy_poll.h>
66
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
69
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
72
73 static void     tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
74 static void     tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
75                                       struct request_sock *req);
76
77 static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78
79 static const struct inet_connection_sock_af_ops ipv6_mapped;
80 static const struct inet_connection_sock_af_ops ipv6_specific;
81 #ifdef CONFIG_TCP_MD5SIG
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
83 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
84 #else
/* Stub used when CONFIG_TCP_MD5SIG is disabled: no key can ever match. */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
90 #endif
91
/* Cache the inbound route on the socket for fast validation on later
 * receives.  Takes its own reference on the skb's dst and records the
 * incoming interface plus the fib6 node serial number as a cookie.
 */
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		/* sk->sk_rx_dst holds its own reference on the dst. */
		dst_hold(dst);
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		/* fn_sernum bumps whenever the routing tree changes; used
		 * later to detect that the cached route went stale.
		 */
		if (rt->rt6i_node)
			inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
	}
}
106
107 static void tcp_v6_hash(struct sock *sk)
108 {
109         if (sk->sk_state != TCP_CLOSE) {
110                 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
111                         tcp_prot.hash(sk);
112                         return;
113                 }
114                 local_bh_disable();
115                 __inet6_hash(sk, NULL);
116                 local_bh_enable();
117         }
118 }
119
120 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
121 {
122         return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
123                                             ipv6_hdr(skb)->saddr.s6_addr32,
124                                             tcp_hdr(skb)->dest,
125                                             tcp_hdr(skb)->source);
126 }
127
/*
 * Establish a TCP connection over IPv6 (or over IPv4 for a v4-mapped
 * destination address).
 *
 * Validates the sockaddr and flow label, resolves the scope id for
 * link-local destinations, diverts v4-mapped destinations to
 * tcp_v4_connect(), performs the route lookup, selects the source address,
 * binds a local port, picks the initial sequence number and sends the SYN.
 *
 * Returns 0 on success or a negative errno.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			/* A non-zero label must have been registered by the
			 * socket beforehand (IPV6_FLOWLABEL_MGR).
			 */
			if (flowlabel == NULL)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	/* Re-connecting to a different peer: drop cached timestamps so
	 * stale ts_recent state cannot affect the new connection.
	 */
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Switch the socket to the v4-mapped operations before
		 * handing off to the IPv4 connect path.
		 */
		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			/* Undo the v4-mapped switch on failure. */
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		/* Socket not bound to a source address: adopt the one the
		 * route lookup selected.
		 */
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	/* tw_recycle: pick up timestamps left by a recent TIME-WAIT socket
	 * toward the same destination.
	 */
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	ip6_set_txhash(sk);

	/* repair mode restores write_seq itself; don't clobber it. */
	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
317
318 static void tcp_v6_mtu_reduced(struct sock *sk)
319 {
320         struct dst_entry *dst;
321
322         if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
323                 return;
324
325         dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
326         if (!dst)
327                 return;
328
329         if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
330                 tcp_sync_mss(sk, dst_mtu(dst));
331                 tcp_simple_retransmit(sk);
332         }
333 }
334
/*
 * ICMPv6 error handler for TCP: route destination-unreachable,
 * packet-too-big, redirect etc. errors to the affected socket (or the
 * embryonic request sock they concern).
 *
 * @offset points at the embedded TCP header inside the returned payload.
 */
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		/* Nothing to report to a timewait minisocket. */
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* PMTU events are still handled below even when user context owns
	 * the socket; all other ICMPs are merely counted in that case.
	 */
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	/* min_hopcount filter (like IPv4's minttl) against spoofed ICMPs. */
	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	/* Discard errors quoting a sequence outside our send window. */
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			/* Defer the MTU update to release_sock(); the bit
			 * owner holds a socket reference until then.
			 */
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		/* Note : We use inet6_iif() here, not tcp_v6_iif() */
		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
472
473
/*
 * Build and transmit the SYN+ACK answering @req.  @dst may carry a
 * pre-resolved route; otherwise one is looked up here.
 *
 * Returns a net_xmit code (0 on success) or -ENOMEM.
 */
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		/* Flow label reflection: reuse the label from the original
		 * SYN when np->repflow is set and the SYN was kept.
		 */
		if (np->repflow && (ireq->pktopts != NULL))
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}
508
509
510 static void tcp_v6_reqsk_destructor(struct request_sock *req)
511 {
512         kfree_skb(inet_rsk(req)->pktopts);
513 }
514
515 #ifdef CONFIG_TCP_MD5SIG
/* Find the MD5 key configured for the IPv6 peer address @addr, if any. */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}
521
/* MD5 key lookup keyed by another socket's destination address. */
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}
527
/* MD5 key lookup keyed by a request sock's remote address. */
static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
}
533
/*
 * setsockopt(TCP_MD5SIG) handler: add or (for a zero key length) delete
 * the MD5 key for the peer given in cmd.tcpm_addr.  v4-mapped peers are
 * stored under AF_INET so they match v4 traffic.
 *
 * Returns 0 or a negative errno.
 */
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		/* Zero key length means "delete the key". */
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
567
/*
 * Feed the IPv6 TCP pseudo-header (saddr, daddr, length, protocol) into
 * the running MD5 hash for a segment of @nbytes bytes.  Returns the
 * crypto layer's status (0 on success).
 */
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
585
/*
 * Compute the TCP-MD5 signature over pseudo-header + TCP header only
 * (used for replies such as RST/ACK that carry no payload).  Writes the
 * 16-byte digest to @md5_hash; returns 0 on success, 1 on failure with
 * the digest zeroed.
 */
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	/* th->doff << 2 is the TCP header length in bytes. */
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
618
/*
 * Compute the TCP-MD5 signature over pseudo-header + TCP header + payload
 * of @skb.  Addresses come from @sk, else @req, else the skb's own IPv6
 * header.  Writes the 16-byte digest to @md5_hash; returns 0 on success,
 * 1 on failure with the digest zeroed.
 */
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &sk->sk_v6_daddr;
	} else if (req) {
		saddr = &inet_rsk(req)->ir_v6_loc_addr;
		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	/* Skip the TCP header; hash the payload that follows it. */
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
669
/*
 * Validate the TCP-MD5 option of an inbound segment against the key
 * configured for the peer.  Returns 0 to accept the segment, 1 to drop it
 * (key expected but absent, unexpected key present, or digest mismatch).
 */
static int __tcp_v6_inbound_md5_hash(struct sock *sk,
				     const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}
711
/* RCU-protected wrapper around __tcp_v6_inbound_md5_hash().
 * Returns nonzero when the segment must be dropped.
 */
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	int drop;

	rcu_read_lock();
	drop = __tcp_v6_inbound_md5_hash(sk, skb);
	rcu_read_unlock();

	return drop;
}
722
723 #endif
724
/*
 * Fill the IPv6-specific parts of a freshly allocated request sock from
 * the incoming SYN: peer/local addresses, the incoming interface for
 * link-local peers, and — when the listener wants any IPv6 rx options —
 * a held reference to the SYN skb itself.
 */
static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	/* Keep the SYN around (extra skb reference) if any rx option the
	 * listener subscribed to must be replayed to the child socket.
	 */
	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
	ireq->ireq_family = AF_INET6;
}
749
/* route_req hook: resolve a route for answering a SYN.  IPv6 always
 * reports strict routing to the caller.
 */
static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req);
}
758
/* Generic request-sock operations for the IPv6 TCP listen path. */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
768
/* AF-specific hooks used by the generic request-sock code for IPv6. */
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	/* Worst-case MSS: minimum IPv6 MTU minus fixed TCP+IPv6 headers. */
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
};
785
/*
 * Build and send a bare TCP control segment (RST when @rst, otherwise a
 * pure ACK) answering @skb, using the per-netns control socket.  Optional
 * timestamp and MD5 options are appended when @tsecr / @key are set.
 */
static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	/* A RST answering an ACK carries no ACK of its own. */
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	/* Options area starts right after the fixed header. */
	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	/* Reply flow: addresses swapped relative to the incoming skb. */
	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	/* Link-local (strict) destinations need an explicit interface. */
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
884
/* Send a RST in reply to @skb.  @sk may be NULL (no socket matched the
 * incoming segment); the reply is then routed purely from the headers
 * of @skb.  With TCP-MD5 enabled, the RST is only generated if the
 * offending segment's MD5 signature can be verified against a key we
 * actually hold.
 */
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	/* Never answer a RST with a RST (RFC 793). */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		/* RCU protects @key, which points into sk1's key list. */
		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		/* No RST unless the segment's signature verifies. */
		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	/* RFC 793 RST sequence selection: echo the peer's ACK as our seq,
	 * otherwise ACK everything the offending segment occupied.
	 */
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
955
956 static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
957                             u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
958                             struct tcp_md5sig_key *key, u8 tclass,
959                             u32 label)
960 {
961         tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
962                              tclass, label);
963 }
964
965 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
966 {
967         struct inet_timewait_sock *tw = inet_twsk(sk);
968         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
969
970         tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
971                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
972                         tcp_time_stamp + tcptw->tw_ts_offset,
973                         tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
974                         tw->tw_tclass, (tw->tw_flowlabel << 12));
975
976         inet_twsk_put(tw);
977 }
978
979 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
980                                   struct request_sock *req)
981 {
982         /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
983          * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
984          */
985         tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
986                         tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
987                         tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
988                         tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
989                         tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
990                         0, 0);
991 }
992
993
/* Handle a segment arriving on a listening socket: it may complete a
 * pending connection request, belong to an already established (or
 * TIME_WAIT) connection, or — with SYN cookies — be a cookie ACK.
 *
 * Returns the socket that should process @skb (bh-locked if it is an
 * established child found here), or NULL if the segment must be dropped.
 */
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	/* No pending request: the segment may target an already
	 * established connection with the same 4-tuple.
	 */
	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
					 tcp_v6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		/* TIME_WAIT: drop the reference the lookup took. */
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	/* Non-SYN with no matching request may carry a valid SYN cookie. */
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
1027
1028 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1029 {
1030         if (skb->protocol == htons(ETH_P_IP))
1031                 return tcp_v4_conn_request(sk, skb);
1032
1033         if (!ipv6_unicast_destination(skb))
1034                 goto drop;
1035
1036         return tcp_conn_request(&tcp6_request_sock_ops,
1037                                 &tcp_request_sock_ipv6_ops, sk, skb);
1038
1039 drop:
1040         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1041         return 0; /* don't send reset */
1042 }
1043
/* Build the child socket that completes a passive open.
 *
 * If @skb arrived as IPv4 (v4-mapped listener), the work is delegated
 * to tcp_v4_syn_recv_sock() and the result is patched up to carry the
 * IPv6 control structures; otherwise a native IPv6 child is created.
 *
 * Returns the new socket, or NULL on failure (accept queue overflow,
 * routing failure, allocation failure, or port inheritance failure).
 */
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		/* Inherit the listener's IPv6 state wholesale, then reset
		 * the per-socket fields that must not be shared with it.
		 */
		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		/* The child speaks IPv4 on the wire: use the mapped ops. */
		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	/* Addresses and interface come from the connection request. */
	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	ip6_set_txhash(newsk);

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	/* Extension headers consume payload space: account for them in MSS. */
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	/* IPv6 sockets carry no real IPv4 addresses: park on loopback. */
	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
1239
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 *
 * Returns 0 on success; non-zero tells the caller the segment
 * triggered an error path handled by sending a reset here.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			/* Invalidate the cached input route if the arrival
			 * interface changed or the dst fails revalidation.
			 */
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			/* NOTE(review): NAPI id is marked on the listener
			 * sk while the rxhash is saved on the child nsk —
			 * verify this asymmetry is intentional.
			 */
			sk_mark_napi_id(sk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			/* Latch new options; the old ones fall out of the
			 * xchg() and are freed below.
			 */
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
1383
1384 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1385                            const struct tcphdr *th)
1386 {
1387         /* This is tricky: we move IP6CB at its correct location into
1388          * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1389          * _decode_session6() uses IP6CB().
1390          * barrier() makes sure compiler won't play aliasing games.
1391          */
1392         memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1393                 sizeof(struct inet6_skb_parm));
1394         barrier();
1395
1396         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1397         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1398                                     skb->len - th->doff*4);
1399         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1400         TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1401         TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1402         TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1403         TCP_SKB_CB(skb)->sacked = 0;
1404 }
1405
/* Main receive routine for IPv6 TCP (protocol handler entry point).
 * Validates the segment, looks up the owning socket and dispatches to
 * the socket, its backlog, or the no-socket / TIME_WAIT paths.
 * Returns 0, or -1 when tcp_v6_do_rcv() reported an error.
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	/* doff is in 32-bit words; less than the base header is bogus. */
	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	/* Re-read header pointers: pskb_may_pull() may relocate data. */
	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
				inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	/* Enforce the socket's configured minimum hop limit. */
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	/* Must be after xfrm6_policy_check(): it overwrites IP6CB(). */
	tcp_v6_fill_cb(skb, hdr, th);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_incoming_cpu_update(sk);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (skb->len < (th->doff<<2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		/* New SYN hit TIME_WAIT: if a listener exists, retire the
		 * timewait socket and restart processing on the listener.
		 */
		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
1552
1553 static void tcp_v6_early_demux(struct sk_buff *skb)
1554 {
1555         const struct ipv6hdr *hdr;
1556         const struct tcphdr *th;
1557         struct sock *sk;
1558
1559         if (skb->pkt_type != PACKET_HOST)
1560                 return;
1561
1562         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1563                 return;
1564
1565         hdr = ipv6_hdr(skb);
1566         th = tcp_hdr(skb);
1567
1568         if (th->doff < sizeof(struct tcphdr) / 4)
1569                 return;
1570
1571         /* Note : We use inet6_iif() here, not tcp_v6_iif() */
1572         sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1573                                         &hdr->saddr, th->source,
1574                                         &hdr->daddr, ntohs(th->dest),
1575                                         inet6_iif(skb));
1576         if (sk) {
1577                 skb->sk = sk;
1578                 skb->destructor = sock_edemux;
1579                 if (sk_fullsock(sk)) {
1580                         struct dst_entry *dst = sk->sk_rx_dst;
1581
1582                         if (dst)
1583                                 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1584                         if (dst &&
1585                             inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1586                                 skb_dst_set_noref(skb, dst);
1587                 }
1588         }
1589 }
1590
/* TIME_WAIT socket glue for TCPv6. */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
1596
/* Address-family-specific callbacks for native IPv6 TCP sockets. */
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header    = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len    = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
1617
#ifdef CONFIG_TCP_MD5SIG
/* TCP MD5 signature option helpers for native IPv6 sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1625
/*
 *	TCP over IPv4 via INET6 API
 *
 *	Used for v4-mapped children: IPv4 transmit path, IPv6 socket API.
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header    = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len    = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
1648
#ifdef CONFIG_TCP_MD5SIG
/* TCP MD5 signature option helpers for v4-mapped sockets: IPv4 hashing,
 * IPv6 sockopt parsing.
 */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1656
1657 /* NOTE: A lot of things set to zero explicitly by call to
1658  *       sk_alloc() so need not be done here.
1659  */
1660 static int tcp_v6_init_sock(struct sock *sk)
1661 {
1662         struct inet_connection_sock *icsk = inet_csk(sk);
1663
1664         tcp_init_sock(sk);
1665
1666         icsk->icsk_af_ops = &ipv6_specific;
1667
1668 #ifdef CONFIG_TCP_MD5SIG
1669         tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1670 #endif
1671
1672         return 0;
1673 }
1674
/* Socket destructor: common TCP/IPv4 teardown first, then release the
 * IPv6-specific socket state.
 */
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1680
1681 #ifdef CONFIG_PROC_FS
1682 /* Proc filesystem TCPv6 sock list dumping. */
/* Dump one SYN_RECV request socket as a /proc/net/tcp6 line.
 * The positional column layout must stay in lockstep with
 * get_tcp6_sock()/get_timewait6_sock() and with the header printed by
 * tcp6_seq_show(), since userspace tools parse these lines by column.
 */
static void get_openreq6(struct seq_file *seq,
			 struct request_sock *req, int i, kuid_t uid)
{
	/* Remaining lifetime of the request in jiffies, clamped below
	 * so the dump never shows a negative countdown.
	 */
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
1713
/* Dump one full TCP socket (any state other than TIME_WAIT) as a
 * /proc/net/tcp6 line; column layout matches the header printed by
 * tcp6_seq_show() and is parsed positionally by userspace.
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	/* Encode which timer is pending for the "tr" column:
	 * 1 = retransmit, 4 = zero-window probe, 2 = sk_timer
	 * (NOTE(review): presumably keepalive -- confirm), 0 = none
	 * (in which case "tm->when" reads as zero, see the delta below).
	 */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	/* Last column: fastopen max queue length for listeners (fastopenq
	 * is only dereferenced in the TCP_LISTEN case), otherwise the
	 * slow-start threshold, with -1 meaning still in initial
	 * slow start.
	 */
	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   sp->sk_state == TCP_LISTEN ?
			(fastopenq ? fastopenq->max_qlen : 0) :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
1771
/* Dump one TIME_WAIT mini-socket as a /proc/net/tcp6 line.  Columns
 * that only make sense for a full socket (queues, retransmits, uid,
 * inode) are printed as constant zeros; the timer column is hardwired
 * to 3 (time-wait timer) with the remaining lifetime as its value.
 */
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	/* Remaining time-wait lifetime; may be negative if already due. */
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
1796
1797 static int tcp6_seq_show(struct seq_file *seq, void *v)
1798 {
1799         struct tcp_iter_state *st;
1800         struct sock *sk = v;
1801
1802         if (v == SEQ_START_TOKEN) {
1803                 seq_puts(seq,
1804                          "  sl  "
1805                          "local_address                         "
1806                          "remote_address                        "
1807                          "st tx_queue rx_queue tr tm->when retrnsmt"
1808                          "   uid  timeout inode\n");
1809                 goto out;
1810         }
1811         st = seq->private;
1812
1813         switch (st->state) {
1814         case TCP_SEQ_STATE_LISTENING:
1815         case TCP_SEQ_STATE_ESTABLISHED:
1816                 if (sk->sk_state == TCP_TIME_WAIT)
1817                         get_timewait6_sock(seq, v, st->num);
1818                 else
1819                         get_tcp6_sock(seq, v, st->num);
1820                 break;
1821         case TCP_SEQ_STATE_OPENREQ:
1822                 get_openreq6(seq, v, st->num, st->uid);
1823                 break;
1824         }
1825 out:
1826         return 0;
1827 }
1828
/* file_operations for /proc/net/tcp6; iteration itself is handled by
 * the shared TCP seq_file code via tcp_seq_open().
 */
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};
1836
/* AF_INET6 descriptor handed to the shared TCP proc code: names the
 * /proc/net/tcp6 entry and plugs in the show() callback above; the
 * rest of seq_ops is filled in by the common code.
 */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
1845
/* Register /proc/net/tcp6 for a network namespace.
 * Returns 0 on success or a negative errno from tcp_proc_register().
 */
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}
1850
/* Remove /proc/net/tcp6 from a network namespace; counterpart of
 * tcp6_proc_init().
 */
void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
1855 #endif
1856
/* prot->clear_sk hook: zero a socket for reuse while leaving the
 * pinet6 pointer intact.  The slab uses SLAB_DESTROY_BY_RCU (see
 * tcpv6_prot), so a concurrent RCU lookup may still dereference
 * pinet6 after this socket is recycled; the bytes before and after
 * pinet6 are therefore cleared in two separate steps.
 */
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	/* Clear everything from just past pinet6 to the end of the
	 * object (size starts as the full object size).
	 */
	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}
1867
/* struct proto for AF_INET6 / SOCK_STREAM sockets.  Address-family
 * specific entry points (connect, backlog_rcv, hash, init/destroy,
 * clear_sk) use the v6 variants defined in this file; everything
 * address-family agnostic is shared with IPv4 TCP.  SLAB_DESTROY_BY_RCU
 * together with the clear_sk hook keeps pinet6 valid for concurrent
 * RCU lookups of recycled sockets.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};
1914
/* Input-path handler registered for IPPROTO_TCP in the IPv6 stack
 * (see tcpv6_init()).  NOPOLICY/FINAL flags: NOTE(review) presumably
 * xfrm policy checks are done elsewhere and no further protocol can
 * follow -- confirm against inet6_add_protocol() docs.
 */
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
1921
/* socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP) switch entry: maps the
 * stream socket type onto tcpv6_prot and the generic inet6 stream ops.
 * PERMANENT: cannot be unregistered by modules at runtime;
 * ICSK: marks this as a connection-oriented (inet_connection_sock)
 * protocol.
 */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
1930
/* Per-network-namespace init: create the kernel-internal IPv6 TCP
 * control socket (net->ipv6.tcp_sk).  NOTE(review): presumably used
 * for stack-generated segments (e.g. RSTs) not tied to a user socket
 * -- confirm against its users elsewhere in this file.
 */
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}
1936
/* Per-network-namespace teardown: destroy the control socket created
 * by tcpv6_net_init().
 */
static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
1941
/* Batched namespace teardown: purge all IPv6 time-wait sockets from
 * the global hash for the exiting namespaces in one pass.
 */
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}
1946
/* pernet registration glue tying the init/exit/exit_batch callbacks
 * above to network-namespace lifetime.
 */
static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
1952
1953 int __init tcpv6_init(void)
1954 {
1955         int ret;
1956
1957         ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1958         if (ret)
1959                 goto out;
1960
1961         /* register inet6 protocol */
1962         ret = inet6_register_protosw(&tcpv6_protosw);
1963         if (ret)
1964                 goto out_tcpv6_protocol;
1965
1966         ret = register_pernet_subsys(&tcpv6_net_ops);
1967         if (ret)
1968                 goto out_tcpv6_protosw;
1969 out:
1970         return ret;
1971
1972 out_tcpv6_protosw:
1973         inet6_unregister_protosw(&tcpv6_protosw);
1974 out_tcpv6_protocol:
1975         inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1976         goto out;
1977 }
1978
/* Module teardown: undo tcpv6_init() in reverse registration order. */
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}