tcp: md5: get rid of tcp_v[46]_reqsk_md5_lookup()
[pandora-kernel.git] net/ipv4/tcp_ipv4.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              Implementation of the Transmission Control Protocol(TCP).
7  *
8  *              IPv4 specific functions
9  *
10  *
11  *              code split from:
12  *              linux/ipv4/tcp.c
13  *              linux/ipv4/tcp_input.c
14  *              linux/ipv4/tcp_output.c
15  *
16  *              See tcp.c for author information
17  *
18  *      This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23
24 /*
25  * Changes:
26  *              David S. Miller :       New socket lookup architecture.
27  *                                      This code is dedicated to John Dyson.
28  *              David S. Miller :       Change semantics of established hash,
29  *                                      half is devoted to TIME_WAIT sockets
30  *                                      and the rest go in the other half.
31  *              Andi Kleen :            Add support for syncookies and fixed
32  *                                      some bugs: ip options weren't passed to
33  *                                      the TCP layer, missed a check for an
34  *                                      ACK bit.
35  *              Andi Kleen :            Implemented fast path mtu discovery.
36  *                                      Fixed many serious bugs in the
37  *                                      request_sock handling and moved
38  *                                      most of it into the af independent code.
39  *                                      Added tail drop and some other bugfixes.
40  *                                      Added new listen semantics.
41  *              Mike McLagan    :       Routing by source
42  *      Juan Jose Ciarlante:            ip_dynaddr bits
43  *              Andi Kleen:             various fixes.
44  *      Vitaly E. Lavrov        :       Transparent proxy revived after year
45  *                                      coma.
46  *      Andi Kleen              :       Fix new listen.
47  *      Andi Kleen              :       Fix accept error reporting.
48  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
49  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
50  *                                      a single port at the same time.
51  */
52
53 #define pr_fmt(fmt) "TCP: " fmt
54
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/tcp_memcontrol.h>
77 #include <net/busy_poll.h>
78
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91
92 #ifdef CONFIG_TCP_MD5SIG
93 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94                                __be32 daddr, __be32 saddr, const struct tcphdr *th);
95 #endif
96
97 struct inet_hashinfo tcp_hashinfo;
98 EXPORT_SYMBOL(tcp_hashinfo);
99
100 static  __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
101 {
102         return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
103                                           ip_hdr(skb)->saddr,
104                                           tcp_hdr(skb)->dest,
105                                           tcp_hdr(skb)->source);
106 }
107
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
109 {
110         const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
111         struct tcp_sock *tp = tcp_sk(sk);
112
113         /* With PAWS, it is safe from the viewpoint
114            of data integrity. Even without PAWS it is safe provided sequence
115            spaces do not overlap i.e. at data rates <= 80Mbit/sec.
116
117            Actually, the idea is close to VJ's one, only timestamp cache is
118            held not per host, but per port pair and TW bucket is used as state
119            holder.
120
121            If TW bucket has been already destroyed we fall back to VJ's scheme
122            and use initial timestamp retrieved from peer table.
123          */
124         if (tcptw->tw_ts_recent_stamp &&
125             (twp == NULL || (sysctl_tcp_tw_reuse &&
126                              get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
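                /* Pick the new write_seq past the old connection's snd_nxt
                 * plus the maximum possible window (65535), so segments of
                 * the new connection cannot be mistaken for old duplicates.
                 */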
127                 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
128                 if (tp->write_seq == 0)
129                         tp->write_seq = 1;
130                 tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
131                 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
132                 sock_hold(sktw);
133                 return 1;
134         }
135
136         return 0;
137 }
138 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
139
140 /* This will initiate an outgoing connection. */
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
142 {
143         struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
144         struct inet_sock *inet = inet_sk(sk);
145         struct tcp_sock *tp = tcp_sk(sk);
146         __be16 orig_sport, orig_dport;
147         __be32 daddr, nexthop;
148         struct flowi4 *fl4;
149         struct rtable *rt;
150         int err;
151         struct ip_options_rcu *inet_opt;
152
153         if (addr_len < sizeof(struct sockaddr_in))
154                 return -EINVAL;
155
156         if (usin->sin_family != AF_INET)
157                 return -EAFNOSUPPORT;
158
159         nexthop = daddr = usin->sin_addr.s_addr;
160         inet_opt = rcu_dereference_protected(inet->inet_opt,
161                                              sock_owned_by_user(sk));
162         if (inet_opt && inet_opt->opt.srr) {
163                 if (!daddr)
164                         return -EINVAL;
165                 nexthop = inet_opt->opt.faddr;
166         }
167
168         orig_sport = inet->inet_sport;
169         orig_dport = usin->sin_port;
170         fl4 = &inet->cork.fl.u.ip4;
171         rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
172                               RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
173                               IPPROTO_TCP,
174                               orig_sport, orig_dport, sk);
175         if (IS_ERR(rt)) {
176                 err = PTR_ERR(rt);
177                 if (err == -ENETUNREACH)
178                         IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
179                 return err;
180         }
181
182         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
183                 ip_rt_put(rt);
184                 return -ENETUNREACH;
185         }
186
187         if (!inet_opt || !inet_opt->opt.srr)
188                 daddr = fl4->daddr;
189
190         if (!inet->inet_saddr)
191                 inet->inet_saddr = fl4->saddr;
192         sk_rcv_saddr_set(sk, inet->inet_saddr);
193
194         if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
195                 /* Reset inherited state */
196                 tp->rx_opt.ts_recent       = 0;
197                 tp->rx_opt.ts_recent_stamp = 0;
198                 if (likely(!tp->repair))
199                         tp->write_seq      = 0;
200         }
201
202         if (tcp_death_row.sysctl_tw_recycle &&
203             !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
204                 tcp_fetch_timewait_stamp(sk, &rt->dst);
205
206         inet->inet_dport = usin->sin_port;
207         sk_daddr_set(sk, daddr);
208
209         inet_csk(sk)->icsk_ext_hdr_len = 0;
210         if (inet_opt)
211                 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
212
213         tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
214
215         /* Socket identity is still unknown (sport may be zero).
216          * However we set state to SYN-SENT and, without releasing the socket
217          * lock, select a source port, enter ourselves into the hash tables and
218          * complete initialization after this.
219          */
220         tcp_set_state(sk, TCP_SYN_SENT);
221         err = inet_hash_connect(&tcp_death_row, sk);
222         if (err)
223                 goto failure;
224
225         inet_set_txhash(sk);
226
227         rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228                                inet->inet_sport, inet->inet_dport, sk);
229         if (IS_ERR(rt)) {
230                 err = PTR_ERR(rt);
231                 rt = NULL;
232                 goto failure;
233         }
234         /* OK, now commit destination to socket.  */
235         sk->sk_gso_type = SKB_GSO_TCPV4;
236         sk_setup_caps(sk, &rt->dst);
237
238         if (!tp->write_seq && likely(!tp->repair))
239                 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
240                                                            inet->inet_daddr,
241                                                            inet->inet_sport,
242                                                            usin->sin_port);
243
244         inet->inet_id = tp->write_seq ^ jiffies;
245
246         err = tcp_connect(sk);
247
248         rt = NULL;
249         if (err)
250                 goto failure;
251
252         return 0;
253
254 failure:
255         /*
256          * This unhashes the socket and releases the local port,
257          * if necessary.
258          */
259         tcp_set_state(sk, TCP_CLOSE);
260         ip_rt_put(rt);
261         sk->sk_route_caps = 0;
262         inet->inet_dport = 0;
263         return err;
264 }
265 EXPORT_SYMBOL(tcp_v4_connect);
266
267 /*
268  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
269  * It can be called through tcp_release_cb() if socket was owned by user
270  * at the time tcp_v4_err() was called to handle ICMP message.
271  */
272 void tcp_v4_mtu_reduced(struct sock *sk)
273 {
274         struct dst_entry *dst;
275         struct inet_sock *inet = inet_sk(sk);
276         u32 mtu = tcp_sk(sk)->mtu_info;
277
278         dst = inet_csk_update_pmtu(sk, mtu);
279         if (!dst)
280                 return;
281
282         /* Something is about to go wrong... Remember the soft error
283          * in case this connection is not able to recover.
284          */
285         if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
286                 sk->sk_err_soft = EMSGSIZE;
287
288         mtu = dst_mtu(dst);
289
290         if (inet->pmtudisc != IP_PMTUDISC_DONT &&
291             ip_sk_accept_pmtu(sk) &&
292             inet_csk(sk)->icsk_pmtu_cookie > mtu) {
293                 tcp_sync_mss(sk, mtu);
294
295                 /* Resend the TCP packet because it's
296                  * clear that the old packet has been
297                  * dropped. This is the new "fast" path mtu
298                  * discovery.
299                  */
300                 tcp_simple_retransmit(sk);
301         } /* else let the usual retransmit timer handle it */
302 }
303 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
304
305 static void do_redirect(struct sk_buff *skb, struct sock *sk)
306 {
307         struct dst_entry *dst = __sk_dst_check(sk, 0);
308
309         if (dst)
310                 dst->ops->redirect(dst, sk, skb);
311 }
312
313
314 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
315 void tcp_req_err(struct sock *sk, u32 seq)
316 {
317         struct request_sock *req = inet_reqsk(sk);
318         struct net *net = sock_net(sk);
319
320         /* ICMPs are not backlogged, hence we cannot get
321          * an established socket here.
322          */
323         WARN_ON(req->sk);
324
325         if (seq != tcp_rsk(req)->snt_isn) {
326                 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
327                 reqsk_put(req);
328         } else {
329                 /*
330                  * Still in SYN_RECV, just remove it silently.
331                  * There is no good way to pass the error to the newly
332                  * created socket, and POSIX does not want network
333                  * errors returned from accept().
334                  */
335                 NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
336                 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
337         }
338 }
339 EXPORT_SYMBOL(tcp_req_err);
340
341 /*
342  * This routine is called by the ICMP module when it gets some
343  * sort of error condition.  If err < 0 then the socket should
344  * be closed and the error returned to the user.  If err > 0
345  * it's just the icmp type << 8 | icmp code.  After adjustment
346  * header points to the first 8 bytes of the tcp header.  We need
347  * to find the appropriate port.
348  *
349  * The locking strategy used here is very "optimistic". When
350  * someone else accesses the socket the ICMP is just dropped
351  * and for some paths there is no check at all.
352  * A more general error queue to queue errors for later handling
353  * is probably better.
354  *
355  */
356
357 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
358 {
359         const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
360         struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
361         struct inet_connection_sock *icsk;
362         struct tcp_sock *tp;
363         struct inet_sock *inet;
364         const int type = icmp_hdr(icmp_skb)->type;
365         const int code = icmp_hdr(icmp_skb)->code;
366         struct sock *sk;
367         struct sk_buff *skb;
368         struct request_sock *fastopen;
369         __u32 seq, snd_una;
370         __u32 remaining;
371         int err;
372         struct net *net = dev_net(icmp_skb->dev);
373
374         sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
375                                        th->dest, iph->saddr, ntohs(th->source),
376                                        inet_iif(icmp_skb));
377         if (!sk) {
378                 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
379                 return;
380         }
381         if (sk->sk_state == TCP_TIME_WAIT) {
382                 inet_twsk_put(inet_twsk(sk));
383                 return;
384         }
385         seq = ntohl(th->seq);
386         if (sk->sk_state == TCP_NEW_SYN_RECV)
387                 return tcp_req_err(sk, seq);
388
389         bh_lock_sock(sk);
390         /* If too many ICMPs get dropped on busy
391          * servers this needs to be solved differently.
392          * We do take care of PMTU discovery (RFC1191) special case :
393          * we can receive locally generated ICMP messages while socket is held.
394          */
395         if (sock_owned_by_user(sk)) {
396                 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
397                         NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
398         }
399         if (sk->sk_state == TCP_CLOSE)
400                 goto out;
401
402         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
403                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
404                 goto out;
405         }
406
407         icsk = inet_csk(sk);
408         tp = tcp_sk(sk);
409         /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
410         fastopen = tp->fastopen_rsk;
411         snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
412         if (sk->sk_state != TCP_LISTEN &&
413             !between(seq, snd_una, tp->snd_nxt)) {
414                 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
415                 goto out;
416         }
417
418         switch (type) {
419         case ICMP_REDIRECT:
420                 do_redirect(icmp_skb, sk);
421                 goto out;
422         case ICMP_SOURCE_QUENCH:
423                 /* Just silently ignore these. */
424                 goto out;
425         case ICMP_PARAMETERPROB:
426                 err = EPROTO;
427                 break;
428         case ICMP_DEST_UNREACH:
429                 if (code > NR_ICMP_UNREACH)
430                         goto out;
431
432                 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
433                         /* We are not interested in TCP_LISTEN and open_requests
434                          * (SYN-ACKs sent out by Linux are always < 576 bytes so
435                          * they should go through unfragmented).
436                          */
437                         if (sk->sk_state == TCP_LISTEN)
438                                 goto out;
439
440                         tp->mtu_info = info;
441                         if (!sock_owned_by_user(sk)) {
442                                 tcp_v4_mtu_reduced(sk);
443                         } else {
444                                 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
445                                         sock_hold(sk);
446                         }
447                         goto out;
448                 }
449
450                 err = icmp_err_convert[code].errno;
451                 /* check if icmp_skb allows revert of backoff
452                  * (see draft-zimmermann-tcp-lcd) */
453                 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
454                         break;
455                 if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
456                     !icsk->icsk_backoff || fastopen)
457                         break;
458
459                 if (sock_owned_by_user(sk))
460                         break;
461
462                 icsk->icsk_backoff--;
463                 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
464                                                TCP_TIMEOUT_INIT;
465                 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
466
467                 skb = tcp_write_queue_head(sk);
468                 BUG_ON(!skb);
469
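                /* One backoff step was undone above; re-arm the retransmit
                 * timer with whatever is left of the shortened RTO, measured
                 * from when the head of the write queue was last sent.
                 */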
470                 remaining = icsk->icsk_rto -
471                             min(icsk->icsk_rto,
472                                 tcp_time_stamp - tcp_skb_timestamp(skb));
473
474                 if (remaining) {
475                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
476                                                   remaining, TCP_RTO_MAX);
477                 } else {
478                         /* RTO revert clocked out retransmission.
479                          * Will retransmit now */
480                         tcp_retransmit_timer(sk);
481                 }
482
483                 break;
484         case ICMP_TIME_EXCEEDED:
485                 err = EHOSTUNREACH;
486                 break;
487         default:
488                 goto out;
489         }
490
491         switch (sk->sk_state) {
492         case TCP_SYN_SENT:
493         case TCP_SYN_RECV:
494                 /* Only in fast or simultaneous open. If a fast open socket is
495                  * already accepted it is treated as a connected one below.
496                  */
497                 if (fastopen && fastopen->sk == NULL)
498                         break;
499
500                 if (!sock_owned_by_user(sk)) {
501                         sk->sk_err = err;
502
503                         sk->sk_error_report(sk);
504
505                         tcp_done(sk);
506                 } else {
507                         sk->sk_err_soft = err;
508                 }
509                 goto out;
510         }
511
512         /* If we've already connected we will keep trying
513          * until we time out, or the user gives up.
514          *
515          * rfc1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH to be
516          * considered hard errors (well, FRAG_FAILED too,
517          * but it is obsoleted by pmtu discovery).
518          *
519          * Note that in the modern internet, where routing is unreliable
520          * and broken firewalls sit in every dark corner, sending random
521          * errors as ordered by their masters, even these two messages finally
522          * lose their original sense (even Linux sends invalid PORT_UNREACHs)
523          *
524          * Now we are in compliance with RFCs.
525          *                                                      --ANK (980905)
526          */
527
528         inet = inet_sk(sk);
529         if (!sock_owned_by_user(sk) && inet->recverr) {
530                 sk->sk_err = err;
531                 sk->sk_error_report(sk);
532         } else  { /* Only an error on timeout */
533                 sk->sk_err_soft = err;
534         }
535
536 out:
537         bh_unlock_sock(sk);
538         sock_put(sk);
539 }
540
541 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
542 {
543         struct tcphdr *th = tcp_hdr(skb);
544
545         if (skb->ip_summed == CHECKSUM_PARTIAL) {
546                 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
547                 skb->csum_start = skb_transport_header(skb) - skb->head;
548                 skb->csum_offset = offsetof(struct tcphdr, check);
549         } else {
550                 th->check = tcp_v4_check(skb->len, saddr, daddr,
551                                          csum_partial(th,
552                                                       th->doff << 2,
553                                                       skb->csum));
554         }
555 }
556
557 /* This routine computes an IPv4 TCP checksum. */
558 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
559 {
560         const struct inet_sock *inet = inet_sk(sk);
561
562         __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
563 }
564 EXPORT_SYMBOL(tcp_v4_send_check);
565
566 /*
567  *      This routine will send an RST to the other tcp.
568  *
569  *      Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
570  *                    for reset.
571  *      Answer: if a packet caused an RST, it is not for a socket
572  *              existing in our system; if it is matched to a socket,
573  *              it is just a duplicate segment or a bug in the other side's TCP.
574  *              So we build the reply based only on the parameters
575  *              that arrived with the segment.
576  *      Exception: precedence violation. We do not implement it in any case.
577  */
578
579 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
580 {
581         const struct tcphdr *th = tcp_hdr(skb);
582         struct {
583                 struct tcphdr th;
584 #ifdef CONFIG_TCP_MD5SIG
585                 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
586 #endif
587         } rep;
588         struct ip_reply_arg arg;
589 #ifdef CONFIG_TCP_MD5SIG
590         struct tcp_md5sig_key *key;
591         const __u8 *hash_location = NULL;
592         unsigned char newhash[16];
593         int genhash;
594         struct sock *sk1 = NULL;
595 #endif
596         struct net *net;
597
598         /* Never send a reset in response to a reset. */
599         if (th->rst)
600                 return;
601
602         /* If sk is not NULL, it means we did a successful lookup and the
603          * incoming route had to be correct. prequeue might have dropped our dst.
604          */
605         if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
606                 return;
607
608         /* Swap the send and the receive. */
609         memset(&rep, 0, sizeof(rep));
610         rep.th.dest   = th->source;
611         rep.th.source = th->dest;
612         rep.th.doff   = sizeof(struct tcphdr) / 4;
613         rep.th.rst    = 1;
614
615         if (th->ack) {
616                 rep.th.seq = th->ack_seq;
617         } else {
618                 rep.th.ack = 1;
619                 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
620                                        skb->len - (th->doff << 2));
621         }
622
623         memset(&arg, 0, sizeof(arg));
624         arg.iov[0].iov_base = (unsigned char *)&rep;
625         arg.iov[0].iov_len  = sizeof(rep.th);
626
627         net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
628 #ifdef CONFIG_TCP_MD5SIG
629         hash_location = tcp_parse_md5sig_option(th);
630         if (!sk && hash_location) {
631                 /*
632                  * Active side is lost. Try to find the listening socket through
633                  * the source port, and then find the md5 key through the listening
634                  * socket. We do not lose security here:
635                  * the incoming packet is checked against the md5 hash of the found
636                  * key, and no RST is generated if the md5 hash doesn't match.
637                  */
638                 sk1 = __inet_lookup_listener(net,
639                                              &tcp_hashinfo, ip_hdr(skb)->saddr,
640                                              th->source, ip_hdr(skb)->daddr,
641                                              ntohs(th->source), inet_iif(skb));
642                 /* don't send rst if it can't find key */
643                 if (!sk1)
644                         return;
645                 rcu_read_lock();
646                 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
647                                         &ip_hdr(skb)->saddr, AF_INET);
648                 if (!key)
649                         goto release_sk1;
650
651                 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
652                 if (genhash || memcmp(hash_location, newhash, 16) != 0)
653                         goto release_sk1;
654         } else {
655                 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
656                                              &ip_hdr(skb)->saddr,
657                                              AF_INET) : NULL;
658         }
659
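        /* If a matching key was found, append the MD5 signature option to the
         * RST: two NOPs for alignment, the kind/length bytes, then the 16-byte
         * digest computed over the pseudo-header, the TCP header and the key.
         */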
660         if (key) {
661                 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
662                                    (TCPOPT_NOP << 16) |
663                                    (TCPOPT_MD5SIG << 8) |
664                                    TCPOLEN_MD5SIG);
665                 /* Update length and the length the header thinks exists */
666                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
667                 rep.th.doff = arg.iov[0].iov_len / 4;
668
669                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
670                                      key, ip_hdr(skb)->saddr,
671                                      ip_hdr(skb)->daddr, &rep.th);
672         }
673 #endif
674         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
675                                       ip_hdr(skb)->saddr, /* XXX */
676                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
677         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
678         arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
679         /* When socket is gone, all binding information is lost.
680          * Routing might fail in this case. No choice here: if we choose to force
681          * the input interface, we will misroute in case of an asymmetric route.
682          */
683         if (sk)
684                 arg.bound_dev_if = sk->sk_bound_dev_if;
685
686         arg.tos = ip_hdr(skb)->tos;
687         ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
688                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
689                               ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
690                               &arg, arg.iov[0].iov_len);
691
692         TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
693         TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
694
695 #ifdef CONFIG_TCP_MD5SIG
696 release_sk1:
697         if (sk1) {
698                 rcu_read_unlock();
699                 sock_put(sk1);
700         }
701 #endif
702 }
703
704 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
705    outside socket context, is certainly ugly. What can I do?
706  */
707
708 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
709                             u32 win, u32 tsval, u32 tsecr, int oif,
710                             struct tcp_md5sig_key *key,
711                             int reply_flags, u8 tos)
712 {
713         const struct tcphdr *th = tcp_hdr(skb);
714         struct {
715                 struct tcphdr th;
716                 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
717 #ifdef CONFIG_TCP_MD5SIG
718                            + (TCPOLEN_MD5SIG_ALIGNED >> 2)
719 #endif
720                         ];
721         } rep;
722         struct ip_reply_arg arg;
723         struct net *net = dev_net(skb_dst(skb)->dev);
724
725         memset(&rep.th, 0, sizeof(struct tcphdr));
726         memset(&arg, 0, sizeof(arg));
727
728         arg.iov[0].iov_base = (unsigned char *)&rep;
729         arg.iov[0].iov_len  = sizeof(rep.th);
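        /* When echoing a timestamp, lay out the option as two NOPs followed
         * by kind/length and the 32-bit TSval and TSecr words, keeping the
         * header length 4-byte aligned.
         */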
730         if (tsecr) {
731                 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
732                                    (TCPOPT_TIMESTAMP << 8) |
733                                    TCPOLEN_TIMESTAMP);
734                 rep.opt[1] = htonl(tsval);
735                 rep.opt[2] = htonl(tsecr);
736                 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
737         }
738
739         /* Swap the send and the receive. */
740         rep.th.dest    = th->source;
741         rep.th.source  = th->dest;
742         rep.th.doff    = arg.iov[0].iov_len / 4;
743         rep.th.seq     = htonl(seq);
744         rep.th.ack_seq = htonl(ack);
745         rep.th.ack     = 1;
746         rep.th.window  = htons(win);
747
748 #ifdef CONFIG_TCP_MD5SIG
749         if (key) {
750                 int offset = (tsecr) ? 3 : 0;
751
752                 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
753                                           (TCPOPT_NOP << 16) |
754                                           (TCPOPT_MD5SIG << 8) |
755                                           TCPOLEN_MD5SIG);
756                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
757                 rep.th.doff = arg.iov[0].iov_len/4;
758
759                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
760                                     key, ip_hdr(skb)->saddr,
761                                     ip_hdr(skb)->daddr, &rep.th);
762         }
763 #endif
764         arg.flags = reply_flags;
765         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
766                                       ip_hdr(skb)->saddr, /* XXX */
767                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
768         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
769         if (oif)
770                 arg.bound_dev_if = oif;
771         arg.tos = tos;
772         ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
773                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
774                               ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
775                               &arg, arg.iov[0].iov_len);
776
777         TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
778 }
779
780 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
781 {
782         struct inet_timewait_sock *tw = inet_twsk(sk);
783         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
784
785         tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
786                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
787                         tcp_time_stamp + tcptw->tw_ts_offset,
788                         tcptw->tw_ts_recent,
789                         tw->tw_bound_dev_if,
790                         tcp_twsk_md5_key(tcptw),
791                         tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
792                         tw->tw_tos
793                         );
794
795         inet_twsk_put(tw);
796 }
797
798 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
799                                   struct request_sock *req)
800 {
801         /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
802          * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
803          */
804         tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
805                         tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
806                         tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
807                         tcp_time_stamp,
808                         req->ts_recent,
809                         0,
810                         tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
811                                           AF_INET),
812                         inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
813                         ip_hdr(skb)->tos);
814 }
815
816 /*
817  *      Send a SYN-ACK after having received a SYN.
818  *      This still operates on a request_sock only, not on a big
819  *      socket.
820  */
821 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
822                               struct flowi *fl,
823                               struct request_sock *req,
824                               u16 queue_mapping,
825                               struct tcp_fastopen_cookie *foc)
826 {
827         const struct inet_request_sock *ireq = inet_rsk(req);
828         struct flowi4 fl4;
829         int err = -1;
830         struct sk_buff *skb;
831
832         /* First, grab a route. */
833         if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
834                 return -1;
835
836         skb = tcp_make_synack(sk, dst, req, foc);
837
838         if (skb) {
839                 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
840
841                 skb_set_queue_mapping(skb, queue_mapping);
842                 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
843                                             ireq->ir_rmt_addr,
844                                             ireq->opt);
845                 err = net_xmit_eval(err);
846         }
847
848         return err;
849 }
850
851 /*
852  *      IPv4 request_sock destructor.
853  */
854 static void tcp_v4_reqsk_destructor(struct request_sock *req)
855 {
856         kfree(inet_rsk(req)->opt);
857 }
858
859 /*
860  * Return true if a syncookie should be sent
861  */
862 bool tcp_syn_flood_action(struct sock *sk,
863                          const struct sk_buff *skb,
864                          const char *proto)
865 {
866         const char *msg = "Dropping request";
867         bool want_cookie = false;
868         struct listen_sock *lopt;
869
870 #ifdef CONFIG_SYN_COOKIES
871         if (sysctl_tcp_syncookies) {
872                 msg = "Sending cookies";
873                 want_cookie = true;
874                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
875         } else
876 #endif
877                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
878
879         lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
880         if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
881                 lopt->synflood_warned = 1;
882                 pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
883                         proto, ntohs(tcp_hdr(skb)->dest), msg);
884         }
885         return want_cookie;
886 }
887 EXPORT_SYMBOL(tcp_syn_flood_action);
888
889 #ifdef CONFIG_TCP_MD5SIG
890 /*
891  * RFC2385 MD5 checksumming requires a mapping of
892  * IP address->MD5 Key.
893  * We need to maintain these in the sk structure.
894  */
895
896 /* Find the Key structure for an address.  */
897 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
898                                          const union tcp_md5_addr *addr,
899                                          int family)
900 {
901         const struct tcp_sock *tp = tcp_sk(sk);
902         struct tcp_md5sig_key *key;
903         unsigned int size = sizeof(struct in_addr);
904         const struct tcp_md5sig_info *md5sig;
905
906         /* caller either holds rcu_read_lock() or socket lock */
907         md5sig = rcu_dereference_check(tp->md5sig_info,
908                                        sock_owned_by_user(sk) ||
909                                        lockdep_is_held(&sk->sk_lock.slock));
910         if (!md5sig)
911                 return NULL;
912 #if IS_ENABLED(CONFIG_IPV6)
913         if (family == AF_INET6)
914                 size = sizeof(struct in6_addr);
915 #endif
916         hlist_for_each_entry_rcu(key, &md5sig->head, node) {
917                 if (key->family != family)
918                         continue;
919                 if (!memcmp(&key->addr, addr, size))
920                         return key;
921         }
922         return NULL;
923 }
924 EXPORT_SYMBOL(tcp_md5_do_lookup);
925
926 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
927                                          const struct sock *addr_sk)
928 {
929         union tcp_md5_addr *addr;
930
931         addr = (union tcp_md5_addr *)&addr_sk->sk_daddr;
932         return tcp_md5_do_lookup(sk, addr, AF_INET);
933 }
934 EXPORT_SYMBOL(tcp_v4_md5_lookup);
935
936 /* This can be called on a newly created socket, from other files */
937 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
938                    int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
939 {
940         /* Add Key to the list */
941         struct tcp_md5sig_key *key;
942         struct tcp_sock *tp = tcp_sk(sk);
943         struct tcp_md5sig_info *md5sig;
944
945         key = tcp_md5_do_lookup(sk, addr, family);
946         if (key) {
947                 /* Pre-existing entry - just update that one. */
948                 memcpy(key->key, newkey, newkeylen);
949                 key->keylen = newkeylen;
950                 return 0;
951         }
952
953         md5sig = rcu_dereference_protected(tp->md5sig_info,
954                                            sock_owned_by_user(sk));
955         if (!md5sig) {
956                 md5sig = kmalloc(sizeof(*md5sig), gfp);
957                 if (!md5sig)
958                         return -ENOMEM;
959
960                 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
961                 INIT_HLIST_HEAD(&md5sig->head);
962                 rcu_assign_pointer(tp->md5sig_info, md5sig);
963         }
964
965         key = sock_kmalloc(sk, sizeof(*key), gfp);
966         if (!key)
967                 return -ENOMEM;
968         if (!tcp_alloc_md5sig_pool()) {
969                 sock_kfree_s(sk, key, sizeof(*key));
970                 return -ENOMEM;
971         }
972
973         memcpy(key->key, newkey, newkeylen);
974         key->keylen = newkeylen;
975         key->family = family;
976         memcpy(&key->addr, addr,
977                (family == AF_INET6) ? sizeof(struct in6_addr) :
978                                       sizeof(struct in_addr));
979         hlist_add_head_rcu(&key->node, &md5sig->head);
980         return 0;
981 }
982 EXPORT_SYMBOL(tcp_md5_do_add);
983
984 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
985 {
986         struct tcp_md5sig_key *key;
987
988         key = tcp_md5_do_lookup(sk, addr, family);
989         if (!key)
990                 return -ENOENT;
991         hlist_del_rcu(&key->node);
992         atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
993         kfree_rcu(key, rcu);
994         return 0;
995 }
996 EXPORT_SYMBOL(tcp_md5_do_del);
997
998 static void tcp_clear_md5_list(struct sock *sk)
999 {
1000         struct tcp_sock *tp = tcp_sk(sk);
1001         struct tcp_md5sig_key *key;
1002         struct hlist_node *n;
1003         struct tcp_md5sig_info *md5sig;
1004
1005         md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1006
1007         hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1008                 hlist_del_rcu(&key->node);
1009                 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1010                 kfree_rcu(key, rcu);
1011         }
1012 }
1013
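/* tcp_v4_parse_md5_keys() implements the TCP_MD5SIG socket option for IPv4:
 * a zero tcpm_keylen deletes the key for the given peer address, otherwise
 * the key is added or replaced.  A rough user-space sketch (peer address and
 * key below are illustrative only):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */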
1014 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1015                                  int optlen)
1016 {
1017         struct tcp_md5sig cmd;
1018         struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1019
1020         if (optlen < sizeof(cmd))
1021                 return -EINVAL;
1022
1023         if (copy_from_user(&cmd, optval, sizeof(cmd)))
1024                 return -EFAULT;
1025
1026         if (sin->sin_family != AF_INET)
1027                 return -EINVAL;
1028
1029         if (!cmd.tcpm_keylen)
1030                 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1031                                       AF_INET);
1032
1033         if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1034                 return -EINVAL;
1035
1036         return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1037                               AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1038                               GFP_KERNEL);
1039 }
1040
1041 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1042                                         __be32 daddr, __be32 saddr, int nbytes)
1043 {
1044         struct tcp4_pseudohdr *bp;
1045         struct scatterlist sg;
1046
1047         bp = &hp->md5_blk.ip4;
1048
1049         /*
1050          * 1. the TCP pseudo-header (in the order: source IP address,
1051          * destination IP address, zero-padded protocol number, and
1052          * segment length)
1053          */
1054         bp->saddr = saddr;
1055         bp->daddr = daddr;
1056         bp->pad = 0;
1057         bp->protocol = IPPROTO_TCP;
1058         bp->len = cpu_to_be16(nbytes);
1059
1060         sg_init_one(&sg, bp, sizeof(*bp));
1061         return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1062 }
1063
1064 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1065                                __be32 daddr, __be32 saddr, const struct tcphdr *th)
1066 {
1067         struct tcp_md5sig_pool *hp;
1068         struct hash_desc *desc;
1069
1070         hp = tcp_get_md5sig_pool();
1071         if (!hp)
1072                 goto clear_hash_noput;
1073         desc = &hp->md5_desc;
1074
1075         if (crypto_hash_init(desc))
1076                 goto clear_hash;
1077         if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1078                 goto clear_hash;
1079         if (tcp_md5_hash_header(hp, th))
1080                 goto clear_hash;
1081         if (tcp_md5_hash_key(hp, key))
1082                 goto clear_hash;
1083         if (crypto_hash_final(desc, md5_hash))
1084                 goto clear_hash;
1085
1086         tcp_put_md5sig_pool();
1087         return 0;
1088
1089 clear_hash:
1090         tcp_put_md5sig_pool();
1091 clear_hash_noput:
1092         memset(md5_hash, 0, 16);
1093         return 1;
1094 }
1095
1096 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1097                         const struct sock *sk,
1098                         const struct sk_buff *skb)
1099 {
1100         struct tcp_md5sig_pool *hp;
1101         struct hash_desc *desc;
1102         const struct tcphdr *th = tcp_hdr(skb);
1103         __be32 saddr, daddr;
1104
1105         if (sk) { /* valid for establish/request sockets */
1106                 saddr = sk->sk_rcv_saddr;
1107                 daddr = sk->sk_daddr;
1108         } else {
1109                 const struct iphdr *iph = ip_hdr(skb);
1110                 saddr = iph->saddr;
1111                 daddr = iph->daddr;
1112         }
1113
1114         hp = tcp_get_md5sig_pool();
1115         if (!hp)
1116                 goto clear_hash_noput;
1117         desc = &hp->md5_desc;
1118
1119         if (crypto_hash_init(desc))
1120                 goto clear_hash;
1121
1122         if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1123                 goto clear_hash;
1124         if (tcp_md5_hash_header(hp, th))
1125                 goto clear_hash;
1126         if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1127                 goto clear_hash;
1128         if (tcp_md5_hash_key(hp, key))
1129                 goto clear_hash;
1130         if (crypto_hash_final(desc, md5_hash))
1131                 goto clear_hash;
1132
1133         tcp_put_md5sig_pool();
1134         return 0;
1135
1136 clear_hash:
1137         tcp_put_md5sig_pool();
1138 clear_hash_noput:
1139         memset(md5_hash, 0, 16);
1140         return 1;
1141 }
1142 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1143
1144 /* Called with rcu_read_lock() */
1145 static bool tcp_v4_inbound_md5_hash(struct sock *sk,
1146                                     const struct sk_buff *skb)
1147 {
1148         /*
1149          * This gets called for each TCP segment that arrives
1150          * so we want to be efficient.
1151          * We have 3 drop cases:
1152          * o No MD5 hash and one expected.
1153          * o MD5 hash and we're not expecting one.
1154          * o MD5 hash and it's wrong.
1155          */
1156         const __u8 *hash_location = NULL;
1157         struct tcp_md5sig_key *hash_expected;
1158         const struct iphdr *iph = ip_hdr(skb);
1159         const struct tcphdr *th = tcp_hdr(skb);
1160         int genhash;
1161         unsigned char newhash[16];
1162
1163         hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1164                                           AF_INET);
1165         hash_location = tcp_parse_md5sig_option(th);
1166
1167         /* We've parsed the options - do we have a hash? */
1168         if (!hash_expected && !hash_location)
1169                 return false;
1170
1171         if (hash_expected && !hash_location) {
1172                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1173                 return true;
1174         }
1175
1176         if (!hash_expected && hash_location) {
1177                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1178                 return true;
1179         }
1180
1181         /* Okay, so we have both hash_expected and hash_location -
1182          * we need to calculate the checksum.
1183          */
1184         genhash = tcp_v4_md5_hash_skb(newhash,
1185                                       hash_expected,
1186                                       NULL, skb);
1187
1188         if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1189                 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1190                                      &iph->saddr, ntohs(th->source),
1191                                      &iph->daddr, ntohs(th->dest),
1192                                      genhash ? " tcp_v4_calc_md5_hash failed"
1193                                      : "");
1194                 return true;
1195         }
1196         return false;
1197 }
1198 #endif
1199
1200 static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,
1201                             struct sk_buff *skb)
1202 {
1203         struct inet_request_sock *ireq = inet_rsk(req);
1204
1205         sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1206         sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1207         ireq->no_srccheck = inet_sk(sk_listener)->transparent;
1208         ireq->opt = tcp_v4_save_options(skb);
1209         ireq->ireq_family = AF_INET;
1210 }
1211
1212 static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
1213                                           const struct request_sock *req,
1214                                           bool *strict)
1215 {
1216         struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1217
1218         if (strict) {
1219                 if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1220                         *strict = true;
1221                 else
1222                         *strict = false;
1223         }
1224
1225         return dst;
1226 }
1227
1228 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1229         .family         =       PF_INET,
1230         .obj_size       =       sizeof(struct tcp_request_sock),
1231         .rtx_syn_ack    =       tcp_rtx_synack,
1232         .send_ack       =       tcp_v4_reqsk_send_ack,
1233         .destructor     =       tcp_v4_reqsk_destructor,
1234         .send_reset     =       tcp_v4_send_reset,
1235         .syn_ack_timeout =      tcp_syn_ack_timeout,
1236 };
1237
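/* Per the patch subject, the separate tcp_v4_reqsk_md5_lookup() helper is
 * gone: .req_md5_lookup below points at tcp_v4_md5_lookup(), which takes the
 * peer address from its addr_sk argument (here the request socket) while
 * still searching the listener's key list.
 */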
1238 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1239         .mss_clamp      =       TCP_MSS_DEFAULT,
1240 #ifdef CONFIG_TCP_MD5SIG
1241         .req_md5_lookup =       tcp_v4_md5_lookup,
1242         .calc_md5_hash  =       tcp_v4_md5_hash_skb,
1243 #endif
1244         .init_req       =       tcp_v4_init_req,
1245 #ifdef CONFIG_SYN_COOKIES
1246         .cookie_init_seq =      cookie_v4_init_sequence,
1247 #endif
1248         .route_req      =       tcp_v4_route_req,
1249         .init_seq       =       tcp_v4_init_sequence,
1250         .send_synack    =       tcp_v4_send_synack,
1251         .queue_hash_add =       inet_csk_reqsk_queue_hash_add,
1252 };
1253
1254 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1255 {
1256         /* Never answer SYNs sent to broadcast or multicast addresses */
1257         if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1258                 goto drop;
1259
1260         return tcp_conn_request(&tcp_request_sock_ops,
1261                                 &tcp_request_sock_ipv4_ops, sk, skb);
1262
1263 drop:
1264         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1265         return 0;
1266 }
1267 EXPORT_SYMBOL(tcp_v4_conn_request);
1268
1269
1270 /*
1271  * The three way handshake has completed - we got a valid synack -
1272  * now create the new socket.
1273  */
1274 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1275                                   struct request_sock *req,
1276                                   struct dst_entry *dst)
1277 {
1278         struct inet_request_sock *ireq;
1279         struct inet_sock *newinet;
1280         struct tcp_sock *newtp;
1281         struct sock *newsk;
1282 #ifdef CONFIG_TCP_MD5SIG
1283         struct tcp_md5sig_key *key;
1284 #endif
1285         struct ip_options_rcu *inet_opt;
1286
1287         if (sk_acceptq_is_full(sk))
1288                 goto exit_overflow;
1289
1290         newsk = tcp_create_openreq_child(sk, req, skb);
1291         if (!newsk)
1292                 goto exit_nonewsk;
1293
1294         newsk->sk_gso_type = SKB_GSO_TCPV4;
1295         inet_sk_rx_dst_set(newsk, skb);
1296
1297         newtp                 = tcp_sk(newsk);
1298         newinet               = inet_sk(newsk);
1299         ireq                  = inet_rsk(req);
1300         sk_daddr_set(newsk, ireq->ir_rmt_addr);
1301         sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1302         newinet->inet_saddr           = ireq->ir_loc_addr;
1303         inet_opt              = ireq->opt;
1304         rcu_assign_pointer(newinet->inet_opt, inet_opt);
1305         ireq->opt             = NULL;
1306         newinet->mc_index     = inet_iif(skb);
1307         newinet->mc_ttl       = ip_hdr(skb)->ttl;
1308         newinet->rcv_tos      = ip_hdr(skb)->tos;
1309         inet_csk(newsk)->icsk_ext_hdr_len = 0;
1310         inet_set_txhash(newsk);
1311         if (inet_opt)
1312                 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1313         newinet->inet_id = newtp->write_seq ^ jiffies;
1314
1315         if (!dst) {
1316                 dst = inet_csk_route_child_sock(sk, newsk, req);
1317                 if (!dst)
1318                         goto put_and_exit;
1319         } else {
1320                 /* syncookie case : see end of cookie_v4_check() */
1321         }
1322         sk_setup_caps(newsk, dst);
1323
1324         tcp_ca_openreq_child(newsk, dst);
1325
1326         tcp_sync_mss(newsk, dst_mtu(dst));
1327         newtp->advmss = dst_metric_advmss(dst);
1328         if (tcp_sk(sk)->rx_opt.user_mss &&
1329             tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1330                 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1331
1332         tcp_initialize_rcv_mss(newsk);
1333
1334 #ifdef CONFIG_TCP_MD5SIG
1335         /* Copy over the MD5 key from the original socket */
1336         key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1337                                 AF_INET);
1338         if (key != NULL) {
1339                 /*
1340                  * We're using one, so create a matching key
1341                  * on the newsk structure. If we fail to get
1342                  * memory, then we end up not copying the key
1343                  * across. Shucks.
1344                  */
1345                 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1346                                AF_INET, key->key, key->keylen, GFP_ATOMIC);
1347                 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1348         }
1349 #endif
1350
1351         if (__inet_inherit_port(sk, newsk) < 0)
1352                 goto put_and_exit;
1353         __inet_hash_nolisten(newsk, NULL);
1354
1355         return newsk;
1356
1357 exit_overflow:
1358         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1359 exit_nonewsk:
1360         dst_release(dst);
1361 exit:
1362         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1363         return NULL;
1364 put_and_exit:
1365         inet_csk_prepare_forced_close(newsk);
1366         tcp_done(newsk);
1367         goto exit;
1368 }
1369 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1370
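/* Dispatch a segment received on a listening socket: first look for a
 * matching pending request sock and let tcp_check_req() validate it, then
 * for an already established child, and finally (with syncookies enabled)
 * try to reconstruct a request from the cookie carried by a non-SYN segment.
 */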
1371 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1372 {
1373         const struct tcphdr *th = tcp_hdr(skb);
1374         const struct iphdr *iph = ip_hdr(skb);
1375         struct request_sock *req;
1376         struct sock *nsk;
1377
1378         req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
1379         if (req) {
1380                 nsk = tcp_check_req(sk, skb, req, false);
1381                 reqsk_put(req);
1382                 return nsk;
1383         }
1384
1385         nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1386                         th->source, iph->daddr, th->dest, inet_iif(skb));
1387
1388         if (nsk) {
1389                 if (nsk->sk_state != TCP_TIME_WAIT) {
1390                         bh_lock_sock(nsk);
1391                         return nsk;
1392                 }
1393                 inet_twsk_put(inet_twsk(nsk));
1394                 return NULL;
1395         }
1396
1397 #ifdef CONFIG_SYN_COOKIES
1398         if (!th->syn)
1399                 sk = cookie_v4_check(sk, skb);
1400 #endif
1401         return sk;
1402 }
1403
1404 /* The socket must have its spinlock held when we get
1405  * here.
1406  *
1407  * We have a potential double-lock case here, so even when
1408  * doing backlog processing we use the BH locking scheme.
1409  * This is because we cannot sleep with the original spinlock
1410  * held.
1411  */
1412 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1413 {
1414         struct sock *rsk;
1415
1416         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1417                 struct dst_entry *dst = sk->sk_rx_dst;
1418
1419                 sock_rps_save_rxhash(sk, skb);
1420                 sk_mark_napi_id(sk, skb);
1421                 if (dst) {
1422                         if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1423                             dst->ops->check(dst, 0) == NULL) {
1424                                 dst_release(dst);
1425                                 sk->sk_rx_dst = NULL;
1426                         }
1427                 }
1428                 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1429                 return 0;
1430         }
1431
1432         if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1433                 goto csum_err;
1434
1435         if (sk->sk_state == TCP_LISTEN) {
1436                 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1437                 if (!nsk)
1438                         goto discard;
1439
1440                 if (nsk != sk) {
1441                         sock_rps_save_rxhash(nsk, skb);
1442                         sk_mark_napi_id(nsk, skb);
1443                         if (tcp_child_process(sk, nsk, skb)) {
1444                                 rsk = nsk;
1445                                 goto reset;
1446                         }
1447                         return 0;
1448                 }
1449         } else
1450                 sock_rps_save_rxhash(sk, skb);
1451
1452         if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1453                 rsk = sk;
1454                 goto reset;
1455         }
1456         return 0;
1457
1458 reset:
1459         tcp_v4_send_reset(rsk, skb);
1460 discard:
1461         kfree_skb(skb);
1462         /* Be careful here. If this function gets more complicated and
1463          * gcc suffers from register pressure on the x86, sk (in %ebx)
1464          * might be destroyed here. This current version compiles correctly,
1465          * but you have been warned.
1466          */
1467         return 0;
1468
1469 csum_err:
1470         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1471         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1472         goto discard;
1473 }
1474 EXPORT_SYMBOL(tcp_v4_do_rcv);
1475
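/* Early demux, called from the IP receive path before routing: if the
 * segment matches an established socket, attach that socket to the skb
 * and, when its cached rx dst is still valid for the incoming interface,
 * reuse the dst so the regular path can skip the route lookup.
 */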
1476 void tcp_v4_early_demux(struct sk_buff *skb)
1477 {
1478         const struct iphdr *iph;
1479         const struct tcphdr *th;
1480         struct sock *sk;
1481
1482         if (skb->pkt_type != PACKET_HOST)
1483                 return;
1484
1485         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1486                 return;
1487
1488         iph = ip_hdr(skb);
1489         th = tcp_hdr(skb);
1490
1491         if (th->doff < sizeof(struct tcphdr) / 4)
1492                 return;
1493
1494         sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1495                                        iph->saddr, th->source,
1496                                        iph->daddr, ntohs(th->dest),
1497                                        skb->skb_iif);
1498         if (sk) {
1499                 skb->sk = sk;
1500                 skb->destructor = sock_edemux;
1501                 if (sk_fullsock(sk)) {
1502                         struct dst_entry *dst = sk->sk_rx_dst;
1503
1504                         if (dst)
1505                                 dst = dst_check(dst, 0);
1506                         if (dst &&
1507                             inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1508                                 skb_dst_set_noref(skb, dst);
1509                 }
1510         }
1511 }
1512
1513 /* The packet is added to the VJ-style prequeue for processing in process
1514  * context, if a reader task is waiting. Apparently, this exciting
1515  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1516  * failed somewhere. Latency? Burstiness? Well, at least now we will
1517  * see why it failed. 8)8)                               --ANK
1518  *
1519  */
1520 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1521 {
1522         struct tcp_sock *tp = tcp_sk(sk);
1523
1524         if (sysctl_tcp_low_latency || !tp->ucopy.task)
1525                 return false;
1526
1527         if (skb->len <= tcp_hdrlen(skb) &&
1528             skb_queue_len(&tp->ucopy.prequeue) == 0)
1529                 return false;
1530
1531         /* Before escaping the RCU protected region, we need to take care of the
1532          * skb dst. Prequeue is only enabled for established sockets.
1533          * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
1534          * Instead of doing a full sk_rx_dst validity check here, let's perform
1535          * an optimistic check.
1536          */
1537         if (likely(sk->sk_rx_dst))
1538                 skb_dst_drop(skb);
1539         else
1540                 skb_dst_force(skb);
1541
1542         __skb_queue_tail(&tp->ucopy.prequeue, skb);
1543         tp->ucopy.memory += skb->truesize;
1544         if (tp->ucopy.memory > sk->sk_rcvbuf) {
1545                 struct sk_buff *skb1;
1546
1547                 BUG_ON(sock_owned_by_user(sk));
1548
1549                 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1550                         sk_backlog_rcv(sk, skb1);
1551                         NET_INC_STATS_BH(sock_net(sk),
1552                                          LINUX_MIB_TCPPREQUEUEDROPPED);
1553                 }
1554
1555                 tp->ucopy.memory = 0;
1556         } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1557                 wake_up_interruptible_sync_poll(sk_sleep(sk),
1558                                            POLLIN | POLLRDNORM | POLLRDBAND);
1559                 if (!inet_csk_ack_scheduled(sk))
1560                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1561                                                   (3 * tcp_rto_min(sk)) / 4,
1562                                                   TCP_RTO_MAX);
1563         }
1564         return true;
1565 }
1566 EXPORT_SYMBOL(tcp_prequeue);
1567
1568 /*
1569  *      From tcp_input.c
1570  */
1571
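/* Main IPv4 receive routine: validate the header and checksum, fill in
 * TCP_SKB_CB(), look up the owning socket and, after the MD5, xfrm and
 * socket filter checks, either process the segment directly, prequeue it,
 * or add it to the backlog when the socket is owned by the user.
 * TIME_WAIT sockets are handed to tcp_timewait_state_process().
 */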
1572 int tcp_v4_rcv(struct sk_buff *skb)
1573 {
1574         const struct iphdr *iph;
1575         const struct tcphdr *th;
1576         struct sock *sk;
1577         int ret;
1578         struct net *net = dev_net(skb->dev);
1579
1580         if (skb->pkt_type != PACKET_HOST)
1581                 goto discard_it;
1582
1583         /* Count it even if it's bad */
1584         TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1585
1586         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1587                 goto discard_it;
1588
1589         th = tcp_hdr(skb);
1590
1591         if (th->doff < sizeof(struct tcphdr) / 4)
1592                 goto bad_packet;
1593         if (!pskb_may_pull(skb, th->doff * 4))
1594                 goto discard_it;
1595
1596         /* An explanation is required here, I think.
1597          * Packet length and doff are validated by header prediction,
1598          * provided the case of th->doff==0 is eliminated.
1599          * So, we defer the checks. */
1600
1601         if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1602                 goto csum_error;
1603
1604         th = tcp_hdr(skb);
1605         iph = ip_hdr(skb);
1606         /* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
1607          * barrier() makes sure the compiler won't play aliasing games.
1608          */
1609         memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1610                 sizeof(struct inet_skb_parm));
1611         barrier();
1612
1613         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1614         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1615                                     skb->len - th->doff * 4);
1616         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1617         TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1618         TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1619         TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1620         TCP_SKB_CB(skb)->sacked  = 0;
1621
1622         sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1623         if (!sk)
1624                 goto no_tcp_socket;
1625
1626 process:
1627         if (sk->sk_state == TCP_TIME_WAIT)
1628                 goto do_time_wait;
1629
1630         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1631                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1632                 goto discard_and_relse;
1633         }
1634
1635         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1636                 goto discard_and_relse;
1637
1638 #ifdef CONFIG_TCP_MD5SIG
1639         /*
1640          * We really want to reject the packet as early as possible
1641          * if:
1642          *  o We're expecting an MD5'd packet and there is no MD5 TCP option
1643          *  o There is an MD5 option and we're not expecting one
1644          */
1645         if (tcp_v4_inbound_md5_hash(sk, skb))
1646                 goto discard_and_relse;
1647 #endif
1648
1649         nf_reset(skb);
1650
1651         if (sk_filter(sk, skb))
1652                 goto discard_and_relse;
1653
1654         sk_incoming_cpu_update(sk);
1655         skb->dev = NULL;
1656
1657         bh_lock_sock_nested(sk);
1658         ret = 0;
1659         if (!sock_owned_by_user(sk)) {
1660                 if (!tcp_prequeue(sk, skb))
1661                         ret = tcp_v4_do_rcv(sk, skb);
1662         } else if (unlikely(sk_add_backlog(sk, skb,
1663                                            sk->sk_rcvbuf + sk->sk_sndbuf))) {
1664                 bh_unlock_sock(sk);
1665                 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1666                 goto discard_and_relse;
1667         }
1668         bh_unlock_sock(sk);
1669
1670         sock_put(sk);
1671
1672         return ret;
1673
1674 no_tcp_socket:
1675         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1676                 goto discard_it;
1677
1678         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1679 csum_error:
1680                 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1681 bad_packet:
1682                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1683         } else {
1684                 tcp_v4_send_reset(NULL, skb);
1685         }
1686
1687 discard_it:
1688         /* Discard frame. */
1689         kfree_skb(skb);
1690         return 0;
1691
1692 discard_and_relse:
1693         sock_put(sk);
1694         goto discard_it;
1695
1696 do_time_wait:
1697         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1698                 inet_twsk_put(inet_twsk(sk));
1699                 goto discard_it;
1700         }
1701
1702         if (skb->len < (th->doff << 2)) {
1703                 inet_twsk_put(inet_twsk(sk));
1704                 goto bad_packet;
1705         }
1706         if (tcp_checksum_complete(skb)) {
1707                 inet_twsk_put(inet_twsk(sk));
1708                 goto csum_error;
1709         }
1710         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1711         case TCP_TW_SYN: {
1712                 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1713                                                         &tcp_hashinfo,
1714                                                         iph->saddr, th->source,
1715                                                         iph->daddr, th->dest,
1716                                                         inet_iif(skb));
1717                 if (sk2) {
1718                         inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1719                         inet_twsk_put(inet_twsk(sk));
1720                         sk = sk2;
1721                         goto process;
1722                 }
1723                 /* Fall through to ACK */
1724         }
1725         case TCP_TW_ACK:
1726                 tcp_v4_timewait_ack(sk, skb);
1727                 break;
1728         case TCP_TW_RST:
1729                 goto no_tcp_socket;
1730         case TCP_TW_SUCCESS:;
1731         }
1732         goto discard_it;
1733 }
1734
1735 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1736         .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
1737         .twsk_unique    = tcp_twsk_unique,
1738         .twsk_destructor= tcp_twsk_destructor,
1739 };
1740
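/* Cache the incoming route on the socket: take a reference on the skb's
 * dst, store it in sk->sk_rx_dst and remember the incoming interface, so
 * the established fast path can validate and reuse it later.
 */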
1741 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1742 {
1743         struct dst_entry *dst = skb_dst(skb);
1744
1745         if (dst) {
1746                 dst_hold(dst);
1747                 sk->sk_rx_dst = dst;
1748                 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1749         }
1750 }
1751 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1752
1753 const struct inet_connection_sock_af_ops ipv4_specific = {
1754         .queue_xmit        = ip_queue_xmit,
1755         .send_check        = tcp_v4_send_check,
1756         .rebuild_header    = inet_sk_rebuild_header,
1757         .sk_rx_dst_set     = inet_sk_rx_dst_set,
1758         .conn_request      = tcp_v4_conn_request,
1759         .syn_recv_sock     = tcp_v4_syn_recv_sock,
1760         .net_header_len    = sizeof(struct iphdr),
1761         .setsockopt        = ip_setsockopt,
1762         .getsockopt        = ip_getsockopt,
1763         .addr2sockaddr     = inet_csk_addr2sockaddr,
1764         .sockaddr_len      = sizeof(struct sockaddr_in),
1765         .bind_conflict     = inet_csk_bind_conflict,
1766 #ifdef CONFIG_COMPAT
1767         .compat_setsockopt = compat_ip_setsockopt,
1768         .compat_getsockopt = compat_ip_getsockopt,
1769 #endif
1770         .mtu_reduced       = tcp_v4_mtu_reduced,
1771 };
1772 EXPORT_SYMBOL(ipv4_specific);
1773
1774 #ifdef CONFIG_TCP_MD5SIG
1775 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1776         .md5_lookup             = tcp_v4_md5_lookup,
1777         .calc_md5_hash          = tcp_v4_md5_hash_skb,
1778         .md5_parse              = tcp_v4_parse_md5_keys,
1779 };
1780 #endif
1781
1782 /* NOTE: A lot of things are set to zero explicitly by the call to
1783  *       sk_alloc(), so they need not be done here.
1784  */
1785 static int tcp_v4_init_sock(struct sock *sk)
1786 {
1787         struct inet_connection_sock *icsk = inet_csk(sk);
1788
1789         tcp_init_sock(sk);
1790
1791         icsk->icsk_af_ops = &ipv4_specific;
1792
1793 #ifdef CONFIG_TCP_MD5SIG
1794         tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1795 #endif
1796
1797         return 0;
1798 }
1799
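/* Final per-socket cleanup: stop the TCP timers, release congestion
 * control state, purge the write, out-of-order and prequeue queues, drop
 * any MD5 keys, give back the bound port and release memory accounting
 * references.
 */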
1800 void tcp_v4_destroy_sock(struct sock *sk)
1801 {
1802         struct tcp_sock *tp = tcp_sk(sk);
1803
1804         tcp_clear_xmit_timers(sk);
1805
1806         tcp_cleanup_congestion_control(sk);
1807
1808         /* Clean up the write buffer. */
1809         tcp_write_queue_purge(sk);
1810
1811         /* Cleans up our, hopefully empty, out_of_order_queue. */
1812         __skb_queue_purge(&tp->out_of_order_queue);
1813
1814 #ifdef CONFIG_TCP_MD5SIG
1815         /* Clean up the MD5 key list, if any */
1816         if (tp->md5sig_info) {
1817                 tcp_clear_md5_list(sk);
1818                 kfree_rcu(tp->md5sig_info, rcu);
1819                 tp->md5sig_info = NULL;
1820         }
1821 #endif
1822
1823         /* Clean up the prequeue; it really should be empty by now. */
1824         __skb_queue_purge(&tp->ucopy.prequeue);
1825
1826         /* Clean up a referenced TCP bind bucket. */
1827         if (inet_csk(sk)->icsk_bind_hash)
1828                 inet_put_port(sk);
1829
1830         BUG_ON(tp->fastopen_rsk != NULL);
1831
1832         /* If socket is aborted during connect operation */
1833         tcp_free_fastopen_req(tp);
1834
1835         sk_sockets_allocated_dec(sk);
1836         sock_release_memcg(sk);
1837 }
1838 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1839
1840 #ifdef CONFIG_PROC_FS
1841 /* Proc filesystem TCP sock list dumping. */
1842
1843 /*
1844  * Get the next listener socket following cur.  If cur is NULL, get the first
1845  * socket starting from the bucket given in st->bucket; when st->bucket is zero,
1846  * the very first socket in the hash table is returned.
1847  */
1848 static void *listening_get_next(struct seq_file *seq, void *cur)
1849 {
1850         struct inet_connection_sock *icsk;
1851         struct hlist_nulls_node *node;
1852         struct sock *sk = cur;
1853         struct inet_listen_hashbucket *ilb;
1854         struct tcp_iter_state *st = seq->private;
1855         struct net *net = seq_file_net(seq);
1856
1857         if (!sk) {
1858                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1859                 spin_lock_bh(&ilb->lock);
1860                 sk = sk_nulls_head(&ilb->head);
1861                 st->offset = 0;
1862                 goto get_sk;
1863         }
1864         ilb = &tcp_hashinfo.listening_hash[st->bucket];
1865         ++st->num;
1866         ++st->offset;
1867
1868         if (st->state == TCP_SEQ_STATE_OPENREQ) {
1869                 struct request_sock *req = cur;
1870
1871                 icsk = inet_csk(st->syn_wait_sk);
1872                 req = req->dl_next;
1873                 while (1) {
1874                         while (req) {
1875                                 if (req->rsk_ops->family == st->family) {
1876                                         cur = req;
1877                                         goto out;
1878                                 }
1879                                 req = req->dl_next;
1880                         }
1881                         if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1882                                 break;
1883 get_req:
1884                         req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1885                 }
1886                 sk        = sk_nulls_next(st->syn_wait_sk);
1887                 st->state = TCP_SEQ_STATE_LISTENING;
1888                 spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1889         } else {
1890                 icsk = inet_csk(sk);
1891                 spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1892                 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1893                         goto start_req;
1894                 spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1895                 sk = sk_nulls_next(sk);
1896         }
1897 get_sk:
1898         sk_nulls_for_each_from(sk, node) {
1899                 if (!net_eq(sock_net(sk), net))
1900                         continue;
1901                 if (sk->sk_family == st->family) {
1902                         cur = sk;
1903                         goto out;
1904                 }
1905                 icsk = inet_csk(sk);
1906                 spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1907                 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1908 start_req:
1909                         st->uid         = sock_i_uid(sk);
1910                         st->syn_wait_sk = sk;
1911                         st->state       = TCP_SEQ_STATE_OPENREQ;
1912                         st->sbucket     = 0;
1913                         goto get_req;
1914                 }
1915                 spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1916         }
1917         spin_unlock_bh(&ilb->lock);
1918         st->offset = 0;
1919         if (++st->bucket < INET_LHTABLE_SIZE) {
1920                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1921                 spin_lock_bh(&ilb->lock);
1922                 sk = sk_nulls_head(&ilb->head);
1923                 goto get_sk;
1924         }
1925         cur = NULL;
1926 out:
1927         return cur;
1928 }
1929
1930 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1931 {
1932         struct tcp_iter_state *st = seq->private;
1933         void *rc;
1934
1935         st->bucket = 0;
1936         st->offset = 0;
1937         rc = listening_get_next(seq, NULL);
1938
1939         while (rc && *pos) {
1940                 rc = listening_get_next(seq, rc);
1941                 --*pos;
1942         }
1943         return rc;
1944 }
1945
1946 static inline bool empty_bucket(const struct tcp_iter_state *st)
1947 {
1948         return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1949 }
1950
1951 /*
1952  * Get the first established socket, starting from the bucket given in st->bucket.
1953  * If st->bucket is zero, the very first socket in the hash is returned.
1954  */
1955 static void *established_get_first(struct seq_file *seq)
1956 {
1957         struct tcp_iter_state *st = seq->private;
1958         struct net *net = seq_file_net(seq);
1959         void *rc = NULL;
1960
1961         st->offset = 0;
1962         for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1963                 struct sock *sk;
1964                 struct hlist_nulls_node *node;
1965                 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1966
1967                 /* Lockless fast path for the common case of empty buckets */
1968                 if (empty_bucket(st))
1969                         continue;
1970
1971                 spin_lock_bh(lock);
1972                 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1973                         if (sk->sk_family != st->family ||
1974                             !net_eq(sock_net(sk), net)) {
1975                                 continue;
1976                         }
1977                         rc = sk;
1978                         goto out;
1979                 }
1980                 spin_unlock_bh(lock);
1981         }
1982 out:
1983         return rc;
1984 }
1985
1986 static void *established_get_next(struct seq_file *seq, void *cur)
1987 {
1988         struct sock *sk = cur;
1989         struct hlist_nulls_node *node;
1990         struct tcp_iter_state *st = seq->private;
1991         struct net *net = seq_file_net(seq);
1992
1993         ++st->num;
1994         ++st->offset;
1995
1996         sk = sk_nulls_next(sk);
1997
1998         sk_nulls_for_each_from(sk, node) {
1999                 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2000                         return sk;
2001         }
2002
2003         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2004         ++st->bucket;
2005         return established_get_first(seq);
2006 }
2007
2008 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2009 {
2010         struct tcp_iter_state *st = seq->private;
2011         void *rc;
2012
2013         st->bucket = 0;
2014         rc = established_get_first(seq);
2015
2016         while (rc && pos) {
2017                 rc = established_get_next(seq, rc);
2018                 --pos;
2019         }
2020         return rc;
2021 }
2022
2023 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2024 {
2025         void *rc;
2026         struct tcp_iter_state *st = seq->private;
2027
2028         st->state = TCP_SEQ_STATE_LISTENING;
2029         rc        = listening_get_idx(seq, &pos);
2030
2031         if (!rc) {
2032                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2033                 rc        = established_get_idx(seq, pos);
2034         }
2035
2036         return rc;
2037 }
2038
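/* Resume the /proc iteration close to where the previous read stopped:
 * walk st->bucket/st->offset again in the current state instead of
 * rewinding to the start, and restore st->num so the "sl" numbering of
 * the output stays continuous.
 */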
2039 static void *tcp_seek_last_pos(struct seq_file *seq)
2040 {
2041         struct tcp_iter_state *st = seq->private;
2042         int offset = st->offset;
2043         int orig_num = st->num;
2044         void *rc = NULL;
2045
2046         switch (st->state) {
2047         case TCP_SEQ_STATE_OPENREQ:
2048         case TCP_SEQ_STATE_LISTENING:
2049                 if (st->bucket >= INET_LHTABLE_SIZE)
2050                         break;
2051                 st->state = TCP_SEQ_STATE_LISTENING;
2052                 rc = listening_get_next(seq, NULL);
2053                 while (offset-- && rc)
2054                         rc = listening_get_next(seq, rc);
2055                 if (rc)
2056                         break;
2057                 st->bucket = 0;
2058                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2059                 /* Fallthrough */
2060         case TCP_SEQ_STATE_ESTABLISHED:
2061                 if (st->bucket > tcp_hashinfo.ehash_mask)
2062                         break;
2063                 rc = established_get_first(seq);
2064                 while (offset-- && rc)
2065                         rc = established_get_next(seq, rc);
2066         }
2067
2068         st->num = orig_num;
2069
2070         return rc;
2071 }
2072
2073 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2074 {
2075         struct tcp_iter_state *st = seq->private;
2076         void *rc;
2077
2078         if (*pos && *pos == st->last_pos) {
2079                 rc = tcp_seek_last_pos(seq);
2080                 if (rc)
2081                         goto out;
2082         }
2083
2084         st->state = TCP_SEQ_STATE_LISTENING;
2085         st->num = 0;
2086         st->bucket = 0;
2087         st->offset = 0;
2088         rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2089
2090 out:
2091         st->last_pos = *pos;
2092         return rc;
2093 }
2094
2095 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2096 {
2097         struct tcp_iter_state *st = seq->private;
2098         void *rc = NULL;
2099
2100         if (v == SEQ_START_TOKEN) {
2101                 rc = tcp_get_idx(seq, 0);
2102                 goto out;
2103         }
2104
2105         switch (st->state) {
2106         case TCP_SEQ_STATE_OPENREQ:
2107         case TCP_SEQ_STATE_LISTENING:
2108                 rc = listening_get_next(seq, v);
2109                 if (!rc) {
2110                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2111                         st->bucket = 0;
2112                         st->offset = 0;
2113                         rc        = established_get_first(seq);
2114                 }
2115                 break;
2116         case TCP_SEQ_STATE_ESTABLISHED:
2117                 rc = established_get_next(seq, v);
2118                 break;
2119         }
2120 out:
2121         ++*pos;
2122         st->last_pos = *pos;
2123         return rc;
2124 }
2125
2126 static void tcp_seq_stop(struct seq_file *seq, void *v)
2127 {
2128         struct tcp_iter_state *st = seq->private;
2129
2130         switch (st->state) {
2131         case TCP_SEQ_STATE_OPENREQ:
2132                 if (v) {
2133                         struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2134                         spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2135                 }
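                /* Fall through to release the listening hash bucket lock */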
2136         case TCP_SEQ_STATE_LISTENING:
2137                 if (v != SEQ_START_TOKEN)
2138                         spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2139                 break;
2140         case TCP_SEQ_STATE_ESTABLISHED:
2141                 if (v)
2142                         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2143                 break;
2144         }
2145 }
2146
2147 int tcp_seq_open(struct inode *inode, struct file *file)
2148 {
2149         struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2150         struct tcp_iter_state *s;
2151         int err;
2152
2153         err = seq_open_net(inode, file, &afinfo->seq_ops,
2154                           sizeof(struct tcp_iter_state));
2155         if (err < 0)
2156                 return err;
2157
2158         s = ((struct seq_file *)file->private_data)->private;
2159         s->family               = afinfo->family;
2160         s->last_pos             = 0;
2161         return 0;
2162 }
2163 EXPORT_SYMBOL(tcp_seq_open);
2164
2165 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2166 {
2167         int rc = 0;
2168         struct proc_dir_entry *p;
2169
2170         afinfo->seq_ops.start           = tcp_seq_start;
2171         afinfo->seq_ops.next            = tcp_seq_next;
2172         afinfo->seq_ops.stop            = tcp_seq_stop;
2173
2174         p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2175                              afinfo->seq_fops, afinfo);
2176         if (!p)
2177                 rc = -ENOMEM;
2178         return rc;
2179 }
2180 EXPORT_SYMBOL(tcp_proc_register);
2181
2182 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2183 {
2184         remove_proc_entry(afinfo->name, net->proc_net);
2185 }
2186 EXPORT_SYMBOL(tcp_proc_unregister);
2187
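/* Emit one SYN_RECV request sock in the /proc/net/tcp line format. */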
2188 static void get_openreq4(const struct request_sock *req,
2189                          struct seq_file *f, int i, kuid_t uid)
2190 {
2191         const struct inet_request_sock *ireq = inet_rsk(req);
2192         long delta = req->rsk_timer.expires - jiffies;
2193
2194         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2195                 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2196                 i,
2197                 ireq->ir_loc_addr,
2198                 ireq->ir_num,
2199                 ireq->ir_rmt_addr,
2200                 ntohs(ireq->ir_rmt_port),
2201                 TCP_SYN_RECV,
2202                 0, 0, /* could print option size, but that is af dependent. */
2203                 1,    /* timers active (only the expire timer) */
2204                 jiffies_delta_to_clock_t(delta),
2205                 req->num_timeout,
2206                 from_kuid_munged(seq_user_ns(f), uid),
2207                 0,  /* non standard timer */
2208                 0, /* open_requests have no inode */
2209                 0,
2210                 req);
2211 }
2212
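/* Emit one full (listening or established) socket as a /proc/net/tcp line,
 * including queue sizes, the pending timer and congestion state.
 */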
2213 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2214 {
2215         int timer_active;
2216         unsigned long timer_expires;
2217         const struct tcp_sock *tp = tcp_sk(sk);
2218         const struct inet_connection_sock *icsk = inet_csk(sk);
2219         const struct inet_sock *inet = inet_sk(sk);
2220         struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2221         __be32 dest = inet->inet_daddr;
2222         __be32 src = inet->inet_rcv_saddr;
2223         __u16 destp = ntohs(inet->inet_dport);
2224         __u16 srcp = ntohs(inet->inet_sport);
2225         int rx_queue;
2226
2227         if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2228             icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2229             icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2230                 timer_active    = 1;
2231                 timer_expires   = icsk->icsk_timeout;
2232         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2233                 timer_active    = 4;
2234                 timer_expires   = icsk->icsk_timeout;
2235         } else if (timer_pending(&sk->sk_timer)) {
2236                 timer_active    = 2;
2237                 timer_expires   = sk->sk_timer.expires;
2238         } else {
2239                 timer_active    = 0;
2240                 timer_expires = jiffies;
2241         }
2242
2243         if (sk->sk_state == TCP_LISTEN)
2244                 rx_queue = sk->sk_ack_backlog;
2245         else
2246                 /*
2247                  * Because we don't lock the socket, we might find a transient negative value.
2248                  */
2249                 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2250
2251         seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2252                         "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2253                 i, src, srcp, dest, destp, sk->sk_state,
2254                 tp->write_seq - tp->snd_una,
2255                 rx_queue,
2256                 timer_active,
2257                 jiffies_delta_to_clock_t(timer_expires - jiffies),
2258                 icsk->icsk_retransmits,
2259                 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2260                 icsk->icsk_probes_out,
2261                 sock_i_ino(sk),
2262                 atomic_read(&sk->sk_refcnt), sk,
2263                 jiffies_to_clock_t(icsk->icsk_rto),
2264                 jiffies_to_clock_t(icsk->icsk_ack.ato),
2265                 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2266                 tp->snd_cwnd,
2267                 sk->sk_state == TCP_LISTEN ?
2268                     (fastopenq ? fastopenq->max_qlen : 0) :
2269                     (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2270 }
2271
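/* Emit one TIME_WAIT socket as a /proc/net/tcp line. */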
2272 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2273                                struct seq_file *f, int i)
2274 {
2275         __be32 dest, src;
2276         __u16 destp, srcp;
2277         s32 delta = tw->tw_ttd - inet_tw_time_stamp();
2278
2279         dest  = tw->tw_daddr;
2280         src   = tw->tw_rcv_saddr;
2281         destp = ntohs(tw->tw_dport);
2282         srcp  = ntohs(tw->tw_sport);
2283
2284         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2285                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2286                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2287                 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2288                 atomic_read(&tw->tw_refcnt), tw);
2289 }
2290
2291 #define TMPSZ 150
2292
2293 static int tcp4_seq_show(struct seq_file *seq, void *v)
2294 {
2295         struct tcp_iter_state *st;
2296         struct sock *sk = v;
2297
2298         seq_setwidth(seq, TMPSZ - 1);
2299         if (v == SEQ_START_TOKEN) {
2300                 seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2301                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2302                            "inode");
2303                 goto out;
2304         }
2305         st = seq->private;
2306
2307         switch (st->state) {
2308         case TCP_SEQ_STATE_LISTENING:
2309         case TCP_SEQ_STATE_ESTABLISHED:
2310                 if (sk->sk_state == TCP_TIME_WAIT)
2311                         get_timewait4_sock(v, seq, st->num);
2312                 else
2313                         get_tcp4_sock(v, seq, st->num);
2314                 break;
2315         case TCP_SEQ_STATE_OPENREQ:
2316                 get_openreq4(v, seq, st->num, st->uid);
2317                 break;
2318         }
2319 out:
2320         seq_pad(seq, '\n');
2321         return 0;
2322 }
2323
2324 static const struct file_operations tcp_afinfo_seq_fops = {
2325         .owner   = THIS_MODULE,
2326         .open    = tcp_seq_open,
2327         .read    = seq_read,
2328         .llseek  = seq_lseek,
2329         .release = seq_release_net
2330 };
2331
2332 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2333         .name           = "tcp",
2334         .family         = AF_INET,
2335         .seq_fops       = &tcp_afinfo_seq_fops,
2336         .seq_ops        = {
2337                 .show           = tcp4_seq_show,
2338         },
2339 };
2340
2341 static int __net_init tcp4_proc_init_net(struct net *net)
2342 {
2343         return tcp_proc_register(net, &tcp4_seq_afinfo);
2344 }
2345
2346 static void __net_exit tcp4_proc_exit_net(struct net *net)
2347 {
2348         tcp_proc_unregister(net, &tcp4_seq_afinfo);
2349 }
2350
2351 static struct pernet_operations tcp4_net_ops = {
2352         .init = tcp4_proc_init_net,
2353         .exit = tcp4_proc_exit_net,
2354 };
2355
2356 int __init tcp4_proc_init(void)
2357 {
2358         return register_pernet_subsys(&tcp4_net_ops);
2359 }
2360
2361 void tcp4_proc_exit(void)
2362 {
2363         unregister_pernet_subsys(&tcp4_net_ops);
2364 }
2365 #endif /* CONFIG_PROC_FS */
2366
2367 struct proto tcp_prot = {
2368         .name                   = "TCP",
2369         .owner                  = THIS_MODULE,
2370         .close                  = tcp_close,
2371         .connect                = tcp_v4_connect,
2372         .disconnect             = tcp_disconnect,
2373         .accept                 = inet_csk_accept,
2374         .ioctl                  = tcp_ioctl,
2375         .init                   = tcp_v4_init_sock,
2376         .destroy                = tcp_v4_destroy_sock,
2377         .shutdown               = tcp_shutdown,
2378         .setsockopt             = tcp_setsockopt,
2379         .getsockopt             = tcp_getsockopt,
2380         .recvmsg                = tcp_recvmsg,
2381         .sendmsg                = tcp_sendmsg,
2382         .sendpage               = tcp_sendpage,
2383         .backlog_rcv            = tcp_v4_do_rcv,
2384         .release_cb             = tcp_release_cb,
2385         .hash                   = inet_hash,
2386         .unhash                 = inet_unhash,
2387         .get_port               = inet_csk_get_port,
2388         .enter_memory_pressure  = tcp_enter_memory_pressure,
2389         .stream_memory_free     = tcp_stream_memory_free,
2390         .sockets_allocated      = &tcp_sockets_allocated,
2391         .orphan_count           = &tcp_orphan_count,
2392         .memory_allocated       = &tcp_memory_allocated,
2393         .memory_pressure        = &tcp_memory_pressure,
2394         .sysctl_mem             = sysctl_tcp_mem,
2395         .sysctl_wmem            = sysctl_tcp_wmem,
2396         .sysctl_rmem            = sysctl_tcp_rmem,
2397         .max_header             = MAX_TCP_HEADER,
2398         .obj_size               = sizeof(struct tcp_sock),
2399         .slab_flags             = SLAB_DESTROY_BY_RCU,
2400         .twsk_prot              = &tcp_timewait_sock_ops,
2401         .rsk_prot               = &tcp_request_sock_ops,
2402         .h.hashinfo             = &tcp_hashinfo,
2403         .no_autobind            = true,
2404 #ifdef CONFIG_COMPAT
2405         .compat_setsockopt      = compat_tcp_setsockopt,
2406         .compat_getsockopt      = compat_tcp_getsockopt,
2407 #endif
2408 #ifdef CONFIG_MEMCG_KMEM
2409         .init_cgroup            = tcp_init_cgroup,
2410         .destroy_cgroup         = tcp_destroy_cgroup,
2411         .proto_cgroup           = tcp_proto_cgroup,
2412 #endif
2413 };
2414 EXPORT_SYMBOL(tcp_prot);
2415
2416 static void __net_exit tcp_sk_exit(struct net *net)
2417 {
2418         int cpu;
2419
2420         for_each_possible_cpu(cpu)
2421                 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2422         free_percpu(net->ipv4.tcp_sk);
2423 }
2424
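/* Per-namespace init: create one control socket per possible CPU (used for
 * kernel-generated replies such as resets) and set the namespace's TCP
 * sysctl defaults.
 */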
2425 static int __net_init tcp_sk_init(struct net *net)
2426 {
2427         int res, cpu;
2428
2429         net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2430         if (!net->ipv4.tcp_sk)
2431                 return -ENOMEM;
2432
2433         for_each_possible_cpu(cpu) {
2434                 struct sock *sk;
2435
2436                 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2437                                            IPPROTO_TCP, net);
2438                 if (res)
2439                         goto fail;
2440                 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2441         }
2442         net->ipv4.sysctl_tcp_ecn = 2;
2443         net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2444         net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2445         net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2446         return 0;
2447
2448 fail:
2449         tcp_sk_exit(net);
2450
2451         return res;
2452 }
2453
2454 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2455 {
2456         inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2457 }
2458
2459 static struct pernet_operations __net_initdata tcp_sk_ops = {
2460        .init       = tcp_sk_init,
2461        .exit       = tcp_sk_exit,
2462        .exit_batch = tcp_sk_exit_batch,
2463 };
2464
2465 void __init tcp_v4_init(void)
2466 {
2467         inet_hashinfo_init(&tcp_hashinfo);
2468         if (register_pernet_subsys(&tcp_sk_ops))
2469                 panic("Failed to create the TCP control socket.\n");
2470 }