tcp: md5: remove spinlock usage in fast path
[pandora-kernel.git] / net / ipv4 / tcp_ipv4.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 *              IPv4 specific functions
 *
 *
 *              code split from:
 *              linux/ipv4/tcp.c
 *              linux/ipv4/tcp_input.c
 *              linux/ipv4/tcp_output.c
 *
 *              See tcp.c for author information
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *              David S. Miller :       New socket lookup architecture.
 *                                      This code is dedicated to John Dyson.
 *              David S. Miller :       Change semantics of established hash,
 *                                      half is devoted to TIME_WAIT sockets
 *                                      and the rest go in the other half.
 *              Andi Kleen :            Add support for syncookies and fixed
 *                                      some bugs: ip options weren't passed to
 *                                      the TCP layer, missed a check for an
 *                                      ACK bit.
 *              Andi Kleen :            Implemented fast path mtu discovery.
 *                                      Fixed many serious bugs in the
 *                                      request_sock handling and moved
 *                                      most of it into the af independent code.
 *                                      Added tail drop and some other bugfixes.
 *                                      Added new listen semantics.
 *              Mike McLagan    :       Routing by source
 *      Juan Jose Ciarlante:            ip_dynaddr bits
 *              Andi Kleen:             various fixes.
 *      Vitaly E. Lavrov        :       Transparent proxy revived after year
 *                                      coma.
 *      Andi Kleen              :       Fix new listen.
 *      Andi Kleen              :       Fix accept error reporting.
 *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
 *                                      a single port at the same time.
 */


#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);


#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
                                                   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
                               __be32 daddr, __be32 saddr, const struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
        return NULL;
}
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
        return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
                                          ip_hdr(skb)->saddr,
                                          tcp_hdr(skb)->dest,
                                          tcp_hdr(skb)->source);
}
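
/* Note: secure_tcp_sequence_number() derives the ISN from the
 * connection 4-tuple mixed with a boot-time secret (an MD5-based
 * hash in kernels of this vintage) plus a clock component, in the
 * spirit of RFC 1948, so ISNs are hard to predict off-path.
 */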

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
        const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
        struct tcp_sock *tp = tcp_sk(sk);

        /* With PAWS, it is safe from the viewpoint
           of data integrity. Even without PAWS it is safe provided sequence
           spaces do not overlap i.e. at data rates <= 80Mbit/sec.

           Actually, the idea is close to VJ's one, only the timestamp cache
           is held not per host but per port pair, and the TW bucket is used
           as state holder.

           If the TW bucket has already been destroyed we fall back to VJ's
           scheme and use the initial timestamp retrieved from the peer table.
         */
        if (tcptw->tw_ts_recent_stamp &&
            (twp == NULL || (sysctl_tcp_tw_reuse &&
                             get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
                tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
                if (tp->write_seq == 0)
                        tp->write_seq = 1;
                tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
                tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                sock_hold(sktw);
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
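
/* Why tw_snd_nxt + 65535 + 2 above: stepping write_seq past the old
 * connection by more than the largest unscaled window (64K) keeps
 * stray segments from the previous incarnation outside the new
 * connection's sequence space.
 */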

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
        struct inet_sock *inet = inet_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        __be16 orig_sport, orig_dport;
        __be32 daddr, nexthop;
        struct flowi4 *fl4;
        struct rtable *rt;
        int err;
        struct ip_options_rcu *inet_opt;

        if (addr_len < sizeof(struct sockaddr_in))
                return -EINVAL;

        if (usin->sin_family != AF_INET)
                return -EAFNOSUPPORT;

        nexthop = daddr = usin->sin_addr.s_addr;
        inet_opt = rcu_dereference_protected(inet->inet_opt,
                                             sock_owned_by_user(sk));
        if (inet_opt && inet_opt->opt.srr) {
                if (!daddr)
                        return -EINVAL;
                nexthop = inet_opt->opt.faddr;
        }

        orig_sport = inet->inet_sport;
        orig_dport = usin->sin_port;
        fl4 = &inet->cork.fl.u.ip4;
        rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
                              RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
                              IPPROTO_TCP,
                              orig_sport, orig_dport, sk, true);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                if (err == -ENETUNREACH)
                        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
                return err;
        }

        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                ip_rt_put(rt);
                return -ENETUNREACH;
        }

        if (!inet_opt || !inet_opt->opt.srr)
                daddr = fl4->daddr;

        if (!inet->inet_saddr)
                inet->inet_saddr = fl4->saddr;
        inet->inet_rcv_saddr = inet->inet_saddr;

        if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
                /* Reset inherited state */
                tp->rx_opt.ts_recent       = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                tp->write_seq              = 0;
        }

        if (tcp_death_row.sysctl_tw_recycle &&
            !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
                struct inet_peer *peer = rt_get_peer(rt, fl4->daddr);
                /*
                 * VJ's idea. We save the last timestamp seen from
                 * the destination in the peer table, when entering state
                 * TIME-WAIT, and initialize rx_opt.ts_recent from it
                 * when trying a new connection.
                 */
                if (peer) {
                        inet_peer_refcheck(peer);
                        if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
                                tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
                                tp->rx_opt.ts_recent = peer->tcp_ts;
                        }
                }
        }

        inet->inet_dport = usin->sin_port;
        inet->inet_daddr = daddr;

        inet_csk(sk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
                inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

        tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

        /* Socket identity is still unknown (sport may be zero).
         * However we set state to SYN-SENT and, without releasing the
         * socket lock, select a source port, enter ourselves into the
         * hash tables and complete initialization after this.
         */
        tcp_set_state(sk, TCP_SYN_SENT);
        err = inet_hash_connect(&tcp_death_row, sk);
        if (err)
                goto failure;

        rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
                               inet->inet_sport, inet->inet_dport, sk);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                rt = NULL;
                goto failure;
        }
        /* OK, now commit destination to socket.  */
        sk->sk_gso_type = SKB_GSO_TCPV4;
        sk_setup_caps(sk, &rt->dst);

        if (!tp->write_seq)
                tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
                                                           inet->inet_daddr,
                                                           inet->inet_sport,
                                                           usin->sin_port);

        inet->inet_id = tp->write_seq ^ jiffies;

        err = tcp_connect(sk);
        rt = NULL;
        if (err)
                goto failure;

        return 0;

failure:
        /*
         * This unhashes the socket and releases the local port,
         * if necessary.
         */
        tcp_set_state(sk, TCP_CLOSE);
        ip_rt_put(rt);
        sk->sk_route_caps = 0;
        inet->inet_dport = 0;
        return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
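
/* For orientation (not part of the original file): a userspace
 * connect(2) on an AF_INET stream socket reaches tcp_v4_connect()
 * via inet_stream_connect() -> sk->sk_prot->connect. An illustrative
 * userspace trigger, with made-up address values:
 *
 *      int fd = socket(AF_INET, SOCK_STREAM, 0);
 *      struct sockaddr_in dst = {
 *              .sin_family = AF_INET,
 *              .sin_port   = htons(80),
 *      };
 *      inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *      connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 */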

/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
{
        struct dst_entry *dst;
        struct inet_sock *inet = inet_sk(sk);

        /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
         * sent out by Linux are always < 576 bytes, so they should go through
         * unfragmented).
         */
        if (sk->sk_state == TCP_LISTEN)
                return;

        /* We don't check in the dst entry if pmtu discovery is forbidden
         * on this route. We just assume that no packet-too-big packets
         * are sent back when pmtu discovery is not active.
         * There is a small race when the user changes this flag in the
         * route, but I think that's acceptable.
         */
        if ((dst = __sk_dst_check(sk, 0)) == NULL)
                return;

        dst->ops->update_pmtu(dst, mtu);

        /* Something is about to be wrong... Remember the soft error
         * for the case this connection will not be able to recover.
         */
        if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
                sk->sk_err_soft = EMSGSIZE;

        mtu = dst_mtu(dst);

        if (inet->pmtudisc != IP_PMTUDISC_DONT &&
            inet_csk(sk)->icsk_pmtu_cookie > mtu) {
                tcp_sync_mss(sk, mtu);

                /* Resend the TCP packet because it's
                 * clear that the old packet has been
                 * dropped. This is the new "fast" path mtu
                 * discovery.
                 */
                tcp_simple_retransmit(sk);
        } /* else let the usual retransmit timer handle it */
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment,
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
        const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
        struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
        struct inet_connection_sock *icsk;
        struct tcp_sock *tp;
        struct inet_sock *inet;
        const int type = icmp_hdr(icmp_skb)->type;
        const int code = icmp_hdr(icmp_skb)->code;
        struct sock *sk;
        struct sk_buff *skb;
        __u32 seq;
        __u32 remaining;
        int err;
        struct net *net = dev_net(icmp_skb->dev);

        if (icmp_skb->len < (iph->ihl << 2) + 8) {
                ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;
        }

        sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
                        iph->saddr, th->source, inet_iif(icmp_skb));
        if (!sk) {
                ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;
        }
        if (sk->sk_state == TCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return;
        }

        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
         * servers this needs to be solved differently.
         */
        if (sock_owned_by_user(sk))
                NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == TCP_CLOSE)
                goto out;

        if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
                NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
                goto out;
        }

        icsk = inet_csk(sk);
        tp = tcp_sk(sk);
        seq = ntohl(th->seq);
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, tp->snd_una, tp->snd_nxt)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        switch (type) {
        case ICMP_SOURCE_QUENCH:
                /* Just silently ignore these. */
                goto out;
        case ICMP_PARAMETERPROB:
                err = EPROTO;
                break;
        case ICMP_DEST_UNREACH:
                if (code > NR_ICMP_UNREACH)
                        goto out;

                if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
                        if (!sock_owned_by_user(sk))
                                do_pmtu_discovery(sk, iph, info);
                        goto out;
                }

                err = icmp_err_convert[code].errno;
                /* check if icmp_skb allows revert of backoff
                 * (see draft-zimmermann-tcp-lcd) */
                if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
                        break;
                if (seq != tp->snd_una || !icsk->icsk_retransmits ||
                    !icsk->icsk_backoff)
                        break;

                if (sock_owned_by_user(sk))
                        break;

                icsk->icsk_backoff--;
                inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
                        TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
                tcp_bound_rto(sk);

                skb = tcp_write_queue_head(sk);
                BUG_ON(!skb);

                remaining = icsk->icsk_rto - min(icsk->icsk_rto,
                                tcp_time_stamp - TCP_SKB_CB(skb)->when);

                if (remaining) {
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                                  remaining, TCP_RTO_MAX);
                } else {
                        /* RTO revert clocked out retransmission;
                         * retransmit now. */
                        tcp_retransmit_timer(sk);
                }

                break;
        case ICMP_TIME_EXCEEDED:
                err = EHOSTUNREACH;
                break;
        default:
                goto out;
        }

        switch (sk->sk_state) {
                struct request_sock *req, **prev;
        case TCP_LISTEN:
                if (sock_owned_by_user(sk))
                        goto out;

                req = inet_csk_search_req(sk, &prev, th->dest,
                                          iph->daddr, iph->saddr);
                if (!req)
                        goto out;

                /* ICMPs are not backlogged, hence we cannot get
                   an established socket here.
                 */
                WARN_ON(req->sk);

                if (seq != tcp_rsk(req)->snt_isn) {
                        NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }

                /*
                 * Still in SYN_RECV, just remove it silently.
                 * There is no good way to pass the error to the newly
                 * created socket, and POSIX does not want network
                 * errors returned from accept().
                 */
                inet_csk_reqsk_queue_drop(sk, req, prev);
                goto out;

        case TCP_SYN_SENT:
        case TCP_SYN_RECV:  /* Cannot happen normally.
                               It can, e.g., if SYNs crossed.
                             */
                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;

                        sk->sk_error_report(sk);

                        tcp_done(sk);
                } else {
                        sk->sk_err_soft = err;
                }
                goto out;
        }

        /* If we've already connected we will keep trying
         * until we time out, or the user gives up.
         *
         * rfc1122 4.2.3.9 allows us to consider as hard errors
         * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
         * but it is obsoleted by pmtu discovery).
         *
         * Note that in the modern internet, where routing is unreliable
         * and in each dark corner broken firewalls sit, sending random
         * errors ordered by their masters, even these two messages finally
         * lose their original sense (even Linux sends invalid PORT_UNREACHs).
         *
         * Now we are in compliance with RFCs.
         *                                                      --ANK (980905)
         */

        inet = inet_sk(sk);
        if (!sock_owned_by_user(sk) && inet->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else  { /* Only an error on timeout */
                sk->sk_err_soft = err;
        }

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

static void __tcp_v4_send_check(struct sk_buff *skb,
                                __be32 saddr, __be32 daddr)
{
        struct tcphdr *th = tcp_hdr(skb);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct tcphdr, check);
        } else {
                th->check = tcp_v4_check(skb->len, saddr, daddr,
                                         csum_partial(th,
                                                      th->doff << 2,
                                                      skb->csum));
        }
}
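
/* With CHECKSUM_PARTIAL, only the folded pseudo-header sum is stored in
 * th->check; csum_start/csum_offset tell the device (or the software
 * fallback) where to accumulate the rest of the segment's checksum.
 */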

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
        const struct inet_sock *inet = inet_sk(sk);

        __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
        const struct iphdr *iph;
        struct tcphdr *th;

        if (!pskb_may_pull(skb, sizeof(*th)))
                return -EINVAL;

        iph = ip_hdr(skb);
        th = tcp_hdr(skb);

        th->check = 0;
        skb->ip_summed = CHECKSUM_PARTIAL;
        __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
        return 0;
}

/*
 *      This routine will send an RST to the other tcp.
 *
 *      Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *                    for reset.
 *      Answer: if a packet caused RST, it is not for a socket
 *              existing in our system, and if it is matched to a socket,
 *              it is just a duplicate segment or a bug in the other side's
 *              TCP. So we build the reply based only on the parameters
 *              that arrived with the segment.
 *      Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
        const struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
                __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
        } rep;
        struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key;
#endif
        struct net *net;

        /* Never send a reset in response to a reset. */
        if (th->rst)
                return;

        if (skb_rtable(skb)->rt_type != RTN_LOCAL)
                return;

        /* Swap the send and the receive. */
        memset(&rep, 0, sizeof(rep));
        rep.th.dest   = th->source;
        rep.th.source = th->dest;
        rep.th.doff   = sizeof(struct tcphdr) / 4;
        rep.th.rst    = 1;

        if (th->ack) {
                rep.th.seq = th->ack_seq;
        } else {
                rep.th.ack = 1;
                rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
                                       skb->len - (th->doff << 2));
        }

        memset(&arg, 0, sizeof(arg));
        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
        key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL;
        if (key) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) |
                                   (TCPOPT_NOP << 16) |
                                   (TCPOPT_MD5SIG << 8) |
                                   TCPOLEN_MD5SIG);
                /* Update length and the length the header thinks exists */
                arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
                rep.th.doff = arg.iov[0].iov_len / 4;

                tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
                                    key, ip_hdr(skb)->saddr,
                                    ip_hdr(skb)->daddr, &rep.th);
        }
#endif
        arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
                                      ip_hdr(skb)->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
        arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
        /* When the socket is gone, all binding information is lost.
         * Routing might fail in this case. No choice here: if we choose
         * to force the input interface, we will misroute in case of an
         * asymmetric route.
         */
        if (sk)
                arg.bound_dev_if = sk->sk_bound_dev_if;

        net = dev_net(skb_dst(skb)->dev);
        arg.tos = ip_hdr(skb)->tos;
        ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
                      &arg, arg.iov[0].iov_len);

        TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
        TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}
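
/* Layout of the MD5 option appended to the RST above, 20 bytes in all
 * so the header length stays 4-byte aligned (TCPOLEN_MD5SIG is 18,
 * TCPOLEN_MD5SIG_ALIGNED pads to 20):
 *
 *      +-----+-----+---------+--------+--------------------+
 *      | NOP | NOP | kind=19 | len=18 | 16-byte MD5 digest |
 *      +-----+-----+---------+--------+--------------------+
 */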

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
                            u32 win, u32 ts, int oif,
                            struct tcp_md5sig_key *key,
                            int reply_flags, u8 tos)
{
        const struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
                __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
                           + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
                        ];
        } rep;
        struct ip_reply_arg arg;
        struct net *net = dev_net(skb_dst(skb)->dev);

        memset(&rep.th, 0, sizeof(struct tcphdr));
        memset(&arg, 0, sizeof(arg));

        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);
        if (ts) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                   (TCPOPT_TIMESTAMP << 8) |
                                   TCPOLEN_TIMESTAMP);
                rep.opt[1] = htonl(tcp_time_stamp);
                rep.opt[2] = htonl(ts);
                arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
        }

        /* Swap the send and the receive. */
        rep.th.dest    = th->source;
        rep.th.source  = th->dest;
        rep.th.doff    = arg.iov[0].iov_len / 4;
        rep.th.seq     = htonl(seq);
        rep.th.ack_seq = htonl(ack);
        rep.th.ack     = 1;
        rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
        if (key) {
                int offset = (ts) ? 3 : 0;

                rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
                                          (TCPOPT_NOP << 16) |
                                          (TCPOPT_MD5SIG << 8) |
                                          TCPOLEN_MD5SIG);
                arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
                rep.th.doff = arg.iov[0].iov_len / 4;

                tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
                                    key, ip_hdr(skb)->saddr,
                                    ip_hdr(skb)->daddr, &rep.th);
        }
#endif
        arg.flags = reply_flags;
        arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
                                      ip_hdr(skb)->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
        if (oif)
                arg.bound_dev_if = oif;
        arg.tos = tos;
        ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
                      &arg, arg.iov[0].iov_len);

        TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
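
/* The rep.opt[] words above pack standard options: a timestamp option
 * is NOP,NOP,kind=8,len=10 followed by TSval and TSecr (12 bytes
 * aligned), and the optional MD5 block then starts at opt[3],
 * mirroring the layout tcp_v4_send_reset() uses.
 */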

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
        struct inet_timewait_sock *tw = inet_twsk(sk);
        struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

        tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcptw->tw_ts_recent,
                        tw->tw_bound_dev_if,
                        tcp_twsk_md5_key(tcptw),
                        tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
                        tw->tw_tos
                        );

        inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
{
        tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
                        tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
                        req->ts_recent,
                        0,
                        tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
                        inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
                        ip_hdr(skb)->tos);
}

/*
 *      Send a SYN-ACK after having received a SYN.
 *      This still operates on a request_sock only, not on a big
 *      socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                              struct request_sock *req,
                              struct request_values *rvp)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct flowi4 fl4;
        int err = -1;
        struct sk_buff *skb;

        /* First, grab a route. */
        if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
                return -1;

        skb = tcp_make_synack(sk, dst, req, rvp);

        if (skb) {
                __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

                err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
                                            ireq->rmt_addr,
                                            ireq->opt);
                err = net_xmit_eval(err);
        }

        dst_release(dst);
        return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
                              struct request_values *rvp)
{
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
        return tcp_v4_send_synack(sk, NULL, req, rvp);
}

/*
 *      IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
        kfree(inet_rsk(req)->opt);
}

/*
 * Return 1 if a syncookie should be sent
 */
int tcp_syn_flood_action(struct sock *sk,
                         const struct sk_buff *skb,
                         const char *proto)
{
        const char *msg = "Dropping request";
        int want_cookie = 0;
        struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
        if (sysctl_tcp_syncookies) {
                msg = "Sending cookies";
                want_cookie = 1;
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
        } else
#endif
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

        lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
        if (!lopt->synflood_warned) {
                lopt->synflood_warned = 1;
                pr_info("%s: Possible SYN flooding on port %d. %s. "
                        "Check SNMP counters.\n",
                        proto, ntohs(tcp_hdr(skb)->dest), msg);
        }
        return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
                                                  struct sk_buff *skb)
{
        const struct ip_options *opt = &(IPCB(skb)->opt);
        struct ip_options_rcu *dopt = NULL;

        if (opt && opt->optlen) {
                int opt_size = sizeof(*dopt) + opt->optlen;

                dopt = kmalloc(opt_size, GFP_ATOMIC);
                if (dopt) {
                        if (ip_options_echo(&dopt->opt, skb)) {
                                kfree(dopt);
                                dopt = NULL;
                        }
                }
        }
        return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
static struct tcp_md5sig_key *
                        tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int i;

        if (!tp->md5sig_info || !tp->md5sig_info->entries4)
                return NULL;
        for (i = 0; i < tp->md5sig_info->entries4; i++) {
                if (tp->md5sig_info->keys4[i].addr == addr)
                        return &tp->md5sig_info->keys4[i].base;
        }
        return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
                                         struct sock *addr_sk)
{
        return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
                                                      struct request_sock *req)
{
        return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}

/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
                      u8 *newkey, u8 newkeylen)
{
        /* Add Key to the list */
        struct tcp_md5sig_key *key;
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp4_md5sig_key *keys;

        key = tcp_v4_md5_do_lookup(sk, addr);
        if (key) {
                /* Pre-existing entry - just update that one. */
                kfree(key->key);
                key->key = newkey;
                key->keylen = newkeylen;
        } else {
                struct tcp_md5sig_info *md5sig;

                if (!tp->md5sig_info) {
                        tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
                                                  GFP_ATOMIC);
                        if (!tp->md5sig_info) {
                                kfree(newkey);
                                return -ENOMEM;
                        }
                        sk_nocaps_add(sk, NETIF_F_GSO_MASK);
                }

                md5sig = tp->md5sig_info;
                if (md5sig->entries4 == 0 && !tcp_alloc_md5sig_pool()) {
                        kfree(newkey);
                        return -ENOMEM;
                }

                if (md5sig->alloced4 == md5sig->entries4) {
                        keys = kmalloc((sizeof(*keys) *
                                        (md5sig->entries4 + 1)), GFP_ATOMIC);
                        if (!keys) {
                                kfree(newkey);
                                return -ENOMEM;
                        }

                        if (md5sig->entries4)
                                memcpy(keys, md5sig->keys4,
                                       sizeof(*keys) * md5sig->entries4);

                        /* Free old key list, and reference new one */
                        kfree(md5sig->keys4);
                        md5sig->keys4 = keys;
                        md5sig->alloced4++;
                }
                md5sig->entries4++;
                md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
                md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
                md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
        }
        return 0;
}
EXPORT_SYMBOL(tcp_v4_md5_do_add);
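
/* Keys normally reach tcp_v4_md5_do_add() from tcp_v4_parse_md5_keys()
 * below, i.e. from a TCP_MD5SIG setsockopt. A hedged userspace sketch
 * (key and address values are made up):
 *
 *      struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *      struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *      sin->sin_family = AF_INET;
 *      inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
 *      memcpy(md5.tcpm_key, "secret", 6);
 *      setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */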

static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
                               u8 *newkey, u8 newkeylen)
{
        return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
                                 newkey, newkeylen);
}

int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int i;

        for (i = 0; i < tp->md5sig_info->entries4; i++) {
                if (tp->md5sig_info->keys4[i].addr == addr) {
                        /* Free the key */
                        kfree(tp->md5sig_info->keys4[i].base.key);
                        tp->md5sig_info->entries4--;

                        if (tp->md5sig_info->entries4 == 0) {
                                kfree(tp->md5sig_info->keys4);
                                tp->md5sig_info->keys4 = NULL;
                                tp->md5sig_info->alloced4 = 0;
                        } else if (tp->md5sig_info->entries4 != i) {
                                /* Need to do some manipulation */
                                memmove(&tp->md5sig_info->keys4[i],
                                        &tp->md5sig_info->keys4[i+1],
                                        (tp->md5sig_info->entries4 - i) *
                                         sizeof(struct tcp4_md5sig_key));
                        }
                        return 0;
                }
        }
        return -ENOENT;
}
EXPORT_SYMBOL(tcp_v4_md5_do_del);

static void tcp_v4_clear_md5_list(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        /* Free each key, then the set of keys,
         * the crypto element, and then decrement our
         * hold on the last resort crypto.
         */
        if (tp->md5sig_info->entries4) {
                int i;
                for (i = 0; i < tp->md5sig_info->entries4; i++)
                        kfree(tp->md5sig_info->keys4[i].base.key);
                tp->md5sig_info->entries4 = 0;
        }
        if (tp->md5sig_info->keys4) {
                kfree(tp->md5sig_info->keys4);
                tp->md5sig_info->keys4 = NULL;
                tp->md5sig_info->alloced4 = 0;
        }
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
                                 int optlen)
{
        struct tcp_md5sig cmd;
        struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
        u8 *newkey;

        if (optlen < sizeof(cmd))
                return -EINVAL;

        if (copy_from_user(&cmd, optval, sizeof(cmd)))
                return -EFAULT;

        if (sin->sin_family != AF_INET)
                return -EINVAL;

        if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
                if (!tcp_sk(sk)->md5sig_info)
                        return -ENOENT;
                return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
        }

        if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
                return -EINVAL;

        if (!tcp_sk(sk)->md5sig_info) {
                struct tcp_sock *tp = tcp_sk(sk);
                struct tcp_md5sig_info *p;

                p = kzalloc(sizeof(*p), sk->sk_allocation);
                if (!p)
                        return -ENOMEM;

                tp->md5sig_info = p;
                sk_nocaps_add(sk, NETIF_F_GSO_MASK);
        }

        newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
        if (!newkey)
                return -ENOMEM;
        return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
                                 newkey, cmd.tcpm_keylen);
}

static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
                                        __be32 daddr, __be32 saddr, int nbytes)
{
        struct tcp4_pseudohdr *bp;
        struct scatterlist sg;

        bp = &hp->md5_blk.ip4;

        /*
         * 1. the TCP pseudo-header (in the order: source IP address,
         * destination IP address, zero-padded protocol number, and
         * segment length)
         */
        bp->saddr = saddr;
        bp->daddr = daddr;
        bp->pad = 0;
        bp->protocol = IPPROTO_TCP;
        bp->len = cpu_to_be16(nbytes);

        sg_init_one(&sg, bp, sizeof(*bp));
        return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
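
/* Per RFC 2385, the digest input order is: this pseudo-header, the TCP
 * header with its checksum field zeroed, any segment data, and finally
 * the key itself - which matches the sequence of crypto_hash_update()
 * calls in the two hashing helpers below.
 */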

static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
                               __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
        struct tcp_md5sig_pool *hp;
        struct hash_desc *desc;

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        desc = &hp->md5_desc;

        if (crypto_hash_init(desc))
                goto clear_hash;
        if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_header(hp, th))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        if (crypto_hash_final(desc, md5_hash))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
                        const struct sock *sk, const struct request_sock *req,
                        const struct sk_buff *skb)
{
        struct tcp_md5sig_pool *hp;
        struct hash_desc *desc;
        const struct tcphdr *th = tcp_hdr(skb);
        __be32 saddr, daddr;

        if (sk) {
                saddr = inet_sk(sk)->inet_saddr;
                daddr = inet_sk(sk)->inet_daddr;
        } else if (req) {
                saddr = inet_rsk(req)->loc_addr;
                daddr = inet_rsk(req)->rmt_addr;
        } else {
                const struct iphdr *iph = ip_hdr(skb);
                saddr = iph->saddr;
                daddr = iph->daddr;
        }

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        desc = &hp->md5_desc;

        if (crypto_hash_init(desc))
                goto clear_hash;

        if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
                goto clear_hash;
        if (tcp_md5_hash_header(hp, th))
                goto clear_hash;
        if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        if (crypto_hash_final(desc, md5_hash))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
        /*
         * This gets called for each TCP segment that arrives
         * so we want to be efficient.
         * We have 3 drop cases:
         * o No MD5 hash and one expected.
         * o MD5 hash and we're not expecting one.
         * o MD5 hash and it's wrong.
         */
        const __u8 *hash_location = NULL;
        struct tcp_md5sig_key *hash_expected;
        const struct iphdr *iph = ip_hdr(skb);
        const struct tcphdr *th = tcp_hdr(skb);
        int genhash;
        unsigned char newhash[16];

        hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
        hash_location = tcp_parse_md5sig_option(th);

        /* We've parsed the options - do we have a hash? */
        if (!hash_expected && !hash_location)
                return 0;

        if (hash_expected && !hash_location) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
                return 1;
        }

        if (!hash_expected && hash_location) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
                return 1;
        }

        /* Okay, so this is hash_expected and hash_location -
         * so we need to calculate the checksum.
         */
        genhash = tcp_v4_md5_hash_skb(newhash,
                                      hash_expected,
                                      NULL, NULL, skb);

        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                if (net_ratelimit()) {
                        printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
                               &iph->saddr, ntohs(th->source),
                               &iph->daddr, ntohs(th->dest),
                               genhash ? " tcp_v4_calc_md5_hash failed" : "");
                }
                return 1;
        }
        return 0;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
        .family         =       PF_INET,
        .obj_size       =       sizeof(struct tcp_request_sock),
        .rtx_syn_ack    =       tcp_v4_rtx_synack,
        .send_ack       =       tcp_v4_reqsk_send_ack,
        .destructor     =       tcp_v4_reqsk_destructor,
        .send_reset     =       tcp_v4_send_reset,
        .syn_ack_timeout =      tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
        .md5_lookup     =       tcp_v4_reqsk_md5_lookup,
        .calc_md5_hash  =       tcp_v4_md5_hash_skb,
};
#endif

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_extend_values tmp_ext;
        struct tcp_options_received tmp_opt;
        const u8 *hash_location;
        struct request_sock *req;
        struct inet_request_sock *ireq;
        struct tcp_sock *tp = tcp_sk(sk);
        struct dst_entry *dst = NULL;
        __be32 saddr = ip_hdr(skb)->saddr;
        __be32 daddr = ip_hdr(skb)->daddr;
        __u32 isn = TCP_SKB_CB(skb)->when;
        int want_cookie = 0;

        /* Never answer SYNs sent to broadcast or multicast */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
                goto drop;

        /* TW buckets are converted to open requests without
         * limitation; they conserve resources and the peer is
         * evidently a real one.
         */
        if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
                want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
                if (!want_cookie)
                        goto drop;
        }

        /* Accept backlog is full. If we have already queued enough
         * warm entries in the syn queue, drop this request. It is better
         * than clogging the syn queue with openreqs with exponentially
         * increasing timeout.
         */
        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;

        req = inet_reqsk_alloc(&tcp_request_sock_ops);
        if (!req)
                goto drop;

#ifdef CONFIG_TCP_MD5SIG
        tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

        tcp_clear_options(&tmp_opt);
        tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
        tmp_opt.user_mss  = tp->rx_opt.user_mss;
        tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

        if (tmp_opt.cookie_plus > 0 &&
            tmp_opt.saw_tstamp &&
            !tp->rx_opt.cookie_out_never &&
            (sysctl_tcp_cookie_size > 0 ||
             (tp->cookie_values != NULL &&
              tp->cookie_values->cookie_desired > 0))) {
                u8 *c;
                u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
                int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

                if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
                        goto drop_and_release;

                /* Secret recipe starts with IP addresses */
                *mess++ ^= (__force u32)daddr;
                *mess++ ^= (__force u32)saddr;

                /* plus variable length Initiator Cookie */
                c = (u8 *)mess;
                while (l-- > 0)
                        *c++ ^= *hash_location++;

                want_cookie = 0;        /* not our kind of cookie */
                tmp_ext.cookie_out_never = 0; /* false */
                tmp_ext.cookie_plus = tmp_opt.cookie_plus;
        } else if (!tp->rx_opt.cookie_in_always) {
                /* redundant indications, but ensure initialization. */
                tmp_ext.cookie_out_never = 1; /* true */
                tmp_ext.cookie_plus = 0;
        } else {
                goto drop_and_release;
        }
        tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

        if (want_cookie && !tmp_opt.saw_tstamp)
                tcp_clear_options(&tmp_opt);

        tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
        tcp_openreq_init(req, &tmp_opt, skb);

        ireq = inet_rsk(req);
        ireq->loc_addr = daddr;
        ireq->rmt_addr = saddr;
        ireq->no_srccheck = inet_sk(sk)->transparent;
        ireq->opt = tcp_v4_save_options(sk, skb);

        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;

        if (!want_cookie || tmp_opt.tstamp_ok)
                TCP_ECN_create_request(req, skb);

        if (want_cookie) {
                isn = cookie_v4_init_sequence(sk, skb, &req->mss);
                req->cookie_ts = tmp_opt.tstamp_ok;
        } else if (!isn) {
                struct inet_peer *peer = NULL;
                struct flowi4 fl4;

                /* VJ's idea. We save the last timestamp seen
                 * from the destination in the peer table, when entering
                 * TIME-WAIT state, and check against it before
                 * accepting a new connection request.
                 *
                 * If "isn" is not zero, this request hit an alive
                 * timewait bucket, so that all the necessary checks
                 * are made in the function processing timewait state.
                 */
                if (tmp_opt.saw_tstamp &&
                    tcp_death_row.sysctl_tw_recycle &&
                    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
                    fl4.daddr == saddr &&
                    (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
                        inet_peer_refcheck(peer);
                        if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
                            (s32)(peer->tcp_ts - req->ts_recent) >
                                                        TCP_PAWS_WINDOW) {
                                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
                                goto drop_and_release;
                        }
                }
                /* Kill the following clause, if you dislike this way. */
                else if (!sysctl_tcp_syncookies &&
                         (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
                          (sysctl_max_syn_backlog >> 2)) &&
                         (!peer || !peer->tcp_ts_stamp) &&
                         (!dst || !dst_metric(dst, RTAX_RTT))) {
                        /* Without syncookies the last quarter of the
                         * backlog is filled with destinations proven
                         * to be alive. It means that we continue to
                         * communicate with destinations already
                         * remembered at the moment of synflood.
                         */
                        LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
                                       &saddr, ntohs(tcp_hdr(skb)->source));
                        goto drop_and_release;
                }

                isn = tcp_v4_init_sequence(skb);
        }
        tcp_rsk(req)->snt_isn = isn;
        tcp_rsk(req)->snt_synack = tcp_time_stamp;

        if (tcp_v4_send_synack(sk, dst, req,
                               (struct request_values *)&tmp_ext) ||
            want_cookie)
                goto drop_and_free;

        inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
        return 0;

drop_and_release:
        dst_release(dst);
drop_and_free:
        reqsk_free(req);
drop:
        return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
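
/* Rough shape of tcp_v4_conn_request() above, for orientation:
 *  1. drop SYNs sent to broadcast/multicast;
 *  2. on a full SYN queue, emit a syncookie or drop;
 *  3. allocate a request_sock and fill it from the SYN's options
 *     (MD5 ops, saved IP options, ISN choice);
 *  4. send the SYN-ACK and, unless cookied, hash the request into
 *     the SYN queue with TCP_TIMEOUT_INIT.
 */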
1420
1421
1422 /*
1423  * The three way handshake has completed - we got a valid synack -
1424  * now create the new socket.
1425  */
1426 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1427                                   struct request_sock *req,
1428                                   struct dst_entry *dst)
1429 {
1430         struct inet_request_sock *ireq;
1431         struct inet_sock *newinet;
1432         struct tcp_sock *newtp;
1433         struct sock *newsk;
1434 #ifdef CONFIG_TCP_MD5SIG
1435         struct tcp_md5sig_key *key;
1436 #endif
1437         struct ip_options_rcu *inet_opt;
1438
1439         if (sk_acceptq_is_full(sk))
1440                 goto exit_overflow;
1441
1442         newsk = tcp_create_openreq_child(sk, req, skb);
1443         if (!newsk)
1444                 goto exit_nonewsk;
1445
1446         newsk->sk_gso_type = SKB_GSO_TCPV4;
1447
1448         newtp                 = tcp_sk(newsk);
1449         newinet               = inet_sk(newsk);
1450         ireq                  = inet_rsk(req);
1451         newinet->inet_daddr   = ireq->rmt_addr;
1452         newinet->inet_rcv_saddr = ireq->loc_addr;
1453         newinet->inet_saddr           = ireq->loc_addr;
1454         inet_opt              = ireq->opt;
1455         rcu_assign_pointer(newinet->inet_opt, inet_opt);
1456         ireq->opt             = NULL;
1457         newinet->mc_index     = inet_iif(skb);
1458         newinet->mc_ttl       = ip_hdr(skb)->ttl;
1459         inet_csk(newsk)->icsk_ext_hdr_len = 0;
1460         if (inet_opt)
1461                 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1462         newinet->inet_id = newtp->write_seq ^ jiffies;
1463
1464         if (!dst) {
1465                 dst = inet_csk_route_child_sock(sk, newsk, req);
1466                 if (!dst)
1467                         goto put_and_exit;
1468         } else {
1469                 /* syncookie case: see end of cookie_v4_check() */
1470         }
1471         sk_setup_caps(newsk, dst);
1472
1473         tcp_mtup_init(newsk);
1474         tcp_sync_mss(newsk, dst_mtu(dst));
1475         newtp->advmss = dst_metric_advmss(dst);
1476         if (tcp_sk(sk)->rx_opt.user_mss &&
1477             tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1478                 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1479
1480         tcp_initialize_rcv_mss(newsk);
1481         if (tcp_rsk(req)->snt_synack)
1482                 tcp_valid_rtt_meas(newsk,
1483                     tcp_time_stamp - tcp_rsk(req)->snt_synack);
1484         newtp->total_retrans = req->retrans;
1485
1486 #ifdef CONFIG_TCP_MD5SIG
1487         /* Copy over the MD5 key from the original socket */
1488         key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
1489         if (key != NULL) {
1490                 /*
1491                  * We're using one, so create a matching key
1492                  * on the newsk structure. If we fail to get
1493                  * memory, then we end up not copying the key
1494                  * across. Shucks.
1495                  */
1496                 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1497                 if (newkey != NULL)
1498                         tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
1499                                           newkey, key->keylen);
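                /* MD5 signing must cover each transmitted segment in
                 * software, which is why segmentation offload is masked
                 * off for this socket just below.
                 */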
1500                 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1501         }
1502 #endif
1503
1504         if (__inet_inherit_port(sk, newsk) < 0)
1505                 goto put_and_exit;
1506         __inet_hash_nolisten(newsk, NULL);
1507
1508         return newsk;
1509
1510 exit_overflow:
1511         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1512 exit_nonewsk:
1513         dst_release(dst);
1514 exit:
1515         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1516         return NULL;
1517 put_and_exit:
1518         inet_csk_prepare_forced_close(newsk);
1519         tcp_done(newsk);
1520         goto exit;
1521 }
1522 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
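
/* kmemdup(), used for the MD5 key copy in tcp_v4_syn_recv_sock(), is an
 * allocate-and-copy helper.  A userspace equivalent for reference (the
 * kernel version takes a gfp_t allocation mode instead of using malloc):
 */
#include <stdlib.h>
#include <string.h>

static void *memdup_sketch(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;		/* NULL means the key simply isn't copied */
}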
1523
1524 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1525 {
1526         struct tcphdr *th = tcp_hdr(skb);
1527         const struct iphdr *iph = ip_hdr(skb);
1528         struct sock *nsk;
1529         struct request_sock **prev;
1530         /* Find possible connection requests. */
1531         struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1532                                                        iph->saddr, iph->daddr);
1533         if (req)
1534                 return tcp_check_req(sk, skb, req, prev);
1535
1536         nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1537                         th->source, iph->daddr, th->dest, inet_iif(skb));
1538
1539         if (nsk) {
1540                 if (nsk->sk_state != TCP_TIME_WAIT) {
1541                         bh_lock_sock(nsk);
1542                         return nsk;
1543                 }
1544                 inet_twsk_put(inet_twsk(nsk));
1545                 return NULL;
1546         }
1547
1548 #ifdef CONFIG_SYN_COOKIES
1549         if (!th->syn)
1550                 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1551 #endif
1552         return sk;
1553 }
1554
1555 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1556 {
1557         const struct iphdr *iph = ip_hdr(skb);
1558
1559         if (skb->ip_summed == CHECKSUM_COMPLETE) {
1560                 if (!tcp_v4_check(skb->len, iph->saddr,
1561                                   iph->daddr, skb->csum)) {
1562                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1563                         return 0;
1564                 }
1565         }
1566
1567         skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1568                                        skb->len, IPPROTO_TCP, 0);
1569
1570         if (skb->len <= 76) {
1571                 return __skb_checksum_complete(skb);
1572         }
1573         return 0;
1574 }
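
/* The csum_tcpudp_nofold() call above seeds skb->csum with the RFC 793
 * pseudo-header sum: source address, destination address, protocol and
 * TCP length.  A plain-C sketch of that sum, including the final fold
 * the kernel defers to csum_fold(); byte-order handling is glossed over:
 */
static unsigned short pseudo_hdr_sum_sketch(unsigned int saddr,
					    unsigned int daddr,
					    unsigned int tcp_len)
{
	unsigned long sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += 6 /* IPPROTO_TCP */ + tcp_len;
	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned short)sum;
}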
1575
1576
1577 /* The socket must have its spinlock held when we get
1578  * here.
1579  *
1580  * We have a potential double-lock case here, so even when
1581  * doing backlog processing we use the BH locking scheme.
1582  * This is because we cannot sleep with the original spinlock
1583  * held.
1584  */
1585 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1586 {
1587         struct sock *rsk;
1588 #ifdef CONFIG_TCP_MD5SIG
1589         /*
1590          * We really want to reject the packet as early as possible
1591          * if:
1592          *  o We're expecting an MD5-signed packet and there is no MD5 TCP option
1593          *  o There is an MD5 option and we're not expecting one
1594          */
1595         if (tcp_v4_inbound_md5_hash(sk, skb))
1596                 goto discard;
1597 #endif
1598
1599         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1600                 sock_rps_save_rxhash(sk, skb);
1601                 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1602                         rsk = sk;
1603                         goto reset;
1604                 }
1605                 return 0;
1606         }
1607
1608         if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1609                 goto csum_err;
1610
1611         if (sk->sk_state == TCP_LISTEN) {
1612                 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1613                 if (!nsk)
1614                         goto discard;
1615
1616                 if (nsk != sk) {
1617                         sock_rps_save_rxhash(nsk, skb);
1618                         if (tcp_child_process(sk, nsk, skb)) {
1619                                 rsk = nsk;
1620                                 goto reset;
1621                         }
1622                         return 0;
1623                 }
1624         } else
1625                 sock_rps_save_rxhash(sk, skb);
1626
1627         if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1628                 rsk = sk;
1629                 goto reset;
1630         }
1631         return 0;
1632
1633 reset:
1634         tcp_v4_send_reset(rsk, skb);
1635 discard:
1636         kfree_skb(skb);
1637         /* Be careful here. If this function gets more complicated and
1638          * gcc suffers from register pressure on the x86, sk (in %ebx)
1639          * might be destroyed here. This current version compiles correctly,
1640          * but you have been warned.
1641          */
1642         return 0;
1643
1644 csum_err:
1645         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1646         goto discard;
1647 }
1648 EXPORT_SYMBOL(tcp_v4_do_rcv);
1649
1650 /*
1651  *      From tcp_input.c
1652  */
1653
1654 int tcp_v4_rcv(struct sk_buff *skb)
1655 {
1656         const struct iphdr *iph;
1657         const struct tcphdr *th;
1658         struct sock *sk;
1659         int ret;
1660         struct net *net = dev_net(skb->dev);
1661
1662         if (skb->pkt_type != PACKET_HOST)
1663                 goto discard_it;
1664
1665         /* Count it even if it's bad */
1666         TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1667
1668         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1669                 goto discard_it;
1670
1671         th = tcp_hdr(skb);
1672
1673         if (th->doff < sizeof(struct tcphdr) / 4)
1674                 goto bad_packet;
1675         if (!pskb_may_pull(skb, th->doff * 4))
1676                 goto discard_it;
1677
1678         /* An explanation is required here, I think.
1679          * Packet length and doff are validated by header prediction,
1680          * provided the th->doff == 0 case is eliminated.
1681          * So, we defer the checks. */
1682         if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1683                 goto bad_packet;
1684
1685         th = tcp_hdr(skb);
1686         iph = ip_hdr(skb);
1687         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1688         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1689                                     skb->len - th->doff * 4);
1690         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1691         TCP_SKB_CB(skb)->when    = 0;
1692         TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1693         TCP_SKB_CB(skb)->sacked  = 0;
1694
1695         sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1696         if (!sk)
1697                 goto no_tcp_socket;
1698
1699 process:
1700         if (sk->sk_state == TCP_TIME_WAIT)
1701                 goto do_time_wait;
1702
1703         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1704                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1705                 goto discard_and_relse;
1706         }
1707
1708         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1709                 goto discard_and_relse;
1710         nf_reset(skb);
1711
1712         if (sk_filter(sk, skb))
1713                 goto discard_and_relse;
1714
1715         skb->dev = NULL;
1716
1717         bh_lock_sock_nested(sk);
1718         ret = 0;
1719         if (!sock_owned_by_user(sk)) {
1720 #ifdef CONFIG_NET_DMA
1721                 struct tcp_sock *tp = tcp_sk(sk);
1722                 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1723                         tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1724                 if (tp->ucopy.dma_chan)
1725                         ret = tcp_v4_do_rcv(sk, skb);
1726                 else
1727 #endif
1728                 {
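                        /* tcp_prequeue() hands the segment to a task
                         * sleeping in tcp_recvmsg() when one is waiting;
                         * only when that is not possible is the segment
                         * processed here in softirq context.
                         */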
1729                         if (!tcp_prequeue(sk, skb))
1730                                 ret = tcp_v4_do_rcv(sk, skb);
1731                 }
1732         } else if (unlikely(sk_add_backlog(sk, skb))) {
1733                 bh_unlock_sock(sk);
1734                 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1735                 goto discard_and_relse;
1736         }
1737         bh_unlock_sock(sk);
1738
1739         sock_put(sk);
1740
1741         return ret;
1742
1743 no_tcp_socket:
1744         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1745                 goto discard_it;
1746
1747         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1748 bad_packet:
1749                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1750         } else {
1751                 tcp_v4_send_reset(NULL, skb);
1752         }
1753
1754 discard_it:
1755         /* Discard frame. */
1756         kfree_skb(skb);
1757         return 0;
1758
1759 discard_and_relse:
1760         sock_put(sk);
1761         goto discard_it;
1762
1763 do_time_wait:
1764         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1765                 inet_twsk_put(inet_twsk(sk));
1766                 goto discard_it;
1767         }
1768
1769         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1770                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1771                 inet_twsk_put(inet_twsk(sk));
1772                 goto discard_it;
1773         }
1774         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1775         case TCP_TW_SYN: {
1776                 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1777                                                         &tcp_hashinfo,
1778                                                         iph->daddr, th->dest,
1779                                                         inet_iif(skb));
1780                 if (sk2) {
1781                         inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1782                         inet_twsk_put(inet_twsk(sk));
1783                         sk = sk2;
1784                         goto process;
1785                 }
1786                 /* Fall through to ACK */
1787         }
1788         case TCP_TW_ACK:
1789                 tcp_v4_timewait_ack(sk, skb);
1790                 break;
1791         case TCP_TW_RST:
1792                 goto no_tcp_socket;
1793         case TCP_TW_SUCCESS:;
1794         }
1795         goto discard_it;
1796 }
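
/* A restatement of the TCP_SKB_CB() setup in tcp_v4_rcv(): end_seq spans
 * the payload plus one unit of sequence space each for SYN and FIN, with
 * doff * 4 bytes of the segment being header.  Standalone, with plain
 * arguments instead of an skb and tcphdr:
 */
static unsigned int tcp_end_seq_sketch(unsigned int seq, int syn, int fin,
				       unsigned int seg_len, unsigned int doff)
{
	return seq + syn + fin + seg_len - doff * 4;
}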
1797
1798 struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
1799 {
1800         struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
1801         struct inet_sock *inet = inet_sk(sk);
1802         struct inet_peer *peer;
1803
1804         if (!rt ||
1805             inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
1806                 peer = inet_getpeer_v4(inet->inet_daddr, 1);
1807                 *release_it = true;
1808         } else {
1809                 if (!rt->peer)
1810                         rt_bind_peer(rt, inet->inet_daddr, 1);
1811                 peer = rt->peer;
1812                 *release_it = false;
1813         }
1814
1815         return peer;
1816 }
1817 EXPORT_SYMBOL(tcp_v4_get_peer);
1818
1819 void *tcp_v4_tw_get_peer(struct sock *sk)
1820 {
1821         const struct inet_timewait_sock *tw = inet_twsk(sk);
1822
1823         return inet_getpeer_v4(tw->tw_daddr, 1);
1824 }
1825 EXPORT_SYMBOL(tcp_v4_tw_get_peer);
1826
1827 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1828         .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
1829         .twsk_unique    = tcp_twsk_unique,
1830         .twsk_destructor= tcp_twsk_destructor,
1831         .twsk_getpeer   = tcp_v4_tw_get_peer,
1832 };
1833
1834 const struct inet_connection_sock_af_ops ipv4_specific = {
1835         .queue_xmit        = ip_queue_xmit,
1836         .send_check        = tcp_v4_send_check,
1837         .rebuild_header    = inet_sk_rebuild_header,
1838         .conn_request      = tcp_v4_conn_request,
1839         .syn_recv_sock     = tcp_v4_syn_recv_sock,
1840         .get_peer          = tcp_v4_get_peer,
1841         .net_header_len    = sizeof(struct iphdr),
1842         .setsockopt        = ip_setsockopt,
1843         .getsockopt        = ip_getsockopt,
1844         .addr2sockaddr     = inet_csk_addr2sockaddr,
1845         .sockaddr_len      = sizeof(struct sockaddr_in),
1846         .bind_conflict     = inet_csk_bind_conflict,
1847 #ifdef CONFIG_COMPAT
1848         .compat_setsockopt = compat_ip_setsockopt,
1849         .compat_getsockopt = compat_ip_getsockopt,
1850 #endif
1851 };
1852 EXPORT_SYMBOL(ipv4_specific);
1853
1854 #ifdef CONFIG_TCP_MD5SIG
1855 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1856         .md5_lookup             = tcp_v4_md5_lookup,
1857         .calc_md5_hash          = tcp_v4_md5_hash_skb,
1858         .md5_add                = tcp_v4_md5_add_func,
1859         .md5_parse              = tcp_v4_parse_md5_keys,
1860 };
1861 #endif
1862
1863 /* NOTE: A lot of things are set to zero explicitly by the call to
1864  *       sk_alloc(), so they need not be done here.
1865  */
1866 static int tcp_v4_init_sock(struct sock *sk)
1867 {
1868         struct inet_connection_sock *icsk = inet_csk(sk);
1869         struct tcp_sock *tp = tcp_sk(sk);
1870
1871         skb_queue_head_init(&tp->out_of_order_queue);
1872         tcp_init_xmit_timers(sk);
1873         tcp_prequeue_init(tp);
1874
1875         icsk->icsk_rto = TCP_TIMEOUT_INIT;
1876         tp->mdev = TCP_TIMEOUT_INIT;
1877
1878         /* So many TCP implementations out there (incorrectly) count the
1879          * initial SYN frame in their delayed-ACK and congestion control
1880          * algorithms that we must have the following bandaid to talk
1881          * efficiently to them.  -DaveM
1882          */
1883         tp->snd_cwnd = TCP_INIT_CWND;
1884
1885         /* See draft-stevens-tcpca-spec-01 for discussion of the
1886          * initialization of these values.
1887          */
1888         tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1889         tp->snd_cwnd_clamp = ~0;
1890         tp->mss_cache = TCP_MSS_DEFAULT;
1891
1892         tp->reordering = sysctl_tcp_reordering;
1893         icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1894
1895         sk->sk_state = TCP_CLOSE;
1896
1897         sk->sk_write_space = sk_stream_write_space;
1898         sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1899
1900         icsk->icsk_af_ops = &ipv4_specific;
1901         icsk->icsk_sync_mss = tcp_sync_mss;
1902 #ifdef CONFIG_TCP_MD5SIG
1903         tp->af_specific = &tcp_sock_ipv4_specific;
1904 #endif
1905
1906         /* TCP Cookie Transactions */
1907         if (sysctl_tcp_cookie_size > 0) {
1908                 /* Default, cookies without s_data_payload. */
1909                 tp->cookie_values =
1910                         kzalloc(sizeof(*tp->cookie_values),
1911                                 sk->sk_allocation);
1912                 if (tp->cookie_values != NULL)
1913                         kref_init(&tp->cookie_values->kref);
1914         }
1915         /* Presumed zeroed, in order of appearance:
1916          *      cookie_in_always, cookie_out_never,
1917          *      s_data_constant, s_data_in, s_data_out
1918          */
1919         sk->sk_sndbuf = sysctl_tcp_wmem[1];
1920         sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1921
1922         local_bh_disable();
1923         percpu_counter_inc(&tcp_sockets_allocated);
1924         local_bh_enable();
1925
1926         return 0;
1927 }
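
/* Starting cwnd at TCP_INIT_CWND with ssthresh at TCP_INFINITE_SSTHRESH,
 * as done above, keeps a new connection in slow start until the first
 * loss.  A toy model of that growth, assuming the IW10 value of 10
 * segments used by kernels of this vintage (illustrative only):
 */
#include <stdio.h>

static void slow_start_sketch(void)
{
	unsigned int cwnd = 10;			/* assumed TCP_INIT_CWND */
	int rtt;

	for (rtt = 0; rtt < 5; rtt++) {
		printf("rtt %d: cwnd %u segments\n", rtt, cwnd);
		cwnd *= 2;	/* one increment per ACK doubles cwnd per RTT */
	}
}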
1928
1929 void tcp_v4_destroy_sock(struct sock *sk)
1930 {
1931         struct tcp_sock *tp = tcp_sk(sk);
1932
1933         tcp_clear_xmit_timers(sk);
1934
1935         tcp_cleanup_congestion_control(sk);
1936
1937         /* Clean up the write buffer. */
1938         tcp_write_queue_purge(sk);
1939
1940         /* Cleans up our, hopefully empty, out_of_order_queue. */
1941         __skb_queue_purge(&tp->out_of_order_queue);
1942
1943 #ifdef CONFIG_TCP_MD5SIG
1944         /* Clean up the MD5 key list, if any */
1945         if (tp->md5sig_info) {
1946                 tcp_v4_clear_md5_list(sk);
1947                 kfree(tp->md5sig_info);
1948                 tp->md5sig_info = NULL;
1949         }
1950 #endif
1951
1952 #ifdef CONFIG_NET_DMA
1953         /* Cleans up our sk_async_wait_queue */
1954         __skb_queue_purge(&sk->sk_async_wait_queue);
1955 #endif
1956
1957         /* Clean the prequeue; it really should be empty already */
1958         __skb_queue_purge(&tp->ucopy.prequeue);
1959
1960         /* Clean up a referenced TCP bind bucket. */
1961         if (inet_csk(sk)->icsk_bind_hash)
1962                 inet_put_port(sk);
1963
1964         /*
1965          * If sendmsg cached page exists, toss it.
1966          */
1967         if (sk->sk_sndmsg_page) {
1968                 __free_page(sk->sk_sndmsg_page);
1969                 sk->sk_sndmsg_page = NULL;
1970         }
1971
1972         /* TCP Cookie Transactions */
1973         if (tp->cookie_values != NULL) {
1974                 kref_put(&tp->cookie_values->kref,
1975                          tcp_cookie_values_release);
1976                 tp->cookie_values = NULL;
1977         }
1978
1979         percpu_counter_dec(&tcp_sockets_allocated);
1980 }
1981 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1982
1983 #ifdef CONFIG_PROC_FS
1984 /* Proc filesystem TCP sock list dumping. */
1985
1986 static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
1987 {
1988         return hlist_nulls_empty(head) ? NULL :
1989                 list_entry(head->first, struct inet_timewait_sock, tw_node);
1990 }
1991
1992 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1993 {
1994         return !is_a_nulls(tw->tw_node.next) ?
1995                 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1996 }
1997
1998 /*
1999  * Get the next listener socket following cur.  If cur is NULL, get the
2000  * first socket starting from the bucket given in st->bucket; when
2001  * st->bucket is zero, the very first socket in the hash table is returned.
2002  */
2003 static void *listening_get_next(struct seq_file *seq, void *cur)
2004 {
2005         struct inet_connection_sock *icsk;
2006         struct hlist_nulls_node *node;
2007         struct sock *sk = cur;
2008         struct inet_listen_hashbucket *ilb;
2009         struct tcp_iter_state *st = seq->private;
2010         struct net *net = seq_file_net(seq);
2011
2012         if (!sk) {
2013                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2014                 spin_lock_bh(&ilb->lock);
2015                 sk = sk_nulls_head(&ilb->head);
2016                 st->offset = 0;
2017                 goto get_sk;
2018         }
2019         ilb = &tcp_hashinfo.listening_hash[st->bucket];
2020         ++st->num;
2021         ++st->offset;
2022
2023         if (st->state == TCP_SEQ_STATE_OPENREQ) {
2024                 struct request_sock *req = cur;
2025
2026                 icsk = inet_csk(st->syn_wait_sk);
2027                 req = req->dl_next;
2028                 while (1) {
2029                         while (req) {
2030                                 if (req->rsk_ops->family == st->family) {
2031                                         cur = req;
2032                                         goto out;
2033                                 }
2034                                 req = req->dl_next;
2035                         }
2036                         if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2037                                 break;
2038 get_req:
2039                         req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2040                 }
2041                 sk        = sk_nulls_next(st->syn_wait_sk);
2042                 st->state = TCP_SEQ_STATE_LISTENING;
2043                 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2044         } else {
2045                 icsk = inet_csk(sk);
2046                 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2047                 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2048                         goto start_req;
2049                 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2050                 sk = sk_nulls_next(sk);
2051         }
2052 get_sk:
2053         sk_nulls_for_each_from(sk, node) {
2054                 if (!net_eq(sock_net(sk), net))
2055                         continue;
2056                 if (sk->sk_family == st->family) {
2057                         cur = sk;
2058                         goto out;
2059                 }
2060                 icsk = inet_csk(sk);
2061                 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2062                 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2063 start_req:
2064                         st->uid         = sock_i_uid(sk);
2065                         st->syn_wait_sk = sk;
2066                         st->state       = TCP_SEQ_STATE_OPENREQ;
2067                         st->sbucket     = 0;
2068                         goto get_req;
2069                 }
2070                 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2071         }
2072         spin_unlock_bh(&ilb->lock);
2073         st->offset = 0;
2074         if (++st->bucket < INET_LHTABLE_SIZE) {
2075                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2076                 spin_lock_bh(&ilb->lock);
2077                 sk = sk_nulls_head(&ilb->head);
2078                 goto get_sk;
2079         }
2080         cur = NULL;
2081 out:
2082         return cur;
2083 }
2084
2085 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2086 {
2087         struct tcp_iter_state *st = seq->private;
2088         void *rc;
2089
2090         st->bucket = 0;
2091         st->offset = 0;
2092         rc = listening_get_next(seq, NULL);
2093
2094         while (rc && *pos) {
2095                 rc = listening_get_next(seq, rc);
2096                 --*pos;
2097         }
2098         return rc;
2099 }
2100
2101 static inline int empty_bucket(struct tcp_iter_state *st)
2102 {
2103         return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2104                 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2105 }
2106
2107 /*
2108  * Get first established socket starting from bucket given in st->bucket.
2109  * If st->bucket is zero, the very first socket in the hash is returned.
2110  */
2111 static void *established_get_first(struct seq_file *seq)
2112 {
2113         struct tcp_iter_state *st = seq->private;
2114         struct net *net = seq_file_net(seq);
2115         void *rc = NULL;
2116
2117         st->offset = 0;
2118         for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2119                 struct sock *sk;
2120                 struct hlist_nulls_node *node;
2121                 struct inet_timewait_sock *tw;
2122                 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2123
2124                 /* Lockless fast path for the common case of empty buckets */
2125                 if (empty_bucket(st))
2126                         continue;
2127
2128                 spin_lock_bh(lock);
2129                 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2130                         if (sk->sk_family != st->family ||
2131                             !net_eq(sock_net(sk), net)) {
2132                                 continue;
2133                         }
2134                         rc = sk;
2135                         goto out;
2136                 }
2137                 st->state = TCP_SEQ_STATE_TIME_WAIT;
2138                 inet_twsk_for_each(tw, node,
2139                                    &tcp_hashinfo.ehash[st->bucket].twchain) {
2140                         if (tw->tw_family != st->family ||
2141                             !net_eq(twsk_net(tw), net)) {
2142                                 continue;
2143                         }
2144                         rc = tw;
2145                         goto out;
2146                 }
2147                 spin_unlock_bh(lock);
2148                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2149         }
2150 out:
2151         return rc;
2152 }
2153
2154 static void *established_get_next(struct seq_file *seq, void *cur)
2155 {
2156         struct sock *sk = cur;
2157         struct inet_timewait_sock *tw;
2158         struct hlist_nulls_node *node;
2159         struct tcp_iter_state *st = seq->private;
2160         struct net *net = seq_file_net(seq);
2161
2162         ++st->num;
2163         ++st->offset;
2164
2165         if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2166                 tw = cur;
2167                 tw = tw_next(tw);
2168 get_tw:
2169                 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2170                         tw = tw_next(tw);
2171                 }
2172                 if (tw) {
2173                         cur = tw;
2174                         goto out;
2175                 }
2176                 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2177                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2178
2179                 /* Look for the next non-empty bucket */
2180                 st->offset = 0;
2181                 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2182                                 empty_bucket(st))
2183                         ;
2184                 if (st->bucket > tcp_hashinfo.ehash_mask)
2185                         return NULL;
2186
2187                 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2188                 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2189         } else
2190                 sk = sk_nulls_next(sk);
2191
2192         sk_nulls_for_each_from(sk, node) {
2193                 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2194                         goto found;
2195         }
2196
2197         st->state = TCP_SEQ_STATE_TIME_WAIT;
2198         tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2199         goto get_tw;
2200 found:
2201         cur = sk;
2202 out:
2203         return cur;
2204 }
2205
2206 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2207 {
2208         struct tcp_iter_state *st = seq->private;
2209         void *rc;
2210
2211         st->bucket = 0;
2212         rc = established_get_first(seq);
2213
2214         while (rc && pos) {
2215                 rc = established_get_next(seq, rc);
2216                 --pos;
2217         }
2218         return rc;
2219 }
2220
2221 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2222 {
2223         void *rc;
2224         struct tcp_iter_state *st = seq->private;
2225
2226         st->state = TCP_SEQ_STATE_LISTENING;
2227         rc        = listening_get_idx(seq, &pos);
2228
2229         if (!rc) {
2230                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2231                 rc        = established_get_idx(seq, pos);
2232         }
2233
2234         return rc;
2235 }
2236
2237 static void *tcp_seek_last_pos(struct seq_file *seq)
2238 {
2239         struct tcp_iter_state *st = seq->private;
2240         int offset = st->offset;
2241         int orig_num = st->num;
2242         void *rc = NULL;
2243
2244         switch (st->state) {
2245         case TCP_SEQ_STATE_OPENREQ:
2246         case TCP_SEQ_STATE_LISTENING:
2247                 if (st->bucket >= INET_LHTABLE_SIZE)
2248                         break;
2249                 st->state = TCP_SEQ_STATE_LISTENING;
2250                 rc = listening_get_next(seq, NULL);
2251                 while (offset-- && rc)
2252                         rc = listening_get_next(seq, rc);
2253                 if (rc)
2254                         break;
2255                 st->bucket = 0;
2256                 /* Fallthrough */
2257         case TCP_SEQ_STATE_ESTABLISHED:
2258         case TCP_SEQ_STATE_TIME_WAIT:
2259                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2260                 if (st->bucket > tcp_hashinfo.ehash_mask)
2261                         break;
2262                 rc = established_get_first(seq);
2263                 while (offset-- && rc)
2264                         rc = established_get_next(seq, rc);
2265         }
2266
2267         st->num = orig_num;
2268
2269         return rc;
2270 }
2271
2272 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2273 {
2274         struct tcp_iter_state *st = seq->private;
2275         void *rc;
2276
2277         if (*pos && *pos == st->last_pos) {
2278                 rc = tcp_seek_last_pos(seq);
2279                 if (rc)
2280                         goto out;
2281         }
2282
2283         st->state = TCP_SEQ_STATE_LISTENING;
2284         st->num = 0;
2285         st->bucket = 0;
2286         st->offset = 0;
2287         rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2288
2289 out:
2290         st->last_pos = *pos;
2291         return rc;
2292 }
2293
2294 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2295 {
2296         struct tcp_iter_state *st = seq->private;
2297         void *rc = NULL;
2298
2299         if (v == SEQ_START_TOKEN) {
2300                 rc = tcp_get_idx(seq, 0);
2301                 goto out;
2302         }
2303
2304         switch (st->state) {
2305         case TCP_SEQ_STATE_OPENREQ:
2306         case TCP_SEQ_STATE_LISTENING:
2307                 rc = listening_get_next(seq, v);
2308                 if (!rc) {
2309                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2310                         st->bucket = 0;
2311                         st->offset = 0;
2312                         rc        = established_get_first(seq);
2313                 }
2314                 break;
2315         case TCP_SEQ_STATE_ESTABLISHED:
2316         case TCP_SEQ_STATE_TIME_WAIT:
2317                 rc = established_get_next(seq, v);
2318                 break;
2319         }
2320 out:
2321         ++*pos;
2322         st->last_pos = *pos;
2323         return rc;
2324 }
2325
2326 static void tcp_seq_stop(struct seq_file *seq, void *v)
2327 {
2328         struct tcp_iter_state *st = seq->private;
2329
2330         switch (st->state) {
2331         case TCP_SEQ_STATE_OPENREQ:
2332                 if (v) {
2333                         struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2334                         read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2335                 }
2336         case TCP_SEQ_STATE_LISTENING:
2337                 if (v != SEQ_START_TOKEN)
2338                         spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2339                 break;
2340         case TCP_SEQ_STATE_TIME_WAIT:
2341         case TCP_SEQ_STATE_ESTABLISHED:
2342                 if (v)
2343                         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2344                 break;
2345         }
2346 }
2347
2348 int tcp_seq_open(struct inode *inode, struct file *file)
2349 {
2350         struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2351         struct tcp_iter_state *s;
2352         int err;
2353
2354         err = seq_open_net(inode, file, &afinfo->seq_ops,
2355                           sizeof(struct tcp_iter_state));
2356         if (err < 0)
2357                 return err;
2358
2359         s = ((struct seq_file *)file->private_data)->private;
2360         s->family               = afinfo->family;
2361         s->last_pos             = 0;
2362         return 0;
2363 }
2364 EXPORT_SYMBOL(tcp_seq_open);
2365
2366 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2367 {
2368         int rc = 0;
2369         struct proc_dir_entry *p;
2370
2371         afinfo->seq_ops.start           = tcp_seq_start;
2372         afinfo->seq_ops.next            = tcp_seq_next;
2373         afinfo->seq_ops.stop            = tcp_seq_stop;
2374
2375         p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2376                              afinfo->seq_fops, afinfo);
2377         if (!p)
2378                 rc = -ENOMEM;
2379         return rc;
2380 }
2381 EXPORT_SYMBOL(tcp_proc_register);
2382
2383 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2384 {
2385         proc_net_remove(net, afinfo->name);
2386 }
2387 EXPORT_SYMBOL(tcp_proc_unregister);
2388
2389 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2390                          struct seq_file *f, int i, int uid, int *len)
2391 {
2392         const struct inet_request_sock *ireq = inet_rsk(req);
2393         int ttd = req->expires - jiffies;
2394
2395         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2396                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2397                 i,
2398                 ireq->loc_addr,
2399                 ntohs(inet_sk(sk)->inet_sport),
2400                 ireq->rmt_addr,
2401                 ntohs(ireq->rmt_port),
2402                 TCP_SYN_RECV,
2403                 0, 0, /* could print option size, but that is af dependent. */
2404                 1,    /* timers active (only the expire timer) */
2405                 jiffies_to_clock_t(ttd),
2406                 req->retrans,
2407                 uid,
2408                 0,  /* non standard timer */
2409                 0, /* open_requests have no inode */
2410                 atomic_read(&sk->sk_refcnt),
2411                 req,
2412                 len);
2413 }
2414
2415 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2416 {
2417         int timer_active;
2418         unsigned long timer_expires;
2419         const struct tcp_sock *tp = tcp_sk(sk);
2420         const struct inet_connection_sock *icsk = inet_csk(sk);
2421         const struct inet_sock *inet = inet_sk(sk);
2422         __be32 dest = inet->inet_daddr;
2423         __be32 src = inet->inet_rcv_saddr;
2424         __u16 destp = ntohs(inet->inet_dport);
2425         __u16 srcp = ntohs(inet->inet_sport);
2426         int rx_queue;
2427
2428         if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2429                 timer_active    = 1;
2430                 timer_expires   = icsk->icsk_timeout;
2431         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2432                 timer_active    = 4;
2433                 timer_expires   = icsk->icsk_timeout;
2434         } else if (timer_pending(&sk->sk_timer)) {
2435                 timer_active    = 2;
2436                 timer_expires   = sk->sk_timer.expires;
2437         } else {
2438                 timer_active    = 0;
2439                 timer_expires = jiffies;
2440         }
2441
2442         if (sk->sk_state == TCP_LISTEN)
2443                 rx_queue = sk->sk_ack_backlog;
2444         else
2445                 /*
2446                  * because we don't lock the socket, we might find a transient negative value
2447                  */
2448                 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2449
2450         seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2451                         "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2452                 i, src, srcp, dest, destp, sk->sk_state,
2453                 tp->write_seq - tp->snd_una,
2454                 rx_queue,
2455                 timer_active,
2456                 jiffies_to_clock_t(timer_expires - jiffies),
2457                 icsk->icsk_retransmits,
2458                 sock_i_uid(sk),
2459                 icsk->icsk_probes_out,
2460                 sock_i_ino(sk),
2461                 atomic_read(&sk->sk_refcnt), sk,
2462                 jiffies_to_clock_t(icsk->icsk_rto),
2463                 jiffies_to_clock_t(icsk->icsk_ack.ato),
2464                 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2465                 tp->snd_cwnd,
2466                 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
2467                 len);
2468 }
2469
2470 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2471                                struct seq_file *f, int i, int *len)
2472 {
2473         __be32 dest, src;
2474         __u16 destp, srcp;
2475         int ttd = tw->tw_ttd - jiffies;
2476
2477         if (ttd < 0)
2478                 ttd = 0;
2479
2480         dest  = tw->tw_daddr;
2481         src   = tw->tw_rcv_saddr;
2482         destp = ntohs(tw->tw_dport);
2483         srcp  = ntohs(tw->tw_sport);
2484
2485         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2486                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2487                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2488                 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2489                 atomic_read(&tw->tw_refcnt), tw, len);
2490 }
2491
2492 #define TMPSZ 150
2493
2494 static int tcp4_seq_show(struct seq_file *seq, void *v)
2495 {
2496         struct tcp_iter_state *st;
2497         int len;
2498
2499         if (v == SEQ_START_TOKEN) {
2500                 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2501                            "  sl  local_address rem_address   st tx_queue "
2502                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2503                            "inode");
2504                 goto out;
2505         }
2506         st = seq->private;
2507
2508         switch (st->state) {
2509         case TCP_SEQ_STATE_LISTENING:
2510         case TCP_SEQ_STATE_ESTABLISHED:
2511                 get_tcp4_sock(v, seq, st->num, &len);
2512                 break;
2513         case TCP_SEQ_STATE_OPENREQ:
2514                 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2515                 break;
2516         case TCP_SEQ_STATE_TIME_WAIT:
2517                 get_timewait4_sock(v, seq, st->num, &len);
2518                 break;
2519         }
2520         seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2521 out:
2522         return 0;
2523 }
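
/* The records above are what userspace tools such as netstat read from
 * /proc/net/tcp.  A minimal sketch of a reader for the leading columns;
 * the hex addresses are raw __be32 values, so octets come out low byte
 * first on little-endian hosts (field layout per the header string in
 * tcp4_seq_show()):
 */
#include <stdio.h>

static int parse_tcp4_line_sketch(const char *line)
{
	unsigned int sl, laddr, lport, raddr, rport, state;

	if (sscanf(line, "%u: %8X:%4X %8X:%4X %2X",
		   &sl, &laddr, &lport, &raddr, &rport, &state) != 6)
		return -1;	/* header line or malformed entry */
	printf("%u.%u.%u.%u:%u -> %u.%u.%u.%u:%u state %02X\n",
	       laddr & 0xff, (laddr >> 8) & 0xff, (laddr >> 16) & 0xff,
	       laddr >> 24, lport,
	       raddr & 0xff, (raddr >> 8) & 0xff, (raddr >> 16) & 0xff,
	       raddr >> 24, rport, state);
	return 0;
}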
2524
2525 static const struct file_operations tcp_afinfo_seq_fops = {
2526         .owner   = THIS_MODULE,
2527         .open    = tcp_seq_open,
2528         .read    = seq_read,
2529         .llseek  = seq_lseek,
2530         .release = seq_release_net
2531 };
2532
2533 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2534         .name           = "tcp",
2535         .family         = AF_INET,
2536         .seq_fops       = &tcp_afinfo_seq_fops,
2537         .seq_ops        = {
2538                 .show           = tcp4_seq_show,
2539         },
2540 };
2541
2542 static int __net_init tcp4_proc_init_net(struct net *net)
2543 {
2544         return tcp_proc_register(net, &tcp4_seq_afinfo);
2545 }
2546
2547 static void __net_exit tcp4_proc_exit_net(struct net *net)
2548 {
2549         tcp_proc_unregister(net, &tcp4_seq_afinfo);
2550 }
2551
2552 static struct pernet_operations tcp4_net_ops = {
2553         .init = tcp4_proc_init_net,
2554         .exit = tcp4_proc_exit_net,
2555 };
2556
2557 int __init tcp4_proc_init(void)
2558 {
2559         return register_pernet_subsys(&tcp4_net_ops);
2560 }
2561
2562 void tcp4_proc_exit(void)
2563 {
2564         unregister_pernet_subsys(&tcp4_net_ops);
2565 }
2566 #endif /* CONFIG_PROC_FS */
2567
2568 struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2569 {
2570         const struct iphdr *iph = skb_gro_network_header(skb);
2571
2572         switch (skb->ip_summed) {
2573         case CHECKSUM_COMPLETE:
2574                 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2575                                   skb->csum)) {
2576                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2577                         break;
2578                 }
2579
2580                 /* fall through */
2581         case CHECKSUM_NONE:
2582                 NAPI_GRO_CB(skb)->flush = 1;
2583                 return NULL;
2584         }
2585
2586         return tcp_gro_receive(head, skb);
2587 }
2588
2589 int tcp4_gro_complete(struct sk_buff *skb)
2590 {
2591         const struct iphdr *iph = ip_hdr(skb);
2592         struct tcphdr *th = tcp_hdr(skb);
2593
2594         th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2595                                   iph->saddr, iph->daddr, 0);
2596         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2597
2598         return tcp_gro_complete(skb);
2599 }
2600
2601 struct proto tcp_prot = {
2602         .name                   = "TCP",
2603         .owner                  = THIS_MODULE,
2604         .close                  = tcp_close,
2605         .connect                = tcp_v4_connect,
2606         .disconnect             = tcp_disconnect,
2607         .accept                 = inet_csk_accept,
2608         .ioctl                  = tcp_ioctl,
2609         .init                   = tcp_v4_init_sock,
2610         .destroy                = tcp_v4_destroy_sock,
2611         .shutdown               = tcp_shutdown,
2612         .setsockopt             = tcp_setsockopt,
2613         .getsockopt             = tcp_getsockopt,
2614         .recvmsg                = tcp_recvmsg,
2615         .sendmsg                = tcp_sendmsg,
2616         .sendpage               = tcp_sendpage,
2617         .backlog_rcv            = tcp_v4_do_rcv,
2618         .hash                   = inet_hash,
2619         .unhash                 = inet_unhash,
2620         .get_port               = inet_csk_get_port,
2621         .enter_memory_pressure  = tcp_enter_memory_pressure,
2622         .sockets_allocated      = &tcp_sockets_allocated,
2623         .orphan_count           = &tcp_orphan_count,
2624         .memory_allocated       = &tcp_memory_allocated,
2625         .memory_pressure        = &tcp_memory_pressure,
2626         .sysctl_mem             = sysctl_tcp_mem,
2627         .sysctl_wmem            = sysctl_tcp_wmem,
2628         .sysctl_rmem            = sysctl_tcp_rmem,
2629         .max_header             = MAX_TCP_HEADER,
2630         .obj_size               = sizeof(struct tcp_sock),
2631         .slab_flags             = SLAB_DESTROY_BY_RCU,
2632         .twsk_prot              = &tcp_timewait_sock_ops,
2633         .rsk_prot               = &tcp_request_sock_ops,
2634         .h.hashinfo             = &tcp_hashinfo,
2635         .no_autobind            = true,
2636 #ifdef CONFIG_COMPAT
2637         .compat_setsockopt      = compat_tcp_setsockopt,
2638         .compat_getsockopt      = compat_tcp_getsockopt,
2639 #endif
2640 };
2641 EXPORT_SYMBOL(tcp_prot);
2642
2643
2644 static int __net_init tcp_sk_init(struct net *net)
2645 {
2646         return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2647                                     PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2648 }
2649
2650 static void __net_exit tcp_sk_exit(struct net *net)
2651 {
2652         inet_ctl_sock_destroy(net->ipv4.tcp_sock);
2653 }
2654
2655 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2656 {
2657         inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2658 }
2659
2660 static struct pernet_operations __net_initdata tcp_sk_ops = {
2661        .init       = tcp_sk_init,
2662        .exit       = tcp_sk_exit,
2663        .exit_batch = tcp_sk_exit_batch,
2664 };
2665
2666 void __init tcp_v4_init(void)
2667 {
2668         inet_hashinfo_init(&tcp_hashinfo);
2669         if (register_pernet_subsys(&tcp_sk_ops))
2670                 panic("Failed to create the TCP control socket.\n");
2671 }