/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only the timestamp cache is
	   held not per host, but per port pair, and the TW bucket is used as
	   the state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
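
/*
 * Illustrative only (not part of the kernel source): sysctl_tcp_tw_reuse,
 * tested above, is exposed through procfs. A minimal userspace sketch that
 * enables the TIME-WAIT reuse behaviour (error handling elided):
 *
 *	int fd = open("/proc/sys/net/ipv4/tcp_tw_reuse", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1", 1);	// allow reusing TIME-WAIT port pairs
 *		close(fd);
 *	}
 */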

static int tcp_repair_connect(struct sock *sk)
{
	tcp_connect_init(sk);
	tcp_finish_connect(sk, NULL);

	return 0;
}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
		struct inet_peer *peer = rt_get_peer(rt, fl4->daddr);
		/*
		 * VJ's idea. We save the last timestamp seen from
		 * the destination in the peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
		 * when trying a new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	if (likely(!tp->repair))
		err = tcp_connect(sk);
	else
		err = tcp_repair_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
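
/*
 * Illustrative only (not part of the kernel source): tcp_v4_connect() is
 * what ultimately runs when userspace calls connect() on an AF_INET stream
 * socket. A minimal sketch of that call path from user level; 192.0.2.1 is
 * a documentation-range placeholder address (error handling elided):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 */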

/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the dst entry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember the soft error
	 * for the case that this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       (It can, e.g., if SYNs crossed.)
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * RFC 1122 4.2.3.9 allows us to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in each dark corner sending random
	 * errors ordered by their masters, even these two messages have
	 * finally lost their original sense (even Linux sends invalid
	 * PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
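
/*
 * Illustrative only (not part of the kernel source): when tcp_v4_err()
 * turns an ICMP error into a hard error, the value stored in sk->sk_err is
 * what userspace later observes. A minimal sketch of retrieving it after a
 * failed non-blocking connect (error handling elided):
 *
 *	int err = 0;
 *	socklen_t len = sizeof(err);
 *
 *	getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
 *	if (err)
 *		fprintf(stderr, "connect failed: %s\n", strerror(err));
 */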

static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other side's
 *		TCP. So we build the reply based only on parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not loosen security here:
		 * the incoming packet is checked against the md5 hash of the
		 * key we find; no RST is generated if the hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When the socket is gone, all binding information is lost.
	 * Routing might fail in this case. Use iif for oif to
	 * make sure we can deliver it.
	 */
	arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping,
			      bool nocache)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req, nocache)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
						  struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address. */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos, *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	if (!hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
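
/*
 * Illustrative only (not part of the kernel source): tcp_v4_parse_md5_keys()
 * is reached via the TCP_MD5SIG socket option. A minimal userspace sketch
 * that installs an RFC 2385 key for one peer; the key bytes and peer address
 * are placeholders (error handling elided):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	peer->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &peer->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */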

static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
				     : "");
		return true;
	}
	return false;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	bool want_cookie = false;

	/* Never answer SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and the peer is
	 * evidently a real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * warm entries in the syn queue, drop the request. It is better than
	 * clogging the syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		struct inet_peer *peer = NULL;
		struct flowi4 fl4;

		/* VJ's idea. We save the last timestamp seen
		 * from the destination in the peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting a new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req, want_cookie)) != NULL &&
		    fl4.daddr == saddr &&
		    (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations
			 * proven to be alive.
			 * It means that we continue to communicate
			 * with destinations already remembered
			 * by the moment of the synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	if (tcp_v4_send_synack(sk, dst, req,
			       (struct request_values *)&tmp_ext,
			       skb_get_queue_mapping(skb),
			       want_cookie) ||
	    want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);

/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	tcp_clear_xmit_timers(newsk);
	tcp_cleanup_congestion_control(newsk);
	bh_unlock_sock(newsk);
	sock_put(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and there is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

void tcp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct net_device *dev;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb));

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	if (!pskb_may_pull(skb, ip_hdrlen(skb) + th->doff * 4))
		return;

	dev = skb->dev;
	sk = __inet_lookup_established(net, &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       dev->ifindex);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;
			if (dst)
				dst = dst_check(dst, 0);
			if (dst) {
				struct rtable *rt = (struct rtable *) dst;

				if (rt->rt_iif == dev->ifindex)
					skb_dst_set_noref(skb, dst);
			}
		}
	}
}

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided the case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

struct inet_peer *tcp_v4_get_peer(struct sock *sk)
{
	struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
	struct inet_sock *inet = inet_sk(sk);

	/* If we don't have a valid cached route, or we're doing IP
	 * options which make the IPv4 header destination address
	 * different from our peer's, do not bother with this.
	 */
	if (!rt || inet->cork.fl.u.ip4.daddr != inet->inet_daddr)
		return NULL;
	return rt_get_peer_create(rt, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_get_peer);

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.get_peer	   = tcp_v4_get_peer,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc() so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If a sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}

/*
 * Get the next listener socket following cur.  If cur is NULL, get the first
 * socket starting from the bucket given in st->bucket; when st->bucket is
 * zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}

/*
 * Get the first established socket starting from the bucket given in
 * st->bucket. If st->bucket is zero, the very first socket in the hash
 * is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for the next non-empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);

static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * Because we don't lock the socket, we might find a transient
		 * negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
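
/*
 * Illustrative only (not part of the kernel source): the seq_file hooks
 * above back /proc/net/tcp, so the table can be read like any other file.
 * A minimal userspace sketch (error handling elided):
 *
 *	char buf[4096];
 *	int fd = open("/proc/net/tcp", O_RDONLY);
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);
 *	if (n > 0) {
 *		buf[n] = '\0';
 *		fputs(buf, stdout);	// header line, then one line per socket
 *	}
 *	close(fd);
 */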

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);

static int __net_init tcp_sk_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}