/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);


#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only the timestamp cache
	   is held not per host, but per port pair, and the TW bucket is used
	   as state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
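
/* Illustrative user-space sketch (not part of the kernel source): the
 * sysctl_tcp_tw_reuse knob consulted by tcp_twsk_unique() above is
 * exposed as /proc/sys/net/ipv4/tcp_tw_reuse.  A minimal sketch for
 * enabling it, equivalent to "sysctl -w net.ipv4.tcp_tw_reuse=1":
 */
#if 0
#include <stdio.h>

static int enable_tw_reuse(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "w");

	if (!f)
		return -1;
	fputs("1\n", f);	/* 0 = off, 1 = allow TIME-WAIT reuse */
	return fclose(f);
}
#endif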
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
		struct inet_peer *peer = rt_get_peer(rt, fl4->daddr);
		/*
		 * VJ's idea. We save the last timestamp seen from
		 * the destination in the peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it
		 * when trying a new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
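
/* Illustrative user-space counterpart (not part of this file): a blocking
 * connect() on an AF_INET stream socket is what ultimately reaches
 * tcp_v4_connect() above.  Minimal sketch; the 198.51.100.1 address and
 * port 80 are placeholders.
 */
#if 0
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>

static int connect_example(void)
{
	struct sockaddr_in sin = { .sin_family = AF_INET,
				   .sin_port   = htons(80) };
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	inet_pton(AF_INET, "198.51.100.1", &sin.sin_addr);
	/* Kernel path: sys_connect -> inet_stream_connect -> tcp_v4_connect */
	if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif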
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big messages
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember the soft error
	 * for the case that this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
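
/* Illustrative user-space sketch (not part of this file): the pmtudisc
 * mode tested above is the per-socket IP_MTU_DISCOVER setting.  A
 * minimal sketch pinning a socket to "always set DF, do PMTU discovery":
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

static int force_pmtu_discovery(int fd)
{
	int val = IP_PMTUDISC_DO;	/* vs IP_PMTUDISC_DONT / _WANT */

	return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER,
			  &val, sizeof(val));
}
#endif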
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen?
			       It can, e.g., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH
	 * to be considered hard errors (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in each dark corner sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
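
/* Illustrative user-space sketch (not part of this file): the
 * inet->recverr test above corresponds to the IP_RECVERR socket option.
 * With it set, errors derived from ICMP are reported immediately via
 * sk->sk_err instead of only surfacing as a soft error on timeout:
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

static int enable_recverr(int fd)
{
	int on = 1;

	return setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
}
#endif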
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
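
/* Illustrative sketch (not part of this file): what tcp_v4_check()
 * computes, written out in portable user-space C - the 16-bit one's
 * complement sum over the IPv4 pseudo-header (saddr, daddr, zero-padded
 * protocol, TCP length) followed by the TCP header and payload.
 */
#if 0
#include <stdint.h>
#include <stddef.h>
#include <netinet/in.h>

static uint16_t tcp4_checksum(const uint8_t saddr[4], const uint8_t daddr[4],
			      const uint8_t *seg, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	/* Pseudo-header: saddr, daddr, zero-padded protocol, TCP length */
	sum += (saddr[0] << 8) | saddr[1];
	sum += (saddr[2] << 8) | saddr[3];
	sum += (daddr[0] << 8) | daddr[1];
	sum += (daddr[2] << 8) | daddr[3];
	sum += IPPROTO_TCP;		/* 6, high byte is zero */
	sum += (uint32_t)len;		/* TCP header + payload length */

	/* TCP header (checksum field assumed zeroed) and payload */
	for (i = 0; i + 1 < len; i += 2)
		sum += (seg[i] << 8) | seg[i + 1];
	if (len & 1)			/* odd trailing byte, zero padded */
		sum += seg[len - 1] << 8;

	while (sum >> 16)		/* fold the carries */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;		/* one's complement */
}
#endif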
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other
 *		side's TCP.  So we build the reply based only on the
 *		parameters that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	net = dev_net(skb_dst(skb)->dev);
	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}

/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;

	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff * skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

static void syn_flood_warning(const struct sk_buff *skb)
{
	const char *msg;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		msg = "Sending cookies";
	else
#endif
		msg = "Dropping request";

	pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
		ntohs(tcp_hdr(skb)->dest), msg);
}

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
						  struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
static struct tcp_md5sig_key *
			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}

/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		md5sig = tp->md5sig_info;

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}
EXPORT_SYMBOL(tcp_v4_md5_do_add);

static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
				 newkey, newkeylen);
}

int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}
EXPORT_SYMBOL(tcp_v4_md5_do_del);

static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the set of keys,
	 * the crypto element, and then decrement our
	 * hold on the last resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4  = 0;
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(*p), sk->sk_allocation);
		if (!p)
			return -EINVAL;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}
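
/* Illustrative user-space sketch (not part of this file): the option
 * parsed above is installed with setsockopt(TCP_MD5SIG).  The peer
 * address 198.51.100.2 and the key bytes are placeholders.
 */
#if 0
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/tcp.h>		/* struct tcp_md5sig, TCP_MD5SIG */

static int install_md5_key(int fd, const char *peer,
			   const void *key, int keylen)
{
	struct tcp_md5sig md5;
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

	memset(&md5, 0, sizeof(md5));
	sin->sin_family = AF_INET;
	inet_pton(AF_INET, peer, &sin->sin_addr);
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, key, keylen);
	/* Kernel path: tcp_setsockopt -> tp->af_specific->md5_parse,
	 * i.e. tcp_v4_parse_md5_keys() above. */
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif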
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			struct sock *sk, struct request_sock *req,
			struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
			       &iph->saddr, ntohs(th->source),
			       &iph->daddr, ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		struct inet_peer *peer = NULL;
		struct flowi4 fl4;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
		    fl4.daddr == saddr &&
		    (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	if (tcp_v4_send_synack(sk, dst, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
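
/* Illustrative user-space counterpart (not part of this file): the SYNs
 * handled by tcp_v4_conn_request() arrive on a socket set up with the
 * classic listen sequence.  Port 8080 and backlog 128 are placeholders.
 */
#if 0
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int listen_example(void)
{
	struct sockaddr_in sin;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	memset(&sin, 0, sizeof(sin));
	sin.sin_family      = AF_INET;
	sin.sin_port        = htons(8080);
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0 ||
	    listen(fd, 128) < 0)	/* backlog checked by sk_acceptq_is_full() */
		return -1;
	/* Each completed handshake (tcp_v4_syn_recv_sock below) becomes
	 * a socket returned by accept(). */
	return accept(fd, NULL, NULL);
}
#endif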
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
		goto put_and_exit;

	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
					  newkey, key->keylen);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	sock_put(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}


/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb->rxhash);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb->rxhash);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb->rxhash);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
{
	struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct inet_peer *peer;

	if (!rt ||
	    inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
		peer = inet_getpeer_v4(inet->inet_daddr, 1);
		*release_it = true;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, inet->inet_daddr, 1);
		peer = rt->peer;
		*release_it = false;
	}

	return peer;
}
EXPORT_SYMBOL(tcp_v4_get_peer);

void *tcp_v4_tw_get_peer(struct sock *sk)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);

	return inet_getpeer_v4(tw->tw_daddr, 1);
}
EXPORT_SYMBOL(tcp_v4_tw_get_peer);
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
	.twsk_getpeer	= tcp_v4_tw_get_peer,
};

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.get_peer	   = tcp_v4_get_peer,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup		= tcp_v4_md5_lookup,
	.calc_md5_hash		= tcp_v4_md5_hash_skb,
	.md5_add		= tcp_v4_md5_add_func,
	.md5_parse		= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = TCP_INIT_CWND;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv4_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_v4_clear_md5_list(sk);
		kfree(tp->md5sig_info);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}

	percpu_counter_dec(&tcp_sockets_allocated);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}

/*
 * Get next listener socket following cur.  If cur is NULL, get the first
 * socket starting from the bucket given in st->bucket; when st->bucket is
 * zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline int empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for next non empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			  sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_fops.open		= tcp_seq_open;
	afinfo->seq_fops.read		= seq_read;
	afinfo->seq_fops.llseek		= seq_lseek;
	afinfo->seq_fops.release	= seq_release_net;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     &afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);

static void get_openreq4(struct sock *sk, struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * because we don't lock the socket, we might find a
		 * transient negative value
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}

static void get_timewait4_sock(struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
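
/* Illustrative user-space sketch (not part of this file): the seq_file
 * machinery above backs /proc/net/tcp, one line per socket in the format
 * emitted by get_tcp4_sock()/get_openreq4()/get_timewait4_sock().  A
 * minimal reader of the first few fields:
 */
#if 0
#include <stdio.h>

static void dump_proc_net_tcp(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/tcp", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f)) {
		unsigned lp, rp, state;
		unsigned long laddr, raddr;

		/* The "  sl  local_address rem_address   st ..." header
		 * line fails the sscanf() and is skipped. */
		if (sscanf(line, "%*d: %lx:%x %lx:%x %x",
			   &laddr, &lp, &raddr, &rp, &state) == 5)
			printf("local port %u, remote port %u, state %#x\n",
			       lp, rp, state);
	}
	fclose(f);
}
#endif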
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
EXPORT_SYMBOL(tcp_prot);


static int __net_init tcp_sk_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}