/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
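
/* The initial sequence number for a connection is derived below from a
 * keyed hash over the connection 4-tuple plus a clock component (see
 * secure_tcp_sequence_number() in net/core/secure_seq.c), so ISNs are hard
 * to predict off-path while still advancing over time.
 */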
static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
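
/* tcp_twsk_unique() decides whether a connect() may reuse a 4-tuple that is
 * still held by a TIME_WAIT socket.  Reuse is allowed when timestamps (PAWS)
 * or a sufficiently aged TIME_WAIT state make old duplicate segments
 * harmless for the new connection.
 */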
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
		struct inet_peer *peer = rt_get_peer(rt, fl4->daddr);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
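
/* Illustrative call path (not part of this file): tcp_v4_connect() is
 * reached from userspace roughly as
 *
 *	connect(fd, ...)
 *	  -> inet_stream_connect()
 *	    -> tcp_v4_connect()		(via tcp_prot.connect)
 *
 * i.e. it runs with the socket lock held and the socket not yet hashed.
 */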
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the dst entry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
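
/* Note (simplified, see tcp_sync_mss() for the exact computation): after a
 * PMTU update the effective MSS is roughly the learned path MTU minus IP
 * and TCP header overhead (40 bytes without options), cached in
 * icsk_pmtu_cookie.
 */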
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* Check if the ICMP allows reverting the RTO backoff
		 * (see draft-zimmermann-tcp-lcd). */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now. */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:	/* Cannot happen normally.
				 * It can, e.g., if SYNs crossed.
				 */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows us to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages have
	 * lost their original sense (even Linux sends invalid
	 * PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else { /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
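
/* With CHECKSUM_PARTIAL above, only the pseudo-header sum is filled in and
 * the device (or GSO segmentation) finishes the checksum using csum_start /
 * csum_offset; otherwise the full checksum is computed in software.
 */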
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on parameters that
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}
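	/* Per RFC 793: a RST answering an ACK-bearing segment takes its
	 * sequence number from the incoming ack_seq; otherwise SEQ=0 is used
	 * and the RST must itself carry an ACK covering the offending
	 * segment, which is what the ack_seq computation above does.
	 */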
	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When the socket is gone, all binding information is lost and
	 * routing might fail in this case. No choice here: if we chose to
	 * force the input interface, we would misroute in case of an
	 * asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
 * outside socket context, is certainly ugly. What can I do?
 */
static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
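
/* tcp_v4_send_ack() is the common helper for the two "ACK without a full
 * socket" cases below: answering segments for TIME_WAIT sockets and for
 * SYN_RECV request_socks.
 */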
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
/*
 * Return 1 if a syncookie should be sent
 */
int tcp_syn_flood_action(struct sock *sk,
			 const struct sk_buff *skb,
			 const char *proto)
{
	const char *msg = "Dropping request";
	int want_cookie = 0;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s. "
			"Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
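
/* A minimal sketch (administration, not part of this file): syncookies are
 * typically enabled via
 *
 *	# echo 1 > /proc/sys/net/ipv4/tcp_syncookies
 *
 * which sets the sysctl_tcp_syncookies flag checked above.
 */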
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
						  struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address. */
static struct tcp_md5sig_key *
			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}

/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}

		md5sig = tp->md5sig_info;
		if (md5sig->entries4 == 0 && !tcp_alloc_md5sig_pool()) {
			kfree(newkey);
			return -ENOMEM;
		}

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				if (md5sig->entries4 == 0)
					tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}
EXPORT_SYMBOL(tcp_v4_md5_do_add);
static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
				 newkey, newkeylen);
}

int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}
EXPORT_SYMBOL(tcp_v4_md5_do_del);
static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the key array itself,
	 * the crypto element, and then decrement our
	 * hold on the last resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4 = 0;
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(*p), sk->sk_allocation);
		if (!p)
			return -EINVAL;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}
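
/* Illustrative userspace usage (a sketch, not part of this file): a peer
 * key is installed with the TCP_MD5SIG socket option, e.g.
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("192.0.2.1");	// example address
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * which lands in tcp_v4_parse_md5_keys() above via tcp_setsockopt().
 */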
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
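
/* Per RFC 2385 the digest is computed, in order, over the TCP pseudo-header,
 * the TCP header with its checksum field zeroed, the segment payload, and
 * finally the key itself - which is exactly the sequence of tcp_*_hash_*
 * calls in the two functions above.
 */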
static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
			       &iph->saddr, ntohs(th->source),
			       &iph->daddr, ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif
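
/* Passive open in a nutshell: a SYN arriving at a listener makes
 * tcp_v4_conn_request() queue a lightweight request_sock and answer with a
 * SYN-ACK; only when the final ACK of the handshake arrives does
 * tcp_v4_syn_recv_sock() create the full socket returned by accept().
 */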
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	int want_cookie = 0;

	/* Never answer SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = 0;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		struct inet_peer *peer = NULL;
		struct flowi4 fl4;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
		    fl4.daddr == saddr &&
		    (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	if (tcp_v4_send_synack(sk, dst, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
					  newkey, key->keylen);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
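
/* tcp_v4_hnd_req() resolves a segment received on a listener: it may match a
 * pending request_sock (handshake in progress), an already established child
 * socket, or - with syncookies - reconstruct a request from a valid cookie.
 */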
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
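
/* For short segments (<= 76 bytes) verifying the checksum immediately is
 * cheap and catches corruption before any connection state is touched;
 * longer segments keep the pseudo-header sum in skb->csum and are verified
 * later, on demand.
 */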
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and there is no MD5 TCP option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
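
/* tcp_filter() runs the socket filter (BPF) attached to the socket and, if
 * the filter trims the skb, keeps TCP_SKB_CB(skb)->end_seq consistent with
 * the new length so sequence accounting still matches the data.
 */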
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;
	unsigned int eaten = skb->len;
	int err;

	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
	if (!err) {
		eaten -= skb->len;
		TCP_SKB_CB(skb)->end_seq -= eaten;
	}
	return err;
}
EXPORT_SYMBOL(tcp_filter);
/*
 *	From tcp_input.c
 */
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);
	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;
process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
{
	struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct inet_peer *peer;

	if (!rt ||
	    inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
		peer = inet_getpeer_v4(inet->inet_daddr, 1);
		*release_it = true;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, inet->inet_daddr, 1);
		peer = rt->peer;
		*release_it = false;
	}

	return peer;
}
EXPORT_SYMBOL(tcp_v4_get_peer);

void *tcp_v4_tw_get_peer(struct sock *sk)
{
	const struct inet_timewait_sock *tw = inet_twsk(sk);

	return inet_getpeer_v4(tw->tw_daddr, 1);
}
EXPORT_SYMBOL(tcp_v4_tw_get_peer);
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
	.twsk_getpeer	= tcp_v4_tw_get_peer,
};
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.get_peer	   = tcp_v4_get_peer,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_add	= tcp_v4_md5_add_func,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc() so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = TCP_INIT_CWND;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv4_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_v4_clear_md5_list(sk);
		kfree(tp->md5sig_info);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}

	percpu_counter_dec(&tcp_sockets_allocated);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
/*
 * Get the next listener socket following cur.  If cur is NULL, get the first
 * socket starting from the bucket given in st->bucket; when st->bucket is
 * zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static inline int empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for next non empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->last_pos	= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start	= tcp_seq_start;
	afinfo->seq_ops.next	= tcp_seq_next;
	afinfo->seq_ops.stop	= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * Because we don't lock the socket, we might find a
		 * transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
EXPORT_SYMBOL(tcp_prot);
static int __net_init tcp_sk_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}