X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Fnet%2Ftcp.h;h=19f4150f4d4d166380f95079c1877af28e6d62a5;hb=22f6dacdfcfdc792d068e9c41234808860498d04;hp=1b94b9bfe2dc13754d4c2f6f4c9e26efd8b8c130;hpb=22ae77bc7ac115b9d518d5cbc13d39317079b2b0;p=pandora-kernel.git

diff --git a/include/net/tcp.h b/include/net/tcp.h
index 1b94b9bfe2dc..19f4150f4d4d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -41,6 +41,7 @@
 #include <net/ip.h>
 #include <net/tcp_states.h>
 #include <net/inet_ecn.h>
+#include <net/dst.h>
 
 #include <linux/seq_file.h>
 
@@ -265,6 +266,19 @@ static inline int tcp_too_many_orphans(struct sock *sk, int num)
 	       atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
 }
 
+/* syncookies: remember time of last synqueue overflow */
+static inline void tcp_synq_overflow(struct sock *sk)
+{
+	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
+}
+
+/* syncookies: no recent synqueue overflow on this listening socket? */
+static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
+{
+	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+	return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT);
+}
+
 extern struct proto tcp_prot;
 
 #define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
@@ -530,6 +544,17 @@ static inline void tcp_fast_path_check(struct sock *sk)
 		tcp_fast_path_on(tp);
 }
 
+/* Compute the actual rto_min value */
+static inline u32 tcp_rto_min(struct sock *sk)
+{
+	struct dst_entry *dst = __sk_dst_get(sk);
+	u32 rto_min = TCP_RTO_MIN;
+
+	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
+		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
+	return rto_min;
+}
+
 /* Compute the actual receive window we are currently advertising.
  * Rcv_nxt can be after the window if our peer push more data
  * than the offered window.
@@ -877,30 +902,32 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (!sysctl_tcp_low_latency && tp->ucopy.task) {
-		__skb_queue_tail(&tp->ucopy.prequeue, skb);
-		tp->ucopy.memory += skb->truesize;
-		if (tp->ucopy.memory > sk->sk_rcvbuf) {
-			struct sk_buff *skb1;
-
-			BUG_ON(sock_owned_by_user(sk));
-
-			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
-				sk_backlog_rcv(sk, skb1);
-				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
-			}
-
-			tp->ucopy.memory = 0;
-		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
-			wake_up_interruptible(sk->sk_sleep);
-			if (!inet_csk_ack_scheduled(sk))
-				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-							  (3 * TCP_RTO_MIN) / 4,
-							  TCP_RTO_MAX);
+	if (sysctl_tcp_low_latency || !tp->ucopy.task)
+		return 0;
+
+	__skb_queue_tail(&tp->ucopy.prequeue, skb);
+	tp->ucopy.memory += skb->truesize;
+	if (tp->ucopy.memory > sk->sk_rcvbuf) {
+		struct sk_buff *skb1;
+
+		BUG_ON(sock_owned_by_user(sk));
+
+		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
+			sk_backlog_rcv(sk, skb1);
+			NET_INC_STATS_BH(sock_net(sk),
+					 LINUX_MIB_TCPPREQUEUEDROPPED);
 		}
-		return 1;
+
+		tp->ucopy.memory = 0;
+	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
+		wake_up_interruptible_poll(sk->sk_sleep,
+					   POLLIN | POLLRDNORM | POLLRDBAND);
+		if (!inet_csk_ack_scheduled(sk))
+			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+						  (3 * tcp_rto_min(sk)) / 4,
+						  TCP_RTO_MAX);
 	}
-	return 0;
+	return 1;
 }
 
 #undef STATE_TRACE
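
A note on the syncookies helpers added in the @@ -265 hunk: tcp_synq_no_recent_overflow() compares jiffies against the stored overflow stamp with time_after(), which stays correct even after the jiffies counter wraps, because it subtracts and tests the sign. Below is a minimal standalone sketch of the same check, assuming a fixed 32-bit counter and hypothetical values standing in for jiffies, the stored stamp, and TCP_TIMEOUT_INIT:

#include <stdio.h>
#include <stdint.h>

/* Wraparound-safe ordering test modeled on time_after() from
 * linux/jiffies.h; fixed 32-bit types keep the demo portable. */
#define time_after32(a, b)	((int32_t)((b) - (a)) < 0)

int main(void)
{
	uint32_t last_overflow = UINT32_MAX - 100;	/* hypothetical stamp taken just before wrap */
	uint32_t now = 50;				/* the counter has since wrapped around */
	uint32_t timeout = 30;				/* hypothetical stand-in for TCP_TIMEOUT_INIT */

	/* Same shape as tcp_synq_no_recent_overflow(): true once more
	 * than `timeout` ticks have elapsed, despite the wraparound. */
	if (time_after32(now, last_overflow + timeout))
		printf("no recent synqueue overflow\n");
	else
		printf("recent overflow: keep answering with syncookies\n");
	return 0;
}

On a listening socket, rx_opt.ts_recent_stamp is presumably free for this purpose, which is what lets tcp_synq_overflow() reuse it as the overflow timestamp.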
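
The tcp_rto_min() helper added in the @@ -530 hunk prefers a per-route minimum RTO when the route has RTAX_RTO_MIN locked, and falls back to the compile-time TCP_RTO_MIN otherwise; the tcp_prequeue() hunk then bases its delayed-ACK timeout on (3 * tcp_rto_min(sk)) / 4 rather than the global constant. A sketch of just that selection logic follows, with a hypothetical struct standing in for the kernel's dst_entry and its metric accessors:

#include <stdio.h>

#define TCP_RTO_MIN	200u	/* hypothetical stand-in value, in milliseconds */

/* Hypothetical stand-in for the route's dst_entry metrics. */
struct fake_dst {
	int rto_min_locked;	/* models dst_metric_locked(dst, RTAX_RTO_MIN) */
	unsigned int rto_min;	/* models dst_metric_rtt(dst, RTAX_RTO_MIN) */
};

/* Mirrors the selection in tcp_rto_min(): use the per-route locked
 * minimum when present, otherwise the global default. */
static unsigned int rto_min(const struct fake_dst *dst)
{
	if (dst && dst->rto_min_locked)
		return dst->rto_min;
	return TCP_RTO_MIN;
}

int main(void)
{
	struct fake_dst lan = { .rto_min_locked = 1, .rto_min = 20 };

	printf("per-route rto_min:   %u ms\n", rto_min(&lan));	/* prints 20 */
	printf("default rto_min:     %u ms\n", rto_min(NULL));	/* prints 200 */
	printf("delayed-ACK timeout: %u ms\n", 3 * rto_min(&lan) / 4);	/* prints 15 */
	return 0;
}

The same hunk also narrows the prequeue wakeup from wake_up_interruptible() to wake_up_interruptible_poll() with POLLIN | POLLRDNORM | POLLRDBAND, so only sleepers waiting on read-style events are woken when the first segment is queued.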