Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e54c76d..19f4150 100644
@@ -41,6 +41,7 @@
 #include <net/ip.h>
 #include <net/tcp_states.h>
 #include <net/inet_ecn.h>
+#include <net/dst.h>
 
 #include <linux/seq_file.h>
 
@@ -265,6 +266,19 @@ static inline int tcp_too_many_orphans(struct sock *sk, int num)
                 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
 }
 
+/* syncookies: remember time of last synqueue overflow */
+static inline void tcp_synq_overflow(struct sock *sk)
+{
+       tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
+}
+
+/* syncookies: no recent synqueue overflow on this listening socket? */
+static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
+{
+       unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+       return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT);
+}
+
 extern struct proto tcp_prot;
 
 #define TCP_INC_STATS(net, field)      SNMP_INC_STATS((net)->mib.tcp_statistics, field)
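
The two helpers above stamp jiffies into rx_opt.ts_recent_stamp when the listener falls back to syncookies, and treat a cookie ACK as stale once more than TCP_TIMEOUT_INIT has passed since that stamp. A minimal sketch of the calling convention, using hypothetical function names that are not part of this patch:

static void example_on_synq_overflow(struct sock *sk)
{
	/* the path that decides to answer a SYN with a cookie records the event */
	tcp_synq_overflow(sk);
}

static int example_cookie_ack_plausible(const struct sock *sk)
{
	/* only consider a cookie ACK while an overflow was seen recently */
	return !tcp_synq_no_recent_overflow(sk);
}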
@@ -530,6 +544,17 @@ static inline void tcp_fast_path_check(struct sock *sk)
                tcp_fast_path_on(tp);
 }
 
+/* Compute the actual rto_min value */
+static inline u32 tcp_rto_min(struct sock *sk)
+{
+       struct dst_entry *dst = __sk_dst_get(sk);
+       u32 rto_min = TCP_RTO_MIN;
+
+       if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
+               rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
+       return rto_min;
+}
+
 /* Compute the actual receive window we are currently advertising.
  * Rcv_nxt can be after the window if our peer pushes more data
  * than the offered window.
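
tcp_rto_min() falls back to the compile-time TCP_RTO_MIN unless the route carries a locked RTAX_RTO_MIN metric (a per-route rto_min setting). A minimal sketch of a consumer, assuming an RTT-variance clamp along the lines of the one in the RTT estimator; the call site itself is not part of this hunk:

static void example_bound_rttvar(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* never let the variance, and hence the computed RTO, fall below
	 * the per-route (or default) minimum */
	tp->rttvar = max(tp->mdev_max, tcp_rto_min(sk));
}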
@@ -616,21 +641,6 @@ static inline int tcp_skb_mss(const struct sk_buff *skb)
        return skb_shinfo(skb)->gso_size;
 }
 
-static inline void tcp_dec_pcount_approx_int(__u32 *count, const int decr)
-{
-       if (*count) {
-               *count -= decr;
-               if ((int)*count < 0)
-                       *count = 0;
-       }
-}
-
-static inline void tcp_dec_pcount_approx(__u32 *count,
-                                        const struct sk_buff *skb)
-{
-       tcp_dec_pcount_approx_int(count, tcp_skb_pcount(skb));
-}
-
 /* Events passed to congestion control interface */
 enum tcp_ca_event {
        CA_EVENT_TX_START,      /* first transmit when no packets in flight */
@@ -892,30 +902,32 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (!sysctl_tcp_low_latency && tp->ucopy.task) {
-               __skb_queue_tail(&tp->ucopy.prequeue, skb);
-               tp->ucopy.memory += skb->truesize;
-               if (tp->ucopy.memory > sk->sk_rcvbuf) {
-                       struct sk_buff *skb1;
-
-                       BUG_ON(sock_owned_by_user(sk));
-
-                       while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
-                               sk_backlog_rcv(sk, skb1);
-                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
-                       }
-
-                       tp->ucopy.memory = 0;
-               } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
-                       wake_up_interruptible(sk->sk_sleep);
-                       if (!inet_csk_ack_scheduled(sk))
-                               inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-                                                         (3 * TCP_RTO_MIN) / 4,
-                                                         TCP_RTO_MAX);
+       if (sysctl_tcp_low_latency || !tp->ucopy.task)
+               return 0;
+
+       __skb_queue_tail(&tp->ucopy.prequeue, skb);
+       tp->ucopy.memory += skb->truesize;
+       if (tp->ucopy.memory > sk->sk_rcvbuf) {
+               struct sk_buff *skb1;
+
+               BUG_ON(sock_owned_by_user(sk));
+
+               while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
+                       sk_backlog_rcv(sk, skb1);
+                       NET_INC_STATS_BH(sock_net(sk),
+                                        LINUX_MIB_TCPPREQUEUEDROPPED);
                }
-               return 1;
+
+               tp->ucopy.memory = 0;
+       } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
+               wake_up_interruptible_poll(sk->sk_sleep,
+                                          POLLIN | POLLRDNORM | POLLRDBAND);
+               if (!inet_csk_ack_scheduled(sk))
+                       inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+                                                 (3 * tcp_rto_min(sk)) / 4,
+                                                 TCP_RTO_MAX);
        }
-       return 0;
+       return 1;
 }
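
The rewrite above lets tcp_prequeue() bail out early: it returns 0 when prequeueing is disabled (tcp_low_latency) or no task is waiting in ucopy, meaning the caller still owns the skb, and 1 once the skb has been queued. The wakeup now carries an explicit poll key (POLLIN | POLLRDNORM | POLLRDBAND), and the delayed-ACK timeout is based on tcp_rto_min(sk) rather than the constant TCP_RTO_MIN. A minimal sketch of the caller contract, assuming a receive path shaped like the IPv4 softirq handler; simplified and not quoted from this patch:

static int example_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret = 0;

	if (!sock_owned_by_user(sk)) {
		/* a return of 1 means the skb now sits on the prequeue */
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else {
		sk_add_backlog(sk, skb);
	}
	return ret;
}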