Merge branch 'upstream-fixes' into upstream
[pandora-kernel.git] net/ipv4/tcp_minisocks.c
index b1a63b2..624e2b2 100644
@@ -20,7 +20,6 @@
  *             Jorge Cwik, <jorge@laser.satlink.net>
  */
 
-#include <linux/config.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sysctl.h>
@@ -41,7 +40,7 @@ int sysctl_tcp_abort_on_overflow;
 struct inet_timewait_death_row tcp_death_row = {
        .sysctl_max_tw_buckets = NR_FILE * 2,
        .period         = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
-       .death_lock     = SPIN_LOCK_UNLOCKED,
+       .death_lock     = __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
        .hashinfo       = &tcp_hashinfo,
        .tw_timer       = TIMER_INITIALIZER(inet_twdr_hangman, 0,
                                            (unsigned long)&tcp_death_row),
@@ -158,7 +157,7 @@ kill_with_rst:
                /* I am shamed, but failed to make it more elegant.
                 * Yes, it is direct reference to IP, which is impossible
                 * to generalize to IPv6. Taking into account that IPv6
-                * do not undertsnad recycling in any case, it not
+                * do not understand recycling in any case, it not
                 * a big problem in practice. --ANK */
                if (tw->tw_family == AF_INET &&
                    tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
@@ -194,7 +193,7 @@ kill_with_rst:
                /* In window segment, it may be only reset or bare ack. */
 
                if (th->rst) {
-                       /* This is TIME_WAIT assasination, in two flavors.
+                       /* This is TIME_WAIT assassination, in two flavors.
                         * Oh well... nobody has a sufficient solution to this
                         * protocol bug yet.
                         */
@@ -274,18 +273,18 @@ kill:
 void tcp_time_wait(struct sock *sk, int state, int timeo)
 {
        struct inet_timewait_sock *tw = NULL;
+       const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_sock *tp = tcp_sk(sk);
        int recycle_ok = 0;
 
        if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
-               recycle_ok = tp->af_specific->remember_stamp(sk);
+               recycle_ok = icsk->icsk_af_ops->remember_stamp(sk);
 
        if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
                tw = inet_twsk_alloc(sk, state);
 
        if (tw != NULL) {
                struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
-               const struct inet_connection_sock *icsk = inet_csk(sk);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
 
                tw->tw_rcv_wscale       = tp->rx_opt.rcv_wscale;
@@ -298,10 +297,12 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);
-                       struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);
+                       struct inet6_timewait_sock *tw6;
 
-                       ipv6_addr_copy(&tcp6tw->tw_v6_daddr, &np->daddr);
-                       ipv6_addr_copy(&tcp6tw->tw_v6_rcv_saddr, &np->rcv_saddr);
+                       tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
+                       tw6 = inet6_twsk((struct sock *)tw);
+                       ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
+                       ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
                        tw->tw_ipv6only = np->ipv6only;
                }
 #endif
@@ -380,6 +381,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                 */
                newtp->snd_cwnd = 2;
                newtp->snd_cwnd_cnt = 0;
+               newtp->bytes_acked = 0;
 
                newtp->frto_counter = 0;
                newtp->frto_highmark = 0;
@@ -437,8 +439,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                        newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
                newtp->rx_opt.mss_clamp = req->mss;
                TCP_ECN_openreq_child(newtp, req);
-               if (newtp->ecn_flags&TCP_ECN_OK)
-                       sock_set_flag(newsk, SOCK_NO_LARGESEND);
 
                TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
        }
@@ -455,7 +455,6 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
                           struct request_sock **prev)
 {
        struct tcphdr *th = skb->h.th;
-       struct tcp_sock *tp = tcp_sk(sk);
        u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        int paws_reject = 0;
        struct tcp_options_received tmp_opt;
@@ -550,7 +549,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 
        /* RFC793 page 36: "If the connection is in any non-synchronized state ...
         *                  and the incoming segment acknowledges something not yet
-        *                  sent (the segment carries an unaccaptable ACK) ...
+        *                  sent (the segment carries an unacceptable ACK) ...
         *                  a reset is sent."
         *
         * Invalid ACK: reset will be sent by listening socket
@@ -590,8 +589,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
                /* RFC793: "second check the RST bit" and
                 *         "fourth, check the SYN bit"
                 */
-               if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
+               if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
+                       TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
                        goto embryonic_reset;
+               }
 
                /* ACK sequence verified above, just make sure ACK is
                 * set.  If ACK not set, just silently drop the packet.
@@ -612,7 +613,8 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
                 * ESTABLISHED STATE. If it will be dropped after
                 * socket is created, wait for troubles.
                 */
-               child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
+               child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb,
+                                                                req, NULL);
                if (child == NULL)
                        goto listen_overflow;