diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 5f72b94..7508f11 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -335,7 +335,8 @@ begin:
        h = __nf_conntrack_find(net, tuple);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
-               if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+               if (unlikely(nf_ct_is_dying(ct) ||
+                            !atomic_inc_not_zero(&ct->ct_general.use)))
                        h = NULL;
                else {
                        if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
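
What this hunk relies on: the conntrack slab is recycled under RCU (SLAB_DESTROY_BY_RCU), so an entry found during the lockless walk can be freed and reused for a different connection at any moment. The lookup therefore skips entries already flagged as dying, takes a reference only via atomic_inc_not_zero(), and re-checks the tuple once the reference is held. A rough userspace C11 analogue of that discipline follows; struct entry, table_find() and entry_put() are hypothetical stand-ins, and only the ordering of the dying test, the conditional refcount grab and the key re-check mirrors the kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct entry {
        unsigned long key;
        atomic_uint ref;        /* 0 means the entry is being freed */
        atomic_bool dying;      /* set once teardown has started */
};

struct entry *table_find(unsigned long key);    /* lockless hash walk (assumed helper) */
void entry_put(struct entry *e);                /* drop a reference (assumed helper) */

/* Take a reference only if the count has not already hit zero;
 * the userspace counterpart of atomic_inc_not_zero(). */
static bool get_ref_not_zero(struct entry *e)
{
        unsigned int old = atomic_load(&e->ref);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&e->ref, &old, old + 1))
                        return true;
        }
        return false;
}

struct entry *lookup(unsigned long key)
{
        struct entry *e;

begin:
        e = table_find(key);
        if (!e)
                return NULL;

        /* Same test as the patch: refuse entries that are on their way
         * out, and never resurrect a refcount that already reached zero. */
        if (atomic_load(&e->dying) || !get_ref_not_zero(e))
                return NULL;

        /* Between table_find() and the refcount grab the slot may have
         * been freed and reused for a different key; re-validate and
         * retry the whole lookup if so, just like the tuple re-check above. */
        if (e->key != key) {
                entry_put(e);
                goto begin;
        }
        return e;
}
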
@@ -425,7 +426,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        /* Remove from unconfirmed list */
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
 
-       __nf_conntrack_hash_insert(ct, hash, repl_hash);
        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
@@ -433,8 +433,16 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        add_timer(&ct->timeout);
        atomic_inc(&ct->ct_general.use);
        set_bit(IPS_CONFIRMED_BIT, &ct->status);
+
+       /* Since the lookup is lockless, hash insertion must be done after
+        * starting the timer and setting the CONFIRMED bit. The RCU barriers
+        * guarantee that no other CPU can find the conntrack before the above
+        * stores are visible.
+        */
+       __nf_conntrack_hash_insert(ct, hash, repl_hash);
        NF_CT_STAT_INC(net, insert);
        spin_unlock_bh(&nf_conntrack_lock);
+
        help = nfct_help(ct);
        if (help && help->helper)
                nf_conntrack_event_cache(IPCT_HELPER, ct);
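
The other half of the change moves __nf_conntrack_hash_insert() so that it runs only after the timer is armed, the extra reference is taken and IPS_CONFIRMED is set: with lockless readers, the hash insertion is the publication point, and everything a reader may depend on has to be in place before it. A minimal userspace sketch of that publish-after-init ordering, assuming a single writer (the kernel path still holds nf_conntrack_lock here) and hypothetical names:

#include <stdatomic.h>

struct node {
        int confirmed;                  /* stands in for IPS_CONFIRMED_BIT */
        int refs;                       /* stands in for ct->ct_general.use */
        struct node *next;
};

static _Atomic(struct node *) head;     /* list that lockless readers walk */

void publish(struct node *n)
{
        /* Finish every store a lockless reader may depend on first... */
        n->confirmed = 1;
        n->refs = 2;
        n->next = atomic_load_explicit(&head, memory_order_relaxed);

        /* ...then link the node in with release semantics, so a reader
         * that observes the new head also observes the stores above.
         * This is the ordering the patch comment attributes to the RCU
         * barriers in the insertion path. */
        atomic_store_explicit(&head, n, memory_order_release);
}
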
@@ -503,7 +511,8 @@ static noinline int early_drop(struct net *net, unsigned int hash)
                        cnt++;
                }
 
-               if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+               if (ct && unlikely(nf_ct_is_dying(ct) ||
+                                  !atomic_inc_not_zero(&ct->ct_general.use)))
                        ct = NULL;
                if (ct || cnt >= NF_CT_EVICTION_RANGE)
                        break;
@@ -1267,13 +1276,19 @@ err_cache:
        return ret;
 }
 
+/*
+ * We need to use special "null" values, not used in hash table
+ */
+#define UNCONFIRMED_NULLS_VAL  ((1<<30)+0)
+#define DYING_NULLS_VAL                ((1<<30)+1)
+
 static int nf_conntrack_init_net(struct net *net)
 {
        int ret;
 
        atomic_set(&net->ct.count, 0);
-       INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, 0);
-       INIT_HLIST_NULLS_HEAD(&net->ct.dying, 0);
+       INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
+       INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
        net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
        if (!net->ct.stat) {
                ret = -ENOMEM;
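
The last hunk gives the unconfirmed and dying lists their own nulls end markers. An hlist_nulls chain ends in an encoded value rather than a plain NULL, and when every list carries a distinct value, a lockless reader that got dragged onto the wrong list (because the entry it was traversing was re-linked) can detect it at the end of the walk and restart; the (1<<30)-based constants simply sit far above any realistic bucket index, so they cannot collide with a hash chain's own value. A condensed reader-side sketch in the style of the lookup in this file (net, tuple, hash and the begin: label are assumed from the surrounding function):

        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;

begin:
        hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
                if (nf_ct_tuple_equal(tuple, &h->tuple))
                        return h;
        }
        /* The walk ended on some list's nulls marker. If it is not the
         * marker of the bucket we started in (e.g. it is
         * UNCONFIRMED_NULLS_VAL, DYING_NULLS_VAL or another bucket's
         * value), an entry was re-linked under us: restart the search. */
        if (get_nulls_value(n) != hash)
                goto begin;
        return NULL;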