Merge branch 'upstream-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/linvil...
[pandora-kernel.git] / net/ipv4/inetpeer.c
index ce5fe3f..f072f38 100644
@@ -86,7 +86,7 @@ static struct inet_peer *peer_root = peer_avl_empty;
 static DEFINE_RWLOCK(peer_pool_lock);
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 
-static volatile int peer_total;
+static int peer_total;
 /* Exported for sysctl_net_ipv4.  */
 int inet_peer_threshold = 65536 + 128; /* start to throw entries more
                                         * aggressively at this stage */
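Dropping volatile from peer_total is safe because every access to the
counter happens under peer_pool_lock, and the lock's acquire/release
barriers already force the compiler to re-read the value; volatile adds
nothing here and can mask real locking bugs. A minimal sketch of the
pattern, using a hypothetical peer_total_inc() helper that is not in
this file:

	static void peer_total_inc(void)
	{
		write_lock_bh(&peer_pool_lock);
		peer_total++;	/* serialized and ordered by the lock */
		write_unlock_bh(&peer_pool_lock);
	}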
@@ -94,10 +94,8 @@ int inet_peer_minttl = 120 * HZ;     /* TTL under high load: 120 sec */
 int inet_peer_maxttl = 10 * 60 * HZ;   /* usual time to live: 10 min */
 
 static struct inet_peer *inet_peer_unused_head;
-/* Exported for inet_putpeer inline function.  */
-struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
-DEFINE_SPINLOCK(inet_peer_unused_lock);
-#define PEER_MAX_CLEANUP_WORK 30
+static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
+static DEFINE_SPINLOCK(inet_peer_unused_lock);
 
 static void peer_check_expire(unsigned long dummy);
 static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
@@ -126,12 +124,9 @@ void __init inet_initpeers(void)
 
        peer_cachep = kmem_cache_create("inet_peer_cache",
                        sizeof(struct inet_peer),
-                       0, SLAB_HWCACHE_ALIGN,
+                       0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                        NULL, NULL);
 
-       if (!peer_cachep)
-               panic("cannot create inet_peer_cache");
-
        /* All the timers, started at system startup tend
           to synchronize. Perturb it a bit.
         */
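SLAB_PANIC makes the slab allocator panic internally if the cache
cannot be created, so kmem_cache_create() can be treated as infallible
and the open-coded NULL check and panic() become dead weight.
Illustrative sketch only (the cache name and struct are made up; the
six-argument form with ctor/dtor pointers matches this tree's vintage):

	example_cachep = kmem_cache_create("example_cache",
			sizeof(struct example), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL, NULL);
	/* no "if (!example_cachep) panic(...)" needed */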
@@ -166,7 +161,7 @@ static void unlink_from_unused(struct inet_peer *p)
        for (u = peer_root; u != peer_avl_empty; ) {            \
                if (daddr == u->v4daddr)                        \
                        break;                                  \
-               if (daddr < u->v4daddr)                         \
+               if ((__force __u32)daddr < (__force __u32)u->v4daddr)   \
                        v = &u->avl_left;                       \
                else                                            \
                        v = &u->avl_right;                      \
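The key is becoming __be32 (see the inet_getpeer() hunk below), and
sparse will not silently let a relational operator strip the endianness
annotation; the __force casts state that byte order is deliberately
ignored. That is fine here: an AVL tree only needs some consistent
total order over its keys, not one that is meaningful in host byte
order. The same comparison as a standalone, hypothetical comparator:

	static int be32_cmp(__be32 a, __be32 b)
	{
		__u32 x = (__force __u32)a;
		__u32 y = (__force __u32)b;

		return x < y ? -1 : (x > y ? 1 : 0);
	}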
@@ -304,8 +299,7 @@ static void unlink_from_pool(struct inet_peer *p)
                        /* look for a node to insert instead of p */
                        struct inet_peer *t;
                        t = lookup_rightempty(p);
-                       if (*stackptr[-1] != t)
-                               BUG();
+                       BUG_ON(*stackptr[-1] != t);
                        **--stackptr = t->avl_left;
                        /* t is removed, t->v4daddr > x->v4daddr for any
                         * x in p->avl_left subtree.
@@ -314,8 +308,7 @@ static void unlink_from_pool(struct inet_peer *p)
                        t->avl_left = p->avl_left;
                        t->avl_right = p->avl_right;
                        t->avl_height = p->avl_height;
-                       if (delp[1] != &p->avl_left)
-                               BUG();
+                       BUG_ON(delp[1] != &p->avl_left);
                        delp[1] = &t->avl_left; /* was &p->avl_left */
                }
                peer_avl_rebalance(stack, stackptr);
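This hunk and the previous one are pure idiom: BUG_ON(cond) is the
canonical one-line spelling of the open-coded assertion, e.g.

	if (delp[1] != &p->avl_left)
		BUG();
	/* is equivalent to */
	BUG_ON(delp[1] != &p->avl_left);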
@@ -345,7 +338,8 @@ static int cleanup_once(unsigned long ttl)
        spin_lock_bh(&inet_peer_unused_lock);
        p = inet_peer_unused_head;
        if (p != NULL) {
-               if (time_after(p->dtime + ttl, jiffies)) {
+               __u32 delta = (__u32)jiffies - p->dtime;
+               if (delta < ttl) {
                        /* Do not prune fresh entries. */
                        spin_unlock_bh(&inet_peer_unused_lock);
                        return -1;
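The old test fed p->dtime + ttl to time_after(). Now that dtime is
stored as a truncated 32-bit stamp (see inet_putpeer() at the bottom of
this patch) while jiffies is an unsigned long, that arithmetic mixes
widths and goes wrong on 64-bit machines once jiffies grows past 32
bits. Computing the age as an unsigned 32-bit difference is
width-correct and wrap-safe, because unsigned subtraction is modular.
A hypothetical helper distilling the new test:

	static int peer_is_fresh(__u32 dtime, unsigned long ttl)
	{
		__u32 age = (__u32)jiffies - dtime;	/* wrap-safe */

		return age < ttl;	/* fresh: do not prune yet */
	}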
@@ -373,7 +367,7 @@ static int cleanup_once(unsigned long ttl)
 }
 
 /* Called with or without local BH being disabled. */
-struct inet_peer *inet_getpeer(__u32 daddr, int create)
+struct inet_peer *inet_getpeer(__be32 daddr, int create)
 {
        struct inet_peer *p, *n;
        struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
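At runtime __be32 is an ordinary 32-bit quantity; the annotation exists
only for sparse, which will now flag callers that pass a host-order
address. Hypothetical call site:

	__be32 daddr = htonl(0x7f000001);	/* 127.0.0.1 */
	struct inet_peer *peer = inet_getpeer(daddr, 1);
	/* inet_getpeer(0x7f000001, 1) would draw a sparse warning */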
@@ -437,7 +431,7 @@ out_free:
 /* Called with local BH disabled. */
 static void peer_check_expire(unsigned long dummy)
 {
-       int i;
+       unsigned long now = jiffies;
        int ttl;
 
        if (peer_total >= inet_peer_threshold)
@@ -446,7 +440,10 @@ static void peer_check_expire(unsigned long dummy)
                ttl = inet_peer_maxttl
                                - (inet_peer_maxttl - inet_peer_minttl) / HZ *
                                        peer_total / inet_peer_threshold * HZ;
-       for (i = 0; i < PEER_MAX_CLEANUP_WORK && !cleanup_once(ttl); i++);
+       while (!cleanup_once(ttl)) {
+               if (jiffies != now)
+                       break;
+       }
 
        /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
         * interval depending on the total number of entries (more entries,
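The fixed work budget (PEER_MAX_CLEANUP_WORK, 30 entries, deleted
above) becomes a time budget: keep pruning until either cleanup_once()
reports nothing expendable or the jiffy counter advances, i.e. spend at
most about one tick per timer run. Generic shape of the pattern, with
do_one_item() as a made-up stand-in for cleanup_once(ttl):

	unsigned long now = jiffies;

	while (!do_one_item()) {	/* 0 means "did some work" */
		if (jiffies != now)	/* tick moved on: budget spent */
			break;
	}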
@@ -460,3 +457,16 @@ static void peer_check_expire(unsigned long dummy)
                                peer_total / inet_peer_threshold * HZ;
        add_timer(&peer_periodic_timer);
 }
+
+void inet_putpeer(struct inet_peer *p)
+{
+       spin_lock_bh(&inet_peer_unused_lock);
+       if (atomic_dec_and_test(&p->refcnt)) {
+               p->unused_prevp = inet_peer_unused_tailp;
+               p->unused_next = NULL;
+               *inet_peer_unused_tailp = p;
+               inet_peer_unused_tailp = &p->unused_next;
+               p->dtime = (__u32)jiffies;
+       }
+       spin_unlock_bh(&inet_peer_unused_lock);
+}
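inet_putpeer() was previously an inline in the header, which is why the
unused-list tail pointer and its lock had to be exported; moved out of
line (enabling the static markings in the second hunk), it owns the
list internals: the final reference drop appends the peer to the unused
FIFO and stamps it with a truncated jiffies value that cleanup_once()
later measures age against. Typical lifecycle at a hypothetical call
site:

	struct inet_peer *peer = inet_getpeer(daddr, 1);

	if (peer) {
		/* ... use the peer ... */
		inet_putpeer(peer);	/* last ref: queued for pruning */
	}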