[NET]: reduce sizeof(struct inet_peer), cleanup, change in peer_check_expire()
authorEric Dumazet <dada1@cosmosbay.com>
Fri, 13 Oct 2006 04:21:06 +0000 (21:21 -0700)
committerDavid S. Miller <davem@sunset.davemloft.net>
Mon, 16 Oct 2006 06:14:17 +0000 (23:14 -0700)
1) Shrink struct inet_peer on 64-bit platforms.

include/net/inetpeer.h
net/ipv4/inetpeer.c

index 925573f..f13cc0c 100644 (file)
@@ -19,7 +19,7 @@ struct inet_peer
 {
        struct inet_peer        *avl_left, *avl_right;
        struct inet_peer        *unused_next, **unused_prevp;
-       unsigned long           dtime;          /* the time of last use of not
+       __u32                   dtime;          /* the time of last use of not
                                                 * referenced entries */
        atomic_t                refcnt;
        __be32                  v4daddr;        /* peer's address */
@@ -35,21 +35,8 @@ void                 inet_initpeers(void) __init;
 /* can be called with or without local BH being disabled */
 struct inet_peer       *inet_getpeer(__be32 daddr, int create);
 
-extern spinlock_t inet_peer_unused_lock;
-extern struct inet_peer **inet_peer_unused_tailp;
 /* can be called from BH context or outside */
-static inline void     inet_putpeer(struct inet_peer *p)
-{
-       spin_lock_bh(&inet_peer_unused_lock);
-       if (atomic_dec_and_test(&p->refcnt)) {
-               p->unused_prevp = inet_peer_unused_tailp;
-               p->unused_next = NULL;
-               *inet_peer_unused_tailp = p;
-               inet_peer_unused_tailp = &p->unused_next;
-               p->dtime = jiffies;
-       }
-       spin_unlock_bh(&inet_peer_unused_lock);
-}
+extern void inet_putpeer(struct inet_peer *p);
 
 extern spinlock_t inet_peer_idlock;
 /* can be called with or without local BH being disabled */
index 2b1a54b..f072f38 100644 (file)
@@ -94,10 +94,8 @@ int inet_peer_minttl = 120 * HZ;     /* TTL under high load: 120 sec */
 int inet_peer_maxttl = 10 * 60 * HZ;   /* usual time to live: 10 min */
 
 static struct inet_peer *inet_peer_unused_head;
-/* Exported for inet_putpeer inline function.  */
-struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
-DEFINE_SPINLOCK(inet_peer_unused_lock);
-#define PEER_MAX_CLEANUP_WORK 30
+static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
+static DEFINE_SPINLOCK(inet_peer_unused_lock);
 
 static void peer_check_expire(unsigned long dummy);
 static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
@@ -340,7 +338,8 @@ static int cleanup_once(unsigned long ttl)
        spin_lock_bh(&inet_peer_unused_lock);
        p = inet_peer_unused_head;
        if (p != NULL) {
-               if (time_after(p->dtime + ttl, jiffies)) {
+               __u32 delta = (__u32)jiffies - p->dtime;
+               if (delta < ttl) {
                        /* Do not prune fresh entries. */
                        spin_unlock_bh(&inet_peer_unused_lock);
                        return -1;
@@ -432,7 +431,7 @@ out_free:
 /* Called with local BH disabled. */
 static void peer_check_expire(unsigned long dummy)
 {
-       int i;
+       unsigned long now = jiffies;
        int ttl;
 
        if (peer_total >= inet_peer_threshold)
@@ -441,7 +440,10 @@ static void peer_check_expire(unsigned long dummy)
                ttl = inet_peer_maxttl
                                - (inet_peer_maxttl - inet_peer_minttl) / HZ *
                                        peer_total / inet_peer_threshold * HZ;
-       for (i = 0; i < PEER_MAX_CLEANUP_WORK && !cleanup_once(ttl); i++);
+       while (!cleanup_once(ttl)) {
+               if (jiffies != now)
+                       break;
+       }
 
        /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
         * interval depending on the total number of entries (more entries,
@@ -455,3 +457,16 @@ static void peer_check_expire(unsigned long dummy)
                                peer_total / inet_peer_threshold * HZ;
        add_timer(&peer_periodic_timer);
 }
+
+void inet_putpeer(struct inet_peer *p)
+{
+       spin_lock_bh(&inet_peer_unused_lock);
+       if (atomic_dec_and_test(&p->refcnt)) {
+               p->unused_prevp = inet_peer_unused_tailp;
+               p->unused_next = NULL;
+               *inet_peer_unused_tailp = p;
+               inet_peer_unused_tailp = &p->unused_next;
+               p->dtime = (__u32)jiffies;
+       }
+       spin_unlock_bh(&inet_peer_unused_lock);
+}