inode: move to per-sb LRU locks
diff --git a/fs/inode.c b/fs/inode.c
index 8c34913..0450e25 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -33,7 +33,7 @@
  *
  * inode->i_lock protects:
  *   inode->i_state, inode->i_hash, __iget()
- * inode_lru_lock protects:
+ * inode->i_sb->s_inode_lru_lock protects:
  *   inode->i_sb->s_inode_lru, inode->i_lru
  * inode_sb_list_lock protects:
  *   sb->s_inodes, inode->i_sb_list
@@ -46,7 +46,7 @@
  *
  * inode_sb_list_lock
  *   inode->i_lock
- *     inode_lru_lock
+ *     inode->i_sb->s_inode_lru_lock
  *
  * inode_wb_list_lock
  *   inode->i_lock
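
With the lock now per-sb, the ordering above says inode->i_lock is taken
first and the LRU lock nests inside it. A minimal sketch of that nesting
(not part of the patch; it assumes s_inode_lru_lock has been added to
struct super_block elsewhere in this series):

	spin_lock(&inode->i_lock);
	spin_lock(&inode->i_sb->s_inode_lru_lock);
	/* inode->i_lru and i_sb->s_inode_lru may be manipulated here */
	spin_unlock(&inode->i_sb->s_inode_lru_lock);
	spin_unlock(&inode->i_lock);

The LRU scan in shrink_icache_sb() below has to take the two locks in the
opposite order, which is why it falls back to a trylock there.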
@@ -64,8 +64,6 @@ static unsigned int i_hash_shift __read_mostly;
 static struct hlist_head *inode_hashtable __read_mostly;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
 
-static DEFINE_SPINLOCK(inode_lru_lock);
-
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
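
The global inode_lru_lock definition goes away below; its replacement
lives in struct super_block next to the per-sb LRU list added earlier in
the series. A sketch of the fields this patch relies on (the real
declaration sits in include/linux/fs.h and is not part of this diff; the
cacheline alignment is an assumption):

	struct super_block {
		/* ... */
		struct list_head s_inode_lru;	/* unused inode LRU */
		spinlock_t s_inode_lru_lock ____cacheline_aligned_in_smp;
		int s_nr_inodes_unused;		/* inodes on the LRU */
		/* ... */
	};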
 
@@ -342,24 +340,24 @@ EXPORT_SYMBOL(ihold);
 
 static void inode_lru_list_add(struct inode *inode)
 {
-       spin_lock(&inode_lru_lock);
+       spin_lock(&inode->i_sb->s_inode_lru_lock);
        if (list_empty(&inode->i_lru)) {
                list_add(&inode->i_lru, &inode->i_sb->s_inode_lru);
                inode->i_sb->s_nr_inodes_unused++;
                this_cpu_inc(nr_unused);
        }
-       spin_unlock(&inode_lru_lock);
+       spin_unlock(&inode->i_sb->s_inode_lru_lock);
 }
 
 static void inode_lru_list_del(struct inode *inode)
 {
-       spin_lock(&inode_lru_lock);
+       spin_lock(&inode->i_sb->s_inode_lru_lock);
        if (!list_empty(&inode->i_lru)) {
                list_del_init(&inode->i_lru);
                inode->i_sb->s_nr_inodes_unused--;
                this_cpu_dec(nr_unused);
        }
-       spin_unlock(&inode_lru_lock);
+       spin_unlock(&inode->i_sb->s_inode_lru_lock);
 }
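
Both helpers now serialize against their own superblock only. For that to
be safe the lock must be initialized before any inode can reach the LRU;
a plausible counterpart in alloc_super() (fs/super.c, assumed rather than
shown by this diff) would be:

	INIT_LIST_HEAD(&s->s_inode_lru);	/* from the per-sb LRU patch */
	spin_lock_init(&s->s_inode_lru_lock);	/* sketch: added with this patch */
	s->s_nr_inodes_unused = 0;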
 
 /**
@@ -615,7 +613,8 @@ static int can_unuse(struct inode *inode)
 
 /*
  * Scan `goal' inodes on the unused list for freeable ones. They are moved to a
- * temporary list and then are freed outside inode_lru_lock by dispose_list().
+ * temporary list and then are freed outside sb->s_inode_lru_lock by
+ * dispose_list().
  *
  * Any inodes which are pinned purely because of attached pagecache have their
  * pagecache removed.  If the inode has metadata buffers attached to
@@ -635,7 +634,7 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
        int nr_scanned;
        unsigned long reap = 0;
 
-       spin_lock(&inode_lru_lock);
+       spin_lock(&sb->s_inode_lru_lock);
        for (nr_scanned = *nr_to_scan; nr_scanned >= 0; nr_scanned--) {
                struct inode *inode;
 
@@ -645,7 +644,7 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
                inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru);
 
                /*
-                * we are inverting the inode_lru_lock/inode->i_lock here,
+                * we are inverting the sb->s_inode_lru_lock/inode->i_lock here,
                 * so use a trylock. If we fail to get the lock, just move the
                 * inode to the back of the list so we don't spin on it.
                 */
@@ -677,12 +676,12 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
                if (inode_has_buffers(inode) || inode->i_data.nrpages) {
                        __iget(inode);
                        spin_unlock(&inode->i_lock);
-                       spin_unlock(&inode_lru_lock);
+                       spin_unlock(&sb->s_inode_lru_lock);
                        if (remove_inode_buffers(inode))
                                reap += invalidate_mapping_pages(&inode->i_data,
                                                                0, -1);
                        iput(inode);
-                       spin_lock(&inode_lru_lock);
+                       spin_lock(&sb->s_inode_lru_lock);
 
                        if (inode != list_entry(sb->s_inode_lru.next,
                                                struct inode, i_lru))
@@ -707,7 +706,7 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
                __count_vm_events(KSWAPD_INODESTEAL, reap);
        else
                __count_vm_events(PGINODESTEAL, reap);
-       spin_unlock(&inode_lru_lock);
+       spin_unlock(&sb->s_inode_lru_lock);
        *nr_to_scan = nr_scanned;
 
        dispose_list(&freeable);
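
The payoff is that LRU traffic on one filesystem no longer contends with
any other. A purely illustrative caller-side sketch (sb_a and sb_b are
hypothetical superblocks; the shrinker plumbing that really drives this
function is elided):

	int nr = 128;
	shrink_icache_sb(sb_a, &nr);	/* touches only sb_a->s_inode_lru_lock */
	nr = 128;
	shrink_icache_sb(sb_b, &nr);	/* independent of sb_a's LRU */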