[PATCH] dcache: assorted cleanups
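
A collection of small dcache cleanups:

 - remove the obsolete <linux/config.h> include
 - initialise rename_lock with DEFINE_SEQLOCK() instead of the
   deprecated SEQLOCK_UNLOCKED static initialiser
 - un-inline prune_one_dentry() and document its locking rules
 - drop a dubious unlikely() on the sb test in prune_dcache()
 - replace open-coded list operations with list_move() and
   list_add_tail()
 - remove shrink_dcache_anon(); unused anonymous dentries are now
   pruned through prune_dcache(found, sb) like everything else
 - take the nested d_lock in d_move() with spin_lock_nested() so
   lockdep can tell the deliberate nesting from a recursion bug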
diff --git a/fs/dcache.c b/fs/dcache.c
index 385f5db..1b4a3a3 100644
@@ -14,7 +14,6 @@
  * the dcache entry is deleted or garbage collected.
  */
 
-#include <linux/config.h>
 #include <linux/syscalls.h>
 #include <linux/string.h>
 #include <linux/mm.h>
@@ -39,7 +38,7 @@ int sysctl_vfs_cache_pressure __read_mostly = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 
  __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
-static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
+static __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 
 EXPORT_SYMBOL(dcache_lock);
 
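
DEFINE_SEQLOCK() is the standard initialiser for static seqlocks and replaces
the open-coded SEQLOCK_UNLOCKED form, which was deprecated and later removed.
For reference, a minimal sketch of the seqlock idiom rename_lock exists for
(the lock name and the protected data here are illustrative, not from this
file):

        #include <linux/seqlock.h>

        static DEFINE_SEQLOCK(example_lock);

        /* writer side: concurrent readers will notice and retry */
        write_seqlock(&example_lock);
        /* ... update the protected data ... */
        write_sequnlock(&example_lock);

        /* reader side: loop until no writer intervened */
        unsigned seq;
        do {
                seq = read_seqbegin(&example_lock);
                /* ... read the protected data ... */
        } while (read_seqretry(&example_lock, seq));
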
@@ -359,12 +358,13 @@ restart:
 }
 
 /*
- * Throw away a dentry - free the inode, dput the parent.
- * This requires that the LRU list has already been
- * removed.
+ * Throw away a dentry - free the inode, dput the parent.  This requires that
+ * the LRU list has already been removed.
+ *
  * Called with dcache_lock, drops it and then regains.
+ * Called with dentry->d_lock held, drops it.
  */
-static inline void prune_one_dentry(struct dentry * dentry)
+static void prune_one_dentry(struct dentry * dentry)
 {
        struct dentry * parent;
 
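
The updated comment tightens the locking contract: the caller must hold both
dcache_lock and the victim's d_lock, and prune_one_dentry() drops both before
re-acquiring dcache_lock. A hypothetical caller would therefore look roughly
like this (illustrative only, not the actual prune_dcache() body):

        spin_lock(&dcache_lock);
        /* ... pick a victim dentry off the LRU ... */
        spin_lock(&dentry->d_lock);
        prune_one_dentry(dentry);       /* drops d_lock and dcache_lock,
                                         * returns with dcache_lock held */
        spin_unlock(&dcache_lock);
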
@@ -405,7 +405,7 @@ static void prune_dcache(int count, struct super_block *sb)
                cond_resched_lock(&dcache_lock);
 
                tmp = dentry_unused.prev;
-               if (unlikely(sb)) {
+               if (sb) {
                        /* Try to find a dentry for this sb, but don't try
                         * too hard, if they aren't near the tail they will
                         * be moved down again soon
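
Dropping the unlikely() presumably reflects that a non-NULL sb is no longer a
rare case on this path, and a wrong hint pessimises the common branch. The
annotations are plain __builtin_expect() wrappers from <linux/compiler.h>:

        #define likely(x)       __builtin_expect(!!(x), 1)
        #define unlikely(x)     __builtin_expect(!!(x), 0)
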
@@ -521,8 +521,7 @@ void shrink_dcache_sb(struct super_block * sb)
                dentry = list_entry(tmp, struct dentry, d_lru);
                if (dentry->d_sb != sb)
                        continue;
-               list_del(tmp);
-               list_add(tmp, &dentry_unused);
+               list_move(tmp, &dentry_unused);
        }
 
        /*
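
list_move() is the idiomatic replacement for an open-coded delete-and-re-add;
in <linux/list.h> of this vintage it reads:

        static inline void list_move(struct list_head *list,
                                     struct list_head *head)
        {
                __list_del(list->prev, list->next);
                list_add(list, head);
        }
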
@@ -637,7 +636,7 @@ resume:
                 * of the unused list for prune_dcache
                 */
                if (!atomic_read(&dentry->d_count)) {
-                       list_add(&dentry->d_lru, dentry_unused.prev);
+                       list_add_tail(&dentry->d_lru, &dentry_unused);
                        dentry_stat.nr_unused++;
                        found++;
                }
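
list_add(e, head->prev) inserts e between head->prev and head, i.e. at the
tail of the list, so list_add_tail() expresses the same operation directly:

        static inline void list_add_tail(struct list_head *new,
                                         struct list_head *head)
        {
                __list_add(new, head->prev, head);
        }
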
@@ -686,46 +685,6 @@ void shrink_dcache_parent(struct dentry * parent)
                prune_dcache(found, parent->d_sb);
 }
 
-/**
- * shrink_dcache_anon - further prune the cache
- * @head: head of d_hash list of dentries to prune
- *
- * Prune the dentries that are anonymous
- *
- * parsing d_hash list does not hlist_for_each_entry_rcu() as it
- * done under dcache_lock.
- *
- */
-void shrink_dcache_anon(struct super_block *sb)
-{
-       struct hlist_node *lp;
-       struct hlist_head *head = &sb->s_anon;
-       int found;
-       do {
-               found = 0;
-               spin_lock(&dcache_lock);
-               hlist_for_each(lp, head) {
-                       struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
-                       if (!list_empty(&this->d_lru)) {
-                               dentry_stat.nr_unused--;
-                               list_del_init(&this->d_lru);
-                       }
-
-                       /* 
-                        * move only zero ref count dentries to the end 
-                        * of the unused list for prune_dcache
-                        */
-                       if (!atomic_read(&this->d_count)) {
-                               list_add_tail(&this->d_lru, &dentry_unused);
-                               dentry_stat.nr_unused++;
-                               found++;
-                       }
-               }
-               spin_unlock(&dcache_lock);
-               prune_dcache(found, sb);
-       } while(found);
-}
-
 /*
  * Scan `nr' dentries and return the number which remain.
  *
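
With shrink_dcache_anon() gone, unused anonymous (NFS-filehandle) dentries on
sb->s_anon are presumably reclaimed through the same prune_dcache(found, sb)
path as everything else, which already filters by superblock as shown in the
earlier hunk. Incidentally, the removed function open-coded its hash-chain
walk with hlist_for_each() plus hlist_entry(); the idiomatic form under the
then-current four-argument macro would have been (sketch):

        struct dentry *this;
        struct hlist_node *lp;

        hlist_for_each_entry(this, lp, head, d_hash) {
                /* ... */
        }
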
@@ -1380,10 +1339,10 @@ void d_move(struct dentry * dentry, struct dentry * target)
         */
        if (target < dentry) {
                spin_lock(&target->d_lock);
-               spin_lock(&dentry->d_lock);
+               spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
        } else {
                spin_lock(&dentry->d_lock);
-               spin_lock(&target->d_lock);
+               spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
        }
 
        /* Move the dentry to the target hash queue, if on different bucket */
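
Taking the two d_locks in address order keeps the lock ordering globally
consistent, so two concurrent d_move() calls cannot deadlock ABBA-style, and
the spin_lock_nested() annotation tells lockdep that the second acquisition of
a dentry d_lock is deliberate nesting rather than recursion on the same lock.
The same pattern in generic form (sketch; the helper name is illustrative):

        static void lock_pair(spinlock_t *a, spinlock_t *b)
        {
                if (a < b) {
                        spin_lock(a);
                        spin_lock_nested(b, SINGLE_DEPTH_NESTING);
                } else {
                        spin_lock(b);
                        spin_lock_nested(a, SINGLE_DEPTH_NESTING);
                }
        }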