vfs: drop conditional inode prefetch in __d_lookup_rcu
[pandora-kernel.git] / fs / dcache.c
index 129a357..fbdcbca 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/hardirq.h>
 #include <linux/bit_spinlock.h>
 #include <linux/rculist_bl.h>
+#include <linux/prefetch.h>
 #include "internal.h"
 
 /*
@@ -99,12 +100,9 @@ static struct kmem_cache *dentry_cache __read_mostly;
 static unsigned int d_hash_mask __read_mostly;
 static unsigned int d_hash_shift __read_mostly;
 
-struct dcache_hash_bucket {
-       struct hlist_bl_head head;
-};
-static struct dcache_hash_bucket *dentry_hashtable __read_mostly;
+static struct hlist_bl_head *dentry_hashtable __read_mostly;
 
-static inline struct dcache_hash_bucket *d_hash(struct dentry *parent,
+static inline struct hlist_bl_head *d_hash(struct dentry *parent,
                                        unsigned long hash)
 {
        hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
@@ -112,16 +110,6 @@ static inline struct dcache_hash_bucket *d_hash(struct dentry *parent,
        return dentry_hashtable + (hash & D_HASHMASK);
 }
 
-static inline void spin_lock_bucket(struct dcache_hash_bucket *b)
-{
-       bit_spin_lock(0, (unsigned long *)&b->head.first);
-}
-
-static inline void spin_unlock_bucket(struct dcache_hash_bucket *b)
-{
-       __bit_spin_unlock(0, (unsigned long *)&b->head.first);
-}
-
 /* Statistics gathering. */
 struct dentry_stat_t dentry_stat = {
        .age_limit = 45,
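
The open-coded spin_lock_bucket()/spin_unlock_bucket() wrappers are dropped in favour of the
generic helpers from <linux/list_bl.h>, which take the bit spinlock on bit 0 of the bucket's
first pointer. A minimal sketch of those helpers (the exact body in this tree may differ
slightly, e.g. in how the head pointer is cast):

	static inline void hlist_bl_lock(struct hlist_bl_head *b)
	{
		bit_spin_lock(0, (unsigned long *)&b->first);
	}

	static inline void hlist_bl_unlock(struct hlist_bl_head *b)
	{
		__bit_spin_unlock(0, (unsigned long *)&b->first);
	}
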
@@ -167,8 +155,8 @@ static void d_free(struct dentry *dentry)
        if (dentry->d_op && dentry->d_op->d_release)
                dentry->d_op->d_release(dentry);
 
-       /* if dentry was never inserted into hash, immediate free is OK */
-       if (hlist_bl_unhashed(&dentry->d_hash))
+       /* if dentry was never visible to RCU, immediate free is OK */
+       if (!(dentry->d_flags & DCACHE_RCUACCESS))
                __d_free(&dentry->d_u.d_rcu);
        else
                call_rcu(&dentry->d_u.d_rcu, __d_free);
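
With DCACHE_UNHASHED gone, being unhashed no longer tells d_free() whether an RCU walker might
still see the dentry; the new DCACHE_RCUACCESS flag (set when the dentry is first hashed, see
__d_rehash() below) records that instead, and forces the free through an RCU grace period. The
callback passed to call_rcu() above already exists earlier in this file; a trimmed sketch of its
shape:

	static void __d_free(struct rcu_head *head)
	{
		struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

		if (dname_external(dentry))
			kfree(dentry->d_name.name);
		kmem_cache_free(dentry_cache, dentry);
	}
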
@@ -330,28 +318,19 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
  */
 void __d_drop(struct dentry *dentry)
 {
-       if (!(dentry->d_flags & DCACHE_UNHASHED)) {
-               if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) {
-                       bit_spin_lock(0,
-                               (unsigned long *)&dentry->d_sb->s_anon.first);
-                       dentry->d_flags |= DCACHE_UNHASHED;
-                       hlist_bl_del_init(&dentry->d_hash);
-                       __bit_spin_unlock(0,
-                               (unsigned long *)&dentry->d_sb->s_anon.first);
-               } else {
-                       struct dcache_hash_bucket *b;
+       if (!d_unhashed(dentry)) {
+               struct hlist_bl_head *b;
+               if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
+                       b = &dentry->d_sb->s_anon;
+               else
                        b = d_hash(dentry->d_parent, dentry->d_name.hash);
-                       spin_lock_bucket(b);
-                       /*
-                        * We may not actually need to put DCACHE_UNHASHED
-                        * manipulations under the hash lock, but follow
-                        * the principle of least surprise.
-                        */
-                       dentry->d_flags |= DCACHE_UNHASHED;
-                       hlist_bl_del_rcu(&dentry->d_hash);
-                       spin_unlock_bucket(b);
-                       dentry_rcuwalk_barrier(dentry);
-               }
+
+               hlist_bl_lock(b);
+               __hlist_bl_del(&dentry->d_hash);
+               dentry->d_hash.pprev = NULL;
+               hlist_bl_unlock(b);
+
+               dentry_rcuwalk_barrier(dentry);
        }
 }
 EXPORT_SYMBOL(__d_drop);
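
Note that d_unhashed() keeps working without the DCACHE_UNHASHED flag because "hashed" is now
encoded entirely in the d_hash list pointers: __d_drop() clears d_hash.pprev under the bucket
lock, and the test reduces to a NULL check. A sketch of the helpers this relies on, as defined
in <linux/dcache.h> and <linux/list_bl.h> after this change (exact wording may differ):

	static inline int hlist_bl_unhashed(const struct hlist_bl_node *h)
	{
		return !h->pprev;
	}

	static inline int d_unhashed(struct dentry *dentry)
	{
		return hlist_bl_unhashed(&dentry->d_hash);
	}
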
@@ -1241,7 +1220,7 @@ void shrink_dcache_parent(struct dentry * parent)
 EXPORT_SYMBOL(shrink_dcache_parent);
 
 /*
- * Scan `nr' dentries and return the number which remain.
+ * Scan `sc->nr_to_scan' dentries and return the number which remain.
  *
  * We need to avoid reentering the filesystem if the caller is performing a
  * GFP_NOFS allocation attempt.  One example deadlock is:
@@ -1252,8 +1231,12 @@ EXPORT_SYMBOL(shrink_dcache_parent);
  *
  * In this case we return -1 to tell the caller that we baled.
  */
-static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
+static int shrink_dcache_memory(struct shrinker *shrink,
+                               struct shrink_control *sc)
 {
+       int nr = sc->nr_to_scan;
+       gfp_t gfp_mask = sc->gfp_mask;
+
        if (nr) {
                if (!(gfp_mask & __GFP_FS))
                        return -1;
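
shrink_dcache_memory() now takes the consolidated shrink_control argument instead of separate
nr/gfp_mask parameters. The structure it unpacks is, in essence (sketch of struct shrink_control
as introduced alongside this change; comments added here):

	struct shrink_control {
		gfp_t gfp_mask;			/* allocation context of the caller */
		unsigned long nr_to_scan;	/* how many objects the shrinker should scan */
	};
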
@@ -1304,7 +1287,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
        dname[name->len] = 0;
 
        dentry->d_count = 1;
-       dentry->d_flags = DCACHE_UNHASHED;
+       dentry->d_flags = 0;
        spin_lock_init(&dentry->d_lock);
        seqcount_init(&dentry->d_seq);
        dentry->d_inode = NULL;
@@ -1606,10 +1589,9 @@ struct dentry *d_obtain_alias(struct inode *inode)
        tmp->d_inode = inode;
        tmp->d_flags |= DCACHE_DISCONNECTED;
        list_add(&tmp->d_alias, &inode->i_dentry);
-       bit_spin_lock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
-       tmp->d_flags &= ~DCACHE_UNHASHED;
+       hlist_bl_lock(&tmp->d_sb->s_anon);
        hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
-       __bit_spin_unlock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
+       hlist_bl_unlock(&tmp->d_sb->s_anon);
        spin_unlock(&tmp->d_lock);
        spin_unlock(&inode->i_lock);
        security_d_instantiate(tmp, inode);
@@ -1789,7 +1771,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
        unsigned int len = name->len;
        unsigned int hash = name->hash;
        const unsigned char *str = name->name;
-       struct dcache_hash_bucket *b = d_hash(parent, hash);
+       struct hlist_bl_head *b = d_hash(parent, hash);
        struct hlist_bl_node *node;
        struct dentry *dentry;
 
@@ -1813,7 +1795,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
         *
         * See Documentation/filesystems/path-lookup.txt for more details.
         */
-       hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
+       hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
                struct inode *i;
                const char *tname;
                int tlen;
@@ -1831,8 +1813,6 @@ seqretry:
                tname = dentry->d_name.name;
                i = dentry->d_inode;
                prefetch(tname);
-               if (i)
-                       prefetch(i);
                /*
                 * This seqcount check is required to ensure name and
                 * len are loaded atomically, so as not to walk off the
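
The name prefetch stays, but the conditional inode prefetch is dropped: the removed lines cost a
test and branch on every element of the hash chain. prefetch() itself is only a hint; on
architectures that do not provide their own implementation, the generic <linux/prefetch.h>
definition is roughly:

	#ifndef ARCH_HAS_PREFETCH
	#define prefetch(x)	__builtin_prefetch(x)
	#endif
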
@@ -1908,7 +1888,7 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
        unsigned int len = name->len;
        unsigned int hash = name->hash;
        const unsigned char *str = name->name;
-       struct dcache_hash_bucket *b = d_hash(parent, hash);
+       struct hlist_bl_head *b = d_hash(parent, hash);
        struct hlist_bl_node *node;
        struct dentry *found = NULL;
        struct dentry *dentry;
@@ -1935,7 +1915,7 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
         */
        rcu_read_lock();
        
-       hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
+       hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
                const char *tname;
                int tlen;
 
@@ -2086,13 +2066,13 @@ again:
 }
 EXPORT_SYMBOL(d_delete);
 
-static void __d_rehash(struct dentry * entry, struct dcache_hash_bucket *b)
+static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
 {
        BUG_ON(!d_unhashed(entry));
-       spin_lock_bucket(b);
-       entry->d_flags &= ~DCACHE_UNHASHED;
-       hlist_bl_add_head_rcu(&entry->d_hash, &b->head);
-       spin_unlock_bucket(b);
+       hlist_bl_lock(b);
+       entry->d_flags |= DCACHE_RCUACCESS;
+       hlist_bl_add_head_rcu(&entry->d_hash, b);
+       hlist_bl_unlock(b);
 }
 
 static void _d_rehash(struct dentry * entry)
@@ -2231,14 +2211,15 @@ static void dentry_unlock_parents_for_move(struct dentry *dentry,
  * The hash value has to match the hash queue that the dentry is on..
  */
 /*
- * d_move - move a dentry
+ * __d_move - move a dentry
  * @dentry: entry to move
  * @target: new dentry
  *
  * Update the dcache to reflect the move of a file name. Negative
- * dcache entries should not be moved in this way.
+ * dcache entries should not be moved in this way.  Caller must hold
+ * rename_lock.
  */
-void d_move(struct dentry * dentry, struct dentry * target)
+static void __d_move(struct dentry * dentry, struct dentry * target)
 {
        if (!dentry->d_inode)
                printk(KERN_WARNING "VFS: moving negative dcache entry\n");
@@ -2246,8 +2227,6 @@ void d_move(struct dentry * dentry, struct dentry * target)
        BUG_ON(d_ancestor(dentry, target));
        BUG_ON(d_ancestor(target, dentry));
 
-       write_seqlock(&rename_lock);
-
        dentry_lock_for_move(dentry, target);
 
        write_seqcount_begin(&dentry->d_seq);
@@ -2293,6 +2272,20 @@ void d_move(struct dentry * dentry, struct dentry * target)
        spin_unlock(&target->d_lock);
        fsnotify_d_move(dentry);
        spin_unlock(&dentry->d_lock);
+}
+
+/*
+ * d_move - move a dentry
+ * @dentry: entry to move
+ * @target: new dentry
+ *
+ * Update the dcache to reflect the move of a file name. Negative
+ * dcache entries should not be moved in this way.
+ */
+void d_move(struct dentry *dentry, struct dentry *target)
+{
+       write_seqlock(&rename_lock);
+       __d_move(dentry, target);
        write_sequnlock(&rename_lock);
 }
 EXPORT_SYMBOL(d_move);
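
Splitting the body out as __d_move() lets callers that already hold rename_lock (such as
__d_unalias() and d_materialise_unique() below) perform the move without taking the seqlock
recursively, while d_move() keeps the old external interface. An illustrative (hypothetical)
caller pattern, for contrast:

	/* External callers: d_move() takes rename_lock itself. */
	d_move(dentry, target);

	/* Callers already inside a rename_lock write section use __d_move(). */
	write_seqlock(&rename_lock);
	/* ... checks that must be stable against concurrent renames ... */
	__d_move(alias, dentry);
	write_sequnlock(&rename_lock);
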
@@ -2320,7 +2313,7 @@ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
  * This helper attempts to cope with remotely renamed directories
  *
  * It assumes that the caller is already holding
- * dentry->d_parent->d_inode->i_mutex and the inode->i_lock
+ * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
  *
  * Note: If ever the locking in lock_rename() changes, then please
  * remember to update this too...
@@ -2335,11 +2328,6 @@ static struct dentry *__d_unalias(struct inode *inode,
        if (alias->d_parent == dentry->d_parent)
                goto out_unalias;
 
-       /* Check for loops */
-       ret = ERR_PTR(-ELOOP);
-       if (d_ancestor(alias, dentry))
-               goto out_err;
-
        /* See lock_rename() */
        ret = ERR_PTR(-EBUSY);
        if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
@@ -2349,7 +2337,7 @@ static struct dentry *__d_unalias(struct inode *inode,
                goto out_err;
        m2 = &alias->d_parent->d_inode->i_mutex;
 out_unalias:
-       d_move(alias, dentry);
+       __d_move(alias, dentry);
        ret = alias;
 out_err:
        spin_unlock(&inode->i_lock);
@@ -2434,15 +2422,24 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
                alias = __d_find_alias(inode, 0);
                if (alias) {
                        actual = alias;
-                       /* Is this an anonymous mountpoint that we could splice
-                        * into our tree? */
-                       if (IS_ROOT(alias)) {
+                       write_seqlock(&rename_lock);
+
+                       if (d_ancestor(alias, dentry)) {
+                               /* Check for loops */
+                               actual = ERR_PTR(-ELOOP);
+                       } else if (IS_ROOT(alias)) {
+                               /* Is this an anonymous mountpoint that we
+                                * could splice into our tree? */
                                __d_materialise_dentry(dentry, alias);
+                               write_sequnlock(&rename_lock);
                                __d_drop(alias);
                                goto found;
+                       } else {
+                               /* Nope, but we must(!) avoid directory
+                                * aliasing */
+                               actual = __d_unalias(inode, dentry, alias);
                        }
-                       /* Nope, but we must(!) avoid directory aliasing */
-                       actual = __d_unalias(inode, dentry, alias);
+                       write_sequnlock(&rename_lock);
                        if (IS_ERR(actual))
                                dput(alias);
                        goto out_nolock;
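
The -ELOOP check moves from __d_unalias() into d_materialise_unique() so that it runs inside the
rename_lock write section: d_ancestor() walks d_parent pointers, which are only stable against
concurrent renames while rename_lock is held. For reference, d_ancestor() as it already exists
elsewhere in this file is roughly:

	struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
	{
		struct dentry *p;

		for (p = p2; !IS_ROOT(p); p = p->d_parent) {
			if (p->d_parent == p1)
				return p;
		}
		return NULL;
	}
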
@@ -3025,7 +3022,7 @@ static void __init dcache_init_early(void)
 
        dentry_hashtable =
                alloc_large_system_hash("Dentry cache",
-                                       sizeof(struct dcache_hash_bucket),
+                                       sizeof(struct hlist_bl_head),
                                        dhash_entries,
                                        13,
                                        HASH_EARLY,
@@ -3034,7 +3031,7 @@ static void __init dcache_init_early(void)
                                        0);
 
        for (loop = 0; loop < (1 << d_hash_shift); loop++)
-               INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
+               INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
 }
 
 static void __init dcache_init(void)
@@ -3057,7 +3054,7 @@ static void __init dcache_init(void)
 
        dentry_hashtable =
                alloc_large_system_hash("Dentry cache",
-                                       sizeof(struct dcache_hash_bucket),
+                                       sizeof(struct hlist_bl_head),
                                        dhash_entries,
                                        13,
                                        0,
@@ -3066,7 +3063,7 @@ static void __init dcache_init(void)
                                        0);
 
        for (loop = 0; loop < (1 << d_hash_shift); loop++)
-               INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
+               INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
 }
 
 /* SLAB cache for __getname() consumers */
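
Since dentry_hashtable is now an array of bare hlist_bl_head, each bucket is initialised directly
rather than through the removed wrapper struct. INIT_HLIST_BL_HEAD() simply clears the
(lock-carrying) first pointer; a sketch of the <linux/list_bl.h> definition:

	#define INIT_HLIST_BL_HEAD(ptr) \
		((ptr)->first = NULL)
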