One less parameter to __d_path
[pandora-kernel.git] / fs / dcache.c
index 678d39d..688aac9 100644
@@ -38,7 +38,7 @@ int sysctl_vfs_cache_pressure __read_mostly = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 
  __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
-static __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 
 EXPORT_SYMBOL(dcache_lock);
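
Making rename_lock non-static exposes it to the rest of the VFS, presumably so that callers outside fs/dcache.c can detect a concurrent rename with the usual seqlock read pattern. An illustrative sketch (assumes an extern declaration for rename_lock in a shared header):

    extern seqlock_t rename_lock;

    unsigned seq;
    do {
            seq = read_seqbegin(&rename_lock);
            /* walk d_parent / read d_name here; retry if a rename raced */
    } while (read_seqretry(&rename_lock, seq));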
 
@@ -89,7 +89,7 @@ static void d_free(struct dentry *dentry)
        if (dentry->d_op && dentry->d_op->d_release)
                dentry->d_op->d_release(dentry);
        /* if dentry was never inserted into hash, immediate free is OK */
-       if (dentry->d_hash.pprev == NULL)
+       if (hlist_unhashed(&dentry->d_hash))
                __d_free(dentry);
        else
                call_rcu(&dentry->d_u.d_rcu, d_callback);
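
hlist_unhashed() (include/linux/list.h, paraphrased below) tests exactly the condition the open-coded pprev check did, so this hunk is a readability cleanup with no behavioural change:

    static inline int hlist_unhashed(const struct hlist_node *h)
    {
            return !h->pprev;
    }
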
@@ -381,20 +381,17 @@ restart:
  * Throw away a dentry - free the inode, dput the parent.  This requires that
  * the LRU list has already been removed.
  *
- * If prune_parents is true, try to prune ancestors as well.
+ * Try to prune ancestors as well.  This is necessary to prevent
+ * quadratic behavior of shrink_dcache_parent(), but is also expected
+ * to be beneficial in reducing dentry cache fragmentation.
  *
  * Called with dcache_lock, drops it and then regains.
  * Called with dentry->d_lock held, drops it.
  */
-static void prune_one_dentry(struct dentry * dentry, int prune_parents)
+static void prune_one_dentry(struct dentry * dentry)
 {
        __d_drop(dentry);
        dentry = d_kill(dentry);
-       if (!prune_parents) {
-               dput(dentry);
-               spin_lock(&dcache_lock);
-               return;
-       }
 
        /*
         * Prune ancestors.  Locking is simpler than in dput(),
@@ -422,7 +419,6 @@ static void prune_one_dentry(struct dentry * dentry, int prune_parents)
  * @count: number of entries to try and free
  * @sb: if given, ignore dentries for other superblocks
  *         which are being unmounted.
- * @prune_parents: if true, try to prune ancestors as well in one go
  *
  * Shrink the dcache. This is done when we need
  * more memory, or simply when we need to unmount
@@ -433,7 +429,7 @@ static void prune_one_dentry(struct dentry * dentry, int prune_parents)
  * all the dentries are in use.
  */
  
-static void prune_dcache(int count, struct super_block *sb, int prune_parents)
+static void prune_dcache(int count, struct super_block *sb)
 {
        spin_lock(&dcache_lock);
        for (; count ; count--) {
@@ -493,7 +489,7 @@ static void prune_dcache(int count, struct super_block *sb, int prune_parents)
                 * without taking the s_umount lock (I already hold it).
                 */
                if (sb && dentry->d_sb == sb) {
-                       prune_one_dentry(dentry, prune_parents);
+                       prune_one_dentry(dentry);
                        continue;
                }
                /*
@@ -508,7 +504,7 @@ static void prune_dcache(int count, struct super_block *sb, int prune_parents)
                s_umount = &dentry->d_sb->s_umount;
                if (down_read_trylock(s_umount)) {
                        if (dentry->d_sb->s_root != NULL) {
-                               prune_one_dentry(dentry, prune_parents);
+                               prune_one_dentry(dentry);
                                up_read(s_umount);
                                continue;
                        }
@@ -557,18 +553,18 @@ void shrink_dcache_sb(struct super_block * sb)
         * superblock to the most recent end of the unused list.
         */
        spin_lock(&dcache_lock);
-       list_for_each_safe(tmp, next, &dentry_unused) {
+       list_for_each_prev_safe(tmp, next, &dentry_unused) {
                dentry = list_entry(tmp, struct dentry, d_lru);
                if (dentry->d_sb != sb)
                        continue;
-               list_move(tmp, &dentry_unused);
+               list_move_tail(tmp, &dentry_unused);
        }
 
        /*
         * Pass two ... free the dentries for this superblock.
         */
 repeat:
-       list_for_each_safe(tmp, next, &dentry_unused) {
+       list_for_each_prev_safe(tmp, next, &dentry_unused) {
                dentry = list_entry(tmp, struct dentry, d_lru);
                if (dentry->d_sb != sb)
                        continue;
@@ -579,7 +575,7 @@ repeat:
                        spin_unlock(&dentry->d_lock);
                        continue;
                }
-               prune_one_dentry(dentry, 1);
+               prune_one_dentry(dentry);
                cond_resched_lock(&dcache_lock);
                goto repeat;
        }
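
Both passes of shrink_dcache_sb() now walk dentry_unused from the tail with list_for_each_prev_safe() and requeue matching dentries with list_move_tail() rather than list_move(). For reference, the two list.h helpers are roughly (paraphrased; the in-tree iterator may also prefetch):

    #define list_for_each_prev_safe(pos, n, head) \
            for (pos = (head)->prev, n = pos->prev; \
                 pos != (head); \
                 pos = n, n = pos->prev)

    static inline void list_move_tail(struct list_head *list,
                                      struct list_head *head)
    {
            __list_del(list->prev, list->next);
            list_add_tail(list, head);
    }
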
@@ -858,7 +854,7 @@ void shrink_dcache_parent(struct dentry * parent)
        int found;
 
        while ((found = select_parent(parent)) != 0)
-               prune_dcache(found, parent->d_sb, 1);
+               prune_dcache(found, parent->d_sb);
 }
 
 /*
@@ -878,7 +874,7 @@ static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
        if (nr) {
                if (!(gfp_mask & __GFP_FS))
                        return -1;
-               prune_dcache(nr, NULL, 1);
+               prune_dcache(nr, NULL);
        }
        return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
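
The return value is the shrinker's estimate of freeable dentries, scaled by vfs_cache_pressure. A worked example with assumed numbers:

    /*
     * Assuming dentry_stat.nr_unused == 45000:
     *   vfs_cache_pressure = 100 (default): (45000 / 100) * 100 = 45000
     *   vfs_cache_pressure = 200:           (45000 / 100) * 200 = 90000
     * i.e. raising the sysctl makes the VM reclaim dentries more eagerly.
     */
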
@@ -903,7 +899,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
        struct dentry *dentry;
        char *dname;
 
-       dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL); 
+       dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
        if (!dentry)
                return NULL;
 
@@ -1412,9 +1408,6 @@ void d_delete(struct dentry * dentry)
        if (atomic_read(&dentry->d_count) == 1) {
                dentry_iput(dentry);
                fsnotify_nameremove(dentry, isdir);
-
-               /* remove this and other inotify debug checks after 2.6.18 */
-               dentry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
                return;
        }
 
@@ -1483,6 +1476,8 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
                         * dentry:internal, target:external.  Steal target's
                         * storage and make target internal.
                         */
+                       memcpy(target->d_iname, dentry->d_name.name,
+                                       dentry->d_name.len + 1);
                        dentry->d_name.name = target->d_name.name;
                        target->d_name.name = target->d_iname;
                }
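
The added memcpy fixes the internal-to-external case: dentry's short name lives in its inline d_iname buffer, so it has to be copied into target's inline buffer before the pointers are switched; otherwise target->d_name.name would end up pointing at target->d_iname without dentry's name ever having been written there. The relevant dentry fields, abridged:

    struct dentry {
            /* ... */
            struct qstr d_name;     /* name pointer, length, hash */
            /* ... */
            unsigned char d_iname[DNAME_INLINE_LEN_MIN];    /* short names stored inline */
    };
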
@@ -1514,8 +1509,8 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
  * This forceful removal will result in ugly /proc output if
  * somebody holds a file open that got deleted due to a rename.
  * We could be nicer about the deleted file, and let it show
- * up under the name it got deleted rather than the name that
- * deleted it.
+ * up under the name it had before it was deleted rather than
+ * under the original name of the file that was moved on top of it.
  */
  
 /*
@@ -1546,7 +1541,7 @@ static void d_move_locked(struct dentry * dentry, struct dentry * target)
        }
 
        /* Move the dentry to the target hash queue, if on different bucket */
-       if (dentry->d_flags & DCACHE_UNHASHED)
+       if (d_unhashed(dentry))
                goto already_unhashed;
 
        hlist_del_rcu(&dentry->d_hash);
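
d_unhashed() (include/linux/dcache.h, paraphrased below) is the existing helper for this test, so open-coding the flag check was redundant:

    static inline int d_unhashed(struct dentry *dentry)
    {
            return (dentry->d_flags & DCACHE_UNHASHED);
    }
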
@@ -1781,9 +1776,8 @@ shouldnt_be_hashed:
  *
  * "buflen" should be positive. Caller holds the dcache_lock.
  */
-static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
-                       struct dentry *root, struct vfsmount *rootmnt,
-                       char *buffer, int buflen)
+static char *__d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
+                      struct path *root, char *buffer, int buflen)
 {
        char * end = buffer+buflen;
        char * retval;
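
The separate root dentry/vfsmount arguments collapse into a single struct path, which in the VFS headers is simply:

    struct path {
            struct vfsmount *mnt;
            struct dentry *dentry;
    };
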
@@ -1808,7 +1802,7 @@ static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
        for (;;) {
                struct dentry * parent;
 
-               if (dentry == root && vfsmnt == rootmnt)
+               if (dentry == root->dentry && vfsmnt == root->mnt)
                        break;
                if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
                        /* Global root? */
@@ -1854,8 +1848,7 @@ char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
                                char *buf, int buflen)
 {
        char *res;
-       struct vfsmount *rootmnt;
-       struct dentry *root;
+       struct path root;
 
        /*
         * We have various synthetic filesystems that never get mounted.  On
@@ -1868,14 +1861,13 @@ char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
                return dentry->d_op->d_dname(dentry, buf, buflen);
 
        read_lock(&current->fs->lock);
-       rootmnt = mntget(current->fs->rootmnt);
-       root = dget(current->fs->root);
+       root = current->fs->root;
+       path_get(&current->fs->root);
        read_unlock(&current->fs->lock);
        spin_lock(&dcache_lock);
-       res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
+       res = __d_path(dentry, vfsmnt, &root, buf, buflen);
        spin_unlock(&dcache_lock);
-       dput(root);
-       mntput(rootmnt);
+       path_put(&root);
        return res;
 }
 
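path_get()/path_put() take and drop references on both halves of the pair at once, which is why the explicit dget()/mntget() and dput()/mntput() calls disappear above; roughly:

    void path_get(struct path *path)
    {
            mntget(path->mnt);
            dget(path->dentry);
    }

    void path_put(struct path *path)
    {
            dput(path->dentry);
            mntput(path->mnt);
    }
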
@@ -1921,28 +1913,27 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
 asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
 {
        int error;
-       struct vfsmount *pwdmnt, *rootmnt;
-       struct dentry *pwd, *root;
+       struct path pwd, root;
        char *page = (char *) __get_free_page(GFP_USER);
 
        if (!page)
                return -ENOMEM;
 
        read_lock(&current->fs->lock);
-       pwdmnt = mntget(current->fs->pwdmnt);
-       pwd = dget(current->fs->pwd);
-       rootmnt = mntget(current->fs->rootmnt);
-       root = dget(current->fs->root);
+       pwd = current->fs->pwd;
+       path_get(&current->fs->pwd);
+       root = current->fs->root;
+       path_get(&current->fs->root);
        read_unlock(&current->fs->lock);
 
        error = -ENOENT;
        /* Has the current directory been unlinked? */
        spin_lock(&dcache_lock);
-       if (pwd->d_parent == pwd || !d_unhashed(pwd)) {
+       if (pwd.dentry->d_parent == pwd.dentry || !d_unhashed(pwd.dentry)) {
                unsigned long len;
                char * cwd;
 
-               cwd = __d_path(pwd, pwdmnt, root, rootmnt, page, PAGE_SIZE);
+               cwd = __d_path(pwd.dentry, pwd.mnt, &root, page, PAGE_SIZE);
                spin_unlock(&dcache_lock);
 
                error = PTR_ERR(cwd);
@@ -1960,10 +1951,8 @@ asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
                spin_unlock(&dcache_lock);
 
 out:
-       dput(pwd);
-       mntput(pwdmnt);
-       dput(root);
-       mntput(rootmnt);
+       path_put(&pwd);
+       path_put(&root);
        free_page((unsigned long) page);
        return error;
 }
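
Nothing changes for userspace; getcwd(3) typically ends up in sys_getcwd(). A minimal caller for sanity testing:

    #include <limits.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[PATH_MAX];

            if (!getcwd(buf, sizeof(buf))) {        /* backed by sys_getcwd() */
                    perror("getcwd");
                    return 1;
            }
            printf("%s\n", buf);
            return 0;
    }
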
@@ -2108,7 +2097,7 @@ static void __init dcache_init_early(void)
                INIT_HLIST_HEAD(&dentry_hashtable[loop]);
 }
 
-static void __init dcache_init(unsigned long mempages)
+static void __init dcache_init(void)
 {
        int loop;
 
@@ -2170,10 +2159,10 @@ void __init vfs_caches_init(unsigned long mempages)
        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
-       dcache_init(mempages);
-       inode_init(mempages);
+       dcache_init();
+       inode_init();
        files_init(mempages);
-       mnt_init(mempages);
+       mnt_init();
        bdev_cache_init();
        chrdev_init();
 }
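
The mempages hint appears to have been unused by dcache_init(), inode_init() and mnt_init(), which size their hash tables themselves via alloc_large_system_hash(); only files_init() still needs it. A sketch of the kind of call dcache_init() makes (argument values illustrative, not taken from this diff):

    dentry_hashtable =
            alloc_large_system_hash("Dentry cache",
                                    sizeof(struct hlist_head),
                                    dhash_entries,  /* 0 = size from memory */
                                    13,             /* scale hint */
                                    0,              /* flags */
                                    &d_hash_shift,
                                    &d_hash_mask,
                                    0);             /* no upper limit */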