#include <linux/hardirq.h>
#include "internal.h"
+/*
+ * Usage:
+ * dcache_inode_lock protects:
+ * - i_dentry, d_alias, d_inode
+ * dcache_hash_lock protects:
+ * - the dcache hash table, s_anon lists
+ * dcache_lru_lock protects:
+ * - the dcache lru lists and counters
+ * d_lock protects:
+ * - d_flags
+ * - d_name
+ * - d_lru
+ * - d_count
+ * - d_unhashed()
+ * - d_parent and d_subdirs
+ * - children's d_child and d_parent
+ * - d_alias, d_inode
+ *
+ * Ordering:
+ * dcache_lock
+ * dcache_inode_lock
+ * dentry->d_lock
+ * dcache_lru_lock
+ * dcache_hash_lock
+ *
+ * If there is an ancestor relationship:
+ * dentry->d_parent->...->d_parent->d_lock
+ * ...
+ * dentry->d_parent->d_lock
+ * dentry->d_lock
+ *
+ * If no ancestor relationship:
+ * if (dentry1 < dentry2)
+ * dentry1->d_lock
+ * dentry2->d_lock
+ */
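+/*
+ * A minimal sketch of the "no ancestor relationship" rule above, assuming
+ * two unrelated dentries (the helper name is illustrative only):
+ *
+ *	static void lock_two_unrelated(struct dentry *d1, struct dentry *d2)
+ *	{
+ *		if (d2 < d1)
+ *			swap(d1, d2);
+ *		spin_lock(&d1->d_lock);
+ *		spin_lock_nested(&d2->d_lock, DENTRY_D_LOCK_NESTED);
+ *	}
+ */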
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
- __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_inode_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_hash_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
+EXPORT_SYMBOL(rename_lock);
+EXPORT_SYMBOL(dcache_inode_lock);
EXPORT_SYMBOL(dcache_lock);
static struct kmem_cache *dentry_cache __read_mostly;
.age_limit = 45,
};
-static struct percpu_counter nr_dentry __cacheline_aligned_in_smp;
-static struct percpu_counter nr_dentry_unused __cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(unsigned int, nr_dentry);
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+static int get_nr_dentry(void)
+{
+ int i;
+ int sum = 0;
+ for_each_possible_cpu(i)
+ sum += per_cpu(nr_dentry, i);
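+	/*
+	 * Per-cpu updates are not synchronized, so a snapshot sum can
+	 * transiently go negative; clamp it for reporting.
+	 */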
+ return sum < 0 ? 0 : sum;
+}
+
int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
size_t *lenp, loff_t *ppos)
{
- dentry_stat.nr_dentry = percpu_counter_sum_positive(&nr_dentry);
- dentry_stat.nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
+ dentry_stat.nr_dentry = get_nr_dentry();
return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif
*/
static void d_free(struct dentry *dentry)
{
- percpu_counter_dec(&nr_dentry);
+ BUG_ON(dentry->d_count);
+ this_cpu_dec(nr_dentry);
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
*/
static void dentry_iput(struct dentry * dentry)
__releases(dentry->d_lock)
+ __releases(dcache_inode_lock)
__releases(dcache_lock)
{
struct inode *inode = dentry->d_inode;
dentry->d_inode = NULL;
list_del_init(&dentry->d_alias);
spin_unlock(&dentry->d_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
if (!inode->i_nlink)
fsnotify_inoderemove(inode);
iput(inode);
} else {
spin_unlock(&dentry->d_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
}
}
/*
- * dentry_lru_(add|del|move_tail) must be called with dcache_lock held.
+ * dentry_lru_(add|del|move_tail) must be called with d_lock held.
*/
static void dentry_lru_add(struct dentry *dentry)
{
if (list_empty(&dentry->d_lru)) {
+ spin_lock(&dcache_lru_lock);
list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
dentry->d_sb->s_nr_dentry_unused++;
- percpu_counter_inc(&nr_dentry_unused);
+ dentry_stat.nr_unused++;
+ spin_unlock(&dcache_lru_lock);
}
}
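+/* Caller must hold dcache_lru_lock */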
+static void __dentry_lru_del(struct dentry *dentry)
+{
+ list_del_init(&dentry->d_lru);
+ dentry->d_sb->s_nr_dentry_unused--;
+ dentry_stat.nr_unused--;
+}
+
static void dentry_lru_del(struct dentry *dentry)
{
if (!list_empty(&dentry->d_lru)) {
- list_del_init(&dentry->d_lru);
- dentry->d_sb->s_nr_dentry_unused--;
- percpu_counter_dec(&nr_dentry_unused);
+ spin_lock(&dcache_lru_lock);
+ __dentry_lru_del(dentry);
+ spin_unlock(&dcache_lru_lock);
}
}
static void dentry_lru_move_tail(struct dentry *dentry)
{
+ spin_lock(&dcache_lru_lock);
if (list_empty(&dentry->d_lru)) {
list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
dentry->d_sb->s_nr_dentry_unused++;
- percpu_counter_inc(&nr_dentry_unused);
+ dentry_stat.nr_unused++;
} else {
list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
}
+ spin_unlock(&dcache_lru_lock);
}
/**
* The dentry must already be unhashed and removed from the LRU.
*
* If this is the root of the dentry tree, return NULL.
+ *
+ * dcache_lock and d_lock and d_parent->d_lock must be held by caller, and
+ * are dropped by d_kill.
*/
-static struct dentry *d_kill(struct dentry *dentry)
+static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
__releases(dentry->d_lock)
+ __releases(parent->d_lock)
+ __releases(dcache_inode_lock)
__releases(dcache_lock)
{
- struct dentry *parent;
-
+ dentry->d_parent = NULL;
list_del(&dentry->d_u.d_child);
- /*drops the locks, at that point nobody can reach this dentry */
+ if (parent)
+ spin_unlock(&parent->d_lock);
dentry_iput(dentry);
- if (IS_ROOT(dentry))
- parent = NULL;
- else
- parent = dentry->d_parent;
+ /*
+ * dentry_iput drops the locks, at which point nobody (except
+ * transient RCU lookups) can reach this dentry.
+ */
d_free(dentry);
return parent;
}
+/**
+ * d_drop - drop a dentry
+ * @dentry: dentry to drop
+ *
+ * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
+ * be found through a VFS lookup any more. Note that this is different from
+ * deleting the dentry - d_delete will try to mark the dentry negative if
+ * possible, giving a successful _negative_ lookup, while d_drop will
+ * just make the cache lookup fail.
+ *
+ * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
+ * reason (NFS timeouts or autofs deletes).
+ *
+ * __d_drop requires dentry->d_lock.
+ */
+void __d_drop(struct dentry *dentry)
+{
+ if (!(dentry->d_flags & DCACHE_UNHASHED)) {
+ dentry->d_flags |= DCACHE_UNHASHED;
+ spin_lock(&dcache_hash_lock);
+ hlist_del_rcu(&dentry->d_hash);
+ spin_unlock(&dcache_hash_lock);
+ }
+}
+EXPORT_SYMBOL(__d_drop);
+
+void d_drop(struct dentry *dentry)
+{
+ spin_lock(&dcache_lock);
+ spin_lock(&dentry->d_lock);
+ __d_drop(dentry);
+ spin_unlock(&dentry->d_lock);
+ spin_unlock(&dcache_lock);
+}
+EXPORT_SYMBOL(d_drop);
+
/*
* This is dput
*
void dput(struct dentry *dentry)
{
+ struct dentry *parent;
if (!dentry)
return;
repeat:
- if (atomic_read(&dentry->d_count) == 1)
+ if (dentry->d_count == 1)
might_sleep();
- if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
- return;
-
spin_lock(&dentry->d_lock);
- if (atomic_read(&dentry->d_count)) {
+ if (IS_ROOT(dentry))
+ parent = NULL;
+ else
+ parent = dentry->d_parent;
+ if (dentry->d_count == 1) {
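+		/*
+		 * We already hold dentry->d_lock, which ranks after
+		 * dcache_lock, dcache_inode_lock and the parent's d_lock in
+		 * the ordering above, so those may only be taken with
+		 * trylock here; back out and retry if any is contended.
+		 */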
+ if (!spin_trylock(&dcache_lock)) {
+			/*
+			 * There is a livelock possibility here that we could
+			 * avoid by taking dcache_lock outright and trying
+			 * again, but we want to reduce dcache_lock use
+			 * anyway, so this will be improved later.
+			 */
+drop1:
+ spin_unlock(&dentry->d_lock);
+ goto repeat;
+ }
+ if (!spin_trylock(&dcache_inode_lock)) {
+drop2:
+ spin_unlock(&dcache_lock);
+ goto drop1;
+ }
+ if (parent && !spin_trylock(&parent->d_lock)) {
+ spin_unlock(&dcache_inode_lock);
+ goto drop2;
+ }
+ }
+ dentry->d_count--;
+ if (dentry->d_count) {
spin_unlock(&dentry->d_lock);
+ if (parent)
+ spin_unlock(&parent->d_lock);
spin_unlock(&dcache_lock);
return;
}
dentry_lru_add(dentry);
spin_unlock(&dentry->d_lock);
+ if (parent)
+ spin_unlock(&parent->d_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
return;
kill_it:
/* if dentry was on the d_lru list delete it from there */
dentry_lru_del(dentry);
- dentry = d_kill(dentry);
+ dentry = d_kill(dentry, parent);
if (dentry)
goto repeat;
}
* If it's already been dropped, return OK.
*/
spin_lock(&dcache_lock);
+ spin_lock(&dentry->d_lock);
if (d_unhashed(dentry)) {
+ spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);
return 0;
}
* to get rid of unused child entries.
*/
if (!list_empty(&dentry->d_subdirs)) {
+ spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);
shrink_dcache_parent(dentry);
spin_lock(&dcache_lock);
+ spin_lock(&dentry->d_lock);
}
/*
* we might still populate it if it was a
* working directory or similar).
*/
- spin_lock(&dentry->d_lock);
- if (atomic_read(&dentry->d_count) > 1) {
+ if (dentry->d_count > 1) {
if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);
}
EXPORT_SYMBOL(d_invalidate);
+/* This must be called with dcache_lock and d_lock held */
+static inline struct dentry * __dget_locked_dlock(struct dentry *dentry)
+{
+ dentry->d_count++;
+ dentry_lru_del(dentry);
+ return dentry;
+}
+
/* This should be called _only_ with dcache_lock held */
static inline struct dentry * __dget_locked(struct dentry *dentry)
{
- atomic_inc(&dentry->d_count);
- dentry_lru_del(dentry);
+ spin_lock(&dentry->d_lock);
+ __dget_locked_dlock(dentry);
+ spin_unlock(&dentry->d_lock);
return dentry;
}
+struct dentry * dget_locked_dlock(struct dentry *dentry)
+{
+ return __dget_locked_dlock(dentry);
+}
+
struct dentry * dget_locked(struct dentry *dentry)
{
return __dget_locked(dentry);
}
EXPORT_SYMBOL(dget_locked);
+struct dentry *dget_parent(struct dentry *dentry)
+{
+ struct dentry *ret;
+
+repeat:
+ spin_lock(&dentry->d_lock);
+ ret = dentry->d_parent;
+ if (!ret)
+ goto out;
+ if (dentry == ret) {
+ ret->d_count++;
+ goto out;
+ }
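+	/*
+	 * The parent's d_lock ranks before the child's, but we already hold
+	 * the child's d_lock, so only a trylock is safe here.
+	 */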
+ if (!spin_trylock(&ret->d_lock)) {
+ spin_unlock(&dentry->d_lock);
+ cpu_relax();
+ goto repeat;
+ }
+ BUG_ON(!ret->d_count);
+ ret->d_count++;
+ spin_unlock(&ret->d_lock);
+out:
+ spin_unlock(&dentry->d_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dget_parent);
+
/**
* d_find_alias - grab a hashed alias of inode
* @inode: inode in question
* any other hashed alias over that one unless @want_discon is set,
* in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
*/
-
-static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
+static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
{
- struct list_head *head, *next, *tmp;
- struct dentry *alias, *discon_alias=NULL;
+ struct dentry *alias, *discon_alias;
- head = &inode->i_dentry;
- next = inode->i_dentry.next;
- while (next != head) {
- tmp = next;
- next = tmp->next;
- prefetch(next);
- alias = list_entry(tmp, struct dentry, d_alias);
+again:
+ discon_alias = NULL;
+ list_for_each_entry(alias, &inode->i_dentry, d_alias) {
+ spin_lock(&alias->d_lock);
if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
if (IS_ROOT(alias) &&
- (alias->d_flags & DCACHE_DISCONNECTED))
+ (alias->d_flags & DCACHE_DISCONNECTED)) {
discon_alias = alias;
- else if (!want_discon) {
- __dget_locked(alias);
+ } else if (!want_discon) {
+ __dget_locked_dlock(alias);
+ spin_unlock(&alias->d_lock);
+ return alias;
+ }
+ }
+ spin_unlock(&alias->d_lock);
+ }
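+	/*
+	 * No connected alias was found; fall back to the remembered
+	 * disconnected one, re-checking it under its d_lock and rescanning
+	 * if it changed under us.
+	 */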
+ if (discon_alias) {
+ alias = discon_alias;
+ spin_lock(&alias->d_lock);
+ if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
+ if (IS_ROOT(alias) &&
+ (alias->d_flags & DCACHE_DISCONNECTED)) {
+ __dget_locked_dlock(alias);
+ spin_unlock(&alias->d_lock);
return alias;
}
}
+ spin_unlock(&alias->d_lock);
+ goto again;
}
- if (discon_alias)
- __dget_locked(discon_alias);
- return discon_alias;
+ return NULL;
}
-struct dentry * d_find_alias(struct inode *inode)
+struct dentry *d_find_alias(struct inode *inode)
{
struct dentry *de = NULL;
if (!list_empty(&inode->i_dentry)) {
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
de = __d_find_alias(inode, 0);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
}
return de;
struct dentry *dentry;
restart:
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
spin_lock(&dentry->d_lock);
- if (!atomic_read(&dentry->d_count)) {
- __dget_locked(dentry);
+ if (!dentry->d_count) {
+ __dget_locked_dlock(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
dput(dentry);
goto restart;
}
spin_unlock(&dentry->d_lock);
}
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
* quadratic behavior of shrink_dcache_parent(), but is also expected
* to be beneficial in reducing dentry cache fragmentation.
*/
-static void prune_one_dentry(struct dentry * dentry)
+static void prune_one_dentry(struct dentry *dentry, struct dentry *parent)
__releases(dentry->d_lock)
+ __releases(parent->d_lock)
+ __releases(dcache_inode_lock)
__releases(dcache_lock)
- __acquires(dcache_lock)
{
__d_drop(dentry);
- dentry = d_kill(dentry);
+ dentry = d_kill(dentry, parent);
/*
* Prune ancestors. Locking is simpler than in dput(),
* because dcache_lock needs to be taken anyway.
*/
- spin_lock(&dcache_lock);
while (dentry) {
- if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
+ spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
+again:
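+		/*
+		 * The parent is only stable once the child's d_lock is held,
+		 * yet the lock order wants the parent's d_lock first; take
+		 * the child's lock, then trylock the parent and retry on
+		 * failure.
+		 */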
+ spin_lock(&dentry->d_lock);
+ if (IS_ROOT(dentry))
+ parent = NULL;
+ else
+ parent = dentry->d_parent;
+ if (parent && !spin_trylock(&parent->d_lock)) {
+ spin_unlock(&dentry->d_lock);
+ goto again;
+ }
+ dentry->d_count--;
+ if (dentry->d_count) {
+ if (parent)
+ spin_unlock(&parent->d_lock);
+ spin_unlock(&dentry->d_lock);
+ spin_unlock(&dcache_inode_lock);
+ spin_unlock(&dcache_lock);
return;
+ }
- if (dentry->d_op && dentry->d_op->d_delete)
- dentry->d_op->d_delete(dentry);
dentry_lru_del(dentry);
__d_drop(dentry);
- dentry = d_kill(dentry);
- spin_lock(&dcache_lock);
+ dentry = d_kill(dentry, parent);
}
}
struct dentry *dentry;
while (!list_empty(list)) {
+ struct dentry *parent;
+
dentry = list_entry(list->prev, struct dentry, d_lru);
- dentry_lru_del(dentry);
+
+ if (!spin_trylock(&dentry->d_lock)) {
+relock:
+ spin_unlock(&dcache_lru_lock);
+ cpu_relax();
+ spin_lock(&dcache_lru_lock);
+ continue;
+ }
/*
* We found an inuse dentry which was not removed from
* the LRU because of laziness during lookup. Do not free
* it - just keep it off the LRU list.
*/
- spin_lock(&dentry->d_lock);
- if (atomic_read(&dentry->d_count)) {
+ if (dentry->d_count) {
+ __dentry_lru_del(dentry);
spin_unlock(&dentry->d_lock);
continue;
}
- prune_one_dentry(dentry);
- /* dentry->d_lock was dropped in prune_one_dentry() */
- cond_resched_lock(&dcache_lock);
+ if (IS_ROOT(dentry))
+ parent = NULL;
+ else
+ parent = dentry->d_parent;
+ if (parent && !spin_trylock(&parent->d_lock)) {
+ spin_unlock(&dentry->d_lock);
+ goto relock;
+ }
+ __dentry_lru_del(dentry);
+ spin_unlock(&dcache_lru_lock);
+
+ prune_one_dentry(dentry, parent);
+ /* dcache_lock, dcache_inode_lock and dentry->d_lock dropped */
+ spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
+ spin_lock(&dcache_lru_lock);
}
}
int cnt = *count;
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
+relock:
+ spin_lock(&dcache_lru_lock);
while (!list_empty(&sb->s_dentry_lru)) {
dentry = list_entry(sb->s_dentry_lru.prev,
struct dentry, d_lru);
BUG_ON(dentry->d_sb != sb);
+ if (!spin_trylock(&dentry->d_lock)) {
+ spin_unlock(&dcache_lru_lock);
+ cpu_relax();
+ goto relock;
+ }
+
/*
* If we are honouring the DCACHE_REFERENCED flag and the
* dentry has this flag set, don't free it. Clear the flag
* and put it back on the LRU.
*/
- if (flags & DCACHE_REFERENCED) {
- spin_lock(&dentry->d_lock);
- if (dentry->d_flags & DCACHE_REFERENCED) {
- dentry->d_flags &= ~DCACHE_REFERENCED;
- list_move(&dentry->d_lru, &referenced);
- spin_unlock(&dentry->d_lock);
- cond_resched_lock(&dcache_lock);
- continue;
- }
+ if (flags & DCACHE_REFERENCED &&
+ dentry->d_flags & DCACHE_REFERENCED) {
+ dentry->d_flags &= ~DCACHE_REFERENCED;
+ list_move(&dentry->d_lru, &referenced);
spin_unlock(&dentry->d_lock);
+ } else {
+ list_move_tail(&dentry->d_lru, &tmp);
+ spin_unlock(&dentry->d_lock);
+ if (!--cnt)
+ break;
}
-
- list_move_tail(&dentry->d_lru, &tmp);
- if (!--cnt)
- break;
- cond_resched_lock(&dcache_lock);
+ /* XXX: re-add cond_resched_lock when dcache_lock goes away */
}
*count = cnt;
if (!list_empty(&referenced))
list_splice(&referenced, &sb->s_dentry_lru);
+ spin_unlock(&dcache_lru_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
-
}
/**
{
struct super_block *sb, *p = NULL;
int w_count;
- int unused = percpu_counter_sum_positive(&nr_dentry_unused);
+ int unused = dentry_stat.nr_unused;
int prune_ratio;
int pruned;
LIST_HEAD(tmp);
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
+ spin_lock(&dcache_lru_lock);
while (!list_empty(&sb->s_dentry_lru)) {
list_splice_init(&sb->s_dentry_lru, &tmp);
shrink_dentry_list(&tmp);
}
+ spin_unlock(&dcache_lru_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
}
EXPORT_SYMBOL(shrink_dcache_sb);
/* detach this root from the system */
spin_lock(&dcache_lock);
+ spin_lock(&dentry->d_lock);
dentry_lru_del(dentry);
__d_drop(dentry);
+ spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);
for (;;) {
/* this is a branch with children - detach all of them
* from the system in one go */
spin_lock(&dcache_lock);
+ spin_lock(&dentry->d_lock);
list_for_each_entry(loop, &dentry->d_subdirs,
d_u.d_child) {
+ spin_lock_nested(&loop->d_lock,
+ DENTRY_D_LOCK_NESTED);
dentry_lru_del(loop);
__d_drop(loop);
- cond_resched_lock(&dcache_lock);
+ spin_unlock(&loop->d_lock);
}
+ spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);
/* move to the first child */
do {
struct inode *inode;
- if (atomic_read(&dentry->d_count) != 0) {
+ if (dentry->d_count != 0) {
printk(KERN_ERR
"BUG: Dentry %p{i=%lx,n=%s}"
" still in use (%d)"
dentry->d_inode ?
dentry->d_inode->i_ino : 0UL,
dentry->d_name.name,
- atomic_read(&dentry->d_count),
+ dentry->d_count,
dentry->d_sb->s_type->name,
dentry->d_sb->s_id);
BUG();
}
- if (IS_ROOT(dentry))
+ if (IS_ROOT(dentry)) {
parent = NULL;
- else {
+ list_del(&dentry->d_u.d_child);
+ } else {
parent = dentry->d_parent;
- atomic_dec(&parent->d_count);
+ spin_lock(&parent->d_lock);
+ parent->d_count--;
+ list_del(&dentry->d_u.d_child);
+ spin_unlock(&parent->d_lock);
}
- list_del(&dentry->d_u.d_child);
detached++;
inode = dentry->d_inode;
dentry = sb->s_root;
sb->s_root = NULL;
- atomic_dec(&dentry->d_count);
+ spin_lock(&dentry->d_lock);
+ dentry->d_count--;
+ spin_unlock(&dentry->d_lock);
shrink_dcache_for_umount_subtree(dentry);
while (!hlist_empty(&sb->s_anon)) {
* Return true if the parent or its subdirectories contain
* a mount point
*/
-
int have_submounts(struct dentry *parent)
{
- struct dentry *this_parent = parent;
+ struct dentry *this_parent;
struct list_head *next;
+ unsigned seq;
+
+rename_retry:
+ this_parent = parent;
+ seq = read_seqbegin(&rename_lock);
spin_lock(&dcache_lock);
if (d_mountpoint(parent))
goto positive;
+ spin_lock(&this_parent->d_lock);
repeat:
next = this_parent->d_subdirs.next;
resume:
struct list_head *tmp = next;
struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
next = tmp->next;
+
+ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
/* Have we found a mount point ? */
- if (d_mountpoint(dentry))
+ if (d_mountpoint(dentry)) {
+ spin_unlock(&dentry->d_lock);
+ spin_unlock(&this_parent->d_lock);
goto positive;
+ }
if (!list_empty(&dentry->d_subdirs)) {
+ spin_unlock(&this_parent->d_lock);
+ spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
this_parent = dentry;
+ spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
goto repeat;
}
+ spin_unlock(&dentry->d_lock);
}
/*
* All done at this level ... ascend and resume the search.
*/
if (this_parent != parent) {
- next = this_parent->d_u.d_child.next;
- this_parent = this_parent->d_parent;
+ struct dentry *tmp;
+ struct dentry *child;
+
+ tmp = this_parent->d_parent;
+ rcu_read_lock();
+ spin_unlock(&this_parent->d_lock);
+ child = this_parent;
+ this_parent = tmp;
+ spin_lock(&this_parent->d_lock);
+ /* might go back up the wrong parent if we have had a rename
+ * or deletion */
+ if (this_parent != child->d_parent ||
+ read_seqretry(&rename_lock, seq)) {
+ spin_unlock(&this_parent->d_lock);
+ spin_unlock(&dcache_lock);
+ rcu_read_unlock();
+ goto rename_retry;
+ }
+ rcu_read_unlock();
+ next = child->d_u.d_child.next;
goto resume;
}
+ spin_unlock(&this_parent->d_lock);
spin_unlock(&dcache_lock);
+ if (read_seqretry(&rename_lock, seq))
+ goto rename_retry;
return 0; /* No mount points found in tree */
positive:
spin_unlock(&dcache_lock);
+ if (read_seqretry(&rename_lock, seq))
+ goto rename_retry;
return 1;
}
EXPORT_SYMBOL(have_submounts);
*/
static int select_parent(struct dentry * parent)
{
- struct dentry *this_parent = parent;
+ struct dentry *this_parent;
struct list_head *next;
+ unsigned seq;
int found = 0;
+rename_retry:
+ this_parent = parent;
+ seq = read_seqbegin(&rename_lock);
+
spin_lock(&dcache_lock);
+ spin_lock(&this_parent->d_lock);
repeat:
next = this_parent->d_subdirs.next;
resume:
struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
next = tmp->next;
+ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+
/*
* move only zero ref count dentries to the end
* of the unused list for prune_dcache
*/
- if (!atomic_read(&dentry->d_count)) {
+ if (!dentry->d_count) {
dentry_lru_move_tail(dentry);
found++;
} else {
* ensures forward progress). We'll be coming back to find
* the rest.
*/
- if (found && need_resched())
+ if (found && need_resched()) {
+ spin_unlock(&dentry->d_lock);
goto out;
+ }
/*
* Descend a level if the d_subdirs list is non-empty.
*/
if (!list_empty(&dentry->d_subdirs)) {
+ spin_unlock(&this_parent->d_lock);
+ spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
this_parent = dentry;
+ spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
goto repeat;
}
+
+ spin_unlock(&dentry->d_lock);
}
/*
* All done at this level ... ascend and resume the search.
*/
if (this_parent != parent) {
- next = this_parent->d_u.d_child.next;
- this_parent = this_parent->d_parent;
+ struct dentry *tmp;
+ struct dentry *child;
+
+ tmp = this_parent->d_parent;
+ rcu_read_lock();
+ spin_unlock(&this_parent->d_lock);
+ child = this_parent;
+ this_parent = tmp;
+ spin_lock(&this_parent->d_lock);
+ /* might go back up the wrong parent if we have had a rename
+ * or deletion */
+ if (this_parent != child->d_parent ||
+ read_seqretry(&rename_lock, seq)) {
+ spin_unlock(&this_parent->d_lock);
+ spin_unlock(&dcache_lock);
+ rcu_read_unlock();
+ goto rename_retry;
+ }
+ rcu_read_unlock();
+ next = child->d_u.d_child.next;
goto resume;
}
out:
+ spin_unlock(&this_parent->d_lock);
spin_unlock(&dcache_lock);
+ if (read_seqretry(&rename_lock, seq))
+ goto rename_retry;
return found;
}
*/
static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
- int nr_unused;
-
if (nr) {
if (!(gfp_mask & __GFP_FS))
return -1;
prune_dcache(nr);
}
- nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
- return (nr_unused / 100) * sysctl_vfs_cache_pressure;
+ return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}
static struct shrinker dcache_shrinker = {
memcpy(dname, name->name, name->len);
dname[name->len] = 0;
- atomic_set(&dentry->d_count, 1);
+ dentry->d_count = 1;
dentry->d_flags = DCACHE_UNHASHED;
spin_lock_init(&dentry->d_lock);
dentry->d_inode = NULL;
INIT_LIST_HEAD(&dentry->d_lru);
INIT_LIST_HEAD(&dentry->d_subdirs);
INIT_LIST_HEAD(&dentry->d_alias);
+ INIT_LIST_HEAD(&dentry->d_u.d_child);
if (parent) {
- dentry->d_parent = dget(parent);
+ spin_lock(&dcache_lock);
+ spin_lock(&parent->d_lock);
+ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ dentry->d_parent = dget_dlock(parent);
dentry->d_sb = parent->d_sb;
- } else {
- INIT_LIST_HEAD(&dentry->d_u.d_child);
- }
-
- spin_lock(&dcache_lock);
- if (parent)
list_add(&dentry->d_u.d_child, &parent->d_subdirs);
- spin_unlock(&dcache_lock);
+ spin_unlock(&dentry->d_lock);
+ spin_unlock(&parent->d_lock);
+ spin_unlock(&dcache_lock);
+ }
- percpu_counter_inc(&nr_dentry);
+ this_cpu_inc(nr_dentry);
return dentry;
}
/* the caller must hold dcache_lock */
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
+ spin_lock(&dentry->d_lock);
if (inode)
list_add(&dentry->d_alias, &inode->i_dentry);
dentry->d_inode = inode;
+ spin_unlock(&dentry->d_lock);
fsnotify_d_instantiate(dentry, inode);
}
{
BUG_ON(!list_empty(&entry->d_alias));
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
__d_instantiate(entry, inode);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
security_d_instantiate(entry, inode);
}
list_for_each_entry(alias, &inode->i_dentry, d_alias) {
struct qstr *qstr = &alias->d_name;
+		/*
+		 * alias->d_lock is not needed here: aliases with
+		 * d_parent == entry->d_parent cannot have their name or
+		 * parent changed while the parent inode's i_mutex is held.
+		 */
if (qstr->hash != hash)
continue;
if (alias->d_parent != entry->d_parent)
BUG_ON(!list_empty(&entry->d_alias));
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
result = __d_instantiate_unique(entry, inode);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
if (!result) {
tmp->d_parent = tmp; /* make sure dput doesn't croak */
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
res = __d_find_alias(inode, 0);
if (res) {
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
dput(tmp);
goto out_iput;
tmp->d_flags |= DCACHE_DISCONNECTED;
tmp->d_flags &= ~DCACHE_UNHASHED;
list_add(&tmp->d_alias, &inode->i_dentry);
+ spin_lock(&dcache_hash_lock);
hlist_add_head(&tmp->d_hash, &inode->i_sb->s_anon);
+ spin_unlock(&dcache_hash_lock);
spin_unlock(&tmp->d_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
return tmp;
if (inode && S_ISDIR(inode->i_mode)) {
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
new = __d_find_alias(inode, 1);
if (new) {
BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
security_d_instantiate(new, inode);
d_move(new, dentry);
} else {
/* already taking dcache_lock, so d_add() by hand */
__d_instantiate(dentry, inode);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
security_d_instantiate(dentry, inode);
d_rehash(dentry);
* already has a dentry.
*/
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
__d_instantiate(found, inode);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
security_d_instantiate(found, inode);
return found;
*/
new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
dget_locked(new);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
security_d_instantiate(found, inode);
d_move(new, found);
struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
{
struct dentry * dentry = NULL;
- unsigned long seq;
+ unsigned seq;
do {
seq = read_seqbegin(&rename_lock);
*/
qstr = &dentry->d_name;
if (parent->d_op && parent->d_op->d_compare) {
- if (parent->d_op->d_compare(parent, qstr, name))
+ if (parent->d_op->d_compare(parent, parent->d_inode,
+ dentry, dentry->d_inode,
+ qstr->len, qstr->name, name))
goto next;
} else {
if (qstr->len != len)
goto next;
}
- atomic_inc(&dentry->d_count);
+ dentry->d_count++;
found = dentry;
spin_unlock(&dentry->d_lock);
break;
*/
name->hash = full_name_hash(name->name, name->len);
if (dir->d_op && dir->d_op->d_hash) {
- if (dir->d_op->d_hash(dir, name) < 0)
+ if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0)
goto out;
}
dentry = d_lookup(dir, name);
}
/**
- * d_validate - verify dentry provided from insecure source
+ * d_validate - verify dentry provided from insecure source (deprecated)
* @dentry: The dentry alleged to be valid child of @dparent
* @dparent: The parent dentry (known to be valid)
*
* An insecure source has sent us a dentry, here we verify it and dget() it.
* This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
+ *
+ * This function is slow for big directories and is deprecated; do not use it.
*/
-int d_validate(struct dentry *dentry, struct dentry *parent)
+int d_validate(struct dentry *dentry, struct dentry *dparent)
{
- struct hlist_head *head = d_hash(parent, dentry->d_name.hash);
- struct hlist_node *node;
- struct dentry *d;
+ struct dentry *child;
- /* Check whether the ptr might be valid at all.. */
- if (!kmem_ptr_validate(dentry_cache, dentry))
- return 0;
- if (dentry->d_parent != parent)
- return 0;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(d, node, head, d_hash) {
- if (d == dentry) {
- dget(dentry);
+ spin_lock(&dcache_lock);
+ spin_lock(&dparent->d_lock);
+ list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
+ if (dentry == child) {
+ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ __dget_locked_dlock(dentry);
+ spin_unlock(&dentry->d_lock);
+ spin_unlock(&dparent->d_lock);
+ spin_unlock(&dcache_lock);
return 1;
}
}
- rcu_read_unlock();
+ spin_unlock(&dparent->d_lock);
+ spin_unlock(&dcache_lock);
+
return 0;
}
EXPORT_SYMBOL(d_validate);
* Are we the only user?
*/
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
spin_lock(&dentry->d_lock);
isdir = S_ISDIR(dentry->d_inode->i_mode);
- if (atomic_read(&dentry->d_count) == 1) {
+ if (dentry->d_count == 1) {
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
dentry_iput(dentry);
fsnotify_nameremove(dentry, isdir);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
fsnotify_nameremove(dentry, isdir);
{
spin_lock(&dcache_lock);
spin_lock(&entry->d_lock);
+ spin_lock(&dcache_hash_lock);
_d_rehash(entry);
+ spin_unlock(&dcache_hash_lock);
spin_unlock(&entry->d_lock);
spin_unlock(&dcache_lock);
}
EXPORT_SYMBOL(d_rehash);
-/*
- * When switching names, the actual string doesn't strictly have to
- * be preserved in the target - because we're dropping the target
- * anyway. As such, we can just do a simple memcpy() to copy over
- * the new name before we switch.
+/**
+ * dentry_update_name_case - update case insensitive dentry with a new name
+ * @dentry: dentry to be updated
+ * @name: new name
*
- * Note that we have to be a lot more careful about getting the hash
- * switched - we have to switch the hash value properly even if it
- * then no longer matches the actual (corrupted) string of the target.
- * The hash value has to match the hash queue that the dentry is on..
+ * Update a case insensitive dentry with new case of name.
+ *
+ * The dentry must have been returned by d_lookup with name @name. Old and
+ * new name lengths must match (i.e. no d_compare that allows mismatched
+ * name lengths).
+ *
+ * Parent inode i_mutex must be held over d_lookup and into this call (to
+ * keep renames and concurrent inserts, and readdir(2) away).
*/
+void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
+{
+ BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
+ BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
+
+ spin_lock(&dcache_lock);
+ spin_lock(&dentry->d_lock);
+ memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
+ spin_unlock(&dentry->d_lock);
+ spin_unlock(&dcache_lock);
+}
+EXPORT_SYMBOL(dentry_update_name_case);
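+
+/*
+ * A minimal usage sketch, assuming the parent's i_mutex is held as required
+ * above and that example_canonical_case() (hypothetical) rewrites @name with
+ * the on-disk case without changing its length:
+ *
+ *	dentry = d_lookup(dir, name);
+ *	if (dentry) {
+ *		example_canonical_case(dir, name);
+ *		dentry_update_name_case(dentry, name);
+ *	}
+ */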
+
static void switch_names(struct dentry *dentry, struct dentry *target)
{
if (dname_external(target)) {
swap(dentry->d_name.len, target->d_name.len);
}
+static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
+{
+ /*
+ * XXXX: do we really need to take target->d_lock?
+ */
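+	/*
+	 * Lock the parents first (ancestor before descendant when related),
+	 * then both dentries in address order. Subclasses 2 and 3 are used
+	 * because up to four d_locks may be held at once here.
+	 */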
+ if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
+ spin_lock(&target->d_parent->d_lock);
+ else {
+ if (d_ancestor(dentry->d_parent, target->d_parent)) {
+ spin_lock(&dentry->d_parent->d_lock);
+ spin_lock_nested(&target->d_parent->d_lock,
+ DENTRY_D_LOCK_NESTED);
+ } else {
+ spin_lock(&target->d_parent->d_lock);
+ spin_lock_nested(&dentry->d_parent->d_lock,
+ DENTRY_D_LOCK_NESTED);
+ }
+ }
+ if (target < dentry) {
+ spin_lock_nested(&target->d_lock, 2);
+ spin_lock_nested(&dentry->d_lock, 3);
+ } else {
+ spin_lock_nested(&dentry->d_lock, 2);
+ spin_lock_nested(&target->d_lock, 3);
+ }
+}
+
+static void dentry_unlock_parents_for_move(struct dentry *dentry,
+ struct dentry *target)
+{
+ if (target->d_parent != dentry->d_parent)
+ spin_unlock(&dentry->d_parent->d_lock);
+ if (target->d_parent != target)
+ spin_unlock(&target->d_parent->d_lock);
+}
+
/*
- * We cannibalize "target" when moving dentry on top of it,
- * because it's going to be thrown away anyway. We could be more
- * polite about it, though.
- *
- * This forceful removal will result in ugly /proc output if
- * somebody holds a file open that got deleted due to a rename.
- * We could be nicer about the deleted file, and let it show
- * up under the name it had before it was deleted rather than
- * under the original name of the file that was moved on top of it.
+ * When switching names, the actual string doesn't strictly have to
+ * be preserved in the target - because we're dropping the target
+ * anyway. As such, we can just do a simple memcpy() to copy over
+ * the new name before we switch.
+ *
+ * Note that we have to be a lot more careful about getting the hash
+ * switched - we have to switch the hash value properly even if it
+ * then no longer matches the actual (corrupted) string of the target.
+ * The hash value has to match the hash queue that the dentry is on..
*/
-
/*
* d_move_locked - move a dentry
* @dentry: entry to move
*/
static void d_move_locked(struct dentry * dentry, struct dentry * target)
{
- struct hlist_head *list;
-
if (!dentry->d_inode)
printk(KERN_WARNING "VFS: moving negative dcache entry\n");
- write_seqlock(&rename_lock);
- /*
- * XXXX: do we really need to take target->d_lock?
- */
- if (target < dentry) {
- spin_lock(&target->d_lock);
- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
- } else {
- spin_lock(&dentry->d_lock);
- spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
- }
+ BUG_ON(d_ancestor(dentry, target));
+ BUG_ON(d_ancestor(target, dentry));
- /* Move the dentry to the target hash queue, if on different bucket */
- if (d_unhashed(dentry))
- goto already_unhashed;
+ write_seqlock(&rename_lock);
- hlist_del_rcu(&dentry->d_hash);
+ dentry_lock_for_move(dentry, target);
-already_unhashed:
- list = d_hash(target->d_parent, target->d_name.hash);
- __d_rehash(dentry, list);
+ /* Move the dentry to the target hash queue, if on different bucket */
+ spin_lock(&dcache_hash_lock);
+ if (!d_unhashed(dentry))
+ hlist_del_rcu(&dentry->d_hash);
+ __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
+ spin_unlock(&dcache_hash_lock);
/* Unhash the target: dput() will then get rid of it */
__d_drop(target);
}
list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
+
+ dentry_unlock_parents_for_move(dentry, target);
spin_unlock(&target->d_lock);
fsnotify_d_move(dentry);
spin_unlock(&dentry->d_lock);
*/
static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
__releases(dcache_lock)
+ __releases(dcache_inode_lock)
{
struct mutex *m1 = NULL, *m2 = NULL;
struct dentry *ret;
d_move_locked(alias, dentry);
ret = alias;
out_err:
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
if (m2)
mutex_unlock(m2);
/*
* Prepare an anonymous dentry for life in the superblock's dentry tree as a
* named dentry in place of the dentry to be replaced.
+ * Returns with anon->d_lock held!
*/
static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
{
struct dentry *dparent, *aparent;
- switch_names(dentry, anon);
- swap(dentry->d_name.hash, anon->d_name.hash);
+ dentry_lock_for_move(anon, dentry);
dparent = dentry->d_parent;
aparent = anon->d_parent;
+ switch_names(dentry, anon);
+ swap(dentry->d_name.hash, anon->d_name.hash);
+
dentry->d_parent = (aparent == anon) ? dentry : aparent;
list_del(&dentry->d_u.d_child);
if (!IS_ROOT(dentry))
else
INIT_LIST_HEAD(&anon->d_u.d_child);
+ dentry_unlock_parents_for_move(anon, dentry);
+ spin_unlock(&dentry->d_lock);
+
+ /* anon->d_lock still locked, returns locked */
anon->d_flags &= ~DCACHE_DISCONNECTED;
}
BUG_ON(!d_unhashed(dentry));
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
if (!inode) {
actual = dentry;
/* Is this an anonymous mountpoint that we could splice
* into our tree? */
if (IS_ROOT(alias)) {
- spin_lock(&alias->d_lock);
__d_materialise_dentry(dentry, alias);
__d_drop(alias);
goto found;
found_lock:
spin_lock(&actual->d_lock);
found:
+ spin_lock(&dcache_hash_lock);
_d_rehash(actual);
+ spin_unlock(&dcache_hash_lock);
spin_unlock(&actual->d_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
out_nolock:
if (actual == dentry) {
return actual;
shouldnt_be_hashed:
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
BUG();
}
* @buffer: pointer to the end of the buffer
* @buflen: pointer to buffer length
*
- * Caller holds the dcache_lock.
+ * Caller holds the rename_lock.
*
* If path is not reachable from the supplied root, then the value of
* root is changed (without modifying refcounts).
}
parent = dentry->d_parent;
prefetch(parent);
+ spin_lock(&dentry->d_lock);
error = prepend_name(buffer, buflen, &dentry->d_name);
+ spin_unlock(&dentry->d_lock);
if (!error)
error = prepend(buffer, buflen, "/", 1);
if (error)
prepend(&res, &buflen, "\0", 1);
spin_lock(&dcache_lock);
+ write_seqlock(&rename_lock);
error = prepend_path(path, root, &res, &buflen);
+ write_sequnlock(&rename_lock);
spin_unlock(&dcache_lock);
if (error)
get_fs_root(current->fs, &root);
spin_lock(&dcache_lock);
+ write_seqlock(&rename_lock);
tmp = root;
error = path_with_deleted(path, &tmp, &res, &buflen);
if (error)
res = ERR_PTR(error);
+ write_sequnlock(&rename_lock);
spin_unlock(&dcache_lock);
path_put(&root);
return res;
get_fs_root(current->fs, &root);
spin_lock(&dcache_lock);
+ write_seqlock(&rename_lock);
tmp = root;
error = path_with_deleted(path, &tmp, &res, &buflen);
if (!error && !path_equal(&tmp, &root))
error = prepend_unreachable(&res, &buflen);
+ write_sequnlock(&rename_lock);
spin_unlock(&dcache_lock);
path_put(&root);
if (error)
/*
* Write full pathname from the root of the filesystem into the buffer.
*/
-char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
+static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
{
char *end = buf + buflen;
char *retval;
while (!IS_ROOT(dentry)) {
struct dentry *parent = dentry->d_parent;
+ int error;
prefetch(parent);
- if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
- (prepend(&end, &buflen, "/", 1) != 0))
+ spin_lock(&dentry->d_lock);
+ error = prepend_name(&end, &buflen, &dentry->d_name);
+ spin_unlock(&dentry->d_lock);
+ if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
goto Elong;
retval = end;
Elong:
return ERR_PTR(-ENAMETOOLONG);
}
-EXPORT_SYMBOL(__dentry_path);
+
+char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
+{
+ char *retval;
+
+ spin_lock(&dcache_lock);
+ write_seqlock(&rename_lock);
+ retval = __dentry_path(dentry, buf, buflen);
+ write_sequnlock(&rename_lock);
+ spin_unlock(&dcache_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(dentry_path_raw);
char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
char *retval;
spin_lock(&dcache_lock);
+ write_seqlock(&rename_lock);
if (d_unlinked(dentry)) {
p = buf + buflen;
if (prepend(&p, &buflen, "//deleted", 10) != 0)
buflen++;
}
retval = __dentry_path(dentry, buf, buflen);
+ write_sequnlock(&rename_lock);
spin_unlock(&dcache_lock);
if (!IS_ERR(retval) && p)
		*p = '/';	/* restore '/' overridden with '\0' */
error = -ENOENT;
spin_lock(&dcache_lock);
+ write_seqlock(&rename_lock);
if (!d_unlinked(pwd.dentry)) {
unsigned long len;
struct path tmp = root;
prepend(&cwd, &buflen, "\0", 1);
error = prepend_path(&pwd, &tmp, &cwd, &buflen);
+ write_sequnlock(&rename_lock);
spin_unlock(&dcache_lock);
if (error)
if (copy_to_user(buf, cwd, len))
error = -EFAULT;
}
- } else
+ } else {
+ write_sequnlock(&rename_lock);
spin_unlock(&dcache_lock);
+ }
out:
path_put(&pwd);
int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
int result;
- unsigned long seq;
+ unsigned seq;
if (new_dentry == old_dentry)
return 1;
- /*
- * Need rcu_readlock to protect against the d_parent trashing
- * due to d_move
- */
- rcu_read_lock();
do {
/* for restarting inner loop in case of seq retry */
seq = read_seqbegin(&rename_lock);
+		/*
+		 * Need rcu_read_lock() to protect against d_parent being
+		 * trashed by a concurrent d_move().
+		 */
+ rcu_read_lock();
if (d_ancestor(old_dentry, new_dentry))
result = 1;
else
result = 0;
+ rcu_read_unlock();
} while (read_seqretry(&rename_lock, seq));
- rcu_read_unlock();
return result;
}
void d_genocide(struct dentry *root)
{
- struct dentry *this_parent = root;
+ struct dentry *this_parent;
struct list_head *next;
+ unsigned seq;
+rename_retry:
+ this_parent = root;
+ seq = read_seqbegin(&rename_lock);
spin_lock(&dcache_lock);
+ spin_lock(&this_parent->d_lock);
repeat:
next = this_parent->d_subdirs.next;
resume:
struct list_head *tmp = next;
struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
next = tmp->next;
- if (d_unhashed(dentry)||!dentry->d_inode)
+
+ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ if (d_unhashed(dentry) || !dentry->d_inode) {
+ spin_unlock(&dentry->d_lock);
continue;
+ }
if (!list_empty(&dentry->d_subdirs)) {
+ spin_unlock(&this_parent->d_lock);
+ spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
this_parent = dentry;
+ spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
goto repeat;
}
- atomic_dec(&dentry->d_count);
+ if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
+ dentry->d_flags |= DCACHE_GENOCIDE;
+ dentry->d_count--;
+ }
+ spin_unlock(&dentry->d_lock);
}
if (this_parent != root) {
- next = this_parent->d_u.d_child.next;
- atomic_dec(&this_parent->d_count);
- this_parent = this_parent->d_parent;
+ struct dentry *tmp;
+ struct dentry *child;
+
+ tmp = this_parent->d_parent;
+ if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
+ this_parent->d_flags |= DCACHE_GENOCIDE;
+ this_parent->d_count--;
+ }
+ rcu_read_lock();
+ spin_unlock(&this_parent->d_lock);
+ child = this_parent;
+ this_parent = tmp;
+ spin_lock(&this_parent->d_lock);
+ /* might go back up the wrong parent if we have had a rename
+ * or deletion */
+ if (this_parent != child->d_parent ||
+ read_seqretry(&rename_lock, seq)) {
+ spin_unlock(&this_parent->d_lock);
+ spin_unlock(&dcache_lock);
+ rcu_read_unlock();
+ goto rename_retry;
+ }
+ rcu_read_unlock();
+ next = child->d_u.d_child.next;
goto resume;
}
+ spin_unlock(&this_parent->d_lock);
spin_unlock(&dcache_lock);
+ if (read_seqretry(&rename_lock, seq))
+ goto rename_retry;
}
/**
{
int loop;
- percpu_counter_init(&nr_dentry, 0);
- percpu_counter_init(&nr_dentry_unused, 0);
-
/*
* A constructor could be added for stable state like the lists,
* but it is probably not worth it because of the cache nature