Now that the whole dcache_hash_bucket crap is gone, go all the way and
also remove the weird locking layering violations for locking the hash
buckets. Add hlist_bl_lock/unlock helpers to move the locking into the
list abstraction instead of requiring each caller to open code it.
After all, allowing for the bit locks is the whole point of these helpers
over the plain hlist variant.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
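
The pattern being removed is easiest to see at a call site. A minimal
before/after sketch, based on the __d_drop hunk from this patch (b is
whichever bit-locked hash bucket the dentry hashes to):

	struct hlist_bl_head *b = ...;	/* some bit-locked hash bucket */

	/* Before: each caller open-coded a bit spinlock on bit 0 of the head. */
	bit_spin_lock(0, (unsigned long *)&b->first);
	__hlist_bl_del(&dentry->d_hash);
	__bit_spin_unlock(0, (unsigned long *)&b->first);

	/* After: the same critical section through the new list helpers. */
	hlist_bl_lock(b);
	__hlist_bl_del(&dentry->d_hash);
	hlist_bl_unlock(b);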
diff --git a/fs/dcache.c b/fs/dcache.c
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ ... @@
 	return dentry_hashtable + (hash & D_HASHMASK);
 }
 
-static inline void spin_lock_bucket(struct hlist_bl_head *b)
-{
-	bit_spin_lock(0, (unsigned long *)&b->first);
-}
-
-static inline void spin_unlock_bucket(struct hlist_bl_head *b)
-{
-	__bit_spin_unlock(0, (unsigned long *)&b->first);
-}
-
 /* Statistics gathering. */
 struct dentry_stat_t dentry_stat = {
 	.age_limit = 45,
@@ ... @@
 		else
 			b = d_hash(dentry->d_parent, dentry->d_name.hash);
 
-		spin_lock_bucket(b);
+		hlist_bl_lock(b);
 		__hlist_bl_del(&dentry->d_hash);
 		dentry->d_hash.pprev = NULL;
-		spin_unlock_bucket(b);
+		hlist_bl_unlock(b);
 
 		dentry_rcuwalk_barrier(dentry);
 	}
@@ ... @@
 	tmp->d_inode = inode;
 	tmp->d_flags |= DCACHE_DISCONNECTED;
 	list_add(&tmp->d_alias, &inode->i_dentry);
-	spin_lock_bucket(&tmp->d_sb->s_anon);
+	hlist_bl_lock(&tmp->d_sb->s_anon);
 	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
-	spin_unlock_bucket(&tmp->d_sb->s_anon);
+	hlist_bl_unlock(&tmp->d_sb->s_anon);
 	spin_unlock(&tmp->d_lock);
 	spin_unlock(&inode->i_lock);
 
 	security_d_instantiate(tmp, inode);
@@ ... @@
 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
 {
 	BUG_ON(!d_unhashed(entry));
 
-	spin_lock_bucket(b);
+	hlist_bl_lock(b);
 	entry->d_flags |= DCACHE_RCUACCESS;
 	hlist_bl_add_head_rcu(&entry->d_hash, b);
-	spin_unlock_bucket(b);
+	hlist_bl_unlock(b);
 }
 
 static void _d_rehash(struct dentry * entry)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ ... @@
 static inline void spin_lock_bucket(unsigned int hash)
 {
-	struct hlist_bl_head *bl = &gl_hash_table[hash];
-	bit_spin_lock(0, (unsigned long *)bl);
+	hlist_bl_lock(&gl_hash_table[hash]);
 }
 
 static inline void spin_unlock_bucket(unsigned int hash)
 {
-	struct hlist_bl_head *bl = &gl_hash_table[hash];
-	__bit_spin_unlock(0, (unsigned long *)bl);
+	hlist_bl_unlock(&gl_hash_table[hash]);
 }
 
 static void gfs2_glock_dealloc(struct rcu_head *rcu)
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ ... @@
 #define _LINUX_LIST_BL_H
 
 #include <linux/list.h>
+#include <linux/bit_spinlock.h>
 
 /*
  * Special version of lists, where head of the list has a lock in the lowest
@@ ... @@
+static inline void hlist_bl_lock(struct hlist_bl_head *b)
+{
+ bit_spin_lock(0, (unsigned long *)b);
+}
+
+static inline void hlist_bl_unlock(struct hlist_bl_head *b)
+{
+ __bit_spin_unlock(0, (unsigned long *)b);
+}
+
 /**
  * hlist_bl_for_each_entry - iterate over list of given type
  * @tpos: the type * to use as a loop cursor.
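
For a sense of how the new helpers combine with the existing hlist_bl
primitives, here is a minimal sketch of a bit-locked hash table built on
this API; the obj structure, table size, and hash mask are hypothetical,
not part of the patch:

	#include <linux/list_bl.h>

	/*
	 * Hypothetical object kept in a bit-locked hash bucket. Bit 0 of
	 * the bucket's first pointer doubles as the lock bit, so entries
	 * need alignment that leaves it clear; any normally allocated
	 * struct qualifies.
	 */
	struct obj {
		struct hlist_bl_node o_hash;
		unsigned long o_key;
	};

	static struct hlist_bl_head obj_hash[256];

	static void obj_insert(struct obj *o)
	{
		struct hlist_bl_head *b = &obj_hash[o->o_key & 255];

		hlist_bl_lock(b);	/* was: bit_spin_lock(0, ...) open-coded */
		hlist_bl_add_head(&o->o_hash, b);
		hlist_bl_unlock(b);
	}

	static struct obj *obj_find(unsigned long key)
	{
		struct hlist_bl_head *b = &obj_hash[key & 255];
		struct hlist_bl_node *pos;
		struct obj *o;

		hlist_bl_lock(b);
		hlist_bl_for_each_entry(o, pos, b, o_hash) {
			if (o->o_key == key) {
				hlist_bl_unlock(b);
				return o;
			}
		}
		hlist_bl_unlock(b);
		return NULL;
	}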