summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
b854785)
There are several reasons why we want to do this:
- Firstly, it's large, and thus we'll scale better with multiple
GFS2 filesystems mounted at the same time
- Secondly, it's easier to scale its size as required (that's a plan
for later patches)
- Thirdly, we can use kzalloc rather than vmalloc when allocating
the superblock (it's now only 4888 bytes)
- Fourth its all part of my plan to eventually be able to use RCU
with the glock hash.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct gfs2_glock *gl);
static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct gfs2_glock *gl);
+static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
+
/**
* relaxed_state_ok - is a requested lock compatible with the current lock mode?
* @actual: the current state of the lock
/**
* relaxed_state_ok - is a requested lock compatible with the current lock mode?
* @actual: the current state of the lock
* Returns: NULL, or the struct gfs2_glock with the requested number
*/
* Returns: NULL, or the struct gfs2_glock with the requested number
*/
-static struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
+static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
const struct lm_lockname *name)
{
const struct lm_lockname *name)
{
- struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(sdp, name)];
+ struct gfs2_gl_hash_bucket *bucket = &gl_hash_table[gl_hash(sdp, name)];
struct gfs2_glock *gl;
read_lock(&bucket->hb_lock);
struct gfs2_glock *gl;
read_lock(&bucket->hb_lock);
name.ln_number = number;
name.ln_type = glops->go_type;
name.ln_number = number;
name.ln_type = glops->go_type;
- bucket = &sdp->sd_gl_hash[gl_hash(sdp, &name)];
+ bucket = &gl_hash_table[gl_hash(sdp, &name)];
read_lock(&bucket->hb_lock);
gl = search_bucket(bucket, sdp, &name);
read_lock(&bucket->hb_lock);
gl = search_bucket(bucket, sdp, &name);
set_bit(HIF_MUTEX, &gh.gh_iflags);
spin_lock(&gl->gl_spin);
set_bit(HIF_MUTEX, &gh.gh_iflags);
spin_lock(&gl->gl_spin);
- if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
+ if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
list_add_tail(&gh.gh_list, &gl->gl_waiters1);
list_add_tail(&gh.gh_list, &gl->gl_waiters1);
gl->gl_owner = current;
gl->gl_ip = (unsigned long)__builtin_return_address(0);
complete(&gh.gh_wait);
gl->gl_owner = current;
gl->gl_ip = (unsigned long)__builtin_return_address(0);
complete(&gh.gh_wait);
int acquired = 1;
spin_lock(&gl->gl_spin);
int acquired = 1;
spin_lock(&gl->gl_spin);
- if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
+ if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
gl->gl_owner = current;
gl->gl_ip = (unsigned long)__builtin_return_address(0);
}
gl->gl_owner = current;
gl->gl_ip = (unsigned long)__builtin_return_address(0);
}
spin_lock(&gl->gl_spin);
list_del_init(&gh->gh_list);
if (gl->gl_state == gh->gh_state ||
spin_lock(&gl->gl_spin);
list_del_init(&gh->gh_list);
if (gl->gl_state == gh->gh_state ||
- gl->gl_state == LM_ST_UNLOCKED)
+ gl->gl_state == LM_ST_UNLOCKED) {
if (gfs2_assert_warn(sdp, gh->gh_flags &
(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
fs_warn(sdp, "ret = 0x%.8X\n", ret);
if (gfs2_assert_warn(sdp, gh->gh_flags &
(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
fs_warn(sdp, "ret = 0x%.8X\n", ret);
return gh->gh_error;
gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
return gh->gh_error;
gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
- gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
- gh->gh_state,
+ gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
gh->gh_flags));
if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
gh->gh_flags));
if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
if (test_bit(GLF_PLUG, &gl->gl_flags))
continue;
if (test_bit(GLF_PLUG, &gl->gl_flags))
continue;
+ if (gl->gl_sbd != sdp)
+ continue;
/* examiner() must glock_put() */
gfs2_glock_hold(gl);
/* examiner() must glock_put() */
gfs2_glock_hold(gl);
unsigned int x;
for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
unsigned int x;
for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
- examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
+ examine_bucket(scan_glock, sdp, &gl_hash_table[x]);
cont = 0;
for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
cont = 0;
for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
- if (examine_bucket(clear_glock, sdp, &sdp->sd_gl_hash[x]))
+ if (examine_bucket(clear_glock, sdp, &gl_hash_table[x]))
cont = 1;
if (!wait || !cont)
cont = 1;
if (!wait || !cont)
- printk(KERN_INFO "Glock 0x%p (%u, %llu)\n",
- gl,
- gl->gl_name.ln_type,
+ printk(KERN_INFO "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number);
printk(KERN_INFO " gl_flags =");
(unsigned long long)gl->gl_name.ln_number);
printk(KERN_INFO " gl_flags =");
- for (x = 0; x < 32; x++)
+ for (x = 0; x < 32; x++) {
if (test_bit(x, &gl->gl_flags))
printk(" %u", x);
if (test_bit(x, &gl->gl_flags))
printk(" %u", x);
printk(" \n");
printk(KERN_INFO " gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
printk(KERN_INFO " gl_state = %u\n", gl->gl_state);
printk(" \n");
printk(KERN_INFO " gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
printk(KERN_INFO " gl_state = %u\n", gl->gl_state);
printk(KERN_INFO " reclaim = %s\n",
(list_empty(&gl->gl_reclaim)) ? "no" : "yes");
if (gl->gl_aspace)
printk(KERN_INFO " reclaim = %s\n",
(list_empty(&gl->gl_reclaim)) ? "no" : "yes");
if (gl->gl_aspace)
- printk(KERN_INFO " aspace = 0x%p nrpages = %lu\n",
- gl->gl_aspace,
+ printk(KERN_INFO " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
gl->gl_aspace->i_mapping->nrpages);
else
printk(KERN_INFO " aspace = no\n");
gl->gl_aspace->i_mapping->nrpages);
else
printk(KERN_INFO " aspace = no\n");
int error = 0;
for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
int error = 0;
for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
- bucket = &sdp->sd_gl_hash[x];
+ bucket = &gl_hash_table[x];
read_lock(&bucket->hb_lock);
list_for_each_entry(gl, &bucket->hb_list, gl_list) {
if (test_bit(GLF_PLUG, &gl->gl_flags))
continue;
read_lock(&bucket->hb_lock);
list_for_each_entry(gl, &bucket->hb_list, gl_list) {
if (test_bit(GLF_PLUG, &gl->gl_flags))
continue;
+ if (gl->gl_sbd != sdp)
+ continue;
error = dump_glock(gl);
if (error)
error = dump_glock(gl);
if (error)
+/**
+ * gfs2_glock_init - one-time setup of the global glock hash table
+ *
+ * Initializes the rwlock and list head of every bucket in the
+ * file-scope gl_hash_table (now shared by all mounted GFS2
+ * filesystems instead of living in each superblock).
+ *
+ * Returns: 0 (always succeeds; return value allows future failure paths)
+ */
+int __init gfs2_glock_init(void)
+{
+	unsigned i;
+	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
+		struct gfs2_gl_hash_bucket *hb = &gl_hash_table[i];
+		rwlock_init(&hb->hb_lock);
+		INIT_LIST_HEAD(&hb->hb_list);
+	}
+	return 0;
+}
+
void gfs2_scand_internal(struct gfs2_sbd *sdp);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait);
void gfs2_scand_internal(struct gfs2_sbd *sdp);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait);
+int __init gfs2_glock_init(void);
+
#endif /* __GLOCK_DOT_H__ */
#endif /* __GLOCK_DOT_H__ */
struct gfs2_trans;
struct gfs2_ail;
struct gfs2_jdesc;
struct gfs2_trans;
struct gfs2_ail;
struct gfs2_jdesc;
-struct gfs2_gl_hash_bucket;
struct gfs2_sbd;
typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
struct gfs2_sbd;
typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
struct list_head bd_ail_gl_list;
};
struct list_head bd_ail_gl_list;
};
+/*
+ * One bucket of the global glock hash table: a reader/writer lock
+ * protecting a list of glocks (struct gfs2_glock, linked via gl_list)
+ * that hash to this bucket.
+ */
+struct gfs2_gl_hash_bucket {
+	rwlock_t hb_lock;	/* guards hb_list; taken read-side for lookups */
+	struct list_head hb_list;	/* chain of glocks in this bucket */
+};
+
struct gfs2_glock_operations {
void (*go_xmote_th) (struct gfs2_glock * gl, unsigned int state,
int flags);
struct gfs2_glock_operations {
void (*go_xmote_th) (struct gfs2_glock * gl, unsigned int state,
int flags);
unsigned int gt_statfs_slow;
};
unsigned int gt_statfs_slow;
};
-struct gfs2_gl_hash_bucket {
- rwlock_t hb_lock;
- struct list_head hb_list;
-};
-
enum {
SDF_JOURNAL_CHECKED = 0,
SDF_JOURNAL_LIVE = 1,
enum {
SDF_JOURNAL_CHECKED = 0,
SDF_JOURNAL_LIVE = 1,
/* Lock Stuff */
struct lm_lockstruct sd_lockstruct;
/* Lock Stuff */
struct lm_lockstruct sd_lockstruct;
- struct gfs2_gl_hash_bucket sd_gl_hash[GFS2_GL_HASH_SIZE];
struct list_head sd_reclaim_list;
spinlock_t sd_reclaim_lock;
wait_queue_head_t sd_reclaim_wq;
struct list_head sd_reclaim_list;
spinlock_t sd_reclaim_lock;
wait_queue_head_t sd_reclaim_wq;
#include "ops_fstype.h"
#include "sys.h"
#include "util.h"
#include "ops_fstype.h"
#include "sys.h"
#include "util.h"
static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
+ error = gfs2_glock_init();
+ if (error)
+ goto fail;
gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
sizeof(struct gfs2_glock),
0, 0,
gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
sizeof(struct gfs2_glock),
0, 0,
static struct gfs2_sbd *init_sbd(struct super_block *sb)
{
struct gfs2_sbd *sdp;
static struct gfs2_sbd *init_sbd(struct super_block *sb)
{
struct gfs2_sbd *sdp;
- sdp = vmalloc(sizeof(struct gfs2_sbd));
+ sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
- memset(sdp, 0, sizeof(struct gfs2_sbd));
-
sb->s_fs_info = sdp;
sdp->sd_vfs = sb;
gfs2_tune_init(&sdp->sd_tune);
sb->s_fs_info = sdp;
sdp->sd_vfs = sb;
gfs2_tune_init(&sdp->sd_tune);
- for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
- rwlock_init(&sdp->sd_gl_hash[x].hb_lock);
- INIT_LIST_HEAD(&sdp->sd_gl_hash[x].hb_list);
- }
INIT_LIST_HEAD(&sdp->sd_reclaim_list);
spin_lock_init(&sdp->sd_reclaim_lock);
init_waitqueue_head(&sdp->sd_reclaim_wq);
INIT_LIST_HEAD(&sdp->sd_reclaim_list);
spin_lock_init(&sdp->sd_reclaim_lock);
init_waitqueue_head(&sdp->sd_reclaim_wq);