ocfs2/dlm: Track number of mles
author	Sunil Mushran <sunil.mushran@oracle.com>
	Thu, 26 Feb 2009 23:00:43 +0000 (15:00 -0800)
committer	Mark Fasheh <mfasheh@suse.com>
	Fri, 3 Apr 2009 18:39:21 +0000 (11:39 -0700)
The lifetime of an mle is limited to the duration of the lockres mastery
process. While this lifetime is typically fairly short, we have noticed
the number of mles explode under certain circumstances. This patch tracks
the number of mles of each type and should help us determine how best to
speed up the mastery process.

Signed-off-by: Sunil Mushran <sunil.mushran@oracle.com>
Signed-off-by: Mark Fasheh <mfasheh@suse.com>
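
The counting scheme is simple: for every mle type a cumulative and a current
counter are kept, both incremented when an mle is initialized and the current
one decremented when the mle is released. A minimal sketch of that pattern,
using the fields and enum values added by this patch (the helper functions
themselves are hypothetical; the patch open-codes these operations in
dlm_init_mle() and dlm_mle_release()):

/* Illustrative helpers only -- not part of this patch. */
static inline void dlm_mle_count_get(struct dlm_ctxt *dlm,
				     enum dlm_mle_type type)
{
	atomic_inc(&dlm->mle_tot_count[type]);	/* ever created */
	atomic_inc(&dlm->mle_cur_count[type]);	/* currently live */
}

static inline void dlm_mle_count_put(struct dlm_ctxt *dlm,
				     enum dlm_mle_type type)
{
	atomic_dec(&dlm->mle_cur_count[type]);	/* mle being freed */
}

With this split, mle_tot_count[] grows monotonically while mle_cur_count[]
reflects the number of mles alive at any instant, which is what makes a
sudden explosion visible.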
fs/ocfs2/dlm/dlmcommon.h
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmmaster.c

diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index aa55271..67b3447 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -52,7 +52,8 @@
 enum dlm_mle_type {
        DLM_MLE_BLOCK,
        DLM_MLE_MASTER,
-       DLM_MLE_MIGRATION
+       DLM_MLE_MIGRATION,
+       DLM_MLE_NUM_TYPES
 };
 
 struct dlm_lock_name {
@@ -156,6 +157,8 @@ struct dlm_ctxt
        struct list_head mle_hb_events;
 
        /* these give a really vague idea of the system load */
+       atomic_t mle_tot_count[DLM_MLE_NUM_TYPES];
+       atomic_t mle_cur_count[DLM_MLE_NUM_TYPES];
        atomic_t local_resources;
        atomic_t remote_resources;
        atomic_t unknown_resources;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 869648c..0479bdf 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1608,6 +1608,11 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
        atomic_set(&dlm->remote_resources, 0);
        atomic_set(&dlm->unknown_resources, 0);
 
+       for (i = 0; i < DLM_MLE_NUM_TYPES; ++i) {
+               atomic_set(&dlm->mle_tot_count[i], 0);
+               atomic_set(&dlm->mle_cur_count[i], 0);
+       }
+
        spin_lock_init(&dlm->work_lock);
        INIT_LIST_HEAD(&dlm->work_list);
        INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 604552e..acfc928 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -325,6 +325,9 @@ static void dlm_init_mle(struct dlm_master_list_entry *mle,
                mle->u.mlename.hash = dlm_lockid_hash(name, namelen);
        }
 
+       atomic_inc(&dlm->mle_tot_count[mle->type]);
+       atomic_inc(&dlm->mle_cur_count[mle->type]);
+
        /* copy off the node_map and register hb callbacks on our copy */
        memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
        memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
@@ -467,6 +470,8 @@ static void dlm_mle_release(struct kref *kref)
        /* detach the mle from the domain node up/down events */
        __dlm_mle_detach_hb_events(dlm, mle);
 
+       atomic_dec(&dlm->mle_cur_count[mle->type]);
+
        /* NOTE: kfree under spinlock here.
         * if this is bad, we can move this to a freelist. */
        kmem_cache_free(dlm_mle_cache, mle);
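
For completeness, a hedged sketch of how the new counters could be reported,
e.g. from a debug or statistics dump. dlm_print_mle_counts() and its output
format are assumptions made for illustration and are not part of this patch;
only the counter fields and enum values come from the diff above:

static void dlm_print_mle_counts(struct dlm_ctxt *dlm)
{
	int i;
	/* names for the three real types; DLM_MLE_NUM_TYPES is only a bound */
	static const char *names[DLM_MLE_NUM_TYPES] = {
		[DLM_MLE_BLOCK]		= "block",
		[DLM_MLE_MASTER]	= "master",
		[DLM_MLE_MIGRATION]	= "migration",
	};

	for (i = 0; i < DLM_MLE_NUM_TYPES; i++)
		printk(KERN_NOTICE "%s: %s mles: %d live, %d total\n",
		       dlm->name, names[i],
		       atomic_read(&dlm->mle_cur_count[i]),
		       atomic_read(&dlm->mle_tot_count[i]));
}

Comparing the "live" and "total" figures per type shows both the steady-state
load and how much mastery traffic has accumulated since the domain was created.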