/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

/* GFS2-internal headers; the excerpt elided these, and this is the
   set the code below is assumed to need. */
#include "gfs2.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"

/* Must be kept in sync with the beginning of struct gfs2_glock */
struct glock_plug {
	struct list_head gl_list;
	unsigned long gl_flags;
};

struct greedy {
	struct gfs2_holder gr_gh;
	struct work_struct gr_work;
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
				   int flags)
{
	if (actual == requested)
		return 1;

	if (flags & GL_EXACT)
		return 0;

	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
		return 1;

	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
		return 1;

	return 0;
}
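
/*
 * Illustrative examples (added commentary, not from the original file)
 * of how the rules above combine; the states and flags come from the
 * lock-module interface:
 *
 *	relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0)           -> 1
 *	relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, GL_EXACT)    -> 0
 *	relaxed_state_ok(LM_ST_SHARED, LM_ST_EXCLUSIVE, 0)           -> 0
 *	relaxed_state_ok(LM_ST_DEFERRED, LM_ST_SHARED, LM_FLAG_ANY)  -> 1
 *	relaxed_state_ok(LM_ST_UNLOCKED, LM_ST_SHARED, LM_FLAG_ANY)  -> 0
 */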

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @name: The name of the lock (the lock number and lock type)
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(uint64_t), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}
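
/*
 * Sketch (added for illustration): gl_hash() selects the bucket that a
 * lock name lives in, so finding a glock is a hash plus a short list
 * walk, as the lookup functions below do:
 *
 *	struct gfs2_gl_hash_bucket *bucket =
 *		&sdp->sd_gl_hash[gl_hash(&name)];
 */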

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	gfs2_lm_put_lock(sdp, gl->gl_lock);

	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	kref_get(&gl->gl_ref);
}

/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
	struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
	struct gfs2_sbd *sdp = gl->gl_sbd;

	gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
	gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
	gfs2_assert(sdp, list_empty(&gl->gl_holders));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * Returns: 1 if the glock was freed
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
	int rv = 0;

	mutex_lock(&sdp->sd_invalidate_inodes_mutex);

	write_lock(&bucket->hb_lock);
	if (kref_put(&gl->gl_ref, kill_glock)) {
		list_del_init(&gl->gl_list);
		write_unlock(&bucket->hb_lock);
		glock_free(gl);
		rv = 1;
		goto out;
	}
	write_unlock(&bucket->hb_lock);
 out:
	mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
	return rv;
}

/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
	int empty;

	spin_lock(&gl->gl_spin);
	empty = list_empty(head);
	spin_unlock(&gl->gl_spin);

	return empty;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
					struct lm_lockname *name)
{
	struct gfs2_glock *gl;

	list_for_each_entry(gl, &bucket->hb_list, gl_list) {
		if (test_bit(GLF_PLUG, &gl->gl_flags))
			continue;
		if (!lm_name_equal(&gl->gl_name, name))
			continue;

		kref_get(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
				   struct lm_lockname *name)
{
	struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
	struct gfs2_glock *gl;

	read_lock(&bucket->hb_lock);
	gl = search_bucket(bucket, name);
	read_unlock(&bucket->hb_lock);

	return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
		   struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name;
	struct gfs2_glock *gl, *tmp;
	struct gfs2_gl_hash_bucket *bucket;
	int error;

	name.ln_number = number;
	name.ln_type = glops->go_type;
	bucket = &sdp->sd_gl_hash[gl_hash(&name)];

	read_lock(&bucket->hb_lock);
	gl = search_bucket(bucket, &name);
	read_unlock(&bucket->hb_lock);

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	memset(gl, 0, sizeof(struct gfs2_glock));

	INIT_LIST_HEAD(&gl->gl_list);
	gl->gl_name = name;
	kref_init(&gl->gl_ref);

	spin_lock_init(&gl->gl_spin);

	gl->gl_state = LM_ST_UNLOCKED;
	INIT_LIST_HEAD(&gl->gl_holders);
	INIT_LIST_HEAD(&gl->gl_waiters1);
	INIT_LIST_HEAD(&gl->gl_waiters2);
	INIT_LIST_HEAD(&gl->gl_waiters3);

	gl->gl_ops = glops;

	gl->gl_bucket = bucket;
	INIT_LIST_HEAD(&gl->gl_reclaim);

	gl->gl_sbd = sdp;

	lops_init_le(&gl->gl_le, &gfs2_glock_lops);
	INIT_LIST_HEAD(&gl->gl_ail_list);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops ||
	    glops == &gfs2_rgrp_glops ||
	    glops == &gfs2_meta_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;

	write_lock(&bucket->hb_lock);
	tmp = search_bucket(bucket, &name);
	if (tmp) {
		write_unlock(&bucket->hb_lock);
		glock_free(gl);
		gl = tmp;
	} else {
		list_add_tail(&gl->gl_list, &bucket->hb_list);
		write_unlock(&bucket->hb_lock);
	}

	*glp = gl;

	return 0;

 fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);

 fail:
	kmem_cache_free(gfs2_glock_cachep, gl);

	return error;
}
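
/*
 * Typical use (an illustrative sketch, not code from this file): look up
 * or create the glock for an on-disk object, then drop the reference
 * when done.  "no_addr" is just an example lock number here.
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops,
 *			       CREATE, &gl);
 *	if (!error) {
 *		... use gl ...
 *		gfs2_glock_put(gl);
 *	}
 */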

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, int flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_owner = (flags & GL_NEVER_RECURSE) ? NULL : current;
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	init_completion(&gh->gh_wait);

	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 */

void gfs2_holder_reinit(unsigned int state, int flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gh->gh_iflags &= 1 << HIF_ALLOCED;
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
}
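
/*
 * Holder lifecycle in brief (added commentary): a holder pins its glock
 * from gfs2_holder_init() until gfs2_holder_uninit(), whether or not the
 * lock is ever granted:
 *
 *	struct gfs2_holder gh;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);	// takes a glock ref
 *	...
 *	gfs2_holder_uninit(&gh);			// drops the glock ref
 */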

/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: the allocation flags (callers that cannot handle failure
 *	pass GFP_KERNEL | __GFP_NOFAIL)
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl, unsigned int state,
				    int flags, gfp_t gfp_flags)
{
	struct gfs2_holder *gh;

	gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
	if (!gh)
		return NULL;

	gfs2_holder_init(gl, state, flags, gh);
	set_bit(HIF_ALLOCED, &gh->gh_iflags);

	return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 */

void gfs2_holder_put(struct gfs2_holder *gh)
{
	gfs2_holder_uninit(gh);
	kfree(gh);
}

/**
 * handle_recurse - move other holder structures (marked recursive)
 *                  onto the holders list
 * @gh: the holder structure
 */

static void handle_recurse(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_holder *tmp_gh, *safe;
	int found = 0;

	if (gfs2_assert_warn(sdp, gh->gh_owner))
		return;

	list_for_each_entry_safe(tmp_gh, safe, &gl->gl_waiters3, gh_list) {
		if (tmp_gh->gh_owner != gh->gh_owner)
			continue;

		gfs2_assert_warn(sdp,
				 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));

		list_move_tail(&tmp_gh->gh_list, &gl->gl_holders);
		tmp_gh->gh_error = 0;
		set_bit(HIF_HOLDER, &tmp_gh->gh_iflags);

		complete(&tmp_gh->gh_wait);

		found = 1;
	}

	gfs2_assert_warn(sdp, found);
}

/**
 * do_unrecurse - a recursive holder was just dropped off the waiters3 list
 * @gh: the holder that was just dequeued
 *
 * If there is only one other recursive holder, clear its HIF_RECURSE bit.
 * If there is more than one, leave them alone.
 */

static void do_unrecurse(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_holder *tmp_gh, *last_gh = NULL;
	int found = 0;

	if (gfs2_assert_warn(sdp, gh->gh_owner))
		return;

	list_for_each_entry(tmp_gh, &gl->gl_waiters3, gh_list) {
		if (tmp_gh->gh_owner != gh->gh_owner)
			continue;

		gfs2_assert_warn(sdp,
				 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));

		if (found)
			return;

		found = 1;
		last_gh = tmp_gh;
	}

	if (!gfs2_assert_warn(sdp, found))
		clear_bit(HIF_RECURSE, &last_gh->gh_iflags);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	set_bit(GLF_LOCK, &gl->gl_flags);
	complete(&gh->gh_wait);

	return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to a more
 * restrictive one.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	int recurse;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);

			if (atomic_read(&sdp->sd_reclaim_count) >
			    gfs2_tune_get(sdp, gt_reclaim_limit) &&
			    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
				gfs2_reclaim_glock(sdp);
				gfs2_reclaim_glock(sdp);
			}

			glops->go_xmote_th(gl, gh->gh_state,
					   gh->gh_flags);

			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
		recurse = 0;
	} else {
		struct gfs2_holder *next_gh;
		if (gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
		recurse = test_bit(HIF_RECURSE, &gh->gh_iflags);
	}

	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	if (recurse)
		handle_recurse(gh);

	complete(&gh->gh_wait);

	return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!list_empty(&gl->gl_holders))
		return 1;

	if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
		spin_lock(&gl->gl_spin);
	} else {
		gl->gl_req_gh = gh;
		set_bit(GLF_LOCK, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);

		if (gh->gh_state == LM_ST_UNLOCKED ||
		    gl->gl_state != LM_ST_EXCLUSIVE)
			glops->go_drop_th(gl);
		else
			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

		spin_lock(&gl->gl_spin);
	}

	return 0;
}

/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	clear_bit(GLF_GREEDY, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	gfs2_holder_uninit(gh);
	kfree(container_of(gh, struct greedy, gr_gh));

	spin_lock(&gl->gl_spin);

	return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 */

static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_MUTEX, &gh->gh_iflags))
				blocked = rq_mutex(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters2) &&
			   !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
			gh = list_entry(gl->gl_waiters2.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
				blocked = rq_demote(gh);
			else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
				blocked = rq_greedy(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
				blocked = rq_promote(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else
			break;

		if (blocked)
			break;
	}
}
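
/*
 * Summary of the queue priorities implemented above (added commentary):
 * waiters1 (glmutex requests) is always serviced first, then waiters2
 * (demote/greedy requests, unless GLF_SKIP_WAITERS2 is set), and only
 * then waiters3 (promote requests).  Once an rq_*() handler reports the
 * queue as blocked, the loop stops until the state machine unblocks it.
 */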

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;

	gfs2_holder_init(gl, 0, 0, &gh);
	set_bit(HIF_MUTEX, &gh.gh_iflags);

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
	else
		complete(&gh.gh_wait);
	spin_unlock(&gl->gl_spin);

	wait_for_completion(&gh.gh_wait);
	gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
	int acquired = 1;

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		acquired = 0;
	spin_unlock(&gl->gl_spin);

	return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 */

void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}
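
/*
 * Usage pattern (an illustrative sketch): the glmutex serializes
 * state-machine work on one glock without holding gl_spin across the
 * whole operation:
 *
 *	gfs2_glmutex_lock(gl);
 *	... inspect or change glock state, destroy cached objects ...
 *	gfs2_glmutex_unlock(gl);
 *
 * gfs2_glmutex_trylock() is the non-blocking variant used by the
 * reclaim and scan paths later in this file.
 */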

/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
	struct gfs2_holder *gh, *new_gh = NULL;

 restart:
	spin_lock(&gl->gl_spin);

	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
		    gl->gl_req_gh != gh) {
			if (gh->gh_state != state)
				gh->gh_state = LM_ST_UNLOCKED;
			goto out;
		}
	}

	if (new_gh) {
		list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
		new_gh = NULL;
	} else {
		spin_unlock(&gl->gl_spin);

		new_gh = gfs2_holder_get(gl, state,
					 LM_FLAG_TRY | GL_NEVER_RECURSE,
					 GFP_KERNEL | __GFP_NOFAIL);
		set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
		set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

		goto restart;
	}

 out:
	spin_unlock(&gl->gl_spin);

	if (new_gh)
		gfs2_holder_put(new_gh);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA | DIO_DATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, DIO_DATA);
	}

	/* Deal with each possible exit condition */

	if (!gh)
		gl->gl_stamp = jiffies;

	else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = -EIO;
		if (test_bit(HIF_RECURSE, &gh->gh_iflags))
			do_unrecurse(gh);
		spin_unlock(&gl->gl_spin);

	} else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		if (gl->gl_state == gh->gh_state ||
		    gl->gl_state == LM_ST_UNLOCKED)
			gh->gh_error = 0;
		else {
			if (gfs2_assert_warn(sdp, gh->gh_flags &
					(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
				fs_warn(sdp, "ret = 0x%.8X\n", ret);
			gh->gh_error = GLR_TRYFAILED;
		}
		spin_unlock(&gl->gl_spin);

		if (ret & LM_OUT_CANCELED)
			handle_callback(gl, LM_ST_UNLOCKED); /* Lame */

	} else if (ret & LM_OUT_CANCELED) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_CANCELED;
		if (test_bit(HIF_RECURSE, &gh->gh_iflags))
			do_unrecurse(gh);
		spin_unlock(&gl->gl_spin);

	} else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		spin_lock(&gl->gl_spin);
		list_move_tail(&gh->gh_list, &gl->gl_holders);
		gh->gh_error = 0;
		set_bit(HIF_HOLDER, &gh->gh_iflags);
		spin_unlock(&gl->gl_spin);

		set_bit(HIF_FIRST, &gh->gh_iflags);

		op_done = 0;

	} else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_TRYFAILED;
		if (test_bit(HIF_RECURSE, &gh->gh_iflags))
			do_unrecurse(gh);
		spin_unlock(&gl->gl_spin);

	} else {
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
				 LM_FLAG_NOEXP | LM_FLAG_ANY |
				 LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (glops->go_sync)
			glops->go_sync(gl,
				       DIO_METADATA | DIO_DATA | DIO_RELEASE);
	}

	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
			       lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		xmote_bh(gl, lck_ret);
}
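
/*
 * Note (added commentary): gfs2_lm_lock() may complete the request
 * immediately, in which case xmote_bh() runs right here in the caller's
 * context; or it may return LM_OUT_ASYNC, in which case the lock module
 * later reports the result through gfs2_glock_cb(LM_CB_ASYNC, ...) and
 * gl->gl_req_bh (i.e. xmote_bh) runs from that callback instead.
 */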

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Wakes up the process waiting on the struct gfs2_holder (if any) and
 * drops the reference on the glock that the submitting half took out.
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);

	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA | DIO_DATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_drop_bh)
		glops->go_drop_bh(gl);

	spin_lock(&gl->gl_spin);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (glops->go_sync)
			glops->go_sync(gl,
				       DIO_METADATA | DIO_DATA | DIO_RELEASE);
	}

	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		drop_bh(gl, ret);
	else
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		if (gl->gl_req_bh &&
		    !(gl->gl_req_gh &&
		      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			if (test_bit(HIF_RECURSE, &gh->gh_iflags))
				do_unrecurse(gh);
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_for_completion(&gh->gh_wait);

	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
						   gh->gh_state,
						   gh->gh_flags));

	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				if (test_and_clear_bit(HIF_RECURSE,
						       &gh->gh_iflags))
					do_unrecurse(gh);
				spin_unlock(&gl->gl_spin);
			}
		}

		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		if (test_bit(HIF_RECURSE, &gh->gh_iflags))
			handle_recurse(gh);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, head, gh_list) {
		if (gh->gh_owner == owner)
			return gh;
	}

	return NULL;
}

/**
 * recurse_check - make sure the new holder is compatible with the
 *                 pre-existing one
 * @existing: the holder already on the queue
 * @new: the holder being added
 * @state: the lock state to check the new holder against
 *
 * Returns: 0 if the holders are compatible, error otherwise
 */

static int recurse_check(struct gfs2_holder *existing, struct gfs2_holder *new,
			 unsigned int state)
{
	struct gfs2_sbd *sdp = existing->gh_gl->gl_sbd;

	if (gfs2_assert_warn(sdp, (new->gh_flags & LM_FLAG_ANY) ||
				  !(existing->gh_flags & LM_FLAG_ANY)))
		goto fail;

	if (gfs2_assert_warn(sdp, (existing->gh_flags & GL_LOCAL_EXCL) ||
				  !(new->gh_flags & GL_LOCAL_EXCL)))
		goto fail;

	if (gfs2_assert_warn(sdp, relaxed_state_ok(state, new->gh_state,
						   new->gh_flags)))
		goto fail;

	return 0;

 fail:
	set_bit(HIF_ABORTED, &new->gh_iflags);
	return -EINVAL;
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 */

static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	if (!gh->gh_owner)
		goto out;

	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
	if (existing) {
		if (recurse_check(existing, gh, gl->gl_state))
			return;

		list_add_tail(&gh->gh_list, &gl->gl_holders);
		set_bit(HIF_HOLDER, &gh->gh_iflags);

		gh->gh_error = 0;
		complete(&gh->gh_wait);

		return;
	}

	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
	if (existing) {
		if (recurse_check(existing, gh, existing->gh_state))
			return;

		set_bit(HIF_RECURSE, &gh->gh_iflags);
		set_bit(HIF_RECURSE, &existing->gh_iflags);

		list_add_tail(&gh->gh_list, &gl->gl_waiters3);

		return;
	}

 out:
	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * If the holder's gh_flags contain GL_ASYNC, this never returns an error.
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

 restart:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		set_bit(HIF_ABORTED, &gh->gh_iflags);
		return -EIO;
	}

	set_bit(HIF_PROMOTE, &gh->gh_iflags);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC)) {
		error = glock_wait_internal(gh);
		if (error == GLR_CANCELED) {
			msleep(100);
			goto restart;
		}
	}

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder structure
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		if (gh->gh_error == GLR_CANCELED) {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	int error;

	error = glock_wait_internal(gh);
	if (error == GLR_CANCELED) {
		msleep(100);
		gh->gh_flags &= ~GL_ASYNC;
		error = gfs2_glock_nq(gh);
	}

	return error;
}
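
/*
 * Async usage sketch (illustrative, not code from this file):
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);	// per the comment above, no error with GL_ASYNC
 *	... do other work ...
 *	if (gfs2_glock_poll(&gh))
 *		error = gfs2_glock_wait(&gh);
 */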

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gh->gh_flags & GL_SYNC)
		set_bit(GLF_SYNC, &gl->gl_flags);

	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		spin_unlock(&gl->gl_spin);

		if (glops->go_unlock)
			glops->go_unlock(gh);

		if (test_bit(GLF_SYNC, &gl->gl_flags)) {
			if (glops->go_sync)
				glops->go_sync(gl, DIO_METADATA | DIO_DATA);
		}

		gl->gl_stamp = jiffies;

		spin_lock(&gl->gl_spin);
	}

	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 */

void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state, int flags)
{
	struct gfs2_glock_operations *glops = gl->gl_ops;

	spin_lock(&gl->gl_spin);

	if (test_bit(GLF_LOCK, &gl->gl_flags) ||
	    !list_empty(&gl->gl_holders) ||
	    !list_empty(&gl->gl_waiters1) ||
	    !list_empty(&gl->gl_waiters2) ||
	    !list_empty(&gl->gl_waiters3) ||
	    relaxed_state_ok(gl->gl_state, state, flags)) {
		spin_unlock(&gl->gl_spin);
		return;
	}

	set_bit(GLF_PREFETCH, &gl->gl_flags);
	set_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	glops->go_xmote_th(gl, state, flags);
}

/**
 * gfs2_glock_force_drop - Force a glock to be uncached
 * @gl: the glock
 */

void gfs2_glock_force_drop(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;

	gfs2_holder_init(gl, LM_ST_UNLOCKED, GL_NEVER_RECURSE, &gh);
	set_bit(HIF_DEMOTE, &gh.gh_iflags);

	spin_lock(&gl->gl_spin);
	list_add_tail(&gh.gh_list, &gl->gl_waiters2);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	wait_for_completion(&gh.gh_wait);
	gfs2_holder_uninit(&gh);
}

static void greedy_work(void *data)
{
	struct greedy *gr = (struct greedy *)data;
	struct gfs2_holder *gh = &gr->gr_gh;
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

	if (glops->go_greedy)
		glops->go_greedy(gl);

	spin_lock(&gl->gl_spin);

	if (list_empty(&gl->gl_waiters2)) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);
		gfs2_holder_uninit(gh);
		kfree(gr);
	} else {
		gfs2_glock_hold(gl);
		list_add_tail(&gh->gh_list, &gl->gl_waiters2);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
		gfs2_glock_put(gl);
	}
}

/**
 * gfs2_glock_be_greedy - defer demote requests on a glock for a while
 * @gl: the glock
 * @time: how long to be greedy for (in jiffies)
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
	struct greedy *gr;
	struct gfs2_holder *gh;

	if (!time ||
	    gl->gl_sbd->sd_args.ar_localcaching ||
	    test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
		return 1;

	gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
	if (!gr) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		return 1;
	}
	gh = &gr->gr_gh;

	gfs2_holder_init(gl, 0, GL_NEVER_RECURSE, gh);
	set_bit(HIF_GREEDY, &gh->gh_iflags);
	INIT_WORK(&gr->gr_work, greedy_work, gr);

	set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
	schedule_delayed_work(&gr->gr_work, time);

	return 0;
}

/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */

int gfs2_glock_nq_init(struct gfs2_glock *gl, unsigned int state, int flags,
		       struct gfs2_holder *gh)
{
	int error;

	gfs2_holder_init(gl, state, flags, gh);

	error = gfs2_glock_nq(gh);
	if (error)
		gfs2_holder_uninit(gh);

	return error;
}
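
/*
 * The common calling pattern (an illustrative sketch):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	... read data protected by the glock ...
 *	gfs2_glock_dq_uninit(&gh);
 *
 * ip->i_gl is just an example source of a glock here.
 */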

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: 0, GLR_*, or errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, uint64_t number,
		      struct gfs2_glock_operations *glops, unsigned int state,
		      int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Returns: 1 if A > B, -1 if A < B, 0 if A == B
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
	struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
	struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	struct lm_lockname *b = &gh_b->gh_gl->gl_name;
	int ret = 0;

	if (a->ln_number > b->ln_number)
		ret = 1;
	else if (a->ln_number < b->ln_number)
		ret = -1;
	else {
		if (gh_a->gh_state == LM_ST_SHARED &&
		    gh_b->gh_state == LM_ST_EXCLUSIVE)
			ret = 1;
		else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
			 (gh_b->gh_flags & GL_LOCAL_EXCL))
			ret = 1;
	}

	return ret;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch array of holder pointers, filled in and sorted here
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int *e;
	unsigned int x;
	int borked = 0, serious = 0;
	int error = 0;

	if (!num_gh)
		return 0;

	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	}

	e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	for (x = 0; x < num_gh; x++) {
		ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
		error = gfs2_glock_nq(&ghs[x]);
		if (error) {
			borked = 1;
			serious = error;
			num_gh = x;
			break;
		}
	}

	for (x = 0; x < num_gh; x++) {
		error = e[x] = glock_wait_internal(&ghs[x]);
		if (error) {
			borked = 1;
			if (error != GLR_TRYFAILED && error != GLR_CANCELED)
				serious = error;
		}
	}

	if (!borked) {
		kfree(e);
		return 0;
	}

	for (x = 0; x < num_gh; x++)
		if (!e[x])
			gfs2_glock_dq(&ghs[x]);

	if (serious)
		error = serious;
	else {
		for (x = 0; x < num_gh; x++)
			gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
					   &ghs[x]);
		error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
	}

	kfree(e);

	return error;
}
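
/*
 * Multi-lock usage sketch (illustrative): holders are initialized
 * individually, then acquired as a set; on failure no locks remain held.
 *
 *	struct gfs2_holder ghs[2];
 *
 *	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error)
 *		gfs2_glock_dq_m(2, ghs);
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 *
 * gl_a and gl_b are placeholder glocks for the example.
 */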

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 */

void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, uint64_t number,
			     struct gfs2_glock_operations *glops,
			     unsigned int state, int flags)
{
	struct gfs2_glock *gl;
	int error;

	if (atomic_read(&sdp->sd_reclaim_count) <
	    gfs2_tune_get(sdp, gt_reclaim_limit)) {
		error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
		if (!error) {
			gfs2_glock_prefetch(gl, state, flags);
			gfs2_glock_put(gl);
		}
	}
}

/**
 * gfs2_lvb_hold - attach an LVB to a glock
 * @gl: The glock in question
 *
 * Returns: 0 on success, errno otherwise
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
	int error;

	gfs2_glmutex_lock(gl);

	if (!atomic_read(&gl->gl_lvb_count)) {
		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
		if (error) {
			gfs2_glmutex_unlock(gl);
			return error;
		}
		gfs2_glock_hold(gl);
	}
	atomic_inc(&gl->gl_lvb_count);

	gfs2_glmutex_unlock(gl);

	return 0;
}

/**
 * gfs2_lvb_unhold - detach an LVB from a glock
 * @gl: The glock in question
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
	gfs2_glock_hold(gl);
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
		gl->gl_lvb = NULL;
		gfs2_glock_put(gl);
	}

	gfs2_glmutex_unlock(gl);
	gfs2_glock_put(gl);
}

void gfs2_lvb_sync(struct gfs2_glock *gl)
{
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count));
	if (!gfs2_assert_warn(gl->gl_sbd, gfs2_glock_is_held_excl(gl)))
		gfs2_lm_sync_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);

	gfs2_glmutex_unlock(gl);
}
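
/*
 * LVB usage sketch (illustrative): the lock value block is a small blob
 * of memory the lock module keeps coherent across the cluster; holds are
 * refcounted per glock.
 *
 *	error = gfs2_lvb_hold(gl);
 *	if (!error) {
 *		... read or update gl->gl_lvb while holding the glock ...
 *		gfs2_lvb_sync(gl);	// push changes (EX holders only)
 *		gfs2_lvb_unhold(gl);
 *	}
 */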

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
			unsigned int state)
{
	struct gfs2_glock *gl;

	gl = gfs2_glock_find(sdp, name);
	if (!gl)
		return;

	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, state);
	handle_callback(gl, state);

	spin_lock(&gl->gl_spin);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @fsdata: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(lm_fsdata_t *fsdata, unsigned int type, void *data)
{
	struct gfs2_sbd *sdp = (struct gfs2_sbd *)fsdata;

	switch (type) {
	case LM_CB_NEED_E:
		blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_UNLOCKED);
		return;

	case LM_CB_NEED_D:
		blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_DEFERRED);
		return;

	case LM_CB_NEED_S:
		blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_SHARED);
		return;

	case LM_CB_ASYNC: {
		struct lm_async_cb *async = (struct lm_async_cb *)data;
		struct gfs2_glock *gl;

		gl = gfs2_glock_find(sdp, &async->lc_name);
		if (gfs2_assert_warn(sdp, gl))
			return;
		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
			gl->gl_req_bh(gl, async->lc_ret);
		gfs2_glock_put(gl);

		return;
	}

	case LM_CB_NEED_RECOVERY:
		gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
		if (sdp->sd_recoverd_process)
			wake_up_process(sdp->sd_recoverd_process);
		return;

	case LM_CB_DROPLOCKS:
		gfs2_gl_hash_clear(sdp, NO_WAIT);
		gfs2_quota_scan(sdp);
		return;

	default:
		gfs2_assert_warn(sdp, 0);
		return;
	}
}

/**
 * gfs2_try_toss_inode - try to remove a particular inode struct from cache
 * @sdp: the filesystem
 * @inum: the inode number
 */

void gfs2_try_toss_inode(struct gfs2_sbd *sdp, struct gfs2_inum *inum)
{
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	int error;

	error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops,
			       NO_CREATE, &gl);
	if (error || !gl)
		return;

	if (!gfs2_glmutex_trylock(gl))
		goto out;

	ip = get_gl2ip(gl);
	if (!ip)
		goto out_unlock;

	if (atomic_read(&ip->i_count))
		goto out_unlock;

	gfs2_inode_destroy(ip);

 out_unlock:
	gfs2_glmutex_unlock(gl);

 out:
	gfs2_glock_put(gl);
}

/**
 * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
 *                          iopen glock from memory
 * @io_gl: the iopen glock
 * @state: the state into which the glock should be put
 */

void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
{
	struct gfs2_glock *i_gl;

	if (state != LM_ST_UNLOCKED)
		return;

	spin_lock(&io_gl->gl_spin);
	i_gl = get_gl2gl(io_gl);
	if (i_gl) {
		gfs2_glock_hold(i_gl);
		spin_unlock(&io_gl->gl_spin);
	} else {
		spin_unlock(&io_gl->gl_spin);
		return;
	}

	if (gfs2_glmutex_trylock(i_gl)) {
		struct gfs2_inode *ip = get_gl2ip(i_gl);
		if (ip) {
			gfs2_try_toss_vnode(ip);
			gfs2_glmutex_unlock(i_gl);
			gfs2_glock_schedule_for_reclaim(i_gl);
			goto out;
		}
		gfs2_glmutex_unlock(i_gl);
	}

 out:
	gfs2_glock_put(i_gl);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	int demote = 1;

	if (test_bit(GLF_STICKY, &gl->gl_flags))
		demote = 0;
	else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
		demote = time_after_eq(jiffies,
				       gl->gl_stamp +
				       gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
	else if (glops->go_demote_ok)
		demote = glops->go_demote_ok(gl);

	return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&gl->gl_reclaim)) {
		gfs2_glock_hold(gl);
		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
		atomic_inc(&sdp->sd_reclaim_count);
	}
	spin_unlock(&sdp->sd_reclaim_lock);

	wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&sdp->sd_reclaim_list)) {
		spin_unlock(&sdp->sd_reclaim_lock);
		return;
	}
	gl = list_entry(sdp->sd_reclaim_list.next,
			struct gfs2_glock, gl_reclaim);
	list_del_init(&gl->gl_reclaim);
	spin_unlock(&sdp->sd_reclaim_lock);

	atomic_dec(&sdp->sd_reclaim_count);
	atomic_inc(&sdp->sd_reclaimed);

	if (gfs2_glmutex_trylock(gl)) {
		if (gl->gl_ops == &gfs2_inode_glops) {
			struct gfs2_inode *ip = get_gl2ip(gl);
			if (ip && !atomic_read(&ip->i_count))
				gfs2_inode_destroy(ip);
		}
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED &&
		    demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  struct gfs2_gl_hash_bucket *bucket)
{
	struct glock_plug plug;
	struct list_head *tmp;
	struct gfs2_glock *gl;
	int entries;

	/* Add "plug" to end of bucket list, work back up list from there */
	memset(&plug.gl_flags, 0, sizeof(unsigned long));
	set_bit(GLF_PLUG, &plug.gl_flags);

	write_lock(&bucket->hb_lock);
	list_add(&plug.gl_list, &bucket->hb_list);
	write_unlock(&bucket->hb_lock);

	for (;;) {
		write_lock(&bucket->hb_lock);

		for (;;) {
			tmp = plug.gl_list.next;

			if (tmp == &bucket->hb_list) {
				list_del(&plug.gl_list);
				entries = !list_empty(&bucket->hb_list);
				write_unlock(&bucket->hb_lock);
				return entries;
			}
			gl = list_entry(tmp, struct gfs2_glock, gl_list);

			/* Move plug up list */
			list_move(&plug.gl_list, &gl->gl_list);

			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;

			/* examiner() must glock_put() */
			gfs2_glock_hold(gl);

			break;
		}

		write_unlock(&bucket->hb_lock);

		examiner(gl);
	}
}

/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 */

static void scan_glock(struct gfs2_glock *gl)
{
	if (gfs2_glmutex_trylock(gl)) {
		if (gl->gl_ops == &gfs2_inode_glops) {
			struct gfs2_inode *ip = get_gl2ip(gl);
			if (ip && !atomic_read(&ip->i_count))
				goto out_schedule;
		}
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED &&
		    demote_ok(gl))
			goto out_schedule;

		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);

	return;

 out_schedule:
	gfs2_glmutex_unlock(gl);
	gfs2_glock_schedule_for_reclaim(gl);
	gfs2_glock_put(gl);
}

/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
		examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
		cond_resched();
	}
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 */

static void clear_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int released;

	spin_lock(&sdp->sd_reclaim_lock);
	if (!list_empty(&gl->gl_reclaim)) {
		list_del_init(&gl->gl_reclaim);
		atomic_dec(&sdp->sd_reclaim_count);
		released = gfs2_glock_put(gl);
		gfs2_assert(sdp, !released);
	}
	spin_unlock(&sdp->sd_reclaim_lock);

	if (gfs2_glmutex_trylock(gl)) {
		if (gl->gl_ops == &gfs2_inode_glops) {
			struct gfs2_inode *ip = get_gl2ip(gl);
			if (ip && !atomic_read(&ip->i_count))
				gfs2_inode_destroy(ip);
		}
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED)
			handle_callback(gl, LM_ST_UNLOCKED);

		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
	unsigned long t;
	unsigned int x;
	int cont;

	t = jiffies;

	for (;;) {
		cont = 0;

		for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
			if (examine_bucket(clear_glock, sdp,
					   &sdp->sd_gl_hash[x]))
				cont = 1;

		if (!wait || !cont)
			break;

		if (time_after_eq(jiffies,
				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
			fs_warn(sdp, "Unmount seems to be stalled. "
				     "Dumping lock state...\n");
			gfs2_dump_lockstate(sdp);
			t = jiffies;
		}

		/* invalidate_inodes() requires that the sb inodes list
		   not change, but an async completion callback for an
		   unlock can occur which does glock_put() which
		   can call iput() which will change the sb inodes list.
		   invalidate_inodes_mutex prevents glock_put()'s during
		   an invalidate_inodes() */

		mutex_lock(&sdp->sd_invalidate_inodes_mutex);
		invalidate_inodes(sdp->sd_vfs);
		mutex_unlock(&sdp->sd_invalidate_inodes_mutex);

		yield();
	}
}

/*
 * Diagnostic routines to help debug distributed deadlock
 */

/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(char *str, struct gfs2_holder *gh)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk("  %s\n", str);
	printk("    owner = %ld\n",
	       (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
	printk("    gh_state = %u\n", gh->gh_state);
	printk("    gh_flags =");
	for (x = 0; x < 32; x++)
		if (gh->gh_flags & (1 << x))
			printk(" %u", x);
	printk(" \n");
	printk("    error = %d\n", gh->gh_error);
	printk("    gh_iflags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gh->gh_iflags))
			printk(" %u", x);
	printk(" \n");

	error = 0;

	return error;
}

/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct gfs2_inode *ip)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk("  Inode:\n");
	printk("    num = %llu %llu\n",
	       ip->i_num.no_formal_ino, ip->i_num.no_addr);
	printk("    type = %u\n", IF2DT(ip->i_di.di_mode));
	printk("    i_count = %d\n", atomic_read(&ip->i_count));
	printk("    i_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &ip->i_flags))
			printk(" %u", x);
	printk(" \n");
	printk("    vnode = %s\n", (ip->i_vnode) ? "yes" : "no");

	error = 0;

	return error;
}

/**
 * dump_glock - print information about a glock
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	unsigned int x;
	int error = -ENOBUFS;

	spin_lock(&gl->gl_spin);

	printk("Glock (%u, %llu)\n",
	       gl->gl_name.ln_type,
	       gl->gl_name.ln_number);
	printk("  gl_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gl->gl_flags))
			printk(" %u", x);
	printk(" \n");
	printk("  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
	printk("  gl_state = %u\n", gl->gl_state);
	printk("  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
	printk("  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
	printk("  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
	printk("  object = %s\n", (gl->gl_object) ? "yes" : "no");
	printk("  le = %s\n",
	       (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
	printk("  reclaim = %s\n",
	       (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
	if (gl->gl_aspace)
		printk("  aspace = %lu\n",
		       gl->gl_aspace->i_mapping->nrpages);
	else
		printk("  aspace = no\n");
	printk("  ail = %d\n", atomic_read(&gl->gl_ail_count));
	if (gl->gl_req_gh) {
		error = dump_holder("Request", gl->gl_req_gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder("Holder", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
		error = dump_holder("Waiter1", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		error = dump_holder("Waiter2", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
		error = dump_holder("Waiter3", gh);
		if (error)
			goto out;
	}
	if (gl->gl_ops == &gfs2_inode_glops && get_gl2ip(gl)) {
		if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
		    list_empty(&gl->gl_holders)) {
			error = dump_inode(get_gl2ip(gl));
			if (error)
				goto out;
		} else {
			error = -ENOBUFS;
			printk("  Inode: busy\n");
		}
	}

	error = 0;

 out:
	spin_unlock(&gl->gl_spin);

	return error;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * The state is dumped to the console via printk().
 */

int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_gl_hash_bucket *bucket;
	struct gfs2_glock *gl;
	unsigned int x;
	int error = 0;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
		bucket = &sdp->sd_gl_hash[x];

		read_lock(&bucket->hb_lock);

		list_for_each_entry(gl, &bucket->hb_list, gl_list) {
			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;

			error = dump_glock(gl);
			if (error)
				break;
		}

		read_unlock(&bucket->hb_lock);

		if (error)
			break;
	}

	return error;
}