/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <linux/gfs2_ondisk.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

/*  Must be kept in sync with the beginning of struct gfs2_glock  */
struct glock_plug {
        struct list_head gl_list;
        unsigned long gl_flags;
};

struct greedy {
        struct gfs2_holder gr_gh;
        struct work_struct gr_work;
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}
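
/*
 * Usage sketch (hypothetical values, for illustration only): a holder
 * asking for a shared lock is satisfied by a glock already held
 * exclusively, unless the caller demanded an exact match:
 *
 *      relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0);           -> 1
 *      relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, GL_EXACT);    -> 0
 *      relaxed_state_ok(LM_ST_DEFERRED, LM_ST_SHARED, LM_FLAG_ANY);  -> 1
 *      relaxed_state_ok(LM_ST_UNLOCKED, LM_ST_SHARED, LM_FLAG_ANY);  -> 0
 */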

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(uint64_t), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}
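
/*
 * Illustration: chaining the second jhash() off the first means two locks
 * that share a block number but differ in type (for example an inode
 * glock and an iopen glock on the same disk address, a hypothetical
 * case) still land in different buckets:
 *
 *      h = jhash(&ln_number, sizeof(uint64_t), 0);
 *      h = jhash(&ln_type, sizeof(unsigned int), h);
 *      h &= GFS2_GL_HASH_MASK;              (final bucket index)
 */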

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        kref_get(&gl->gl_ref);
}

/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
        struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
        struct gfs2_sbd *sdp = gl->gl_sbd;

        gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
        gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
        gfs2_assert(sdp, list_empty(&gl->gl_holders));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
        int rv = 0;

        mutex_lock(&sdp->sd_invalidate_inodes_mutex);

        write_lock(&bucket->hb_lock);
        if (kref_put(&gl->gl_ref, kill_glock)) {
                list_del_init(&gl->gl_list);
                write_unlock(&bucket->hb_lock);
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(&bucket->hb_lock);
 out:
        mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
        return rv;
}

/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
        int empty;
        spin_lock(&gl->gl_spin);
        empty = list_empty(head);
        spin_unlock(&gl->gl_spin);
        return empty;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
                                        struct lm_lockname *name)
{
        struct gfs2_glock *gl;

        list_for_each_entry(gl, &bucket->hb_list, gl_list) {
                if (test_bit(GLF_PLUG, &gl->gl_flags))
                        continue;
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;

                kref_get(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
                                   struct lm_lockname *name)
{
        struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
        struct gfs2_glock *gl;

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, name);
        read_unlock(&bucket->hb_lock);

        return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
                   struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name;
        struct gfs2_glock *gl, *tmp;
        struct gfs2_gl_hash_bucket *bucket;
        int error;

        name.ln_number = number;
        name.ln_type = glops->go_type;
        bucket = &sdp->sd_gl_hash[gl_hash(&name)];

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, &name);
        read_unlock(&bucket->hb_lock);

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        memset(gl, 0, sizeof(struct gfs2_glock));

        INIT_LIST_HEAD(&gl->gl_list);
        gl->gl_name = name;
        kref_init(&gl->gl_ref);

        spin_lock_init(&gl->gl_spin);

        gl->gl_state = LM_ST_UNLOCKED;
        INIT_LIST_HEAD(&gl->gl_holders);
        INIT_LIST_HEAD(&gl->gl_waiters1);
        INIT_LIST_HEAD(&gl->gl_waiters2);
        INIT_LIST_HEAD(&gl->gl_waiters3);

        gl->gl_ops = glops;

        gl->gl_bucket = bucket;
        INIT_LIST_HEAD(&gl->gl_reclaim);

        gl->gl_sbd = sdp;

        lops_init_le(&gl->gl_le, &gfs2_glock_lops);
        INIT_LIST_HEAD(&gl->gl_ail_list);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops ||
            glops == &gfs2_rgrp_glops ||
            glops == &gfs2_meta_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(&bucket->hb_lock);
        tmp = search_bucket(bucket, &name);
        if (tmp) {
                write_unlock(&bucket->hb_lock);
                glock_free(gl);
                gl = tmp;
        } else {
                list_add_tail(&gl->gl_list, &bucket->hb_list);
                write_unlock(&bucket->hb_lock);
        }

        *glp = gl;

        return 0;

 fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);

 fail:
        kmem_cache_free(gfs2_glock_cachep, gl);

        return error;
}
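
/*
 * Usage sketch (hypothetical caller): look up or create the glock for an
 * on-disk inode block, then drop the reference when done with it:
 *
 *      struct gfs2_glock *gl;
 *      int error = gfs2_glock_get(sdp, ip->i_num.no_addr,
 *                                 &gfs2_inode_glops, CREATE, &gl);
 *      if (!error) {
 *              ... use gl ...
 *              gfs2_glock_put(gl);
 *      }
 */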

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, int flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_owner = (flags & GL_NEVER_RECURSE) ? NULL : current;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        init_completion(&gh->gh_wait);

        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, int flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gh->gh_iflags &= 1 << HIF_ALLOCED;
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
}

/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: __GFP_NOFAIL
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl, unsigned int state,
                                    int flags, gfp_t gfp_flags)
{
        struct gfs2_holder *gh;

        gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
        if (!gh)
                return NULL;

        gfs2_holder_init(gl, state, flags, gh);
        set_bit(HIF_ALLOCED, &gh->gh_iflags);

        return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 *
 */

void gfs2_holder_put(struct gfs2_holder *gh)
{
        gfs2_holder_uninit(gh);
        kfree(gh);
}

/**
 * handle_recurse - put other holder structures (marked recursive)
 *                  into the holders list
 * @gh: the holder structure
 *
 */

static void handle_recurse(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_holder *tmp_gh, *safe;
        int found = 0;

        if (gfs2_assert_warn(sdp, gh->gh_owner))
                return;

        list_for_each_entry_safe(tmp_gh, safe, &gl->gl_waiters3, gh_list) {
                if (tmp_gh->gh_owner != gh->gh_owner)
                        continue;

                gfs2_assert_warn(sdp,
                                 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));

                list_move_tail(&tmp_gh->gh_list, &gl->gl_holders);
                tmp_gh->gh_error = 0;
                set_bit(HIF_HOLDER, &tmp_gh->gh_iflags);

                complete(&tmp_gh->gh_wait);

                found = 1;
        }

        gfs2_assert_warn(sdp, found);
}

/**
 * do_unrecurse - a recursive holder was just dropped off the waiters3 list
 * @gh: the holder
 *
 * If there is only one other recursive holder, clear its HIF_RECURSE bit.
 * If there is more than one, leave them alone.
 *
 */

static void do_unrecurse(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_holder *tmp_gh, *last_gh = NULL;
        int found = 0;

        if (gfs2_assert_warn(sdp, gh->gh_owner))
                return;

        list_for_each_entry(tmp_gh, &gl->gl_waiters3, gh_list) {
                if (tmp_gh->gh_owner != gh->gh_owner)
                        continue;

                gfs2_assert_warn(sdp,
                                 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));

                if (found)
                        return;

                found = 1;
                last_gh = tmp_gh;
        }

        if (!gfs2_assert_warn(sdp, found))
                clear_bit(HIF_RECURSE, &last_gh->gh_iflags);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        set_bit(GLF_LOCK, &gl->gl_flags);
        complete(&gh->gh_wait);

        return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int recurse;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);

                        if (atomic_read(&sdp->sd_reclaim_count) >
                            gfs2_tune_get(sdp, gt_reclaim_limit) &&
                            !(gh->gh_flags & LM_FLAG_PRIORITY)) {
                                gfs2_reclaim_glock(sdp);
                                gfs2_reclaim_glock(sdp);
                        }

                        glops->go_xmote_th(gl, gh->gh_state,
                                           gh->gh_flags);

                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
                recurse = 0;
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
                recurse = test_bit(HIF_RECURSE, &gh->gh_iflags);
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        if (recurse)
                handle_recurse(gh);

        complete(&gh->gh_wait);

        return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
                spin_lock(&gl->gl_spin);
        } else {
                gl->gl_req_gh = gh;
                set_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);

                if (gh->gh_state == LM_ST_UNLOCKED ||
                    gl->gl_state != LM_ST_EXCLUSIVE)
                        glops->go_drop_th(gl);
                else
                        glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

                spin_lock(&gl->gl_spin);
        }

        return 0;
}

/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        clear_bit(GLF_GREEDY, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_holder_uninit(gh);
        kfree(container_of(gh, struct greedy, gr_gh));

        spin_lock(&gl->gl_spin);

        return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */

static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_MUTEX, &gh->gh_iflags))
                                blocked = rq_mutex(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters2) &&
                           !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
                        gh = list_entry(gl->gl_waiters2.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
                                blocked = rq_demote(gh);
                        else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
                                blocked = rq_greedy(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
                                blocked = rq_promote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else
                        break;

                if (blocked)
                        break;
        }
}

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        set_bit(HIF_MUTEX, &gh.gh_iflags);

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        else
                complete(&gh.gh_wait);
        spin_unlock(&gl->gl_spin);

        wait_for_completion(&gh.gh_wait);
        gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                acquired = 0;
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
}
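
/*
 * Usage sketch (hypothetical caller): the glmutex serializes local work
 * on one glock; it is a sleeping lock built from GLF_LOCK plus the
 * waiters1 queue, not a kernel mutex:
 *
 *      gfs2_glmutex_lock(gl);
 *      ... manipulate gl->gl_state, queues, cached data ...
 *      gfs2_glmutex_unlock(gl);
 *
 * or, where blocking is not acceptable:
 *
 *      if (gfs2_glmutex_trylock(gl)) {
 *              ...
 *              gfs2_glmutex_unlock(gl);
 *      }
 */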

/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
        struct gfs2_holder *gh, *new_gh = NULL;

 restart:
        spin_lock(&gl->gl_spin);

        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
                    gl->gl_req_gh != gh) {
                        if (gh->gh_state != state)
                                gh->gh_state = LM_ST_UNLOCKED;
                        goto out;
                }
        }

        if (new_gh) {
                list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
                new_gh = NULL;
        } else {
                spin_unlock(&gl->gl_spin);

                new_gh = gfs2_holder_get(gl, state,
                                         LM_FLAG_TRY | GL_NEVER_RECURSE,
                                         GFP_KERNEL | __GFP_NOFAIL);
                set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
                set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

                goto restart;
        }

 out:
        spin_unlock(&gl->gl_spin);

        if (new_gh)
                gfs2_holder_put(new_gh);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA | DIO_DATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_DATA);
        }

        /*  Deal with each possible exit condition  */

        if (!gh)
                gl->gl_stamp = jiffies;

        else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                        do_unrecurse(gh);
                spin_unlock(&gl->gl_spin);

        } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                if (gl->gl_state == gh->gh_state ||
                    gl->gl_state == LM_ST_UNLOCKED)
                        gh->gh_error = 0;
                else {
                        if (gfs2_assert_warn(sdp, gh->gh_flags &
                                        (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
                                fs_warn(sdp, "ret = 0x%.8X\n", ret);
                        gh->gh_error = GLR_TRYFAILED;
                }
                spin_unlock(&gl->gl_spin);

                if (ret & LM_OUT_CANCELED)
                        handle_callback(gl, LM_ST_UNLOCKED); /* Lame */

        } else if (ret & LM_OUT_CANCELED) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_CANCELED;
                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                        do_unrecurse(gh);
                spin_unlock(&gl->gl_spin);

        } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                spin_lock(&gl->gl_spin);
                list_move_tail(&gh->gh_list, &gl->gl_holders);
                gh->gh_error = 0;
                set_bit(HIF_HOLDER, &gh->gh_iflags);
                spin_unlock(&gl->gl_spin);

                set_bit(HIF_FIRST, &gh->gh_iflags);

                op_done = 0;

        } else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_TRYFAILED;
                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                        do_unrecurse(gh);
                spin_unlock(&gl->gl_spin);

        } else {
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 *
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
                               lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA | DIO_DATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_drop_bh)
                glops->go_drop_bh(gl);

        spin_lock(&gl->gl_spin);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh &&
                    !(gl->gl_req_gh &&
                      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                                do_unrecurse(gh);
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_for_completion(&gh->gh_wait);

        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
                                                   gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                if (test_and_clear_bit(HIF_RECURSE,
                                                       &gh->gh_iflags))
                                        do_unrecurse(gh);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                        handle_recurse(gh);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner == owner)
                        return gh;
        }

        return NULL;
}

/**
 * recurse_check - make sure a new holder is compatible with a pre-existing one
 * @existing: the holder already on the queue
 * @new: the holder being added
 * @state: the lock state to check the new holder against
 *
 * Returns: 0 if the holders are compatible, -EINVAL otherwise
 */

static int recurse_check(struct gfs2_holder *existing, struct gfs2_holder *new,
                         unsigned int state)
{
        struct gfs2_sbd *sdp = existing->gh_gl->gl_sbd;

        if (gfs2_assert_warn(sdp, (new->gh_flags & LM_FLAG_ANY) ||
                                  !(existing->gh_flags & LM_FLAG_ANY)))
                goto fail;

        if (gfs2_assert_warn(sdp, (existing->gh_flags & GL_LOCAL_EXCL) ||
                                  !(new->gh_flags & GL_LOCAL_EXCL)))
                goto fail;

        if (gfs2_assert_warn(sdp, relaxed_state_ok(state, new->gh_state,
                                                   new->gh_flags)))
                goto fail;

        return 0;

 fail:
        set_bit(HIF_ABORTED, &new->gh_iflags);
        return -EINVAL;
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        if (!gh->gh_owner)
                goto out;

        existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
        if (existing) {
                if (recurse_check(existing, gh, gl->gl_state))
                        return;

                list_add_tail(&gh->gh_list, &gl->gl_holders);
                set_bit(HIF_HOLDER, &gh->gh_iflags);

                gh->gh_error = 0;
                complete(&gh->gh_wait);

                return;
        }

        existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
        if (existing) {
                if (recurse_check(existing, gh, existing->gh_state))
                        return;

                set_bit(HIF_RECURSE, &gh->gh_iflags);
                set_bit(HIF_RECURSE, &existing->gh_iflags);

                list_add_tail(&gh->gh_list, &gl->gl_waiters3);

                return;
        }

 out:
        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

 restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        set_bit(HIF_PROMOTE, &gh->gh_iflags);

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(1000);
                        goto restart;
                }
        }

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(1000);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(1000);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}
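
/*
 * Usage sketch (hypothetical caller): GL_ASYNC splits acquisition into
 * enqueue, optional polling, and a final wait:
 *
 *      gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *      gfs2_glock_nq(&gh);
 *      while (!gfs2_glock_poll(&gh))
 *              ... do other work ...
 *      error = gfs2_glock_wait(&gh);
 */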

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gh->gh_flags & GL_SYNC)
                set_bit(GLF_SYNC, &gl->gl_flags);

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                spin_unlock(&gl->gl_spin);

                if (glops->go_unlock)
                        glops->go_unlock(gh);

                if (test_bit(GLF_SYNC, &gl->gl_flags)) {
                        if (glops->go_sync)
                                glops->go_sync(gl, DIO_METADATA | DIO_DATA);
                }

                gl->gl_stamp = jiffies;

                spin_lock(&gl->gl_spin);
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 *
 */

void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state, int flags)
{
        struct gfs2_glock_operations *glops = gl->gl_ops;

        spin_lock(&gl->gl_spin);

        if (test_bit(GLF_LOCK, &gl->gl_flags) ||
            !list_empty(&gl->gl_holders) ||
            !list_empty(&gl->gl_waiters1) ||
            !list_empty(&gl->gl_waiters2) ||
            !list_empty(&gl->gl_waiters3) ||
            relaxed_state_ok(gl->gl_state, state, flags)) {
                spin_unlock(&gl->gl_spin);
                return;
        }

        set_bit(GLF_PREFETCH, &gl->gl_flags);
        set_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        glops->go_xmote_th(gl, state, flags);
}

/**
 * gfs2_glock_force_drop - Force a glock to be uncached
 * @gl: the glock
 *
 */

void gfs2_glock_force_drop(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, LM_ST_UNLOCKED, GL_NEVER_RECURSE, &gh);
        set_bit(HIF_DEMOTE, &gh.gh_iflags);

        spin_lock(&gl->gl_spin);
        list_add_tail(&gh.gh_list, &gl->gl_waiters2);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        wait_for_completion(&gh.gh_wait);
        gfs2_holder_uninit(&gh);
}

static void greedy_work(void *data)
{
        struct greedy *gr = (struct greedy *)data;
        struct gfs2_holder *gh = &gr->gr_gh;
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

        if (glops->go_greedy)
                glops->go_greedy(gl);

        spin_lock(&gl->gl_spin);

        if (list_empty(&gl->gl_waiters2)) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);
                gfs2_holder_uninit(gh);
                kfree(gr);
        } else {
                gfs2_glock_hold(gl);
                list_add_tail(&gh->gh_list, &gl->gl_waiters2);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
                gfs2_glock_put(gl);
        }
}

/**
 * gfs2_glock_be_greedy -
 * @gl: the glock
 * @time: the delay, in jiffies, before the greedy status is dropped
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
        struct greedy *gr;
        struct gfs2_holder *gh;

        if (!time ||
            gl->gl_sbd->sd_args.ar_localcaching ||
            test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
                return 1;

        gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
        if (!gr) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                return 1;
        }
        gh = &gr->gr_gh;

        gfs2_holder_init(gl, 0, GL_NEVER_RECURSE, gh);
        set_bit(HIF_GREEDY, &gh->gh_iflags);
        INIT_WORK(&gr->gr_work, greedy_work, gr);

        set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
        schedule_delayed_work(&gr->gr_work, time);

        return 0;
}

/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */

int gfs2_glock_nq_init(struct gfs2_glock *gl, unsigned int state, int flags,
                       struct gfs2_holder *gh)
{
        int error;

        gfs2_holder_init(gl, state, flags, gh);

        error = gfs2_glock_nq(gh);
        if (error)
                gfs2_holder_uninit(gh);

        return error;
}
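
/*
 * Usage sketch (hypothetical caller): the common pattern pairs
 * gfs2_glock_nq_init() with gfs2_glock_dq_uninit() below, so holder
 * setup and teardown stay balanced:
 *
 *      struct gfs2_holder gh;
 *      int error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *      if (error)
 *              return error;
 *      ... the lock is held here ...
 *      gfs2_glock_dq_uninit(&gh);
 */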

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, uint64_t number,
                      struct gfs2_glock_operations *glops, unsigned int state,
                      int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
        struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
        struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        struct lm_lockname *b = &gh_b->gh_gl->gl_name;
        int ret = 0;

        if (a->ln_number > b->ln_number)
                ret = 1;
        else if (a->ln_number < b->ln_number)
                ret = -1;
        else {
                if (gh_a->gh_state == LM_ST_SHARED &&
                    gh_b->gh_state == LM_ST_EXCLUSIVE)
                        ret = 1;
                else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
                         (gh_b->gh_flags & GL_LOCAL_EXCL))
                        ret = 1;
        }

        return ret;
}
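
/*
 * Illustration: holders sort by ascending lock number, and for two
 * holders on the same lock number the less restrictive request sorts
 * after the more restrictive one, so an EXCLUSIVE holder is acquired
 * before a SHARED holder of the same lock.  Every caller acquiring the
 * same set of locks therefore uses the same global order, which is what
 * keeps nq_m_sync() below deadlock free.
 */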
1640
1641 /**
1642  * nq_m_sync - synchonously acquire more than one glock in deadlock free order
1643  * @num_gh: the number of structures
1644  * @ghs: an array of struct gfs2_holder structures
1645  *
1646  * Returns: 0 on success (all glocks acquired),
1647  *          errno on failure (no glocks acquired)
1648  */
1649
1650 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1651                      struct gfs2_holder **p)
1652 {
1653         unsigned int x;
1654         int error = 0;
1655
1656         for (x = 0; x < num_gh; x++)
1657                 p[x] = &ghs[x];
1658
1659         sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1660
1661         for (x = 0; x < num_gh; x++) {
1662                 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1663
1664                 error = gfs2_glock_nq(p[x]);
1665                 if (error) {
1666                         while (x--)
1667                                 gfs2_glock_dq(p[x]);
1668                         break;
1669                 }
1670         }
1671
1672         return error;
1673 }
1674
1675 /**
1676  * gfs2_glock_nq_m - acquire multiple glocks
1677  * @num_gh: the number of structures
1678  * @ghs: an array of struct gfs2_holder structures
1679  *
1680  * Figure out how big an impact this function has.  Either:
1681  * 1) Replace this code with code that calls gfs2_glock_prefetch()
1682  * 2) Forget async stuff and just call nq_m_sync()
1683  * 3) Leave it like it is
1684  *
1685  * Returns: 0 on success (all glocks acquired),
1686  *          errno on failure (no glocks acquired)
1687  */
1688
1689 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1690 {
1691         int *e;
1692         unsigned int x;
1693         int borked = 0, serious = 0;
1694         int error = 0;
1695
1696         if (!num_gh)
1697                 return 0;
1698
1699         if (num_gh == 1) {
1700                 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1701                 return gfs2_glock_nq(ghs);
1702         }
1703
1704         e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
1705         if (!e)
1706                 return -ENOMEM;
1707
1708         for (x = 0; x < num_gh; x++) {
1709                 ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
1710                 error = gfs2_glock_nq(&ghs[x]);
1711                 if (error) {
1712                         borked = 1;
1713                         serious = error;
1714                         num_gh = x;
1715                         break;
1716                 }
1717         }
1718
1719         for (x = 0; x < num_gh; x++) {
1720                 error = e[x] = glock_wait_internal(&ghs[x]);
1721                 if (error) {
1722                         borked = 1;
1723                         if (error != GLR_TRYFAILED && error != GLR_CANCELED)
1724                                 serious = error;
1725                 }
1726         }
1727
1728         if (!borked) {
1729                 kfree(e);
1730                 return 0;
1731         }
1732
1733         for (x = 0; x < num_gh; x++)
1734                 if (!e[x])
1735                         gfs2_glock_dq(&ghs[x]);
1736
1737         if (serious)
1738                 error = serious;
1739         else {
1740                 for (x = 0; x < num_gh; x++)
1741                         gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
1742                                           &ghs[x]);
1743                 error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
1744         }
1745
1746         kfree(e);
1747
1748         return error;
1749 }
1750
1751 /**
1752  * gfs2_glock_dq_m - release multiple glocks
1753  * @num_gh: the number of structures
1754  * @ghs: an array of struct gfs2_holder structures
1755  *
1756  */
1757
1758 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1759 {
1760         unsigned int x;
1761
1762         for (x = 0; x < num_gh; x++)
1763                 gfs2_glock_dq(&ghs[x]);
1764 }
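
/*
 * Editorial sketch (not code from this file) of how a caller might use
 * the multiple-acquire interface above.  "ip1" and "ip2" are hypothetical
 * struct gfs2_inode pointers and error handling is trimmed to the bone.
 * gfs2_glock_nq_m() either acquires every holder or none of them, so the
 * matching release is a single gfs2_glock_dq_m() on the same array:
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(ip1->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(ip2->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error) {
 *		(operate on both inodes)
 *		gfs2_glock_dq_m(2, ghs);
 *	}
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */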
1765
1766 /**
1767  * gfs2_glock_dq_uninit_m - release and uninitialize multiple glocks
1768  * @num_gh: the number of structures
1769  * @ghs: an array of struct gfs2_holder structures
1770  *
1771  */
1772
1773 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1774 {
1775         unsigned int x;
1776
1777         for (x = 0; x < num_gh; x++)
1778                 gfs2_glock_dq_uninit(&ghs[x]);
1779 }
1780
1781 /**
1782  * gfs2_glock_prefetch_num - prefetch a glock based on lock number
1783  * @sdp: the filesystem
1784  * @number: the lock number
1785  * @glops: the glock operations for the type of glock
1786  * @state: the state to acquire the glock in
1787  * @flags: modifier flags for the acquisition
1788  *
1789  * A prefetch is only a hint, so failure to get the glock is ignored.
1790  */
1791
1792 void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, uint64_t number,
1793                              struct gfs2_glock_operations *glops,
1794                              unsigned int state, int flags)
1795 {
1796         struct gfs2_glock *gl;
1797         int error;
1798
1799         if (atomic_read(&sdp->sd_reclaim_count) <
1800             gfs2_tune_get(sdp, gt_reclaim_limit)) {
1801                 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1802                 if (!error) {
1803                         gfs2_glock_prefetch(gl, state, flags);
1804                         gfs2_glock_put(gl);
1805                 }
1806         }
1807 }
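
/*
 * Editorial example (mirroring how a directory scan might warm the glock
 * cache; "inum" is a hypothetical struct gfs2_inum pointer):
 *
 *	gfs2_glock_prefetch_num(sdp, inum->no_addr, &gfs2_inode_glops,
 *				LM_ST_SHARED, LM_FLAG_TRY | LM_FLAG_ANY);
 */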
1808
1809 /**
1810  * gfs2_lvb_hold - attach a LVB to a glock
1811  * @gl: The glock in question
1812  * Returns: 0 on success, errno on failure
1813  */
1814
1815 int gfs2_lvb_hold(struct gfs2_glock *gl)
1816 {
1817         int error;
1818
1819         gfs2_glmutex_lock(gl);
1820
1821         if (!atomic_read(&gl->gl_lvb_count)) {
1822                 error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1823                 if (error) {
1824                         gfs2_glmutex_unlock(gl);
1825                         return error;
1826                 }
1827                 gfs2_glock_hold(gl);
1828         }
1829         atomic_inc(&gl->gl_lvb_count);
1830
1831         gfs2_glmutex_unlock(gl);
1832
1833         return 0;
1834 }
1835
1836 /**
1837  * gfs2_lvb_unhold - detach a LVB from a glock
1838  * @gl: The glock in question
1839  *
1840  */
1841
1842 void gfs2_lvb_unhold(struct gfs2_glock *gl)
1843 {
1844         gfs2_glock_hold(gl);
1845         gfs2_glmutex_lock(gl);
1846
1847         gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1848         if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1849                 gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1850                 gl->gl_lvb = NULL;
1851                 gfs2_glock_put(gl);
1852         }
1853
1854         gfs2_glmutex_unlock(gl);
1855         gfs2_glock_put(gl);
1856 }
1857
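/**
 * gfs2_lvb_sync - make a glock's LVB contents visible to other nodes
 * @gl: The glock in question
 *
 * The caller must hold the glock in the exclusive state.
 */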
1858 void gfs2_lvb_sync(struct gfs2_glock *gl)
1859 {
1860         gfs2_glmutex_lock(gl);
1861
1862         gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count));
1863         if (!gfs2_assert_warn(gl->gl_sbd, gfs2_glock_is_held_excl(gl)))
1864                 gfs2_lm_sync_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1865
1866         gfs2_glmutex_unlock(gl);
1867 }
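
/*
 * Sketch of the typical LVB lifecycle, for illustration only ("my_data"
 * is a hypothetical payload that must fit in the LVB; error handling is
 * trimmed).  The glock must already be held in LM_ST_EXCLUSIVE before the
 * sync, as asserted above:
 *
 *	error = gfs2_lvb_hold(gl);
 *	if (error)
 *		return error;
 *	memcpy(gl->gl_lvb, &my_data, sizeof(my_data));
 *	gfs2_lvb_sync(gl);
 *	gfs2_lvb_unhold(gl);
 */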
1868
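/**
 * blocking_cb - handle a blocking callback from the lock module
 * @sdp: the filesystem
 * @name: the name of the glock a remote node is waiting on
 * @state: the state the glock should be demoted to
 */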
1869 static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1870                         unsigned int state)
1871 {
1872         struct gfs2_glock *gl;
1873
1874         gl = gfs2_glock_find(sdp, name);
1875         if (!gl)
1876                 return;
1877
1878         if (gl->gl_ops->go_callback)
1879                 gl->gl_ops->go_callback(gl, state);
1880         handle_callback(gl, state);
1881
1882         spin_lock(&gl->gl_spin);
1883         run_queue(gl);
1884         spin_unlock(&gl->gl_spin);
1885
1886         gfs2_glock_put(gl);
1887 }
1888
1889 /**
1890  * gfs2_glock_cb - Callback used by locking module
1891  * @fsdata: Pointer to the superblock
1892  * @type: Type of callback
1893  * @data: Type dependent data pointer
1894  *
1895  * Called by the locking module when it wants to tell us something.
1896  * Either we need to drop a lock, one of our ASYNC requests completed, or
1897  * a journal from another client needs to be recovered.
1898  */
1899
1900 void gfs2_glock_cb(lm_fsdata_t *fsdata, unsigned int type, void *data)
1901 {
1902         struct gfs2_sbd *sdp = (struct gfs2_sbd *)fsdata;
1903
1904         switch (type) {
1905         case LM_CB_NEED_E:
1906                 blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_UNLOCKED);
1907                 return;
1908
1909         case LM_CB_NEED_D:
1910                 blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_DEFERRED);
1911                 return;
1912
1913         case LM_CB_NEED_S:
1914                 blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_SHARED);
1915                 return;
1916
1917         case LM_CB_ASYNC: {
1918                 struct lm_async_cb *async = (struct lm_async_cb *)data;
1919                 struct gfs2_glock *gl;
1920
1921                 gl = gfs2_glock_find(sdp, &async->lc_name);
1922                 if (gfs2_assert_warn(sdp, gl))
1923                         return;
1924                 if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
1925                         gl->gl_req_bh(gl, async->lc_ret);
1926                 gfs2_glock_put(gl);
1927
1928                 return;
1929         }
1930
1931         case LM_CB_NEED_RECOVERY:
1932                 gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
1933                 if (sdp->sd_recoverd_process)
1934                         wake_up_process(sdp->sd_recoverd_process);
1935                 return;
1936
1937         case LM_CB_DROPLOCKS:
1938                 gfs2_gl_hash_clear(sdp, NO_WAIT);
1939                 gfs2_quota_scan(sdp);
1940                 return;
1941
1942         default:
1943                 gfs2_assert_warn(sdp, 0);
1944                 return;
1945         }
1946 }
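
/*
 * Illustration only: how a lock module might drive the entry point above.
 * "fsdata" is the pointer the module was handed at mount time and "inum"
 * is a hypothetical inode number; the LM_CB_* values are the real
 * constants handled above, and LM_TYPE_INODE is assumed from the
 * lock-module interface:
 *
 *	struct lm_lockname name = {
 *		.ln_number = inum,
 *		.ln_type = LM_TYPE_INODE,
 *	};
 *
 *	gfs2_glock_cb(fsdata, LM_CB_NEED_S, &name);
 *	gfs2_glock_cb(fsdata, LM_CB_DROPLOCKS, NULL);
 */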
1947
1948 /**
1949  * gfs2_try_toss_inode - try to remove a particular inode struct from cache
1950  * @sdp: the filesystem
1951  * @inum: the inode number
1952  *
1953  */
1954
1955 void gfs2_try_toss_inode(struct gfs2_sbd *sdp, struct gfs2_inum *inum)
1956 {
1957         struct gfs2_glock *gl;
1958         struct gfs2_inode *ip;
1959         int error;
1960
1961         error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops,
1962                                NO_CREATE, &gl);
1963         if (error || !gl)
1964                 return;
1965
1966         if (!gfs2_glmutex_trylock(gl))
1967                 goto out;
1968
1969         ip = gl->gl_object;
1970         if (!ip)
1971                 goto out_unlock;
1972
1973         if (atomic_read(&ip->i_count))
1974                 goto out_unlock;
1975
1976         gfs2_inode_destroy(ip);
1977
1978  out_unlock:
1979         gfs2_glmutex_unlock(gl);
1980
1981  out:
1982         gfs2_glock_put(gl);
1983 }
1984
1985 /**
1986  * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
1987  *                          iopen glock from memory
1988  * @io_gl: the iopen glock
1989  * @state: the state into which the glock should be put
1990  *
1991  */
1992
1993 void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
1994 {
1995         struct gfs2_glock *i_gl;
1996
1997         if (state != LM_ST_UNLOCKED)
1998                 return;
1999
2000         spin_lock(&io_gl->gl_spin);
2001         i_gl = io_gl->gl_object;
2002         if (i_gl) {
2003                 gfs2_glock_hold(i_gl);
2004                 spin_unlock(&io_gl->gl_spin);
2005         } else {
2006                 spin_unlock(&io_gl->gl_spin);
2007                 return;
2008         }
2009
2010         if (gfs2_glmutex_trylock(i_gl)) {
2011                 struct gfs2_inode *ip = i_gl->gl_object;
2012                 if (ip) {
2013                         gfs2_try_toss_vnode(ip);
2014                         gfs2_glmutex_unlock(i_gl);
2015                         gfs2_glock_schedule_for_reclaim(i_gl);
2016                         goto out;
2017                 }
2018                 gfs2_glmutex_unlock(i_gl);
2019         }
2020
2021  out:
2022         gfs2_glock_put(i_gl);
2023 }
2024
2025 /**
2026  * demote_ok - Check to see if it's ok to unlock a glock
2027  * @gl: the glock
2028  *
2029  * Returns: 1 if it's ok
2030  */
2031
2032 static int demote_ok(struct gfs2_glock *gl)
2033 {
2034         struct gfs2_sbd *sdp = gl->gl_sbd;
2035         struct gfs2_glock_operations *glops = gl->gl_ops;
2036         int demote = 1;
2037
2038         if (test_bit(GLF_STICKY, &gl->gl_flags))
2039                 demote = 0;
2040         else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
2041                 demote = time_after_eq(jiffies,
2042                                     gl->gl_stamp +
2043                                     gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
2044         else if (glops->go_demote_ok)
2045                 demote = glops->go_demote_ok(gl);
2046
2047         return demote;
2048 }
2049
2050 /**
2051  * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
2052  * @gl: the glock
2053  *
2054  */
2055
2056 void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
2057 {
2058         struct gfs2_sbd *sdp = gl->gl_sbd;
2059
2060         spin_lock(&sdp->sd_reclaim_lock);
2061         if (list_empty(&gl->gl_reclaim)) {
2062                 gfs2_glock_hold(gl);
2063                 list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
2064                 atomic_inc(&sdp->sd_reclaim_count);
2065         }
2066         spin_unlock(&sdp->sd_reclaim_lock);
2067
2068         wake_up(&sdp->sd_reclaim_wq);
2069 }
2070
2071 /**
2072  * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
2073  * @sdp: the filesystem
2074  *
2075  * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
2076  * different glock and we notice that there are a lot of glocks in the
2077  * reclaim list.
2078  *
2079  */
2080
2081 void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
2082 {
2083         struct gfs2_glock *gl;
2084
2085         spin_lock(&sdp->sd_reclaim_lock);
2086         if (list_empty(&sdp->sd_reclaim_list)) {
2087                 spin_unlock(&sdp->sd_reclaim_lock);
2088                 return;
2089         }
2090         gl = list_entry(sdp->sd_reclaim_list.next,
2091                         struct gfs2_glock, gl_reclaim);
2092         list_del_init(&gl->gl_reclaim);
2093         spin_unlock(&sdp->sd_reclaim_lock);
2094
2095         atomic_dec(&sdp->sd_reclaim_count);
2096         atomic_inc(&sdp->sd_reclaimed);
2097
2098         if (gfs2_glmutex_trylock(gl)) {
2099                 if (gl->gl_ops == &gfs2_inode_glops) {
2100                         struct gfs2_inode *ip = gl->gl_object;
2101                         if (ip && !atomic_read(&ip->i_count))
2102                                 gfs2_inode_destroy(ip);
2103                 }
2104                 if (queue_empty(gl, &gl->gl_holders) &&
2105                     gl->gl_state != LM_ST_UNLOCKED &&
2106                     demote_ok(gl))
2107                         handle_callback(gl, LM_ST_UNLOCKED);
2108                 gfs2_glmutex_unlock(gl);
2109         }
2110
2111         gfs2_glock_put(gl);
2112 }
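
/*
 * Sketch of the consumer side (editorial; the real daemon lives elsewhere
 * in the tree).  A reclaim daemon sleeps on sd_reclaim_wq and drains the
 * list one glock at a time, roughly:
 *
 *	while (!kthread_should_stop()) {
 *		while (atomic_read(&sdp->sd_reclaim_count))
 *			gfs2_reclaim_glock(sdp);
 *		wait_event_interruptible(sdp->sd_reclaim_wq,
 *				atomic_read(&sdp->sd_reclaim_count) ||
 *				kthread_should_stop());
 *	}
 */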
2113
2114 /**
2115  * examine_bucket - Call a function for each glock in a hash bucket
2116  * @examiner: the function
2117  * @sdp: the filesystem
2118  * @bucket: the bucket
2119  *
2120  * Returns: 1 if the bucket has entries
2121  */
2122
2123 static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
2124                           struct gfs2_gl_hash_bucket *bucket)
2125 {
2126         struct glock_plug plug;
2127         struct list_head *tmp;
2128         struct gfs2_glock *gl;
2129         int entries;
2130
2131         /* Add "plug" at the head of the bucket list and walk it towards the tail */
2132         memset(&plug.gl_flags, 0, sizeof(unsigned long));
2133         set_bit(GLF_PLUG, &plug.gl_flags);
2134
2135         write_lock(&bucket->hb_lock);
2136         list_add(&plug.gl_list, &bucket->hb_list);
2137         write_unlock(&bucket->hb_lock);
2138
2139         for (;;) {
2140                 write_lock(&bucket->hb_lock);
2141
2142                 for (;;) {
2143                         tmp = plug.gl_list.next;
2144
2145                         if (tmp == &bucket->hb_list) {
2146                                 list_del(&plug.gl_list);
2147                                 entries = !list_empty(&bucket->hb_list);
2148                                 write_unlock(&bucket->hb_lock);
2149                                 return entries;
2150                         }
2151                         gl = list_entry(tmp, struct gfs2_glock, gl_list);
2152
2153                         /* Move plug up list */
2154                         list_move(&plug.gl_list, &gl->gl_list);
2155
2156                         if (test_bit(GLF_PLUG, &gl->gl_flags))
2157                                 continue;
2158
2159                         /* examiner() must glock_put() */
2160                         gfs2_glock_hold(gl);
2161
2162                         break;
2163                 }
2164
2165                 write_unlock(&bucket->hb_lock);
2166
2167                 examiner(gl);
2168         }
2169 }
2170
2171 /**
2172  * scan_glock - look at a glock and see if we can reclaim it
2173  * @gl: the glock to look at
2174  *
2175  */
2176
2177 static void scan_glock(struct gfs2_glock *gl)
2178 {
2179         if (gfs2_glmutex_trylock(gl)) {
2180                 if (gl->gl_ops == &gfs2_inode_glops) {
2181                         struct gfs2_inode *ip = gl->gl_object;
2182                         if (ip && !atomic_read(&ip->i_count))
2183                                 goto out_schedule;
2184                 }
2185                 if (queue_empty(gl, &gl->gl_holders) &&
2186                     gl->gl_state != LM_ST_UNLOCKED &&
2187                     demote_ok(gl))
2188                         goto out_schedule;
2189
2190                 gfs2_glmutex_unlock(gl);
2191         }
2192
2193         gfs2_glock_put(gl);
2194
2195         return;
2196
2197  out_schedule:
2198         gfs2_glmutex_unlock(gl);
2199         gfs2_glock_schedule_for_reclaim(gl);
2200         gfs2_glock_put(gl);
2201 }
2202
2203 /**
2204  * gfs2_scand_internal - Look for glocks and inodes to toss from memory
2205  * @sdp: the filesystem
2206  *
2207  */
2208
2209 void gfs2_scand_internal(struct gfs2_sbd *sdp)
2210 {
2211         unsigned int x;
2212
2213         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
2214                 examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
2215                 cond_resched();
2216         }
2217 }
2218
2219 /**
2220  * clear_glock - look at a glock and see if we can free it from glock cache
2221  * @gl: the glock to look at
2222  *
2223  */
2224
2225 static void clear_glock(struct gfs2_glock *gl)
2226 {
2227         struct gfs2_sbd *sdp = gl->gl_sbd;
2228         int released;
2229
2230         spin_lock(&sdp->sd_reclaim_lock);
2231         if (!list_empty(&gl->gl_reclaim)) {
2232                 list_del_init(&gl->gl_reclaim);
2233                 atomic_dec(&sdp->sd_reclaim_count);
2234                 released = gfs2_glock_put(gl);
2235                 gfs2_assert(sdp, !released);
2236         }
2237         spin_unlock(&sdp->sd_reclaim_lock);
2238
2239         if (gfs2_glmutex_trylock(gl)) {
2240                 if (gl->gl_ops == &gfs2_inode_glops) {
2241                         struct gfs2_inode *ip = gl->gl_object;
2242                         if (ip && !atomic_read(&ip->i_count))
2243                                 gfs2_inode_destroy(ip);
2244                 }
2245                 if (queue_empty(gl, &gl->gl_holders) &&
2246                     gl->gl_state != LM_ST_UNLOCKED)
2247                         handle_callback(gl, LM_ST_UNLOCKED);
2248
2249                 gfs2_glmutex_unlock(gl);
2250         }
2251
2252         gfs2_glock_put(gl);
2253 }
2254
2255 /**
2256  * gfs2_gl_hash_clear - Empty out the glock hash table
2257  * @sdp: the filesystem
2258  * @wait: wait until it's all gone
2259  *
2260  * Called when unmounting the filesystem, or when inter-node lock manager
2261  * requests DROPLOCKS because it is running out of capacity.
2262  */
2263
2264 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
2265 {
2266         unsigned long t;
2267         unsigned int x;
2268         int cont;
2269
2270         t = jiffies;
2271
2272         for (;;) {
2273                 cont = 0;
2274
2275                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
2276                         if (examine_bucket(clear_glock, sdp,
2277                                            &sdp->sd_gl_hash[x]))
2278                                 cont = 1;
2279
2280                 if (!wait || !cont)
2281                         break;
2282
2283                 if (time_after_eq(jiffies,
2284                                   t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
2285                         fs_warn(sdp, "Unmount seems to be stalled. "
2286                                      "Dumping lock state...\n");
2287                         gfs2_dump_lockstate(sdp);
2288                         t = jiffies;
2289                 }
2290
2291                 /* invalidate_inodes() requires that the sb inodes list
2292                    not change, but an async completion callback for an
2293                    unlock can occur which does glock_put() which
2294                    can call iput() which will change the sb inodes list.
2295                    invalidate_inodes_mutex prevents glock_put()'s during
2296                    an invalidate_inodes() */
2297
2298                 mutex_lock(&sdp->sd_invalidate_inodes_mutex);
2299                 invalidate_inodes(sdp->sd_vfs);
2300                 mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
2301                 yield();
2302         }
2303 }
2304
2305 /*
2306  *  Diagnostic routines to help debug distributed deadlock
2307  */
2308
2309 /**
2310  * dump_holder - print information about a glock holder
2311  * @str: a string naming the type of holder
2312  * @gh: the glock holder
2313  *
2314  * Returns: 0 (console output cannot run out of space)
2315  */
2316
2317 static int dump_holder(char *str, struct gfs2_holder *gh)
2318 {
2319         unsigned int x;
2320         int error = -ENOBUFS;
2321
2322         printk(KERN_INFO "  %s\n", str);
2323         printk(KERN_INFO "    owner = %ld\n",
2324                    (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
2325         printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
2326         printk(KERN_INFO "    gh_flags =");
2327         for (x = 0; x < 32; x++)
2328                 if (gh->gh_flags & (1 << x))
2329                         printk(" %u", x);
2330         printk(" \n");
2331         printk(KERN_INFO "    error = %d\n", gh->gh_error);
2332         printk(KERN_INFO "    gh_iflags =");
2333         for (x = 0; x < 32; x++)
2334                 if (test_bit(x, &gh->gh_iflags))
2335                         printk(" %u", x);
2336         printk(" \n");
2337
2338         error = 0;
2339
2340         return error;
2341 }
2342
2343 /**
2344  * dump_inode - print information about an inode
2345  * @ip: the inode
2346  *
2347  * Returns: 0 (console output cannot run out of space)
2348  */
2349
2350 static int dump_inode(struct gfs2_inode *ip)
2351 {
2352         unsigned int x;
2353         int error = -ENOBUFS;
2354
2355         printk(KERN_INFO "  Inode:\n");
2356         printk(KERN_INFO "    num = %llu %llu\n",
2357                     (unsigned long long)ip->i_num.no_formal_ino, (unsigned long long)ip->i_num.no_addr);
2358         printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_di.di_mode));
2359         printk(KERN_INFO "    i_count = %d\n", atomic_read(&ip->i_count));
2360         printk(KERN_INFO "    i_flags =");
2361         for (x = 0; x < 32; x++)
2362                 if (test_bit(x, &ip->i_flags))
2363                         printk(" %u", x);
2364         printk(" \n");
2365         printk(KERN_INFO "    vnode = %s\n", (ip->i_vnode) ? "yes" : "no");
2366
2367         error = 0;
2368
2369         return error;
2370 }
2371
2372 /**
2373  * dump_glock - print information about a glock
2374  * @gl: the glock
2375  *
2376  * Dumps the glock, its holders and its waiters to the console.
2377  * Returns: 0 on success, errno on failure
2378  */
2379
2380 static int dump_glock(struct gfs2_glock *gl)
2381 {
2382         struct gfs2_holder *gh;
2383         unsigned int x;
2384         int error = -ENOBUFS;
2385
2386         spin_lock(&gl->gl_spin);
2387
2388         printk(KERN_INFO "Glock (%u, %llu)\n",
2389                     gl->gl_name.ln_type,
2390                     (unsigned long long)gl->gl_name.ln_number);
2391         printk(KERN_INFO "  gl_flags =");
2392         for (x = 0; x < 32; x++)
2393                 if (test_bit(x, &gl->gl_flags))
2394                         printk(" %u", x);
2395         printk(" \n");
2396         printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
2397         printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
2398         printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
2399         printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
2400         printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
2401         printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
2402         printk(KERN_INFO "  le = %s\n",
2403                    (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
2404         printk(KERN_INFO "  reclaim = %s\n",
2405                     (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
2406         if (gl->gl_aspace)
2407                 printk(KERN_INFO "  aspace = %lu\n",
2408                             gl->gl_aspace->i_mapping->nrpages);
2409         else
2410                 printk(KERN_INFO "  aspace = no\n");
2411         printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));
2412         if (gl->gl_req_gh) {
2413                 error = dump_holder("Request", gl->gl_req_gh);
2414                 if (error)
2415                         goto out;
2416         }
2417         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
2418                 error = dump_holder("Holder", gh);
2419                 if (error)
2420                         goto out;
2421         }
2422         list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
2423                 error = dump_holder("Waiter1", gh);
2424                 if (error)
2425                         goto out;
2426         }
2427         list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
2428                 error = dump_holder("Waiter2", gh);
2429                 if (error)
2430                         goto out;
2431         }
2432         list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
2433                 error = dump_holder("Waiter3", gh);
2434                 if (error)
2435                         goto out;
2436         }
2437         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
2438                 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
2439                     list_empty(&gl->gl_holders)) {
2440                         error = dump_inode(gl->gl_object);
2441                         if (error)
2442                                 goto out;
2443                 } else {
2444                         error = -ENOBUFS;
2445                         printk(KERN_INFO "  Inode: busy\n");
2446                 }
2447         }
2448
2449         error = 0;
2450
2451  out:
2452         spin_unlock(&gl->gl_spin);
2453
2454         return error;
2455 }
2456
2457 /**
2458  * gfs2_dump_lockstate - print out the current lockstate
2459  * @sdp: the filesystem
2460  *
2461  * Dumps the state of every glock to the console.
2462  *
2463  * Returns: 0 on success, errno on failure
2464  */
2465
2466 int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
2467 {
2468         struct gfs2_gl_hash_bucket *bucket;
2469         struct gfs2_glock *gl;
2470         unsigned int x;
2471         int error = 0;
2472
2473         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
2474                 bucket = &sdp->sd_gl_hash[x];
2475
2476                 read_lock(&bucket->hb_lock);
2477
2478                 list_for_each_entry(gl, &bucket->hb_list, gl_list) {
2479                         if (test_bit(GLF_PLUG, &gl->gl_flags))
2480                                 continue;
2481
2482                         error = dump_glock(gl);
2483                         if (error)
2484                                 break;
2485                 }
2486
2487                 read_unlock(&bucket->hb_lock);
2488
2489                 if (error)
2490                         break;
2491         }
2492
2494         return error;
2495 }
2496