[pandora-kernel.git] / fs / gfs2 / glock.c
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/buffer_head.h>
14 #include <linux/delay.h>
15 #include <linux/sort.h>
16 #include <linux/jhash.h>
17 #include <linux/kallsyms.h>
18 #include <linux/gfs2_ondisk.h>
19 #include <linux/list.h>
20 #include <linux/wait.h>
21 #include <linux/module.h>
22 #include <linux/rwsem.h>
23 #include <asm/uaccess.h>
24 #include <linux/seq_file.h>
25 #include <linux/debugfs.h>
26 #include <linux/kthread.h>
27 #include <linux/freezer.h>
28 #include <linux/workqueue.h>
29 #include <linux/jiffies.h>
30
31 #include "gfs2.h"
32 #include "incore.h"
33 #include "glock.h"
34 #include "glops.h"
35 #include "inode.h"
36 #include "lops.h"
37 #include "meta_io.h"
38 #include "quota.h"
39 #include "super.h"
40 #include "util.h"
41 #include "bmap.h"
42 #define CREATE_TRACE_POINTS
43 #include "trace_gfs2.h"
44
45 struct gfs2_gl_hash_bucket {
46         struct hlist_head hb_list;
47 };
48
49 struct gfs2_glock_iter {
50         int hash;                       /* hash bucket index         */
51         struct gfs2_sbd *sdp;           /* incore superblock         */
52         struct gfs2_glock *gl;          /* current glock struct      */
53         char string[512];               /* scratch space             */
54 };
55
56 typedef void (*glock_examiner) (struct gfs2_glock * gl);
57
58 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
59 static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
60 #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
61 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
62
63 static DECLARE_RWSEM(gfs2_umount_flush_sem);
64 static struct dentry *gfs2_root;
65 static struct workqueue_struct *glock_workqueue;
66 struct workqueue_struct *gfs2_delete_workqueue;
67 static LIST_HEAD(lru_list);
68 static atomic_t lru_count = ATOMIC_INIT(0);
69 static DEFINE_SPINLOCK(lru_lock);
70
71 #define GFS2_GL_HASH_SHIFT      15
72 #define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
73 #define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
74
75 static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
77
78 /*
79  * Despite what you might think, the numbers below are not arbitrary :-)
80  * They are taken from the ipv4 routing hash code, which is well tested
81  * and thus should be nearly optimal. Later on we might tweak the numbers
82  * but for now this should be fine.
83  *
84  * The reason for putting the locks in a separate array from the list heads
85  * is that we can have fewer locks than list heads and save memory. We use
86  * the same hash function for both, but with a different hash mask.
87  */
88 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
89         defined(CONFIG_PROVE_LOCKING)
90
91 #ifdef CONFIG_LOCKDEP
92 # define GL_HASH_LOCK_SZ        256
93 #else
94 # if NR_CPUS >= 32
95 #  define GL_HASH_LOCK_SZ       4096
96 # elif NR_CPUS >= 16
97 #  define GL_HASH_LOCK_SZ       2048
98 # elif NR_CPUS >= 8
99 #  define GL_HASH_LOCK_SZ       1024
100 # elif NR_CPUS >= 4
101 #  define GL_HASH_LOCK_SZ       512
102 # else
103 #  define GL_HASH_LOCK_SZ       256
104 # endif
105 #endif
106
107 /* We never want more locks than chains */
108 #if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
109 # undef GL_HASH_LOCK_SZ
110 # define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
111 #endif
112
113 static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
114
115 static inline rwlock_t *gl_lock_addr(unsigned int x)
116 {
117         return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
118 }
119 #else /* not SMP, so no spinlocks required */
120 static inline rwlock_t *gl_lock_addr(unsigned int x)
121 {
122         return NULL;
123 }
124 #endif
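/*
 * Illustrative note (not from the original source): the bucket index and
 * the lock index are derived from the same hash value but reduced by
 * different masks, so buckets that are GL_HASH_LOCK_SZ apart share one
 * lock.  For example, with GL_HASH_LOCK_SZ == 256, hash values 5, 261 and
 * 517 all map to gl_hash_locks[5] while still using distinct list heads
 * in gl_hash_table[].
 */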
125
126 /**
127  * gl_hash() - Turn glock number into hash bucket number
128  * @name: The lock name
129  *
130  * Returns: The number of the corresponding hash bucket
131  */
132
133 static unsigned int gl_hash(const struct gfs2_sbd *sdp,
134                             const struct lm_lockname *name)
135 {
136         unsigned int h;
137
138         h = jhash(&name->ln_number, sizeof(u64), 0);
139         h = jhash(&name->ln_type, sizeof(unsigned int), h);
140         h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
141         h &= GFS2_GL_HASH_MASK;
142
143         return h;
144 }
145
146 /**
147  * glock_free() - Perform a few checks and then release struct gfs2_glock
148  * @gl: The glock to release
149  *
150  * Also calls lock module to release its internal structure for this glock.
151  *
152  */
153
154 static void glock_free(struct gfs2_glock *gl)
155 {
156         struct gfs2_sbd *sdp = gl->gl_sbd;
157         struct inode *aspace = gl->gl_aspace;
158
159         if (aspace)
160                 gfs2_aspace_put(aspace);
161         trace_gfs2_glock_put(gl);
162         sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl);
163 }
164
165 /**
166  * gfs2_glock_hold() - increment reference count on glock
167  * @gl: The glock to hold
168  *
169  */
170
171 void gfs2_glock_hold(struct gfs2_glock *gl)
172 {
173         GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
174         atomic_inc(&gl->gl_ref);
175 }
176
177 /**
178  * demote_ok - Check to see if it's ok to unlock a glock
179  * @gl: the glock
180  *
181  * Returns: 1 if it's ok
182  */
183
184 static int demote_ok(const struct gfs2_glock *gl)
185 {
186         const struct gfs2_glock_operations *glops = gl->gl_ops;
187
188         if (gl->gl_state == LM_ST_UNLOCKED)
189                 return 0;
190         if (!list_empty(&gl->gl_holders))
191                 return 0;
192         if (glops->go_demote_ok)
193                 return glops->go_demote_ok(gl);
194         return 1;
195 }
196
197 /**
198  * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
199  * @gl: the glock
200  *
201  */
202
203 static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
204 {
205         int may_reclaim;
206         may_reclaim = (demote_ok(gl) &&
207                        (atomic_read(&gl->gl_ref) == 1 ||
208                         (gl->gl_name.ln_type == LM_TYPE_INODE &&
209                          atomic_read(&gl->gl_ref) <= 2)));
210         spin_lock(&lru_lock);
211         if (list_empty(&gl->gl_lru) && may_reclaim) {
212                 list_add_tail(&gl->gl_lru, &lru_list);
213                 atomic_inc(&lru_count);
214         }
215         spin_unlock(&lru_lock);
216 }
217
218 /**
219  * gfs2_glock_put_nolock() - Decrement reference count on glock
220  * @gl: The glock to put
221  *
222  * This function should only be used if the caller has its own reference
223  * to the glock, in addition to the one it is dropping.
224  */
225
226 void gfs2_glock_put_nolock(struct gfs2_glock *gl)
227 {
228         if (atomic_dec_and_test(&gl->gl_ref))
229                 GLOCK_BUG_ON(gl, 1);
230         gfs2_glock_schedule_for_reclaim(gl);
231 }
232
233 /**
234  * gfs2_glock_put() - Decrement reference count on glock
235  * @gl: The glock to put
236  *
237  */
238
239 int gfs2_glock_put(struct gfs2_glock *gl)
240 {
241         int rv = 0;
242
243         write_lock(gl_lock_addr(gl->gl_hash));
244         if (atomic_dec_and_test(&gl->gl_ref)) {
245                 hlist_del(&gl->gl_list);
246                 write_unlock(gl_lock_addr(gl->gl_hash));
247                 spin_lock(&lru_lock);
248                 if (!list_empty(&gl->gl_lru)) {
249                         list_del_init(&gl->gl_lru);
250                         atomic_dec(&lru_count);
251                 }
252                 spin_unlock(&lru_lock);
253                 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
254                 glock_free(gl);
255                 rv = 1;
256                 goto out;
257         }
258         spin_lock(&gl->gl_spin);
259         gfs2_glock_schedule_for_reclaim(gl);
260         spin_unlock(&gl->gl_spin);
261         write_unlock(gl_lock_addr(gl->gl_hash));
262 out:
263         return rv;
264 }
265
266 /**
267  * search_bucket() - Find struct gfs2_glock by lock number
268  * @bucket: the bucket to search
269  * @name: The lock name
270  *
271  * Returns: NULL, or the struct gfs2_glock with the requested number
272  */
273
274 static struct gfs2_glock *search_bucket(unsigned int hash,
275                                         const struct gfs2_sbd *sdp,
276                                         const struct lm_lockname *name)
277 {
278         struct gfs2_glock *gl;
279         struct hlist_node *h;
280
281         hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
282                 if (!lm_name_equal(&gl->gl_name, name))
283                         continue;
284                 if (gl->gl_sbd != sdp)
285                         continue;
286
287                 atomic_inc(&gl->gl_ref);
288
289                 return gl;
290         }
291
292         return NULL;
293 }
294
295 /**
296  * may_grant - check if it's ok to grant a new lock
297  * @gl: The glock
298  * @gh: The lock request which we wish to grant
299  *
300  * Returns: true if it's ok to grant the lock
301  */
302
303 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
304 {
305         const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
306         if ((gh->gh_state == LM_ST_EXCLUSIVE ||
307              gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
308                 return 0;
309         if (gl->gl_state == gh->gh_state)
310                 return 1;
311         if (gh->gh_flags & GL_EXACT)
312                 return 0;
313         if (gl->gl_state == LM_ST_EXCLUSIVE) {
314                 if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
315                         return 1;
316                 if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
317                         return 1;
318         }
319         if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
320                 return 1;
321         return 0;
322 }
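/*
 * Illustrative examples (not part of the original file) of how may_grant()
 * decides, assuming the new request is not already at the head of the queue:
 *
 *	gl_state  head holder  request   flags        granted?
 *	EX        EX           SH        -            no  (only the head may proceed while EX is involved)
 *	EX        SH           SH        -            yes (EX covers a shared request)
 *	SH        SH           SH        GL_EXACT     yes (states match exactly)
 *	SH        SH           EX        -            no  (an EX request is only granted at the head)
 *	DF        SH           SH        LM_FLAG_ANY  yes (any locked state will do)
 */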
323
324 static void gfs2_holder_wake(struct gfs2_holder *gh)
325 {
326         clear_bit(HIF_WAIT, &gh->gh_iflags);
327         smp_mb__after_clear_bit();
328         wake_up_bit(&gh->gh_iflags, HIF_WAIT);
329 }
330
331 /**
332  * do_promote - promote as many requests as possible on the current queue
333  * @gl: The glock
334  * 
335  * Returns: 1 if there is a blocked holder at the head of the list, or 2
336  *          if a type specific operation is underway.
337  */
338
339 static int do_promote(struct gfs2_glock *gl)
340 __releases(&gl->gl_spin)
341 __acquires(&gl->gl_spin)
342 {
343         const struct gfs2_glock_operations *glops = gl->gl_ops;
344         struct gfs2_holder *gh, *tmp;
345         int ret;
346
347 restart:
348         list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
349                 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
350                         continue;
351                 if (may_grant(gl, gh)) {
352                         if (gh->gh_list.prev == &gl->gl_holders &&
353                             glops->go_lock) {
354                                 spin_unlock(&gl->gl_spin);
355                                 /* FIXME: eliminate this eventually */
356                                 ret = glops->go_lock(gh);
357                                 spin_lock(&gl->gl_spin);
358                                 if (ret) {
359                                         if (ret == 1)
360                                                 return 2;
361                                         gh->gh_error = ret;
362                                         list_del_init(&gh->gh_list);
363                                         trace_gfs2_glock_queue(gh, 0);
364                                         gfs2_holder_wake(gh);
365                                         goto restart;
366                                 }
367                                 set_bit(HIF_HOLDER, &gh->gh_iflags);
368                                 trace_gfs2_promote(gh, 1);
369                                 gfs2_holder_wake(gh);
370                                 goto restart;
371                         }
372                         set_bit(HIF_HOLDER, &gh->gh_iflags);
373                         trace_gfs2_promote(gh, 0);
374                         gfs2_holder_wake(gh);
375                         continue;
376                 }
377                 if (gh->gh_list.prev == &gl->gl_holders)
378                         return 1;
379                 break;
380         }
381         return 0;
382 }
383
384 /**
385  * do_error - Something unexpected has happened during a lock request
386  *
387  */
388
389 static inline void do_error(struct gfs2_glock *gl, const int ret)
390 {
391         struct gfs2_holder *gh, *tmp;
392
393         list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
394                 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
395                         continue;
396                 if (ret & LM_OUT_ERROR)
397                         gh->gh_error = -EIO;
398                 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
399                         gh->gh_error = GLR_TRYFAILED;
400                 else
401                         continue;
402                 list_del_init(&gh->gh_list);
403                 trace_gfs2_glock_queue(gh, 0);
404                 gfs2_holder_wake(gh);
405         }
406 }
407
408 /**
409  * find_first_waiter - find the first gh that's waiting for the glock
410  * @gl: the glock
411  */
412
413 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
414 {
415         struct gfs2_holder *gh;
416
417         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
418                 if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
419                         return gh;
420         }
421         return NULL;
422 }
423
424 /**
425  * state_change - record that the glock is now in a different state
426  * @gl: the glock
427  * @new_state: the new state
428  *
429  */
430
431 static void state_change(struct gfs2_glock *gl, unsigned int new_state)
432 {
433         int held1, held2;
434
435         held1 = (gl->gl_state != LM_ST_UNLOCKED);
436         held2 = (new_state != LM_ST_UNLOCKED);
437
438         if (held1 != held2) {
439                 if (held2)
440                         gfs2_glock_hold(gl);
441                 else
442                         gfs2_glock_put_nolock(gl);
443         }
444
445         gl->gl_state = new_state;
446         gl->gl_tchange = jiffies;
447 }
448
449 static void gfs2_demote_wake(struct gfs2_glock *gl)
450 {
451         gl->gl_demote_state = LM_ST_EXCLUSIVE;
452         clear_bit(GLF_DEMOTE, &gl->gl_flags);
453         smp_mb__after_clear_bit();
454         wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
455 }
456
457 /**
458  * finish_xmote - The DLM has replied to one of our lock requests
459  * @gl: The glock
460  * @ret: The status from the DLM
461  *
462  */
463
464 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
465 {
466         const struct gfs2_glock_operations *glops = gl->gl_ops;
467         struct gfs2_holder *gh;
468         unsigned state = ret & LM_OUT_ST_MASK;
469         int rv;
470
471         spin_lock(&gl->gl_spin);
472         trace_gfs2_glock_state_change(gl, state);
473         state_change(gl, state);
474         gh = find_first_waiter(gl);
475
476         /* Demote to UN request arrived during demote to SH or DF */
477         if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
478             state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
479                 gl->gl_target = LM_ST_UNLOCKED;
480
481         /* Check for state != intended state */
482         if (unlikely(state != gl->gl_target)) {
483                 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
484                         /* move to back of queue and try next entry */
485                         if (ret & LM_OUT_CANCELED) {
486                                 if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
487                                         list_move_tail(&gh->gh_list, &gl->gl_holders);
488                                 gh = find_first_waiter(gl);
489                                 gl->gl_target = gh->gh_state;
490                                 goto retry;
491                         }
492                         /* Some error or failed "try lock" - report it */
493                         if ((ret & LM_OUT_ERROR) ||
494                             (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
495                                 gl->gl_target = gl->gl_state;
496                                 do_error(gl, ret);
497                                 goto out;
498                         }
499                 }
500                 switch(state) {
501                 /* Unlocked due to conversion deadlock, try again */
502                 case LM_ST_UNLOCKED:
503 retry:
504                         do_xmote(gl, gh, gl->gl_target);
505                         break;
506                 /* Conversion fails, unlock and try again */
507                 case LM_ST_SHARED:
508                 case LM_ST_DEFERRED:
509                         do_xmote(gl, gh, LM_ST_UNLOCKED);
510                         break;
511                 default: /* Everything else */
512                         printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
513                         GLOCK_BUG_ON(gl, 1);
514                 }
515                 spin_unlock(&gl->gl_spin);
516                 gfs2_glock_put(gl);
517                 return;
518         }
519
520         /* Fast path - we got what we asked for */
521         if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
522                 gfs2_demote_wake(gl);
523         if (state != LM_ST_UNLOCKED) {
524                 if (glops->go_xmote_bh) {
525                         spin_unlock(&gl->gl_spin);
526                         rv = glops->go_xmote_bh(gl, gh);
527                         if (rv == -EAGAIN)
528                                 return;
529                         spin_lock(&gl->gl_spin);
530                         if (rv) {
531                                 do_error(gl, rv);
532                                 goto out;
533                         }
534                 }
535                 rv = do_promote(gl);
536                 if (rv == 2)
537                         goto out_locked;
538         }
539 out:
540         clear_bit(GLF_LOCK, &gl->gl_flags);
541 out_locked:
542         spin_unlock(&gl->gl_spin);
543         gfs2_glock_put(gl);
544 }
545
546 static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
547                                  unsigned int req_state,
548                                  unsigned int flags)
549 {
550         int ret = LM_OUT_ERROR;
551
552         if (!sdp->sd_lockstruct.ls_ops->lm_lock)
553                 return req_state == LM_ST_UNLOCKED ? 0 : req_state;
554
555         if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
556                 ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock,
557                                                          req_state, flags);
558         return ret;
559 }
560
561 /**
562  * do_xmote - Calls the DLM to change the state of a lock
563  * @gl: The lock state
564  * @gh: The holder (only for promotes)
565  * @target: The target lock state
566  *
567  */
568
569 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
570 __releases(&gl->gl_spin)
571 __acquires(&gl->gl_spin)
572 {
573         const struct gfs2_glock_operations *glops = gl->gl_ops;
574         struct gfs2_sbd *sdp = gl->gl_sbd;
575         unsigned int lck_flags = gh ? gh->gh_flags : 0;
576         int ret;
577
578         lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
579                       LM_FLAG_PRIORITY);
580         BUG_ON(gl->gl_state == target);
581         BUG_ON(gl->gl_state == gl->gl_target);
582         if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
583             glops->go_inval) {
584                 set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
585                 do_error(gl, 0); /* Fail queued try locks */
586         }
587         spin_unlock(&gl->gl_spin);
588         if (glops->go_xmote_th)
589                 glops->go_xmote_th(gl);
590         if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
591                 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
592         clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
593
594         gfs2_glock_hold(gl);
595         if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
596             gl->gl_state == LM_ST_DEFERRED) &&
597             !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
598                 lck_flags |= LM_FLAG_TRY_1CB;
599         ret = gfs2_lm_lock(sdp, gl, target, lck_flags);
600
601         if (!(ret & LM_OUT_ASYNC)) {
602                 finish_xmote(gl, ret);
603                 gfs2_glock_hold(gl);
604                 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
605                         gfs2_glock_put(gl);
606         } else {
607                 GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
608         }
609         spin_lock(&gl->gl_spin);
610 }
611
612 /**
613  * find_first_holder - find the first "holder" gh
614  * @gl: the glock
615  */
616
617 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
618 {
619         struct gfs2_holder *gh;
620
621         if (!list_empty(&gl->gl_holders)) {
622                 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
623                 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
624                         return gh;
625         }
626         return NULL;
627 }
628
629 /**
630  * run_queue - do all outstanding tasks related to a glock
631  * @gl: The glock in question
632  * @nonblock: True if we must not block in run_queue
633  *
634  */
635
636 static void run_queue(struct gfs2_glock *gl, const int nonblock)
637 __releases(&gl->gl_spin)
638 __acquires(&gl->gl_spin)
639 {
640         struct gfs2_holder *gh = NULL;
641         int ret;
642
643         if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
644                 return;
645
646         GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
647
648         if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
649             gl->gl_demote_state != gl->gl_state) {
650                 if (find_first_holder(gl))
651                         goto out_unlock;
652                 if (nonblock)
653                         goto out_sched;
654                 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
655                 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
656                 gl->gl_target = gl->gl_demote_state;
657         } else {
658                 if (test_bit(GLF_DEMOTE, &gl->gl_flags))
659                         gfs2_demote_wake(gl);
660                 ret = do_promote(gl);
661                 if (ret == 0)
662                         goto out_unlock;
663                 if (ret == 2)
664                         goto out;
665                 gh = find_first_waiter(gl);
666                 gl->gl_target = gh->gh_state;
667                 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
668                         do_error(gl, 0); /* Fail queued try locks */
669         }
670         do_xmote(gl, gh, gl->gl_target);
671 out:
672         return;
673
674 out_sched:
675         gfs2_glock_hold(gl);
676         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
677                 gfs2_glock_put_nolock(gl);
678 out_unlock:
679         clear_bit(GLF_LOCK, &gl->gl_flags);
680         goto out;
681 }
682
683 static void delete_work_func(struct work_struct *work)
684 {
685         struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
686         struct gfs2_sbd *sdp = gl->gl_sbd;
687         struct gfs2_inode *ip = NULL;
688         struct inode *inode;
689         u64 no_addr = 0;
690
691         spin_lock(&gl->gl_spin);
692         ip = (struct gfs2_inode *)gl->gl_object;
693         if (ip)
694                 no_addr = ip->i_no_addr;
695         spin_unlock(&gl->gl_spin);
696         if (ip) {
697                 inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
698                 if (inode) {
699                         d_prune_aliases(inode);
700                         iput(inode);
701                 }
702         }
703         gfs2_glock_put(gl);
704 }
705
706 static void glock_work_func(struct work_struct *work)
707 {
708         unsigned long delay = 0;
709         struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
710
711         if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
712                 finish_xmote(gl, gl->gl_reply);
713         down_read(&gfs2_umount_flush_sem);
714         spin_lock(&gl->gl_spin);
715         if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
716             gl->gl_state != LM_ST_UNLOCKED &&
717             gl->gl_demote_state != LM_ST_EXCLUSIVE) {
718                 unsigned long holdtime, now = jiffies;
719                 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
720                 if (time_before(now, holdtime))
721                         delay = holdtime - now;
722                 set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
723         }
724         run_queue(gl, 0);
725         spin_unlock(&gl->gl_spin);
726         up_read(&gfs2_umount_flush_sem);
727         if (!delay ||
728             queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
729                 gfs2_glock_put(gl);
730 }
731
732 /**
733  * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
734  * @sdp: The GFS2 superblock
735  * @number: the lock number
736  * @glops: The glock_operations to use
737  * @create: If 0, don't create the glock if it doesn't exist
738  * @glp: the glock is returned here
739  *
740  * This does not lock a glock, just finds/creates structures for one.
741  *
742  * Returns: errno
743  */
744
745 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
746                    const struct gfs2_glock_operations *glops, int create,
747                    struct gfs2_glock **glp)
748 {
749         struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
750         struct gfs2_glock *gl, *tmp;
751         unsigned int hash = gl_hash(sdp, &name);
752         int error;
753
754         read_lock(gl_lock_addr(hash));
755         gl = search_bucket(hash, sdp, &name);
756         read_unlock(gl_lock_addr(hash));
757
758         *glp = gl;
759         if (gl)
760                 return 0;
761         if (!create)
762                 return -ENOENT;
763
764         gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
765         if (!gl)
766                 return -ENOMEM;
767
768         gl->gl_flags = 0;
769         gl->gl_name = name;
770         atomic_set(&gl->gl_ref, 1);
771         gl->gl_state = LM_ST_UNLOCKED;
772         gl->gl_target = LM_ST_UNLOCKED;
773         gl->gl_demote_state = LM_ST_EXCLUSIVE;
774         gl->gl_hash = hash;
775         gl->gl_ops = glops;
776         snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
777         memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
778         gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
779         gl->gl_tchange = jiffies;
780         gl->gl_object = NULL;
781         gl->gl_sbd = sdp;
782         gl->gl_aspace = NULL;
783         INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
784         INIT_WORK(&gl->gl_delete, delete_work_func);
785
786         /* If this glock protects actual on-disk data or metadata blocks,
787            create a VFS inode to manage the pages/buffers holding them. */
788         if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
789                 gl->gl_aspace = gfs2_aspace_get(sdp);
790                 if (!gl->gl_aspace) {
791                         error = -ENOMEM;
792                         goto fail;
793                 }
794         }
795
796         write_lock(gl_lock_addr(hash));
797         tmp = search_bucket(hash, sdp, &name);
798         if (tmp) {
799                 write_unlock(gl_lock_addr(hash));
800                 glock_free(gl);
801                 gl = tmp;
802         } else {
803                 hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
804                 write_unlock(gl_lock_addr(hash));
805         }
806
807         *glp = gl;
808
809         return 0;
810
811 fail:
812         kmem_cache_free(gfs2_glock_cachep, gl);
813         return error;
814 }
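/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * that merely needs the glock structure, for example to attach it to an
 * in-core inode, might do the following; "no_addr" stands in for whatever
 * lock number is appropriate:
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	... use gl, e.g. store it in the inode ...
 *	gfs2_glock_put(gl);    (drops the reference gfs2_glock_get() took)
 */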
815
816 /**
817  * gfs2_holder_init - initialize a struct gfs2_holder in the default way
818  * @gl: the glock
819  * @state: the state we're requesting
820  * @flags: the modifier flags
821  * @gh: the holder structure
822  *
823  */
824
825 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
826                       struct gfs2_holder *gh)
827 {
828         INIT_LIST_HEAD(&gh->gh_list);
829         gh->gh_gl = gl;
830         gh->gh_ip = (unsigned long)__builtin_return_address(0);
831         gh->gh_owner_pid = get_pid(task_pid(current));
832         gh->gh_state = state;
833         gh->gh_flags = flags;
834         gh->gh_error = 0;
835         gh->gh_iflags = 0;
836         gfs2_glock_hold(gl);
837 }
838
839 /**
840  * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
841  * @state: the state we're requesting
842  * @flags: the modifier flags
843  * @gh: the holder structure
844  *
845  * Don't mess with the glock.
846  *
847  */
848
849 void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
850 {
851         gh->gh_state = state;
852         gh->gh_flags = flags;
853         gh->gh_iflags = 0;
854         gh->gh_ip = (unsigned long)__builtin_return_address(0);
855 }
856
857 /**
858  * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
859  * @gh: the holder structure
860  *
861  */
862
863 void gfs2_holder_uninit(struct gfs2_holder *gh)
864 {
865         put_pid(gh->gh_owner_pid);
866         gfs2_glock_put(gh->gh_gl);
867         gh->gh_gl = NULL;
868         gh->gh_ip = 0;
869 }
870
871 /**
872  * gfs2_glock_holder_wait
873  * @word: unused
874  *
875  * This function and gfs2_glock_demote_wait both show up in the WCHAN
876  * field. Thus I've separated these otherwise identical functions in
877  * order to be more informative to the user.
878  */
879
880 static int gfs2_glock_holder_wait(void *word)
881 {
882         schedule();
883         return 0;
884 }
885
886 static int gfs2_glock_demote_wait(void *word)
887 {
888         schedule();
889         return 0;
890 }
891
892 static void wait_on_holder(struct gfs2_holder *gh)
893 {
894         might_sleep();
895         wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
896 }
897
898 static void wait_on_demote(struct gfs2_glock *gl)
899 {
900         might_sleep();
901         wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
902 }
903
904 /**
905  * handle_callback - process a demote request
906  * @gl: the glock
907  * @state: the state the caller wants us to change to
908  *
909  * There are only two requests that we are going to see in actual
910  * practice: LM_ST_SHARED and LM_ST_UNLOCKED
911  */
912
913 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
914                             unsigned long delay)
915 {
916         int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
917
918         set_bit(bit, &gl->gl_flags);
919         if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
920                 gl->gl_demote_state = state;
921                 gl->gl_demote_time = jiffies;
922         } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
923                         gl->gl_demote_state != state) {
924                 gl->gl_demote_state = LM_ST_UNLOCKED;
925         }
926         if (gl->gl_ops->go_callback)
927                 gl->gl_ops->go_callback(gl);
928         trace_gfs2_demote_rq(gl);
929 }
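/*
 * Illustrative example (not from the original file): if a demote to SH is
 * already pending and a later callback asks for DF, the two requests are
 * merged by falling back to LM_ST_UNLOCKED, which satisfies both; a
 * repeated request for the same state leaves gl_demote_state untouched.
 */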
930
931 /**
932  * gfs2_glock_wait - wait on a glock acquisition
933  * @gh: the glock holder
934  *
935  * Returns: 0 on success
936  */
937
938 int gfs2_glock_wait(struct gfs2_holder *gh)
939 {
940         wait_on_holder(gh);
941         return gh->gh_error;
942 }
943
944 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
945 {
946         va_list args;
947
948         va_start(args, fmt);
949         if (seq) {
950                 struct gfs2_glock_iter *gi = seq->private;
951                 vsnprintf(gi->string, sizeof(gi->string), fmt, args);
952                 seq_printf(seq, "%s", gi->string);
953         } else {
954                 printk(KERN_ERR " ");
955                 vprintk(fmt, args);
956         }
957         va_end(args);
958 }
959
960 /**
961  * add_to_queue - Add a holder to the wait queue (but look for recursion)
962  * @gh: the holder structure to add
963  *
964  * Eventually we should move the recursive locking trap to a
965  * debugging option or something like that. This is the fast
966  * path and needs to have the minimum number of distractions.
967  * 
968  */
969
970 static inline void add_to_queue(struct gfs2_holder *gh)
971 __releases(&gl->gl_spin)
972 __acquires(&gl->gl_spin)
973 {
974         struct gfs2_glock *gl = gh->gh_gl;
975         struct gfs2_sbd *sdp = gl->gl_sbd;
976         struct list_head *insert_pt = NULL;
977         struct gfs2_holder *gh2;
978         int try_lock = 0;
979
980         BUG_ON(gh->gh_owner_pid == NULL);
981         if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
982                 BUG();
983
984         if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
985                 if (test_bit(GLF_LOCK, &gl->gl_flags))
986                         try_lock = 1;
987                 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
988                         goto fail;
989         }
990
991         list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
992                 if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
993                     (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
994                         goto trap_recursive;
995                 if (try_lock &&
996                     !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
997                     !may_grant(gl, gh)) {
998 fail:
999                         gh->gh_error = GLR_TRYFAILED;
1000                         gfs2_holder_wake(gh);
1001                         return;
1002                 }
1003                 if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
1004                         continue;
1005                 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
1006                         insert_pt = &gh2->gh_list;
1007         }
1008         if (likely(insert_pt == NULL)) {
1009                 list_add_tail(&gh->gh_list, &gl->gl_holders);
1010                 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
1011                         goto do_cancel;
1012                 return;
1013         }
1014         trace_gfs2_glock_queue(gh, 1);
1015         list_add_tail(&gh->gh_list, insert_pt);
1016 do_cancel:
1017         gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
1018         if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
1019                 spin_unlock(&gl->gl_spin);
1020                 if (sdp->sd_lockstruct.ls_ops->lm_cancel)
1021                         sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
1022                 spin_lock(&gl->gl_spin);
1023         }
1024         return;
1025
1026 trap_recursive:
1027         print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
1028         printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
1029         printk(KERN_ERR "lock type: %d req lock state : %d\n",
1030                gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
1031         print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
1032         printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
1033         printk(KERN_ERR "lock type: %d req lock state : %d\n",
1034                gh->gh_gl->gl_name.ln_type, gh->gh_state);
1035         __dump_glock(NULL, gl);
1036         BUG();
1037 }
1038
1039 /**
1040  * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1041  * @gh: the holder structure
1042  *
1043  * if (gh->gh_flags & GL_ASYNC), this never returns an error
1044  *
1045  * Returns: 0, GLR_TRYFAILED, or errno on failure
1046  */
1047
1048 int gfs2_glock_nq(struct gfs2_holder *gh)
1049 {
1050         struct gfs2_glock *gl = gh->gh_gl;
1051         struct gfs2_sbd *sdp = gl->gl_sbd;
1052         int error = 0;
1053
1054         if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1055                 return -EIO;
1056
1057         spin_lock(&gl->gl_spin);
1058         add_to_queue(gh);
1059         run_queue(gl, 1);
1060         spin_unlock(&gl->gl_spin);
1061
1062         if (!(gh->gh_flags & GL_ASYNC))
1063                 error = gfs2_glock_wait(gh);
1064
1065         return error;
1066 }
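/*
 * Typical blocking acquisition (illustrative sketch, not part of the
 * original file); gfs2_glock_nq_init(), used by gfs2_glock_nq_num() below,
 * combines the first two calls:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);     (blocks, since GL_ASYNC is not set)
 *	if (error == 0) {
 *		... access the object protected by gl ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);
 */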
1067
1068 /**
1069  * gfs2_glock_poll - poll to see if an async request has been completed
1070  * @gh: the holder
1071  *
1072  * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1073  */
1074
1075 int gfs2_glock_poll(struct gfs2_holder *gh)
1076 {
1077         return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
1078 }
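/*
 * Asynchronous use (illustrative sketch, not part of the original file):
 * queue the request with GL_ASYNC, carry on with other work, then poll and
 * finally wait for the result:
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);             (does not wait when GL_ASYNC is set)
 *	... do something else ...
 *	if (gfs2_glock_poll(&gh))
 *		error = gfs2_glock_wait(&gh);
 */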
1079
1080 /**
1081  * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1082  * @gh: the glock holder
1083  *
1084  */
1085
1086 void gfs2_glock_dq(struct gfs2_holder *gh)
1087 {
1088         struct gfs2_glock *gl = gh->gh_gl;
1089         const struct gfs2_glock_operations *glops = gl->gl_ops;
1090         unsigned delay = 0;
1091         int fast_path = 0;
1092
1093         spin_lock(&gl->gl_spin);
1094         if (gh->gh_flags & GL_NOCACHE)
1095                 handle_callback(gl, LM_ST_UNLOCKED, 0);
1096
1097         list_del_init(&gh->gh_list);
1098         if (find_first_holder(gl) == NULL) {
1099                 if (glops->go_unlock) {
1100                         GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
1101                         spin_unlock(&gl->gl_spin);
1102                         glops->go_unlock(gh);
1103                         spin_lock(&gl->gl_spin);
1104                         clear_bit(GLF_LOCK, &gl->gl_flags);
1105                 }
1106                 if (list_empty(&gl->gl_holders) &&
1107                     !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1108                     !test_bit(GLF_DEMOTE, &gl->gl_flags))
1109                         fast_path = 1;
1110         }
1111         trace_gfs2_glock_queue(gh, 0);
1112         spin_unlock(&gl->gl_spin);
1113         if (likely(fast_path))
1114                 return;
1115
1116         gfs2_glock_hold(gl);
1117         if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1118             !test_bit(GLF_DEMOTE, &gl->gl_flags))
1119                 delay = gl->gl_ops->go_min_hold_time;
1120         if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1121                 gfs2_glock_put(gl);
1122 }
1123
1124 void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1125 {
1126         struct gfs2_glock *gl = gh->gh_gl;
1127         gfs2_glock_dq(gh);
1128         wait_on_demote(gl);
1129 }
1130
1131 /**
1132  * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1133  * @gh: the holder structure
1134  *
1135  */
1136
1137 void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1138 {
1139         gfs2_glock_dq(gh);
1140         gfs2_holder_uninit(gh);
1141 }
1142
1143 /**
1144  * gfs2_glock_nq_num - acquire a glock based on lock number
1145  * @sdp: the filesystem
1146  * @number: the lock number
1147  * @glops: the glock operations for the type of glock
1148  * @state: the state to acquire the glock in
1149  * @flags: modifier flags for the acquisition
1150  * @gh: the struct gfs2_holder
1151  *
1152  * Returns: errno
1153  */
1154
1155 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1156                       const struct gfs2_glock_operations *glops,
1157                       unsigned int state, int flags, struct gfs2_holder *gh)
1158 {
1159         struct gfs2_glock *gl;
1160         int error;
1161
1162         error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1163         if (!error) {
1164                 error = gfs2_glock_nq_init(gl, state, flags, gh);
1165                 gfs2_glock_put(gl);
1166         }
1167
1168         return error;
1169 }
1170
1171 /**
1172  * glock_compare - Compare two struct gfs2_holder structures for sorting
1173  * @arg_a: the first structure
1174  * @arg_b: the second structure
1175  *
1176  */
1177
1178 static int glock_compare(const void *arg_a, const void *arg_b)
1179 {
1180         const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1181         const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1182         const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1183         const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1184
1185         if (a->ln_number > b->ln_number)
1186                 return 1;
1187         if (a->ln_number < b->ln_number)
1188                 return -1;
1189         BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1190         return 0;
1191 }
1192
1193 /**
1194  * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1195  * @num_gh: the number of structures
1196  * @ghs: an array of struct gfs2_holder structures
1197  *
1198  * Returns: 0 on success (all glocks acquired),
1199  *          errno on failure (no glocks acquired)
1200  */
1201
1202 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1203                      struct gfs2_holder **p)
1204 {
1205         unsigned int x;
1206         int error = 0;
1207
1208         for (x = 0; x < num_gh; x++)
1209                 p[x] = &ghs[x];
1210
1211         sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1212
1213         for (x = 0; x < num_gh; x++) {
1214                 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1215
1216                 error = gfs2_glock_nq(p[x]);
1217                 if (error) {
1218                         while (x--)
1219                                 gfs2_glock_dq(p[x]);
1220                         break;
1221                 }
1222         }
1223
1224         return error;
1225 }
1226
1227 /**
1228  * gfs2_glock_nq_m - acquire multiple glocks
1229  * @num_gh: the number of structures
1230  * @ghs: an array of struct gfs2_holder structures
1231  *
1232  *
1233  * Returns: 0 on success (all glocks acquired),
1234  *          errno on failure (no glocks acquired)
1235  */
1236
1237 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1238 {
1239         struct gfs2_holder *tmp[4];
1240         struct gfs2_holder **pph = tmp;
1241         int error = 0;
1242
1243         switch(num_gh) {
1244         case 0:
1245                 return 0;
1246         case 1:
1247                 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1248                 return gfs2_glock_nq(ghs);
1249         default:
1250                 if (num_gh <= 4)
1251                         break;
1252                 pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1253                 if (!pph)
1254                         return -ENOMEM;
1255         }
1256
1257         error = nq_m_sync(num_gh, ghs, pph);
1258
1259         if (pph != tmp)
1260                 kfree(pph);
1261
1262         return error;
1263 }
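/*
 * Illustrative sketch (not part of the original file): taking the glocks of
 * two inodes in one call and letting gfs2_glock_nq_m() sort them into a
 * deadlock-free order:
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(ip1->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(ip2->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	...
 *	gfs2_glock_dq_uninit_m(2, ghs);
 */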
1264
1265 /**
1266  * gfs2_glock_dq_m - release multiple glocks
1267  * @num_gh: the number of structures
1268  * @ghs: an array of struct gfs2_holder structures
1269  *
1270  */
1271
1272 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1273 {
1274         unsigned int x;
1275
1276         for (x = 0; x < num_gh; x++)
1277                 gfs2_glock_dq(&ghs[x]);
1278 }
1279
1280 /**
1281  * gfs2_glock_dq_uninit_m - release multiple glocks
1282  * @num_gh: the number of structures
1283  * @ghs: an array of struct gfs2_holder structures
1284  *
1285  */
1286
1287 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1288 {
1289         unsigned int x;
1290
1291         for (x = 0; x < num_gh; x++)
1292                 gfs2_glock_dq_uninit(&ghs[x]);
1293 }
1294
1295 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
1296 {
1297         unsigned long delay = 0;
1298         unsigned long holdtime;
1299         unsigned long now = jiffies;
1300
1301         gfs2_glock_hold(gl);
1302         holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
1303         if (time_before(now, holdtime))
1304                 delay = holdtime - now;
1305         if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1306                 delay = gl->gl_ops->go_min_hold_time;
1307
1308         spin_lock(&gl->gl_spin);
1309         handle_callback(gl, state, delay);
1310         spin_unlock(&gl->gl_spin);
1311         if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1312                 gfs2_glock_put(gl);
1313 }
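/*
 * Illustrative arithmetic (not from the original file): if go_min_hold_time
 * were 100 jiffies and the remote demote request arrives 30 jiffies after
 * the last state change, the demote work is delayed by 70 jiffies, so that
 * recently acquired glocks are not bounced between nodes immediately.
 */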
1314
1315 /**
1316  * gfs2_glock_complete - Callback used by locking
1317  * @gl: Pointer to the glock
1318  * @ret: The return value from the dlm
1319  *
1320  */
1321
1322 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1323 {
1324         struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
1325         gl->gl_reply = ret;
1326         if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
1327                 struct gfs2_holder *gh;
1328                 spin_lock(&gl->gl_spin);
1329                 gh = find_first_waiter(gl);
1330                 if ((!(gh && (gh->gh_flags & LM_FLAG_NOEXP)) &&
1331                      (gl->gl_target != LM_ST_UNLOCKED)) ||
1332                     ((ret & ~LM_OUT_ST_MASK) != 0))
1333                         set_bit(GLF_FROZEN, &gl->gl_flags);
1334                 spin_unlock(&gl->gl_spin);
1335                 if (test_bit(GLF_FROZEN, &gl->gl_flags))
1336                         return;
1337         }
1338         set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1339         gfs2_glock_hold(gl);
1340         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1341                 gfs2_glock_put(gl);
1342 }
1343
1344
1345 static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
1346 {
1347         struct gfs2_glock *gl;
1348         int may_demote;
1349         int nr_skipped = 0;
1350         LIST_HEAD(skipped);
1351
1352         if (nr == 0)
1353                 goto out;
1354
1355         if (!(gfp_mask & __GFP_FS))
1356                 return -1;
1357
1358         spin_lock(&lru_lock);
1359         while(nr && !list_empty(&lru_list)) {
1360                 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1361                 list_del_init(&gl->gl_lru);
1362                 atomic_dec(&lru_count);
1363
1364                 /* Check if glock is about to be freed */
1365                 if (atomic_read(&gl->gl_ref) == 0)
1366                         continue;
1367
1368                 /* Test for being demotable */
1369                 if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1370                         gfs2_glock_hold(gl);
1371                         spin_unlock(&lru_lock);
1372                         spin_lock(&gl->gl_spin);
1373                         may_demote = demote_ok(gl);
1374                         if (may_demote) {
1375                                 handle_callback(gl, LM_ST_UNLOCKED, 0);
1376                                 nr--;
1377                         }
1378                         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1379                                 gfs2_glock_put_nolock(gl);
1380                         spin_unlock(&gl->gl_spin);
1381                         clear_bit(GLF_LOCK, &gl->gl_flags);
1382                         spin_lock(&lru_lock);
1383                         continue;
1384                 }
1385                 nr_skipped++;
1386                 list_add(&gl->gl_lru, &skipped);
1387         }
1388         list_splice(&skipped, &lru_list);
1389         atomic_add(nr_skipped, &lru_count);
1390         spin_unlock(&lru_lock);
1391 out:
1392         return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
1393 }
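/*
 * Illustrative arithmetic (not from the original file): with 5000 glocks on
 * the LRU and the default sysctl_vfs_cache_pressure of 100, the function
 * reports (5000 / 100) * 100 = 5000 freeable objects; doubling the cache
 * pressure doubles that estimate.
 */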
1394
1395 static struct shrinker glock_shrinker = {
1396         .shrink = gfs2_shrink_glock_memory,
1397         .seeks = DEFAULT_SEEKS,
1398 };
1399
1400 /**
1401  * examine_bucket - Call a function for each glock in a hash bucket
1402  * @examiner: the function
1403  * @sdp: the filesystem
1404  * @bucket: the bucket
1405  *
1406  * Returns: 1 if the bucket has entries
1407  */
1408
1409 static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
1410                           unsigned int hash)
1411 {
1412         struct gfs2_glock *gl, *prev = NULL;
1413         int has_entries = 0;
1414         struct hlist_head *head = &gl_hash_table[hash].hb_list;
1415
1416         read_lock(gl_lock_addr(hash));
1417         /* Can't use hlist_for_each_entry - don't want prefetch here */
1418         if (hlist_empty(head))
1419                 goto out;
1420         gl = list_entry(head->first, struct gfs2_glock, gl_list);
1421         while(1) {
1422                 if (!sdp || gl->gl_sbd == sdp) {
1423                         gfs2_glock_hold(gl);
1424                         read_unlock(gl_lock_addr(hash));
1425                         if (prev)
1426                                 gfs2_glock_put(prev);
1427                         prev = gl;
1428                         examiner(gl);
1429                         has_entries = 1;
1430                         read_lock(gl_lock_addr(hash));
1431                 }
1432                 if (gl->gl_list.next == NULL)
1433                         break;
1434                 gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
1435         }
1436 out:
1437         read_unlock(gl_lock_addr(hash));
1438         if (prev)
1439                 gfs2_glock_put(prev);
1440         cond_resched();
1441         return has_entries;
1442 }
1443
1444
1445 /**
1446  * thaw_glock - thaw out a glock which has an unprocessed reply waiting
1447  * @gl: The glock to thaw
1448  *
1449  * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
1450  * so this has to result in the ref count being dropped by one.
1451  */
1452
1453 static void thaw_glock(struct gfs2_glock *gl)
1454 {
1455         if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
1456                 return;
1457         set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1458         gfs2_glock_hold(gl);
1459         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1460                 gfs2_glock_put(gl);
1461 }
1462
1463 /**
1464  * clear_glock - look at a glock and see if we can free it from glock cache
1465  * @gl: the glock to look at
1466  *
1467  */
1468
1469 static void clear_glock(struct gfs2_glock *gl)
1470 {
1471         spin_lock(&lru_lock);
1472         if (!list_empty(&gl->gl_lru)) {
1473                 list_del_init(&gl->gl_lru);
1474                 atomic_dec(&lru_count);
1475         }
1476         spin_unlock(&lru_lock);
1477
1478         spin_lock(&gl->gl_spin);
1479         if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED)
1480                 handle_callback(gl, LM_ST_UNLOCKED, 0);
1481         spin_unlock(&gl->gl_spin);
1482         gfs2_glock_hold(gl);
1483         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1484                 gfs2_glock_put(gl);
1485 }
1486
1487 /**
1488  * gfs2_glock_thaw - Thaw any frozen glocks
1489  * @sdp: The super block
1490  *
1491  */
1492
1493 void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1494 {
1495         unsigned x;
1496
1497         for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1498                 examine_bucket(thaw_glock, sdp, x);
1499 }
1500
1501 /**
1502  * gfs2_gl_hash_clear - Empty out the glock hash table
1503  * @sdp: the filesystem
1505  *
1506  * Called when unmounting the filesystem.
1507  */
1508
1509 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1510 {
1511         unsigned long t;
1512         unsigned int x;
1513         int cont;
1514
1515         t = jiffies;
1516
1517         for (;;) {
1518                 cont = 0;
1519                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1520                         if (examine_bucket(clear_glock, sdp, x))
1521                                 cont = 1;
1522                 }
1523
1524                 if (!cont)
1525                         break;
1526
1527                 if (time_after_eq(jiffies,
1528                                   t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1529                         fs_warn(sdp, "Unmount seems to be stalled. "
1530                                      "Dumping lock state...\n");
1531                         gfs2_dump_lockstate(sdp);
1532                         t = jiffies;
1533                 }
1534
1535                 down_write(&gfs2_umount_flush_sem);
1536                 invalidate_inodes(sdp->sd_vfs);
1537                 up_write(&gfs2_umount_flush_sem);
1538                 msleep(10);
1539         }
1540 }
1541
1542 void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
1543 {
1544         struct gfs2_glock *gl = ip->i_gl;
1545         int ret;
1546
1547         ret = gfs2_truncatei_resume(ip);
1548         gfs2_assert_withdraw(gl->gl_sbd, ret == 0);
1549
1550         spin_lock(&gl->gl_spin);
1551         clear_bit(GLF_LOCK, &gl->gl_flags);
1552         run_queue(gl, 1);
1553         spin_unlock(&gl->gl_spin);
1554 }
1555
1556 static const char *state2str(unsigned state)
1557 {
1558         switch(state) {
1559         case LM_ST_UNLOCKED:
1560                 return "UN";
1561         case LM_ST_SHARED:
1562                 return "SH";
1563         case LM_ST_DEFERRED:
1564                 return "DF";
1565         case LM_ST_EXCLUSIVE:
1566                 return "EX";
1567         }
1568         return "??";
1569 }
1570
1571 static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
1572 {
1573         char *p = buf;
1574         if (flags & LM_FLAG_TRY)
1575                 *p++ = 't';
1576         if (flags & LM_FLAG_TRY_1CB)
1577                 *p++ = 'T';
1578         if (flags & LM_FLAG_NOEXP)
1579                 *p++ = 'e';
1580         if (flags & LM_FLAG_ANY)
1581                 *p++ = 'A';
1582         if (flags & LM_FLAG_PRIORITY)
1583                 *p++ = 'p';
1584         if (flags & GL_ASYNC)
1585                 *p++ = 'a';
1586         if (flags & GL_EXACT)
1587                 *p++ = 'E';
1588         if (flags & GL_NOCACHE)
1589                 *p++ = 'c';
1590         if (test_bit(HIF_HOLDER, &iflags))
1591                 *p++ = 'H';
1592         if (test_bit(HIF_WAIT, &iflags))
1593                 *p++ = 'W';
1594         if (test_bit(HIF_FIRST, &iflags))
1595                 *p++ = 'F';
1596         *p = 0;
1597         return buf;
1598 }
1599
1600 /**
1601  * dump_holder - print information about a glock holder
1602  * @seq: the seq_file struct
1603  * @gh: the glock holder
1604  *
1605  * Returns: 0 on success, -ENOBUFS when we run out of space
1606  */
1607
1608 static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1609 {
1610         struct task_struct *gh_owner = NULL;
1611         char buffer[KSYM_SYMBOL_LEN];
1612         char flags_buf[32];
1613
1614         sprint_symbol(buffer, gh->gh_ip);
1615         if (gh->gh_owner_pid)
1616                 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1617         gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %s\n",
1618                   state2str(gh->gh_state),
1619                   hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1620                   gh->gh_error,
1621                   gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1622                   gh_owner ? gh_owner->comm : "(ended)", buffer);
1623         return 0;
1624 }
1625
1626 static const char *gflags2str(char *buf, const unsigned long *gflags)
1627 {
1628         char *p = buf;
1629         if (test_bit(GLF_LOCK, gflags))
1630                 *p++ = 'l';
1631         if (test_bit(GLF_DEMOTE, gflags))
1632                 *p++ = 'D';
1633         if (test_bit(GLF_PENDING_DEMOTE, gflags))
1634                 *p++ = 'd';
1635         if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1636                 *p++ = 'p';
1637         if (test_bit(GLF_DIRTY, gflags))
1638                 *p++ = 'y';
1639         if (test_bit(GLF_LFLUSH, gflags))
1640                 *p++ = 'f';
1641         if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1642                 *p++ = 'i';
1643         if (test_bit(GLF_REPLY_PENDING, gflags))
1644                 *p++ = 'r';
1645         if (test_bit(GLF_INITIAL, gflags))
1646                 *p++ = 'I';
1647         if (test_bit(GLF_FROZEN, gflags))
1648                 *p++ = 'F';
1649         *p = 0;
1650         return buf;
1651 }
1652
1653 /**
1654  * __dump_glock - print information about a glock
1655  * @seq: The seq_file struct
1656  * @gl: the glock
1657  *
1658  * The file format is as follows:
1659  * One line per object, capital letters are used to indicate objects
1660  * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
1661  * other objects are indented by a single space and follow the glock to
1662  * which they are related. Fields are indicated by lower case letters
1663  * followed by a colon and the field value, except for strings which are in
1664  * [] so that it's possible to see if they are composed of spaces, for
1665  * example. The fields are n = number (id of the object), f = flags,
1666  * t = type, s = state, r = refcount, e = error, p = pid.
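 *
 * A purely illustrative example (all values are made up): an inode
 * glock held in the shared state with a single granted holder might
 * be dumped as
 *
 *   G:  s:SH n:2/24836 f: t:SH d:EX/0 a:0 r:3
 *    H: s:SH f:H e:0 p:3608 [touch] gfs2_inode_lookup+0x112/0x240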
1667  *
1668  * Returns: 0 on success, -ENOBUFS when we run out of space
1669  */
1670
1671 static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1672 {
1673         const struct gfs2_glock_operations *glops = gl->gl_ops;
1674         unsigned long long dtime;
1675         const struct gfs2_holder *gh;
1676         char gflags_buf[32];
1677         int error = 0;
1678
1679         dtime = jiffies - gl->gl_demote_time;
1680         dtime *= 1000000/HZ; /* demote time in uSec */
1681         if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1682                 dtime = 0;
1683         gfs2_print_dbg(seq, "G:  s:%s n:%u/%llu f:%s t:%s d:%s/%llu a:%d r:%d\n",
1684                   state2str(gl->gl_state),
1685                   gl->gl_name.ln_type,
1686                   (unsigned long long)gl->gl_name.ln_number,
1687                   gflags2str(gflags_buf, &gl->gl_flags),
1688                   state2str(gl->gl_target),
1689                   state2str(gl->gl_demote_state), dtime,
1690                   atomic_read(&gl->gl_ail_count),
1691                   atomic_read(&gl->gl_ref));
1692
1693         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1694                 error = dump_holder(seq, gh);
1695                 if (error)
1696                         goto out;
1697         }
1698         if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1699                 error = glops->go_dump(seq, gl);
1700 out:
1701         return error;
1702 }
1703
1704 static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1705 {
1706         int ret;
1707         spin_lock(&gl->gl_spin);
1708         ret = __dump_glock(seq, gl);
1709         spin_unlock(&gl->gl_spin);
1710         return ret;
1711 }
1712
1713 /**
1714  * gfs2_dump_lockstate - print out the current lockstate
1715  * @sdp: the filesystem
1716  *
1717  * Dumps the state of every glock belonging to @sdp to the
1718  * console.
1719  *
1720  */
1721
1722 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
1723 {
1724         struct gfs2_glock *gl;
1725         struct hlist_node *h;
1726         unsigned int x;
1727         int error = 0;
1728
1729         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1730
1731                 read_lock(gl_lock_addr(x));
1732
1733                 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
1734                         if (gl->gl_sbd != sdp)
1735                                 continue;
1736
1737                         error = dump_glock(NULL, gl);
1738                         if (error)
1739                                 break;
1740                 }
1741
1742                 read_unlock(gl_lock_addr(x));
1743
1744                 if (error)
1745                         break;
1746         }
1747
1749         return error;
1750 }
1751
1753 int __init gfs2_glock_init(void)
1754 {
1755         unsigned i;
1756         for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
1757                 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
1758         }
1759 #ifdef GL_HASH_LOCK_SZ
1760         for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
1761                 rwlock_init(&gl_hash_locks[i]);
1762         }
1763 #endif
1764
1765         glock_workqueue = create_workqueue("glock_workqueue");
1766         if (!glock_workqueue)
1767                 return -ENOMEM;
1768         gfs2_delete_workqueue = create_workqueue("delete_workqueue");
1769         if (!gfs2_delete_workqueue) {
1770                 destroy_workqueue(glock_workqueue);
1771                 return -ENOMEM;
1772         }
1773
1774         register_shrinker(&glock_shrinker);
1775
1776         return 0;
1777 }
1778
1779 void gfs2_glock_exit(void)
1780 {
1781         unregister_shrinker(&glock_shrinker);
1782         destroy_workqueue(glock_workqueue);
1783         destroy_workqueue(gfs2_delete_workqueue);
1784 }
1785
1786 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1787 {
1788         struct gfs2_glock *gl;
1789
1790 restart:
1791         read_lock(gl_lock_addr(gi->hash));
1792         gl = gi->gl;
1793         if (gl) {
1794                 gi->gl = hlist_entry(gl->gl_list.next,
1795                                      struct gfs2_glock, gl_list);
1796         } else {
1797                 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
1798                                      struct gfs2_glock, gl_list);
1799         }
1800         if (gi->gl)
1801                 gfs2_glock_hold(gi->gl);
1802         read_unlock(gl_lock_addr(gi->hash));
1803         if (gl)
1804                 gfs2_glock_put(gl);
1805         while (gi->gl == NULL) {
1806                 gi->hash++;
1807                 if (gi->hash >= GFS2_GL_HASH_SIZE)
1808                         return 1;
1809                 read_lock(gl_lock_addr(gi->hash));
1810                 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
1811                                      struct gfs2_glock, gl_list);
1812                 if (gi->gl)
1813                         gfs2_glock_hold(gi->gl);
1814                 read_unlock(gl_lock_addr(gi->hash));
1815         }
1816
1817         if (gi->sdp != gi->gl->gl_sbd)
1818                 goto restart;
1819
1820         return 0;
1821 }
1822
1823 static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
1824 {
1825         if (gi->gl)
1826                 gfs2_glock_put(gi->gl);
1827         gi->gl = NULL;
1828 }
1829
1830 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1831 {
1832         struct gfs2_glock_iter *gi = seq->private;
1833         loff_t n = *pos;
1834
1835         gi->hash = 0;
1836
1837         do {
1838                 if (gfs2_glock_iter_next(gi)) {
1839                         gfs2_glock_iter_free(gi);
1840                         return NULL;
1841                 }
1842         } while (n--);
1843
1844         return gi->gl;
1845 }
1846
1847 static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1848                                  loff_t *pos)
1849 {
1850         struct gfs2_glock_iter *gi = seq->private;
1851
1852         (*pos)++;
1853
1854         if (gfs2_glock_iter_next(gi)) {
1855                 gfs2_glock_iter_free(gi);
1856                 return NULL;
1857         }
1858
1859         return gi->gl;
1860 }
1861
1862 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1863 {
1864         struct gfs2_glock_iter *gi = seq->private;
1865         gfs2_glock_iter_free(gi);
1866 }
1867
1868 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
1869 {
1870         return dump_glock(seq, iter_ptr);
1871 }
1872
1873 static const struct seq_operations gfs2_glock_seq_ops = {
1874         .start = gfs2_glock_seq_start,
1875         .next  = gfs2_glock_seq_next,
1876         .stop  = gfs2_glock_seq_stop,
1877         .show  = gfs2_glock_seq_show,
1878 };
1879
1880 static int gfs2_debugfs_open(struct inode *inode, struct file *file)
1881 {
1882         int ret = seq_open_private(file, &gfs2_glock_seq_ops,
1883                                    sizeof(struct gfs2_glock_iter));
1884         if (ret == 0) {
1885                 struct seq_file *seq = file->private_data;
1886                 struct gfs2_glock_iter *gi = seq->private;
1887                 gi->sdp = inode->i_private;
1888         }
1889         return ret;
1890 }
1891
1892 static const struct file_operations gfs2_debug_fops = {
1893         .owner   = THIS_MODULE,
1894         .open    = gfs2_debugfs_open,
1895         .read    = seq_read,
1896         .llseek  = seq_lseek,
1897         .release = seq_release_private,
1898 };
1899
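/*
 * The glock state of each mounted filesystem is exposed through the
 * read-only debugfs file created below.  Assuming debugfs is mounted
 * in its conventional location and the filesystem's lock table name is
 * "mycluster:myfs" (an illustrative name only), the dump can be read
 * with, for example:
 *
 *   cat /sys/kernel/debug/gfs2/mycluster:myfs/glocks
 */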
1900 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
1901 {
1902         sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
1903         if (!sdp->debugfs_dir)
1904                 return -ENOMEM;
1905         sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
1906                                                          S_IFREG | S_IRUGO,
1907                                                          sdp->debugfs_dir, sdp,
1908                                                          &gfs2_debug_fops);
1909         if (!sdp->debugfs_dentry_glocks)
1910                 return -ENOMEM;
1911
1912         return 0;
1913 }
1914
1915 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
1916 {
1917         if (sdp && sdp->debugfs_dir) {
1918                 if (sdp->debugfs_dentry_glocks) {
1919                         debugfs_remove(sdp->debugfs_dentry_glocks);
1920                         sdp->debugfs_dentry_glocks = NULL;
1921                 }
1922                 debugfs_remove(sdp->debugfs_dir);
1923                 sdp->debugfs_dir = NULL;
1924         }
1925 }
1926
1927 int gfs2_register_debugfs(void)
1928 {
1929         gfs2_root = debugfs_create_dir("gfs2", NULL);
1930         return gfs2_root ? 0 : -ENOMEM;
1931 }
1932
1933 void gfs2_unregister_debugfs(void)
1934 {
1935         debugfs_remove(gfs2_root);
1936         gfs2_root = NULL;
1937 }