/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Code which implements an OCFS2 specific interface to our DLM.
 *
 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <cluster/heartbeat.h>
#include <cluster/nodemanager.h>
#include <cluster/tcp.h>

#define MLOG_MASK_PREFIX ML_DLM_GLUE
#include <cluster/masklog.h>

#include "ocfs2_lockingver.h"
#include "extent_map.h"
#include "heartbeat.h"
#include "stackglue.h"
#include "buffer_head_io.h"
struct ocfs2_mask_waiter {
        struct list_head        mw_item;
        int                     mw_status;
        struct completion       mw_complete;
        unsigned long           mw_mask;
        unsigned long           mw_goal;
};

static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
/*
 * Return value from ->downconvert_worker functions.
 *
 * These control the precise actions of ocfs2_unblock_lock()
 * and ocfs2_process_blocked_lock()
 */
enum ocfs2_unblock_action {
        UNBLOCK_CONTINUE        = 0, /* Continue downconvert */
        UNBLOCK_CONTINUE_POST   = 1, /* Continue downconvert, fire
                                      * ->post_unlock callback */
        UNBLOCK_STOP_POST       = 2, /* Do not downconvert, fire
                                      * ->post_unlock() callback. */
};
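
/*
 * Illustrative sketch only -- not a lock type this file defines: the
 * typical shape of a ->downconvert_worker() callback (documented with
 * struct ocfs2_lock_res_ops below). It runs without any locks held,
 * may block to flush whatever the lock protects, and then returns one
 * of the ocfs2_unblock_action values above.
 */
static int __maybe_unused ocfs2_example_convert_worker(struct ocfs2_lock_res *lockres,
                                                       int blocking)
{
        /* ... sync out data the lock protects, drop caches, etc ... */

        /* continue the downconvert, then fire ->post_unlock() */
        return UNBLOCK_CONTINUE_POST;
}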
struct ocfs2_unblock_ctl {
        enum ocfs2_unblock_action unblock_action;
};

static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
                                        int new_level);
static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);

static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
                                     int blocking);

static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
                                       int blocking);

static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
                                     struct ocfs2_lock_res *lockres);
#define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)

/* This aids in debugging situations where a bad LVB might be involved. */
static void ocfs2_dump_meta_lvb_info(u64 level,
                                     const char *function,
                                     unsigned int line,
                                     struct ocfs2_lock_res *lockres)
{
        struct ocfs2_meta_lvb *lvb =
                (struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb);

        mlog(level, "LVB information for %s (called from %s:%u):\n",
             lockres->l_name, function, line);
        mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
             lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
             be32_to_cpu(lvb->lvb_igeneration));
        mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
             (unsigned long long)be64_to_cpu(lvb->lvb_isize),
             be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
             be16_to_cpu(lvb->lvb_imode));
        mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
             "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
             (long long)be64_to_cpu(lvb->lvb_iatime_packed),
             (long long)be64_to_cpu(lvb->lvb_ictime_packed),
             (long long)be64_to_cpu(lvb->lvb_imtime_packed),
             be32_to_cpu(lvb->lvb_iattr));
}
/*
 * OCFS2 Lock Resource Operations
 *
 * These fine tune the behavior of the generic dlmglue locking infrastructure.
 *
 * The most basic of lock types can point ->l_priv to their respective
 * struct ocfs2_super and allow the default actions to manage things.
 *
 * Right now, each lock type also needs to implement an init function,
 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
 * should be called when the lock is no longer needed (i.e., object
 * destruction time).
 */
struct ocfs2_lock_res_ops {
        /*
         * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
         * this callback if ->l_priv is not an ocfs2_super pointer.
         */
        struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);

        /*
         * Optionally called in the downconvert thread after a
         * successful downconvert. The lockres will not be referenced
         * after this callback is called, so it is safe to free
         * memory, etc.
         *
         * The exact semantics of when this is called are controlled
         * by ->downconvert_worker().
         */
        void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);

        /*
         * Allow a lock type to add checks to determine whether it is
         * safe to downconvert a lock. Return 0 to re-queue the
         * downconvert at a later time, nonzero to continue.
         *
         * For most locks, the default checks that there are no
         * incompatible holders are sufficient.
         *
         * Called with the lockres spinlock held.
         */
        int (*check_downconvert)(struct ocfs2_lock_res *, int);

        /*
         * Allows a lock type to populate the lock value block. This
         * is called on downconvert, and when we drop a lock.
         *
         * Locks that want to use this should set LOCK_TYPE_USES_LVB
         * in the flags field.
         *
         * Called with the lockres spinlock held.
         */
        void (*set_lvb)(struct ocfs2_lock_res *);

        /*
         * Called from the downconvert thread when it is determined
         * that a lock will be downconverted. This is called without
         * any locks held so the function can do work that might
         * schedule (syncing out data, etc).
         *
         * This should return any one of the ocfs2_unblock_action
         * values, depending on what it wants the thread to do.
         */
        int (*downconvert_worker)(struct ocfs2_lock_res *, int);

        /*
         * LOCK_TYPE_* flags which describe the specific requirements
         * of a lock type. Descriptions of each individual flag follow.
         */
        int flags;
};

/*
 * Some locks want to "refresh" potentially stale data when a
 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
 * individual lockres l_flags member from the ast function. It is
 * expected that the locking wrapper will clear the
 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
 */
#define LOCK_TYPE_REQUIRES_REFRESH 0x1

/*
 * Indicate that a lock type makes use of the lock value block. The
 * ->set_lvb lock type callback must be defined.
 */
#define LOCK_TYPE_USES_LVB 0x2
static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
        .get_osb        = ocfs2_get_inode_osb,
};

static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
        .get_osb        = ocfs2_get_inode_osb,
        .check_downconvert = ocfs2_check_meta_downconvert,
        .set_lvb        = ocfs2_set_meta_lvb,
        .downconvert_worker = ocfs2_data_convert_worker,
        .flags          = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};

static struct ocfs2_lock_res_ops ocfs2_super_lops = {
        .flags          = LOCK_TYPE_REQUIRES_REFRESH,
};

static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
};

static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
        .get_osb        = ocfs2_get_dentry_osb,
        .post_unlock    = ocfs2_dentry_post_unlock,
        .downconvert_worker = ocfs2_dentry_convert_worker,
};

static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
        .get_osb        = ocfs2_get_inode_osb,
};

static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
        .get_osb        = ocfs2_get_file_osb,
};
static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
{
        return lockres->l_type == OCFS2_LOCK_TYPE_META ||
                lockres->l_type == OCFS2_LOCK_TYPE_RW ||
                lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
}

static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
{
        BUG_ON(!ocfs2_is_inode_lock(lockres));

        return (struct inode *) lockres->l_priv;
}

static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
{
        BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);

        return (struct ocfs2_dentry_lock *)lockres->l_priv;
}

static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
{
        if (lockres->l_ops->get_osb)
                return lockres->l_ops->get_osb(lockres);

        return (struct ocfs2_super *)lockres->l_priv;
}
static int ocfs2_lock_create(struct ocfs2_super *osb,
                             struct ocfs2_lock_res *lockres,
                             int level,
                             u32 dlm_flags);
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
                                                     int wanted);
static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
                                 struct ocfs2_lock_res *lockres,
                                 int level);
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
                                        struct ocfs2_lock_res *lockres);
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
                                                int convert);

#define ocfs2_log_dlm_error(_func, _err, _lockres) do {                 \
        mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n",\
             _err, _func, _lockres->l_name);                            \
} while (0)

static int ocfs2_downconvert_thread(void *arg);
static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
                                        struct ocfs2_lock_res *lockres);
static int ocfs2_inode_lock_update(struct inode *inode,
                                   struct buffer_head **bh);
static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
static inline int ocfs2_highest_compat_lock_level(int level);
static void ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
                                      int new_level);
static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
                                  struct ocfs2_lock_res *lockres,
                                  int new_level,
                                  int lvb);
static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
                                        struct ocfs2_lock_res *lockres);
static int ocfs2_cancel_convert(struct ocfs2_super *osb,
                                struct ocfs2_lock_res *lockres);
static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
                                  u64 blkno,
                                  u32 generation,
                                  char *name)
{
        int len;

        BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);

        len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
                       ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
                       (long long)blkno, generation);

        BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));

        mlog(0, "built lock resource with name: %s\n", name);
}
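
/*
 * Illustrative sketch (not a real call path): with the format above, a
 * META lock on block 0x1234 with generation 0x5678 gets a printable
 * name of the form 'M' + OCFS2_LOCK_ID_PAD + "0000000000001234" +
 * "00005678" -- always OCFS2_LOCK_ID_MAX_LEN - 1 characters.
 */
static void __maybe_unused ocfs2_example_build_name(void)
{
        char name[OCFS2_LOCK_ID_MAX_LEN];

        ocfs2_build_lock_name(OCFS2_LOCK_TYPE_META, 0x1234ULL, 0x5678, name);
        mlog(0, "example lock name: %s\n", name);
}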
static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);

static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
                                       struct ocfs2_dlm_debug *dlm_debug)
{
        mlog(0, "Add tracking for lockres %s\n", res->l_name);

        spin_lock(&ocfs2_dlm_tracking_lock);
        list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
        spin_unlock(&ocfs2_dlm_tracking_lock);
}

static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
{
        spin_lock(&ocfs2_dlm_tracking_lock);
        if (!list_empty(&res->l_debug_list))
                list_del_init(&res->l_debug_list);
        spin_unlock(&ocfs2_dlm_tracking_lock);
}
static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
                                       struct ocfs2_lock_res *res,
                                       enum ocfs2_lock_type type,
                                       struct ocfs2_lock_res_ops *ops,
                                       void *priv)
{
        res->l_type          = type;
        res->l_ops           = ops;
        res->l_priv          = priv;

        res->l_level         = DLM_LOCK_IV;
        res->l_requested     = DLM_LOCK_IV;
        res->l_blocking      = DLM_LOCK_IV;
        res->l_action        = OCFS2_AST_INVALID;
        res->l_unlock_action = OCFS2_UNLOCK_INVALID;

        res->l_flags         = OCFS2_LOCK_INITIALIZED;

        ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
}
void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
{
        /* This also clears out the lock status block */
        memset(res, 0, sizeof(struct ocfs2_lock_res));
        spin_lock_init(&res->l_lock);
        init_waitqueue_head(&res->l_event);
        INIT_LIST_HEAD(&res->l_blocked_list);
        INIT_LIST_HEAD(&res->l_mask_waiters);
}
void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
                               enum ocfs2_lock_type type,
                               unsigned int generation,
                               struct inode *inode)
{
        struct ocfs2_lock_res_ops *ops;

        switch(type) {
        case OCFS2_LOCK_TYPE_RW:
                ops = &ocfs2_inode_rw_lops;
                break;
        case OCFS2_LOCK_TYPE_META:
                ops = &ocfs2_inode_inode_lops;
                break;
        case OCFS2_LOCK_TYPE_OPEN:
                ops = &ocfs2_inode_open_lops;
                break;
        default:
                mlog_bug_on_msg(1, "type: %d\n", type);
                ops = NULL; /* thanks, gcc */
                break;
        }

        ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
                              generation, res->l_name);
        ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
}
static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
{
        struct inode *inode = ocfs2_lock_res_inode(lockres);

        return OCFS2_SB(inode->i_sb);
}

static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
{
        struct ocfs2_file_private *fp = lockres->l_priv;

        return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
}

static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
{
        __be64 inode_blkno_be;

        memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
               sizeof(__be64));

        return be64_to_cpu(inode_blkno_be);
}

static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
{
        struct ocfs2_dentry_lock *dl = lockres->l_priv;

        return OCFS2_SB(dl->dl_inode->i_sb);
}
void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
                                u64 parent, struct inode *inode)
{
        int len;
        u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
        __be64 inode_blkno_be = cpu_to_be64(inode_blkno);
        struct ocfs2_lock_res *lockres = &dl->dl_lockres;

        ocfs2_lock_res_init_once(lockres);

        /*
         * Unfortunately, the standard lock naming scheme won't work
         * here because we have two 16 byte values to use. Instead,
         * we'll stuff the inode number as a binary value. We still
         * want error prints to show something without garbling the
         * display, so drop a null byte in there before the inode
         * number. A future version of OCFS2 will likely use all
         * binary lock names. The stringified names have been a
         * tremendous aid in debugging, but now that the debugfs
         * interface exists, we can mangle things there if need be.
         *
         * NOTE: We also drop the standard "pad" value (the total lock
         * name size stays the same though - the last part is all
         * zeros due to the memset in ocfs2_lock_res_init_once()).
         */
        len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
                       "%c%016llx",
                       ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
                       (long long)parent);

        BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));

        memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
               sizeof(__be64));

        ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
                                   OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
                                   dl);
}
static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
                                      struct ocfs2_super *osb)
{
        /* Superblock lockres doesn't come from a slab so we call init
         * once on it manually. */
        ocfs2_lock_res_init_once(res);
        ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
                              0, res->l_name);
        ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
                                   &ocfs2_super_lops, osb);
}

static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
                                       struct ocfs2_super *osb)
{
        /* Rename lockres doesn't come from a slab so we call init
         * once on it manually. */
        ocfs2_lock_res_init_once(res);
        ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
        ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
                                   &ocfs2_rename_lops, osb);
}

void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
                              struct ocfs2_file_private *fp)
{
        struct inode *inode = fp->fp_file->f_mapping->host;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        ocfs2_lock_res_init_once(lockres);
        ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
                              inode->i_generation, lockres->l_name);
        ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
                                   OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
                                   fp);
        lockres->l_flags |= OCFS2_LOCK_NOCACHE;
}
void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
{
        if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
                return;

        ocfs2_remove_lockres_tracking(res);

        mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
                        "Lockres %s is on the blocked list\n",
                        res->l_name);
        mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
                        "Lockres %s has mask waiters pending\n",
                        res->l_name);
        mlog_bug_on_msg(spin_is_locked(&res->l_lock),
                        "Lockres %s is locked\n",
                        res->l_name);
        mlog_bug_on_msg(res->l_ro_holders,
                        "Lockres %s has %u ro holders\n",
                        res->l_name, res->l_ro_holders);
        mlog_bug_on_msg(res->l_ex_holders,
                        "Lockres %s has %u ex holders\n",
                        res->l_name, res->l_ex_holders);

        /* Need to clear out the lock status block for the dlm */
        memset(&res->l_lksb, 0, sizeof(res->l_lksb));
}
static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
                                     int level)
{
        switch(level) {
        case DLM_LOCK_EX:
                lockres->l_ex_holders++;
                break;
        case DLM_LOCK_PR:
                lockres->l_ro_holders++;
                break;
        default:
                BUG();
        }
}

static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
                                     int level)
{
        switch(level) {
        case DLM_LOCK_EX:
                BUG_ON(!lockres->l_ex_holders);
                lockres->l_ex_holders--;
                break;
        case DLM_LOCK_PR:
                BUG_ON(!lockres->l_ro_holders);
                lockres->l_ro_holders--;
                break;
        default:
                BUG();
        }
}
/* WARNING: This function lives in a world where the only three lock
 * levels are EX, PR, and NL. It *will* have to be adjusted when more
 * lock types are added. */
static inline int ocfs2_highest_compat_lock_level(int level)
{
        int new_level = DLM_LOCK_EX;

        if (level == DLM_LOCK_EX)
                new_level = DLM_LOCK_NL;
        else if (level == DLM_LOCK_PR)
                new_level = DLM_LOCK_PR;

        return new_level;
}
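
/*
 * Quick reference for the mapping above, given the only three levels
 * this file knows about:
 *
 *      another node wants      highest level we may keep
 *      DLM_LOCK_EX             DLM_LOCK_NL
 *      DLM_LOCK_PR             DLM_LOCK_PR
 *      DLM_LOCK_NL             DLM_LOCK_EX (nothing needs to be given up)
 */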
static void lockres_set_flags(struct ocfs2_lock_res *lockres,
                              unsigned long newflags)
{
        struct ocfs2_mask_waiter *mw, *tmp;

        assert_spin_locked(&lockres->l_lock);

        lockres->l_flags = newflags;

        list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
                if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
                        continue;

                list_del_init(&mw->mw_item);
                mw->mw_status = 0;
                complete(&mw->mw_complete);
        }
}

static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
{
        lockres_set_flags(lockres, lockres->l_flags | or);
}

static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
                                unsigned long clear)
{
        lockres_set_flags(lockres, lockres->l_flags & ~clear);
}
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
{
        BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
        BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
        BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
        BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);

        lockres->l_level = lockres->l_requested;
        if (lockres->l_level <=
            ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
                lockres->l_blocking = DLM_LOCK_NL;
                lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
        }
        lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
{
        BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
        BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));

        /* Convert from RO to EX doesn't really need anything as our
         * information is already up to date. Convert from NL to
         * *anything* however should mark ourselves as needing an
         * update. */
        if (lockres->l_level == DLM_LOCK_NL &&
            lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
                lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);

        lockres->l_level = lockres->l_requested;
        lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
{
        BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
        BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);

        if (lockres->l_requested > DLM_LOCK_NL &&
            !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
            lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
                lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);

        lockres->l_level = lockres->l_requested;
        lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
        lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
                                     int level)
{
        int needs_downconvert = 0;

        assert_spin_locked(&lockres->l_lock);

        lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);

        if (level > lockres->l_blocking) {
                /* only schedule a downconvert if we haven't already scheduled
                 * one that goes low enough to satisfy the level we're
                 * blocking. this also catches the case where we get
                 * duplicate BASTs */
                if (ocfs2_highest_compat_lock_level(level) <
                    ocfs2_highest_compat_lock_level(lockres->l_blocking))
                        needs_downconvert = 1;

                lockres->l_blocking = level;
        }

        mlog_exit(needs_downconvert);
        return needs_downconvert;
}
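
/*
 * Worked example of the test above: we hold EX and a remote PR request
 * arrives -- l_blocking goes from NL to PR and a downconvert is
 * scheduled. If a remote EX request follows, its compatible level (NL)
 * is below PR's (PR), so a second, deeper downconvert is scheduled. In
 * the reverse order, the later PR request schedules nothing new, since
 * the downconvert already pending for EX satisfies it too.
 */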
static void ocfs2_blocking_ast(void *opaque, int level)
{
        struct ocfs2_lock_res *lockres = opaque;
        struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
        int needs_downconvert;
        unsigned long flags;

        BUG_ON(level <= DLM_LOCK_NL);

        mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n",
             lockres->l_name, level, lockres->l_level,
             ocfs2_lock_type_string(lockres->l_type));

        /*
         * We can skip the bast for locks which don't enable caching -
         * they'll be dropped at the earliest possible time anyway.
         */
        if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
                return;

        spin_lock_irqsave(&lockres->l_lock, flags);
        needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
        if (needs_downconvert)
                ocfs2_schedule_blocked_lock(osb, lockres);
        spin_unlock_irqrestore(&lockres->l_lock, flags);

        wake_up(&lockres->l_event);

        ocfs2_wake_downconvert_thread(osb);
}
static void ocfs2_locking_ast(void *opaque)
{
        struct ocfs2_lock_res *lockres = opaque;
        unsigned long flags;

        spin_lock_irqsave(&lockres->l_lock, flags);

        if (ocfs2_dlm_lock_status(&lockres->l_lksb)) {
                mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
                     lockres->l_name,
                     ocfs2_dlm_lock_status(&lockres->l_lksb));
                spin_unlock_irqrestore(&lockres->l_lock, flags);
                return;
        }

        switch(lockres->l_action) {
        case OCFS2_AST_ATTACH:
                ocfs2_generic_handle_attach_action(lockres);
                lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
                break;
        case OCFS2_AST_CONVERT:
                ocfs2_generic_handle_convert_action(lockres);
                break;
        case OCFS2_AST_DOWNCONVERT:
                ocfs2_generic_handle_downconvert_action(lockres);
                break;
        default:
                mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u "
                     "lockres flags = 0x%lx, unlock action: %u\n",
                     lockres->l_name, lockres->l_action, lockres->l_flags,
                     lockres->l_unlock_action);
                BUG();
        }

        /* set it to something invalid so if we get called again we
         * can catch it. */
        lockres->l_action = OCFS2_AST_INVALID;

        wake_up(&lockres->l_event);
        spin_unlock_irqrestore(&lockres->l_lock, flags);
}
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
                                                int convert)
{
        unsigned long flags;

        spin_lock_irqsave(&lockres->l_lock, flags);
        lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
        if (convert)
                lockres->l_action = OCFS2_AST_INVALID;
        else
                lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
        spin_unlock_irqrestore(&lockres->l_lock, flags);

        wake_up(&lockres->l_event);
}
/* Note: If we detect another process working on the lock (i.e.,
 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
 * to do the right thing in that case.
 */
static int ocfs2_lock_create(struct ocfs2_super *osb,
                             struct ocfs2_lock_res *lockres,
                             int level,
                             u32 dlm_flags)
{
        int ret = 0;
        unsigned long flags;

        mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
             dlm_flags);

        spin_lock_irqsave(&lockres->l_lock, flags);
        if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
            (lockres->l_flags & OCFS2_LOCK_BUSY)) {
                spin_unlock_irqrestore(&lockres->l_lock, flags);
                goto bail;
        }

        lockres->l_action = OCFS2_AST_ATTACH;
        lockres->l_requested = level;
        lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
        spin_unlock_irqrestore(&lockres->l_lock, flags);

        ret = ocfs2_dlm_lock(osb->cconn,
                             level,
                             &lockres->l_lksb,
                             dlm_flags,
                             lockres->l_name,
                             OCFS2_LOCK_ID_MAX_LEN - 1,
                             lockres);
        if (ret) {
                ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
                ocfs2_recover_from_dlm_error(lockres, 1);
        }

        mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);

bail:
        return ret;
}
static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
                                        int flag)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&lockres->l_lock, flags);
        ret = lockres->l_flags & flag;
        spin_unlock_irqrestore(&lockres->l_lock, flags);

        return ret;
}

static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
{
        wait_event(lockres->l_event,
                   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
}

static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
{
        wait_event(lockres->l_event,
                   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
}
/* predict what lock level we'll be dropping down to on behalf
 * of another node, and return true if the currently wanted
 * level will be compatible with it. */
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
                                                     int wanted)
{
        BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));

        return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
}

static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
{
        INIT_LIST_HEAD(&mw->mw_item);
        init_completion(&mw->mw_complete);
}

static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
{
        wait_for_completion(&mw->mw_complete);
        /* Re-arm the completion in case we want to wait on it again */
        INIT_COMPLETION(mw->mw_complete);
        return mw->mw_status;
}

static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
                                    struct ocfs2_mask_waiter *mw,
                                    unsigned long mask,
                                    unsigned long goal)
{
        BUG_ON(!list_empty(&mw->mw_item));

        assert_spin_locked(&lockres->l_lock);

        list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
        mw->mw_mask = mask;
        mw->mw_goal = goal;
}
/* returns 0 if the mw that was removed was already satisfied, -EBUSY
 * if the mask still hadn't reached its goal */
static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
                                      struct ocfs2_mask_waiter *mw)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&lockres->l_lock, flags);
        if (!list_empty(&mw->mw_item)) {
                if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
                        ret = -EBUSY;

                list_del_init(&mw->mw_item);
                init_completion(&mw->mw_complete);
        }
        spin_unlock_irqrestore(&lockres->l_lock, flags);

        return ret;
}
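
/*
 * Illustrative sketch of the mask-waiter pattern used throughout this
 * file (not a real call path; error handling elided, and the mask/goal
 * parameters are as reconstructed in lockres_add_mask_waiter() above):
 * arm a waiter for "BUSY cleared", drop the spinlock, and sleep until
 * lockres_set_flags() completes us.
 */
static void __maybe_unused ocfs2_example_wait_not_busy(struct ocfs2_lock_res *lockres)
{
        struct ocfs2_mask_waiter mw;
        unsigned long flags;

        ocfs2_init_mask_waiter(&mw);

        spin_lock_irqsave(&lockres->l_lock, flags);
        /* wake me when (l_flags & OCFS2_LOCK_BUSY) == 0 */
        lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
        spin_unlock_irqrestore(&lockres->l_lock, flags);

        ocfs2_wait_for_mask(&mw);
}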
static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
                                             struct ocfs2_lock_res *lockres)
{
        int ret;

        ret = wait_for_completion_interruptible(&mw->mw_complete);
        if (ret)
                lockres_remove_mask_waiter(lockres, mw);
        else
                ret = mw->mw_status;
        /* Re-arm the completion in case we want to wait on it again */
        INIT_COMPLETION(mw->mw_complete);

        return ret;
}
static int ocfs2_cluster_lock(struct ocfs2_super *osb,
                              struct ocfs2_lock_res *lockres,
                              int level,
                              u32 lkm_flags,
                              int arg_flags)
{
        struct ocfs2_mask_waiter mw;
        int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
        int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
        unsigned long flags;

        ocfs2_init_mask_waiter(&mw);

        if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
                lkm_flags |= DLM_LKF_VALBLK;

again:
        wait = 0;

        if (catch_signals && signal_pending(current)) {
                ret = -ERESTARTSYS;
                goto out;
        }

        spin_lock_irqsave(&lockres->l_lock, flags);

        mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
                        "Cluster lock called on freeing lockres %s! flags "
                        "0x%lx\n", lockres->l_name, lockres->l_flags);

        /* We only compare against the currently granted level
         * here. If the lock is blocked waiting on a downconvert,
         * we'll get caught below. */
        if (lockres->l_flags & OCFS2_LOCK_BUSY &&
            level > lockres->l_level) {
                /* is someone sitting in dlm_lock? If so, wait on
                 * them. */
                lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
                wait = 1;
                goto unlock;
        }

        if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
            !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
                /* is the lock currently blocked on behalf of
                 * another node? */
                lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
                wait = 1;
                goto unlock;
        }

        if (level > lockres->l_level) {
                if (lockres->l_action != OCFS2_AST_INVALID)
                        mlog(ML_ERROR, "lockres %s has action %u pending\n",
                             lockres->l_name, lockres->l_action);

                if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
                        lockres->l_action = OCFS2_AST_ATTACH;
                        lkm_flags &= ~DLM_LKF_CONVERT;
                } else {
                        lockres->l_action = OCFS2_AST_CONVERT;
                        lkm_flags |= DLM_LKF_CONVERT;
                }

                lockres->l_requested = level;
                lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
                spin_unlock_irqrestore(&lockres->l_lock, flags);

                BUG_ON(level == DLM_LOCK_IV);
                BUG_ON(level == DLM_LOCK_NL);

                mlog(0, "lock %s, convert from %d to level = %d\n",
                     lockres->l_name, lockres->l_level, level);

                /* call dlm_lock to upgrade lock now */
                ret = ocfs2_dlm_lock(osb->cconn,
                                     level,
                                     &lockres->l_lksb,
                                     lkm_flags,
                                     lockres->l_name,
                                     OCFS2_LOCK_ID_MAX_LEN - 1,
                                     lockres);
                if (ret) {
                        if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
                            (ret != -EAGAIN)) {
                                ocfs2_log_dlm_error("ocfs2_dlm_lock",
                                                    ret, lockres);
                        }
                        ocfs2_recover_from_dlm_error(lockres, 1);
                        goto out;
                }

                mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
                     lockres->l_name);

                /* At this point we've gone inside the dlm and need to
                 * complete our work regardless. */
                catch_signals = 0;

                /* wait for busy to clear and carry on */
                goto again;
        }

        /* Ok, if we get here then we're good to go. */
        ocfs2_inc_holders(lockres, level);

        ret = 0;
unlock:
        spin_unlock_irqrestore(&lockres->l_lock, flags);
out:
        /*
         * This is helping work around a lock inversion between the page lock
         * and dlm locks. One path holds the page lock while calling aops
         * which block acquiring dlm locks. The downconvert thread holds dlm
         * locks while it acquires page locks in order to down convert data
         * locks. This block is helping an aop path notice the inversion and
         * back off to unlock its page lock before trying the dlm lock again.
         */
        if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
            mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
                wait = 0;
                if (lockres_remove_mask_waiter(lockres, &mw))
                        ret = -EAGAIN;
                else
                        goto again;
        }
        if (wait) {
                ret = ocfs2_wait_for_mask(&mw);
                if (ret == 0)
                        goto again;
                mlog_errno(ret);
        }

        return ret;
}
static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
                                 struct ocfs2_lock_res *lockres,
                                 int level)
{
        unsigned long flags;

        spin_lock_irqsave(&lockres->l_lock, flags);
        ocfs2_dec_holders(lockres, level);
        ocfs2_downconvert_on_unlock(osb, lockres);
        spin_unlock_irqrestore(&lockres->l_lock, flags);
}
static int ocfs2_create_new_lock(struct ocfs2_super *osb,
                                 struct ocfs2_lock_res *lockres,
                                 int ex,
                                 int local)
{
        int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
        unsigned long flags;
        u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;

        spin_lock_irqsave(&lockres->l_lock, flags);
        BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
        lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
        spin_unlock_irqrestore(&lockres->l_lock, flags);

        return ocfs2_lock_create(osb, lockres, level, lkm_flags);
}
/* Grants us an EX lock on the data and metadata resources, skipping
 * the normal cluster directory lookup. Use this ONLY on newly created
 * inodes which other nodes can't possibly see, and which haven't been
 * hashed in the inode hash yet. This can give us a good performance
 * increase as it'll skip the network broadcast normally associated
 * with creating a new lock resource. */
int ocfs2_create_new_inode_locks(struct inode *inode)
{
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        BUG_ON(!ocfs2_inode_is_new(inode));

        mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);

        /* NOTE: That we don't increment any of the holder counts, nor
         * do we add anything to a journal handle. Since this is
         * supposed to be a new inode which the cluster doesn't know
         * about yet, there is no need to. As far as the LVB handling
         * is concerned, this is basically like acquiring an EX lock
         * on a resource which has an invalid one -- we'll set it
         * valid when we release the EX. */

        ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
        if (ret) {
                mlog_errno(ret);
                goto bail;
        }

        /*
         * We don't want to use DLM_LKF_LOCAL on a meta data lock as they
         * don't use a generation in their lock names.
         */
        ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
        if (ret) {
                mlog_errno(ret);
                goto bail;
        }

        ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
        if (ret)
                mlog_errno(ret);

bail:
        return ret;
}
int ocfs2_rw_lock(struct inode *inode, int write)
{
        int status, level;
        struct ocfs2_lock_res *lockres;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        mlog(0, "inode %llu take %s RW lock\n",
             (unsigned long long)OCFS2_I(inode)->ip_blkno,
             write ? "EXMODE" : "PRMODE");

        if (ocfs2_mount_local(osb))
                return 0;

        lockres = &OCFS2_I(inode)->ip_rw_lockres;

        level = write ? DLM_LOCK_EX : DLM_LOCK_PR;

        status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
                                    0);
        if (status < 0)
                mlog_errno(status);

        return status;
}

void ocfs2_rw_unlock(struct inode *inode, int write)
{
        int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
        struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        mlog(0, "inode %llu drop %s RW lock\n",
             (unsigned long long)OCFS2_I(inode)->ip_blkno,
             write ? "EXMODE" : "PRMODE");

        if (!ocfs2_mount_local(osb))
                ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
}
/*
 * ocfs2_open_lock always gets a PR mode lock.
 */
int ocfs2_open_lock(struct inode *inode)
{
        int status = 0;
        struct ocfs2_lock_res *lockres;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        mlog(0, "inode %llu take PRMODE open lock\n",
             (unsigned long long)OCFS2_I(inode)->ip_blkno);

        if (ocfs2_mount_local(osb))
                goto out;

        lockres = &OCFS2_I(inode)->ip_open_lockres;

        status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
                                    DLM_LOCK_PR, 0, 0);
        if (status < 0)
                mlog_errno(status);

out:
        return status;
}

int ocfs2_try_open_lock(struct inode *inode, int write)
{
        int status = 0, level;
        struct ocfs2_lock_res *lockres;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        mlog(0, "inode %llu try to take %s open lock\n",
             (unsigned long long)OCFS2_I(inode)->ip_blkno,
             write ? "EXMODE" : "PRMODE");

        if (ocfs2_mount_local(osb))
                goto out;

        lockres = &OCFS2_I(inode)->ip_open_lockres;

        level = write ? DLM_LOCK_EX : DLM_LOCK_PR;

        /*
         * The file system may already be holding a PRMODE/EXMODE open lock.
         * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
         * other nodes and the -EAGAIN will indicate to the caller that
         * this inode is still in use.
         */
        status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
                                    level, DLM_LKF_NOQUEUE, 0);

out:
        return status;
}
/*
 * ocfs2_open_unlock unlocks PR and EX mode open locks.
 */
void ocfs2_open_unlock(struct inode *inode)
{
        struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        mlog(0, "inode %llu drop open lock\n",
             (unsigned long long)OCFS2_I(inode)->ip_blkno);

        if (ocfs2_mount_local(osb))
                return;

        if (lockres->l_ro_holders)
                ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
                                     DLM_LOCK_PR);
        if (lockres->l_ex_holders)
                ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
                                     DLM_LOCK_EX);
}
static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
                                     int level)
{
        int ret;
        struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
        unsigned long flags;
        struct ocfs2_mask_waiter mw;

        ocfs2_init_mask_waiter(&mw);

retry_cancel:
        spin_lock_irqsave(&lockres->l_lock, flags);
        if (lockres->l_flags & OCFS2_LOCK_BUSY) {
                ret = ocfs2_prepare_cancel_convert(osb, lockres);
                if (ret) {
                        spin_unlock_irqrestore(&lockres->l_lock, flags);
                        ret = ocfs2_cancel_convert(osb, lockres);
                        if (ret < 0) {
                                mlog_errno(ret);
                                goto out;
                        }
                        goto retry_cancel;
                }
                lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
                spin_unlock_irqrestore(&lockres->l_lock, flags);

                ocfs2_wait_for_mask(&mw);
                goto retry_cancel;
        }

        ret = -ERESTARTSYS;
        /*
         * We may still have gotten the lock, in which case there's no
         * point to restarting the syscall.
         */
        if (lockres->l_level == level)
                ret = 0;

        mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
             lockres->l_flags, lockres->l_level, lockres->l_action);

        spin_unlock_irqrestore(&lockres->l_lock, flags);

out:
        return ret;
}
/*
 * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
 * flock() calls. The locking approach this requires is sufficiently
 * different from all other cluster lock types that we implement a
 * separate path to the "low-level" dlm calls. In particular:
 *
 * - No optimization of lock levels is done - we take exactly
 *   what's been requested.
 *
 * - No lock caching is employed. We immediately downconvert to
 *   no-lock at unlock time (this also means flock locks never go on
 *   the blocking list).
 *
 * - Since userspace can trivially deadlock itself with flock, we make
 *   sure to allow cancellation of a misbehaving application's flock()
 *   request.
 *
 * - Access to any flock lockres doesn't require concurrency, so we
 *   can simplify the code by requiring the caller to guarantee
 *   serialization of dlmglue flock calls.
 */
int ocfs2_file_lock(struct file *file, int ex, int trylock)
{
        int ret, level = ex ? LKM_EXMODE : LKM_PRMODE;
        unsigned int lkm_flags = trylock ? LKM_NOQUEUE : 0;
        unsigned long flags;
        struct ocfs2_file_private *fp = file->private_data;
        struct ocfs2_lock_res *lockres = &fp->fp_flock;
        struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
        struct ocfs2_mask_waiter mw;

        ocfs2_init_mask_waiter(&mw);

        if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
            (lockres->l_level > DLM_LOCK_NL)) {
                mlog(ML_ERROR,
                     "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
                     "level: %u\n", lockres->l_name, lockres->l_flags,
                     lockres->l_level);
                return -EINVAL;
        }

        spin_lock_irqsave(&lockres->l_lock, flags);
        if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
                lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
                spin_unlock_irqrestore(&lockres->l_lock, flags);

                /*
                 * Get the lock at NLMODE to start - that way we
                 * can cancel the upconvert request if need be.
                 */
                ret = ocfs2_lock_create(osb, lockres, LKM_NLMODE, 0);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }

                ret = ocfs2_wait_for_mask(&mw);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
                spin_lock_irqsave(&lockres->l_lock, flags);
        }

        lockres->l_action = OCFS2_AST_CONVERT;
        lkm_flags |= LKM_CONVERT;
        lockres->l_requested = level;
        lockres_or_flags(lockres, OCFS2_LOCK_BUSY);

        lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
        spin_unlock_irqrestore(&lockres->l_lock, flags);

        ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
                             lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1,
                             lockres);
        if (ret) {
                if (!trylock || (ret != -EAGAIN)) {
                        ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
                        ret = -EINVAL;
                }

                ocfs2_recover_from_dlm_error(lockres, 1);
                lockres_remove_mask_waiter(lockres, &mw);
                goto out;
        }

        ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
        if (ret == -ERESTARTSYS) {
                /*
                 * Userspace can cause deadlock itself with
                 * flock(). Current behavior locally is to allow the
                 * deadlock, but abort the system call if a signal is
                 * received. We follow this example, otherwise a
                 * poorly written program could sit in kernel until
                 * reboot.
                 *
                 * Handling this is a bit more complicated for Ocfs2
                 * though. We can't exit this function with an
                 * outstanding lock request, so a cancel convert is
                 * required. We intentionally overwrite 'ret' - if the
                 * cancel fails and the lock was granted, it's easier
                 * to just bubble success back up to the user.
                 */
                ret = ocfs2_flock_handle_signal(lockres, level);
        }

out:
        mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
             lockres->l_name, ex, trylock, ret);

        return ret;
}
void ocfs2_file_unlock(struct file *file)
{
        int ret;
        unsigned long flags;
        struct ocfs2_file_private *fp = file->private_data;
        struct ocfs2_lock_res *lockres = &fp->fp_flock;
        struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
        struct ocfs2_mask_waiter mw;

        ocfs2_init_mask_waiter(&mw);

        if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
                return;

        if (lockres->l_level == LKM_NLMODE)
                return;

        mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
             lockres->l_name, lockres->l_flags, lockres->l_level,
             lockres->l_action);

        spin_lock_irqsave(&lockres->l_lock, flags);
        /*
         * Fake a blocking ast for the downconvert code.
         */
        lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
        lockres->l_blocking = DLM_LOCK_EX;

        ocfs2_prepare_downconvert(lockres, LKM_NLMODE);
        lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
        spin_unlock_irqrestore(&lockres->l_lock, flags);

        ret = ocfs2_downconvert_lock(osb, lockres, LKM_NLMODE, 0);
        if (ret) {
                mlog_errno(ret);
                return;
        }

        ret = ocfs2_wait_for_mask(&mw);
        if (ret)
                mlog_errno(ret);
}
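
/*
 * Illustrative caller sketch (hypothetical -- real callers live in the
 * flock glue outside this file): a trylock-style request and its
 * release, assuming the caller already serializes dlmglue flock calls
 * for this file as required by the comment above ocfs2_file_lock().
 */
static int __maybe_unused ocfs2_example_try_flock(struct file *file)
{
        int ret;

        ret = ocfs2_file_lock(file, 1 /* ex */, 1 /* trylock */);
        if (ret == -EAGAIN)
                return ret;     /* another node holds it */
        if (ret < 0)
                return ret;     /* dlm or signal error */

        /* ... exclusive flock held here ... */

        ocfs2_file_unlock(file);
        return 0;
}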
static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
                                        struct ocfs2_lock_res *lockres)
{
        int kick = 0;

        /* If we know that another node is waiting on our lock, kick
         * the downconvert thread pre-emptively when we reach a release
         * condition. */
        if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
                switch(lockres->l_blocking) {
                case DLM_LOCK_EX:
                        if (!lockres->l_ex_holders && !lockres->l_ro_holders)
                                kick = 1;
                        break;
                case DLM_LOCK_PR:
                        if (!lockres->l_ex_holders)
                                kick = 1;
                        break;
                default:
                        BUG();
                }
        }

        if (kick)
                ocfs2_wake_downconvert_thread(osb);
}
#define OCFS2_SEC_BITS   34
#define OCFS2_SEC_SHIFT  (64 - 34)
#define OCFS2_NSEC_MASK  ((1ULL << OCFS2_SEC_SHIFT) - 1)

/* LVB only has room for 64 bits of time here so we pack it for
 * our own convenience. */
static u64 ocfs2_pack_timespec(struct timespec *spec)
{
        u64 res;
        u64 sec = spec->tv_sec;
        u32 nsec = spec->tv_nsec;

        res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);

        return res;
}
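
/*
 * Worked example of the packing above (OCFS2_SEC_SHIFT is 30):
 *
 *      packed = (sec << 30) | (nsec & ((1ULL << 30) - 1));
 *      sec    = packed >> 30;
 *      nsec   = packed & OCFS2_NSEC_MASK;
 *
 * Seconds take the high 34 bits; nanoseconds (always < 10^9 < 2^30)
 * fit the low 30 bits exactly. ocfs2_unpack_timespec() below is the
 * inverse.
 */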
/* Call this with the lockres locked. I am reasonably sure we don't
 * need ip_lock in this function as anyone who would be changing those
 * values is supposed to be blocked in ocfs2_inode_lock right now. */
static void __ocfs2_stuff_meta_lvb(struct inode *inode)
{
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
        struct ocfs2_meta_lvb *lvb;

        lvb = (struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb);

        /*
         * Invalidate the LVB of a deleted inode - this way other
         * nodes are forced to go to disk and discover the new inode
         * values.
         */
        if (oi->ip_flags & OCFS2_INODE_DELETED) {
                lvb->lvb_version = 0;
                goto out;
        }

        lvb->lvb_version   = OCFS2_LVB_VERSION;
        lvb->lvb_isize     = cpu_to_be64(i_size_read(inode));
        lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
        lvb->lvb_iuid      = cpu_to_be32(inode->i_uid);
        lvb->lvb_igid      = cpu_to_be32(inode->i_gid);
        lvb->lvb_imode     = cpu_to_be16(inode->i_mode);
        lvb->lvb_inlink    = cpu_to_be16(inode->i_nlink);
        lvb->lvb_iatime_packed =
                cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
        lvb->lvb_ictime_packed =
                cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
        lvb->lvb_imtime_packed =
                cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
        lvb->lvb_iattr     = cpu_to_be32(oi->ip_attr);
        lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
        lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);

out:
        mlog_meta_lvb(0, lockres);
}
static void ocfs2_unpack_timespec(struct timespec *spec,
                                  u64 packed_time)
{
        spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
        spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
}

static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
{
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
        struct ocfs2_meta_lvb *lvb;

        mlog_meta_lvb(0, lockres);

        lvb = (struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb);

        /* We're safe here without the lockres lock... */
        spin_lock(&oi->ip_lock);
        oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
        i_size_write(inode, be64_to_cpu(lvb->lvb_isize));

        oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
        oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
        ocfs2_set_inode_flags(inode);

        /* fast-symlinks are a special case */
        if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
                inode->i_blocks = 0;
        else
                inode->i_blocks = ocfs2_inode_sector_count(inode);

        inode->i_uid   = be32_to_cpu(lvb->lvb_iuid);
        inode->i_gid   = be32_to_cpu(lvb->lvb_igid);
        inode->i_mode  = be16_to_cpu(lvb->lvb_imode);
        inode->i_nlink = be16_to_cpu(lvb->lvb_inlink);
        ocfs2_unpack_timespec(&inode->i_atime,
                              be64_to_cpu(lvb->lvb_iatime_packed));
        ocfs2_unpack_timespec(&inode->i_mtime,
                              be64_to_cpu(lvb->lvb_imtime_packed));
        ocfs2_unpack_timespec(&inode->i_ctime,
                              be64_to_cpu(lvb->lvb_ictime_packed));
        spin_unlock(&oi->ip_lock);
}
static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
                                              struct ocfs2_lock_res *lockres)
{
        struct ocfs2_meta_lvb *lvb =
                (struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb);

        if (lvb->lvb_version == OCFS2_LVB_VERSION
            && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
                return 1;
        return 0;
}

/* Determine whether a lock resource needs to be refreshed, and
 * arbitrate who gets to refresh it.
 *
 * 0 means no refresh needed.
 *
 * > 0 means you need to refresh this and you MUST call
 * ocfs2_complete_lock_res_refresh afterwards. */
static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
{
        unsigned long flags;
        int status = 0;

refresh_check:
        spin_lock_irqsave(&lockres->l_lock, flags);
        if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
                spin_unlock_irqrestore(&lockres->l_lock, flags);
                goto bail;
        }

        if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
                spin_unlock_irqrestore(&lockres->l_lock, flags);

                ocfs2_wait_on_refreshing_lock(lockres);
                goto refresh_check;
        }

        /* Ok, I'll be the one to refresh this lock. */
        lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
        spin_unlock_irqrestore(&lockres->l_lock, flags);

        status = 1;
bail:
        return status;
}

/* If status is nonzero, I'll mark it as not being in refresh
 * anymore, but I won't clear the needs refresh flag. */
static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
                                                   int status)
{
        unsigned long flags;

        spin_lock_irqsave(&lockres->l_lock, flags);
        lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
        if (!status)
                lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
        spin_unlock_irqrestore(&lockres->l_lock, flags);

        wake_up(&lockres->l_event);
}
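
/*
 * Illustrative sketch of the refresh arbitration (not a real call
 * path; it mirrors ocfs2_inode_lock_update() and ocfs2_super_lock()
 * below): at most one caller wins the right to refresh, and the winner
 * must always signal completion, passing its status so that a failed
 * refresh leaves OCFS2_LOCK_NEEDS_REFRESH set for the next caller.
 */
static int __maybe_unused ocfs2_example_refresh(struct ocfs2_lock_res *lockres)
{
        int status = 0;

        if (ocfs2_should_refresh_lock_res(lockres)) {
                /* ... re-read the protected data from disk or LVB ... */
                ocfs2_complete_lock_res_refresh(lockres, status);
        }

        return status;
}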
/* may or may not return a bh if it went to disk. */
static int ocfs2_inode_lock_update(struct inode *inode,
                                   struct buffer_head **bh)
{
        int status = 0;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
        struct ocfs2_dinode *fe;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        if (ocfs2_mount_local(osb))
                goto bail;

        spin_lock(&oi->ip_lock);
        if (oi->ip_flags & OCFS2_INODE_DELETED) {
                mlog(0, "Orphaned inode %llu was deleted while we "
                     "were waiting on a lock. ip_flags = 0x%x\n",
                     (unsigned long long)oi->ip_blkno, oi->ip_flags);
                spin_unlock(&oi->ip_lock);
                status = -ENOENT;
                goto bail;
        }
        spin_unlock(&oi->ip_lock);

        if (!ocfs2_should_refresh_lock_res(lockres))
                goto bail;

        /* This will discard any caching information we might have had
         * for the inode metadata. */
        ocfs2_metadata_cache_purge(inode);

        ocfs2_extent_map_trunc(inode, 0);

        if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
                mlog(0, "Trusting LVB on inode %llu\n",
                     (unsigned long long)oi->ip_blkno);
                ocfs2_refresh_inode_from_lvb(inode);
        } else {
                /* Boo, we have to go to disk. */
                /* read bh, cast, ocfs2_refresh_inode */
                status = ocfs2_read_block(OCFS2_SB(inode->i_sb), oi->ip_blkno,
                                          bh, OCFS2_BH_CACHED, inode);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail_refresh;
                }
                fe = (struct ocfs2_dinode *) (*bh)->b_data;

                /* This is a good chance to make sure we're not
                 * locking an invalid object.
                 *
                 * We bug on a stale inode here because we checked
                 * above whether it was wiped from disk. The wiping
                 * node provides a guarantee that we receive that
                 * message and can mark the inode before dropping any
                 * locks associated with it. */
                if (!OCFS2_IS_VALID_DINODE(fe)) {
                        OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
                        status = -EIO;
                        goto bail_refresh;
                }
                mlog_bug_on_msg(inode->i_generation !=
                                le32_to_cpu(fe->i_generation),
                                "Invalid dinode %llu disk generation: %u "
                                "inode->i_generation: %u\n",
                                (unsigned long long)oi->ip_blkno,
                                le32_to_cpu(fe->i_generation),
                                inode->i_generation);
                mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
                                !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
                                "Stale dinode %llu dtime: %llu flags: 0x%x\n",
                                (unsigned long long)oi->ip_blkno,
                                (unsigned long long)le64_to_cpu(fe->i_dtime),
                                le32_to_cpu(fe->i_flags));

                ocfs2_refresh_inode(inode, fe);
        }

bail_refresh:
        ocfs2_complete_lock_res_refresh(lockres, status);
bail:
        return status;
}
static int ocfs2_assign_bh(struct inode *inode,
                           struct buffer_head **ret_bh,
                           struct buffer_head *passed_bh)
{
        int status;

        if (passed_bh) {
                /* Ok, the update went to disk for us, use the
                 * returned bh. */
                *ret_bh = passed_bh;
                get_bh(*ret_bh);

                return 0;
        }

        status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
                                  OCFS2_I(inode)->ip_blkno,
                                  ret_bh, OCFS2_BH_CACHED, inode);
        if (status < 0)
                mlog_errno(status);

        return status;
}
/*
 * returns < 0 error if the callback will never be called, otherwise
 * the result of the lock will be communicated via the callback.
 */
int ocfs2_inode_lock_full(struct inode *inode,
                          struct buffer_head **ret_bh,
                          int ex,
                          int arg_flags)
{
        int status = 0, level, acquired = 0;
        u32 dlm_flags = 0;
        struct ocfs2_lock_res *lockres = NULL;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct buffer_head *local_bh = NULL;

        mlog(0, "inode %llu, take %s META lock\n",
             (unsigned long long)OCFS2_I(inode)->ip_blkno,
             ex ? "EXMODE" : "PRMODE");

        /* We'll allow faking a readonly metadata lock for
         * rodevices. */
        if (ocfs2_is_hard_readonly(osb)) {
                if (ex)
                        status = -EROFS;
                goto bail;
        }

        if (ocfs2_mount_local(osb))
                goto local;

        if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
                ocfs2_wait_for_recovery(osb);

        lockres = &OCFS2_I(inode)->ip_inode_lockres;
        level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
        if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
                dlm_flags |= DLM_LKF_NOQUEUE;

        status = ocfs2_cluster_lock(osb, lockres, level, dlm_flags, arg_flags);
        if (status < 0) {
                if (status != -EAGAIN && status != -EIOCBRETRY)
                        mlog_errno(status);
                goto bail;
        }

        /* Notify the error cleanup path to drop the cluster lock. */
        acquired = 1;

        /* We wait twice because a node may have died while we were in
         * the lower dlm layers. The second time though, we've
         * committed to owning this lock so we don't allow signals to
         * abort the operation. */
        if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
                ocfs2_wait_for_recovery(osb);

local:
        /*
         * We only see this flag if we're being called from
         * ocfs2_read_locked_inode(). It means we're locking an inode
         * which hasn't been populated yet, so clear the refresh flag
         * and let the caller handle it.
         */
        if (inode->i_state & I_NEW) {
                status = 0;
                if (lockres)
                        ocfs2_complete_lock_res_refresh(lockres, 0);
                goto bail;
        }

        /* This is fun. The caller may want a bh back, or it may
         * not. ocfs2_inode_lock_update definitely wants one in, but
         * may or may not read one, depending on what's in the
         * LVB. The result of all of this is that we've *only* gone to
         * disk if we have to, so the complexity is worthwhile. */
        status = ocfs2_inode_lock_update(inode, &local_bh);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
                goto bail;
        }

        if (ret_bh) {
                status = ocfs2_assign_bh(inode, ret_bh, local_bh);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }
        }

bail:
        if (status < 0) {
                if (ret_bh && (*ret_bh)) {
                        brelse(*ret_bh);
                        *ret_bh = NULL;
                }
                if (acquired)
                        ocfs2_inode_unlock(inode, ex);
        }

        if (local_bh)
                brelse(local_bh);

        return status;
}
/*
 * This is working around a lock inversion between tasks acquiring DLM
 * locks while holding a page lock and the downconvert thread which
 * blocks dlm lock acquisition while acquiring page locks.
 *
 * ** These _with_page variants are only intended to be called from aop
 * methods that hold page locks and return a very specific *positive* error
 * code that aop methods pass up to the VFS -- test for errors with != 0. **
 *
 * The DLM is called such that it returns -EAGAIN if it would have
 * blocked waiting for the downconvert thread. In that case we unlock
 * our page so the downconvert thread can make progress. Once we've
 * done this we have to return AOP_TRUNCATED_PAGE so the aop method
 * that called us can bubble that back up into the VFS who will then
 * immediately retry the aop call.
 *
 * We do a blocking lock and immediate unlock before returning, though, so that
 * the lock has a great chance of being cached on this node by the time the VFS
 * calls back to retry the aop. This has a potential to livelock as nodes
 * ping locks back and forth, but that's a risk we're willing to take in
 * exchange for such a simple fix to the lock inversion.
 */
int ocfs2_inode_lock_with_page(struct inode *inode,
                               struct buffer_head **ret_bh,
                               int ex,
                               struct page *page)
{
        int ret;

        ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
        if (ret == -EAGAIN) {
                unlock_page(page);
                if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
                        ocfs2_inode_unlock(inode, ex);
                ret = AOP_TRUNCATED_PAGE;
        }

        return ret;
}
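
/*
 * Illustrative aop-side sketch (hypothetical helper; 'page' is the
 * locked page the caller owns, and the signature of
 * ocfs2_inode_lock_with_page() is as reconstructed above). Any nonzero
 * return -- including AOP_TRUNCATED_PAGE -- goes straight back to the
 * VFS.
 */
static int __maybe_unused ocfs2_example_aop_lock(struct inode *inode,
                                                 struct page *page)
{
        struct buffer_head *bh = NULL;
        int ret;

        ret = ocfs2_inode_lock_with_page(inode, &bh, 0, page);
        if (ret != 0)
                return ret;     /* may be AOP_TRUNCATED_PAGE */

        /* ... aop work under the PR meta lock ... */

        ocfs2_inode_unlock(inode, 0);
        brelse(bh);
        return 0;
}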
int ocfs2_inode_lock_atime(struct inode *inode,
                           struct vfsmount *vfsmnt,
                           int *level)
{
        int ret;

        ret = ocfs2_inode_lock(inode, NULL, 0);
        if (ret < 0)
                return ret;

        /*
         * If we should update atime, we will get EX lock,
         * otherwise we just get PR lock.
         */
        if (ocfs2_should_update_atime(inode, vfsmnt)) {
                struct buffer_head *bh = NULL;

                ocfs2_inode_unlock(inode, 0);
                ret = ocfs2_inode_lock(inode, &bh, 1);
                if (ret < 0)
                        return ret;
                *level = 1;
                if (ocfs2_should_update_atime(inode, vfsmnt))
                        ocfs2_update_inode_atime(inode, bh);
                if (bh)
                        brelse(bh);
        } else
                *level = 0;

        return ret;
}
void ocfs2_inode_unlock(struct inode *inode,
                        int ex)
{
        int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
        struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        mlog(0, "inode %llu drop %s META lock\n",
             (unsigned long long)OCFS2_I(inode)->ip_blkno,
             ex ? "EXMODE" : "PRMODE");

        if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
            !ocfs2_mount_local(osb))
                ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
}
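
/*
 * Illustrative lock/unlock pairing for the inode metadata lock (not a
 * real call path; assumes ocfs2_inode_lock() is the usual wrapper
 * around ocfs2_inode_lock_full() seen in ocfs2_inode_lock_with_page()
 * above).
 */
static int __maybe_unused ocfs2_example_meta_read(struct inode *inode)
{
        struct buffer_head *bh = NULL;
        int status;

        status = ocfs2_inode_lock(inode, &bh, 0);       /* PR */
        if (status < 0)
                return status;

        /* ... read fields protected by the meta lock ... */

        ocfs2_inode_unlock(inode, 0);
        brelse(bh);
        return 0;
}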
int ocfs2_super_lock(struct ocfs2_super *osb,
                     int ex)
{
        int status = 0;
        int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
        struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;

        if (ocfs2_is_hard_readonly(osb))
                return -EROFS;

        if (ocfs2_mount_local(osb))
                goto bail;

        status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        /* The super block lock path is really in the best position to
         * know when resources covered by the lock need to be
         * refreshed, so we do it here. Of course, making sense of
         * everything is up to the caller :) */
        status = ocfs2_should_refresh_lock_res(lockres);
        if (status) {
                status = ocfs2_refresh_slot_info(osb);

                ocfs2_complete_lock_res_refresh(lockres, status);

                if (status < 0)
                        mlog_errno(status);
        }
bail:
        return status;
}

void ocfs2_super_unlock(struct ocfs2_super *osb,
                        int ex)
{
        int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
        struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;

        if (!ocfs2_mount_local(osb))
                ocfs2_cluster_unlock(osb, lockres, level);
}
int ocfs2_rename_lock(struct ocfs2_super *osb)
{
        int status;
        struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;

        if (ocfs2_is_hard_readonly(osb))
                return -EROFS;

        if (ocfs2_mount_local(osb))
                return 0;

        status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
        if (status < 0)
                mlog_errno(status);

        return status;
}

void ocfs2_rename_unlock(struct ocfs2_super *osb)
{
        struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;

        if (!ocfs2_mount_local(osb))
                ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
}
int ocfs2_dentry_lock(struct dentry *dentry, int ex)
{
        int ret;
        int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
        struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
        struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

        if (ocfs2_is_hard_readonly(osb))
                return -EROFS;

        if (ocfs2_mount_local(osb))
                return 0;

        ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
        if (ret < 0)
                mlog_errno(ret);

        return ret;
}

void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
{
        int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
        struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
        struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

        if (!ocfs2_mount_local(osb))
                ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
}
/* Reference counting of the dlm debug structure. We want this because
 * open references on the debug inodes can live on after a mount, so
 * we can't rely on the ocfs2_super to always exist. */
static void ocfs2_dlm_debug_free(struct kref *kref)
{
        struct ocfs2_dlm_debug *dlm_debug;

        dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);

        kfree(dlm_debug);
}

void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
{
        if (dlm_debug)
                kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
}

static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
{
        kref_get(&debug->d_refcnt);
}

struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
{
        struct ocfs2_dlm_debug *dlm_debug;

        dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
        if (!dlm_debug) {
                mlog_errno(-ENOMEM);
                goto out;
        }

        kref_init(&dlm_debug->d_refcnt);
        INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
        dlm_debug->d_locking_state = NULL;
out:
        return dlm_debug;
}
/* Access to this is arbitrated for us via seq_file->sem. */
struct ocfs2_dlm_seq_priv {
        struct ocfs2_dlm_debug *p_dlm_debug;
        struct ocfs2_lock_res p_iter_res;
        struct ocfs2_lock_res p_tmp_res;
};

static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
                                                 struct ocfs2_dlm_seq_priv *priv)
{
        struct ocfs2_lock_res *iter, *ret = NULL;
        struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;

        assert_spin_locked(&ocfs2_dlm_tracking_lock);

        list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
                /* discover the head of the list */
                if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
                        mlog(0, "End of list found, %p\n", ret);
                        break;
                }

                /* We track our "dummy" iteration lockres' by a NULL
                 * l_ops field. */
                if (iter->l_ops != NULL) {
                        ret = iter;
                        break;
                }
        }

        return ret;
}
2274 static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
2276 struct ocfs2_dlm_seq_priv *priv = m->private;
2277 struct ocfs2_lock_res *iter;
2279 spin_lock(&ocfs2_dlm_tracking_lock);
2280 iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
2282 /* Since lockres' have the lifetime of their container
2283 * (which can be inodes, ocfs2_supers, etc) we want to
2284 * copy this out to a temporary lockres while still
2285 * under the spinlock. Obviously after this we can't
2286 * trust any pointers on the copy returned, but that's
2287 * ok as the information we want isn't typically held
2288 * in them. */
2289 priv->p_tmp_res = *iter;
2290 iter = &priv->p_tmp_res;
2292 spin_unlock(&ocfs2_dlm_tracking_lock);
2297 static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
2301 static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
2303 struct ocfs2_dlm_seq_priv *priv = m->private;
2304 struct ocfs2_lock_res *iter = v;
2305 struct ocfs2_lock_res *dummy = &priv->p_iter_res;
2307 spin_lock(&ocfs2_dlm_tracking_lock);
2308 iter = ocfs2_dlm_next_res(iter, priv);
2309 list_del_init(&dummy->l_debug_list);
2311 list_add(&dummy->l_debug_list, &iter->l_debug_list);
2312 priv->p_tmp_res = *iter;
2313 iter = &priv->p_tmp_res;
2315 spin_unlock(&ocfs2_dlm_tracking_lock);
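/*
 * Illustrative sketch, not part of the original file, of the cursor
 * technique the iterator above relies on: a dummy element lives on the
 * shared list as a placeholder and is moved forward under the spinlock
 * each time the walk advances, so iteration stays correct across
 * concurrent insertions and removals. All names here are hypothetical.
 */
#if 0
struct example_node {
	struct list_head list;
	int is_cursor;		/* plays the role of a NULL l_ops */
};

static struct example_node *example_advance(spinlock_t *lock,
					    struct list_head *head,
					    struct example_node *cursor)
{
	struct example_node *iter, *found = NULL;

	spin_lock(lock);
	/* Walk starting just past the cursor, exactly as
	 * ocfs2_dlm_next_res() walks from 'start'. */
	list_for_each_entry(iter, &cursor->list, list) {
		if (&iter->list == head)
			break;			/* wrapped: end of list */
		if (!iter->is_cursor) {
			found = iter;
			break;
		}
	}
	/* Re-park the cursor just past the element we will return. */
	list_del_init(&cursor->list);
	if (found)
		list_add(&cursor->list, &found->list);
	spin_unlock(lock);

	return found;
}
#endif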
2320 /* So that debugfs.ocfs2 can determine which format is being used */
2321 #define OCFS2_DLM_DEBUG_STR_VERSION 1
2322 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
2326 struct ocfs2_lock_res *lockres = v;
2331 seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
2333 if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
2334 seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
2336 (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
2338 seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
2340 seq_printf(m, "%d\t"
2351 lockres->l_unlock_action,
2352 lockres->l_ro_holders,
2353 lockres->l_ex_holders,
2354 lockres->l_requested,
2355 lockres->l_blocking);
2357 /* Dump the raw LVB */
2358 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2359 for (i = 0; i < DLM_LVB_LEN; i++)
2360 seq_printf(m, "0x%x\t", lvb[i]);
2363 seq_printf(m, "\n");
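/*
 * For reference (editorial note, not in the original file): each record
 * emitted above is one line of tab-separated fields - the
 * OCFS2_DLM_DEBUG_STR_VERSION tag, the lock name (with the inode number
 * appended for dentry locks), the numeric lock state fields, and
 * finally all DLM_LVB_LEN raw LVB bytes, each printed as "0x%x".
 * debugfs.ocfs2 keys its parser off the leading version tag.
 */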
2367 static const struct seq_operations ocfs2_dlm_seq_ops = {
2368 .start = ocfs2_dlm_seq_start,
2369 .stop = ocfs2_dlm_seq_stop,
2370 .next = ocfs2_dlm_seq_next,
2371 .show = ocfs2_dlm_seq_show,
2374 static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
2376 struct seq_file *seq = (struct seq_file *) file->private_data;
2377 struct ocfs2_dlm_seq_priv *priv = seq->private;
2378 struct ocfs2_lock_res *res = &priv->p_iter_res;
2380 ocfs2_remove_lockres_tracking(res);
2381 ocfs2_put_dlm_debug(priv->p_dlm_debug);
2382 return seq_release_private(inode, file);
2385 static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
2388 struct ocfs2_dlm_seq_priv *priv;
2389 struct seq_file *seq;
2390 struct ocfs2_super *osb;
2392 priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL);
2398 osb = inode->i_private;
2399 ocfs2_get_dlm_debug(osb->osb_dlm_debug);
2400 priv->p_dlm_debug = osb->osb_dlm_debug;
2401 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
2403 ret = seq_open(file, &ocfs2_dlm_seq_ops);
2410 seq = (struct seq_file *) file->private_data;
2411 seq->private = priv;
2413 ocfs2_add_lockres_tracking(&priv->p_iter_res,
2420 static const struct file_operations ocfs2_dlm_debug_fops = {
2421 .open = ocfs2_dlm_debug_open,
2422 .release = ocfs2_dlm_debug_release,
2424 .llseek = seq_lseek,
2427 static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
2430 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2432 dlm_debug->d_locking_state = debugfs_create_file("locking_state",
2434 osb->osb_debug_root,
2436 &ocfs2_dlm_debug_fops);
2437 if (!dlm_debug->d_locking_state) {
2440 "Unable to create locking state debugfs file.\n");
2444 ocfs2_get_dlm_debug(dlm_debug);
2449 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
2451 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2454 debugfs_remove(dlm_debug->d_locking_state);
2455 ocfs2_put_dlm_debug(dlm_debug);
2459 int ocfs2_dlm_init(struct ocfs2_super *osb)
2462 struct ocfs2_cluster_connection *conn = NULL;
2466 if (ocfs2_mount_local(osb))
2469 status = ocfs2_dlm_init_debug(osb);
2475 /* launch downconvert thread */
2476 osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc");
2477 if (IS_ERR(osb->dc_task)) {
2478 status = PTR_ERR(osb->dc_task);
2479 osb->dc_task = NULL;
2484 /* for now, uuid == domain */
2485 status = ocfs2_cluster_connect(osb->uuid_str,
2486 strlen(osb->uuid_str),
2487 ocfs2_do_node_down, osb,
2495 ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
2496 ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
2503 ocfs2_dlm_shutdown_debug(osb);
2505 kthread_stop(osb->dc_task);
2512 void ocfs2_dlm_shutdown(struct ocfs2_super *osb)
2516 ocfs2_drop_osb_locks(osb);
2519 * Now that we have dropped all locks and ocfs2_dismount_volume()
2520 * has disabled recovery, the DLM won't be talking to us. It's
2521 * safe to tear things down before disconnecting the cluster.
2525 kthread_stop(osb->dc_task);
2526 osb->dc_task = NULL;
2529 ocfs2_lock_res_free(&osb->osb_super_lockres);
2530 ocfs2_lock_res_free(&osb->osb_rename_lockres);
2532 ocfs2_cluster_disconnect(osb->cconn);
2535 ocfs2_dlm_shutdown_debug(osb);
2540 static void ocfs2_unlock_ast(void *opaque, int error)
2542 struct ocfs2_lock_res *lockres = opaque;
2543 unsigned long flags;
2547 mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
2548 lockres->l_unlock_action);
2550 spin_lock_irqsave(&lockres->l_lock, flags);
2551 /* We tried to cancel a convert request, but it was already
2552 * granted. All we want to do here is clear our unlock
2553 * state. The wake_up call done at the bottom is redundant
2554 * (ocfs2_prepare_cancel_convert doesn't sleep on this) but doesn't
2555 * hurt anything anyway */
2556 if (error == -DLM_ECANCEL &&
2557 lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
2558 mlog(0, "Got cancelgrant for %s\n", lockres->l_name);
2560 /* We don't clear the busy flag in this case as it
2561 * should have been cleared by the ast which the dlm
2562 * fired for the granted convert. */
2563 goto complete_unlock;
2566 /* DLM_EUNLOCK is the success code for unlock */
2567 if (error != -DLM_EUNLOCK) {
2568 mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
2569 "unlock_action %d\n", error, lockres->l_name,
2570 lockres->l_unlock_action);
2571 spin_unlock_irqrestore(&lockres->l_lock, flags);
2575 switch (lockres->l_unlock_action) {
2576 case OCFS2_UNLOCK_CANCEL_CONVERT:
2577 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
2578 lockres->l_action = OCFS2_AST_INVALID;
2579 break;
2580 case OCFS2_UNLOCK_DROP_LOCK:
2581 lockres->l_level = DLM_LOCK_IV;
2582 break;
2587 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
2589 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
2590 spin_unlock_irqrestore(&lockres->l_lock, flags);
2592 wake_up(&lockres->l_event);
2597 static int ocfs2_drop_lock(struct ocfs2_super *osb,
2598 struct ocfs2_lock_res *lockres)
2601 unsigned long flags;
2604 /* We didn't get anywhere near actually using this lockres. */
2605 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
2608 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
2609 lkm_flags |= DLM_LKF_VALBLK;
2611 spin_lock_irqsave(&lockres->l_lock, flags);
2613 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
2614 "lockres %s, flags 0x%lx\n",
2615 lockres->l_name, lockres->l_flags);
2617 while (lockres->l_flags & OCFS2_LOCK_BUSY) {
2618 mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
2619 "%u, unlock_action = %u\n",
2620 lockres->l_name, lockres->l_flags, lockres->l_action,
2621 lockres->l_unlock_action);
2623 spin_unlock_irqrestore(&lockres->l_lock, flags);
2625 /* XXX: Today we just wait on any busy
2626 * locks... Perhaps we need to cancel converts in the
2627 * future? */
2628 ocfs2_wait_on_busy_lock(lockres);
2630 spin_lock_irqsave(&lockres->l_lock, flags);
2633 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
2634 if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
2635 lockres->l_level == DLM_LOCK_EX &&
2636 !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
2637 lockres->l_ops->set_lvb(lockres);
2640 if (lockres->l_flags & OCFS2_LOCK_BUSY)
2641 mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
2643 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
2644 mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
2646 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
2647 spin_unlock_irqrestore(&lockres->l_lock, flags);
2651 lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
2653 /* make sure we never get here while waiting for an ast to
2654 * complete. */
2655 BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
2657 /* is this necessary? */
2658 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
2659 lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
2660 spin_unlock_irqrestore(&lockres->l_lock, flags);
2662 mlog(0, "lock %s\n", lockres->l_name);
2664 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags,
2667 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
2668 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
2669 /* XXX Need to abstract this */
2670 dlm_print_one_lock(lockres->l_lksb.lksb_o2dlm.lockid);
2673 mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
2676 ocfs2_wait_on_busy_lock(lockres);
2682 /* Mark the lockres as being dropped. It will no longer be
2683 * queued if blocking, but we still may have to wait on it
2684 * being dequeued from the downconvert thread before we can consider
2685 * it safe to drop.
2686 *
2687 * You can *not* attempt to call cluster_lock on this lockres anymore. */
2688 void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
2691 struct ocfs2_mask_waiter mw;
2692 unsigned long flags;
2694 ocfs2_init_mask_waiter(&mw);
2696 spin_lock_irqsave(&lockres->l_lock, flags);
2697 lockres->l_flags |= OCFS2_LOCK_FREEING;
2698 while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
2699 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
2700 spin_unlock_irqrestore(&lockres->l_lock, flags);
2702 mlog(0, "Waiting on lockres %s\n", lockres->l_name);
2704 status = ocfs2_wait_for_mask(&mw);
2708 spin_lock_irqsave(&lockres->l_lock, flags);
2710 spin_unlock_irqrestore(&lockres->l_lock, flags);
2713 void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
2714 struct ocfs2_lock_res *lockres)
2718 ocfs2_mark_lockres_freeing(lockres);
2719 ret = ocfs2_drop_lock(osb, lockres);
2724 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
2726 ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
2727 ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
2730 int ocfs2_drop_inode_locks(struct inode *inode)
2736 /* No need to call ocfs2_mark_lockres_freeing here -
2737 * ocfs2_clear_inode has done it for us. */
2739 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2740 &OCFS2_I(inode)->ip_open_lockres);
2746 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2747 &OCFS2_I(inode)->ip_inode_lockres);
2750 if (err < 0 && !status)
2753 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2754 &OCFS2_I(inode)->ip_rw_lockres);
2757 if (err < 0 && !status)
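/*
 * Editorial note, not in the original file: each drop above records the
 * first failure in 'status' but keeps going, so every lockres is
 * dropped even when an earlier drop fails, and the caller still sees
 * the first error.
 */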
2764 static void ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
2767 assert_spin_locked(&lockres->l_lock);
2769 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
2771 if (lockres->l_level <= new_level) {
2772 mlog(ML_ERROR, "lockres->l_level (%d) <= new_level (%d)\n",
2773 lockres->l_level, new_level);
2777 mlog(0, "lock %s, new_level = %d, l_blocking = %d\n",
2778 lockres->l_name, new_level, lockres->l_blocking);
2780 lockres->l_action = OCFS2_AST_DOWNCONVERT;
2781 lockres->l_requested = new_level;
2782 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
2785 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
2786 struct ocfs2_lock_res *lockres,
2791 u32 dlm_flags = DLM_LKF_CONVERT;
2796 dlm_flags |= DLM_LKF_VALBLK;
2798 ret = ocfs2_dlm_lock(osb->cconn,
2803 OCFS2_LOCK_ID_MAX_LEN - 1,
2806 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
2807 ocfs2_recover_from_dlm_error(lockres, 1);
2817 /* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
2818 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
2819 struct ocfs2_lock_res *lockres)
2821 assert_spin_locked(&lockres->l_lock);
2824 mlog(0, "lock %s\n", lockres->l_name);
2826 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
2827 /* If we're already trying to cancel a lock conversion
2828 * then just drop the spinlock and allow the caller to
2829 * requeue this lock. */
2831 mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
2835 /* were we in a convert when the bast fired? */
2836 BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
2837 lockres->l_action != OCFS2_AST_DOWNCONVERT);
2838 /* set things up for the unlockast to know to just
2839 * clear out the ast_action and unset busy, etc. */
2840 lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
2842 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
2843 "lock %s, invalid flags: 0x%lx\n",
2844 lockres->l_name, lockres->l_flags);
2849 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
2850 struct ocfs2_lock_res *lockres)
2855 mlog(0, "lock %s\n", lockres->l_name);
2857 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
2858 DLM_LKF_CANCEL, lockres);
2860 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
2861 ocfs2_recover_from_dlm_error(lockres, 0);
2864 mlog(0, "lock %s return from ocfs2_dlm_unlock\n", lockres->l_name);
2870 static int ocfs2_unblock_lock(struct ocfs2_super *osb,
2871 struct ocfs2_lock_res *lockres,
2872 struct ocfs2_unblock_ctl *ctl)
2874 unsigned long flags;
2882 spin_lock_irqsave(&lockres->l_lock, flags);
2884 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
2887 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
2889 ret = ocfs2_prepare_cancel_convert(osb, lockres);
2890 spin_unlock_irqrestore(&lockres->l_lock, flags);
2892 ret = ocfs2_cancel_convert(osb, lockres);
2899 /* if we're blocking an exclusive and we have *any* holders,
2900 * then requeue. */
2901 if ((lockres->l_blocking == DLM_LOCK_EX)
2902 && (lockres->l_ex_holders || lockres->l_ro_holders))
2905 /* If it's a PR we're blocking, then only
2906 * requeue if we've got any EX holders */
2907 if (lockres->l_blocking == DLM_LOCK_PR &&
2908 lockres->l_ex_holders)
2912 * Can we get a lock in this state if the holder counts are
2913 * zero? The meta data unblock code used to check this.
2915 if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
2916 && (lockres->l_flags & OCFS2_LOCK_REFRESHING))
2919 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
2921 if (lockres->l_ops->check_downconvert
2922 && !lockres->l_ops->check_downconvert(lockres, new_level))
2925 /* If we get here, then we know that there are no more
2926 * incompatible holders (and anyone asking for an incompatible
2927 * lock is blocked). We can now downconvert the lock */
2928 if (!lockres->l_ops->downconvert_worker)
2931 /* Some lockres types want to do a bit of work before
2932 * downconverting a lock. Allow that here. The worker function
2933 * may sleep, so we save off a copy of what we're blocking as
2934 * it may change while we're not holding the spin lock. */
2935 blocking = lockres->l_blocking;
2936 spin_unlock_irqrestore(&lockres->l_lock, flags);
2938 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
2940 if (ctl->unblock_action == UNBLOCK_STOP_POST)
2943 spin_lock_irqsave(&lockres->l_lock, flags);
2944 if (blocking != lockres->l_blocking) {
2945 /* If this changed underneath us, then we can't drop
2953 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
2954 if (lockres->l_level == DLM_LOCK_EX)
2958 * We only set the lvb if the lock has been fully
2959 * refreshed - otherwise we risk setting stale
2960 * data. If we aren't setting the lvb, there's no need
2961 * to clear it out here either, as its value is still valid.
2963 if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
2964 lockres->l_ops->set_lvb(lockres);
2967 ocfs2_prepare_downconvert(lockres, new_level);
2968 spin_unlock_irqrestore(&lockres->l_lock, flags);
2969 ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb);
2975 spin_unlock_irqrestore(&lockres->l_lock, flags);
2982 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
2985 struct inode *inode;
2986 struct address_space *mapping;
2988 inode = ocfs2_lock_res_inode(lockres);
2989 mapping = inode->i_mapping;
2991 if (!S_ISREG(inode->i_mode))
2995 * We need this before the filemap_fdatawrite() so that it can
2996 * transfer the dirty bit from the PTE to the
2997 * page. Unfortunately this means that even for EX->PR
2998 * downconverts, we'll lose our mappings and have to build
2999 * them up again. */
3001 unmap_mapping_range(mapping, 0, 0, 0);
3003 if (filemap_fdatawrite(mapping)) {
3004 mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
3005 (unsigned long long)OCFS2_I(inode)->ip_blkno);
3007 sync_mapping_buffers(mapping);
3008 if (blocking == DLM_LOCK_EX) {
3009 truncate_inode_pages(mapping, 0);
3011 /* We only need to wait on the I/O if we're not also
3012 * truncating pages because truncate_inode_pages waits
3013 * for us above. We don't truncate pages if we're
3014 * blocking anything < DLM_LOCK_EX because we want to keep
3015 * them around in that case. */
3016 filemap_fdatawait(mapping);
3020 return UNBLOCK_CONTINUE;
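/*
 * Summary (editorial note, not in the original file): every data
 * downconvert unmaps user mappings and starts writeback, since a node
 * holding a lower level may no longer cache dirty data. Only when
 * another node wants EX (blocking == DLM_LOCK_EX) must we drop to NL
 * and truncate the page cache entirely; for an EX->PR downconvert we
 * keep our clean pages and just wait on the I/O we started.
 */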
3023 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
3026 struct inode *inode = ocfs2_lock_res_inode(lockres);
3027 int checkpointed = ocfs2_inode_fully_checkpointed(inode);
3029 BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
3030 BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);
3035 ocfs2_start_checkpoint(OCFS2_SB(inode->i_sb));
3039 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
3041 struct inode *inode = ocfs2_lock_res_inode(lockres);
3043 __ocfs2_stuff_meta_lvb(inode);
3047 * Does the final reference drop on our dentry lock. Right now this
3048 * happens in the downconvert thread, but we could choose to simplify the
3049 * dlmglue API and push these off to the ocfs2_wq in the future.
3051 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
3052 struct ocfs2_lock_res *lockres)
3054 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3055 ocfs2_dentry_lock_put(osb, dl);
3059 * d_delete() matching dentries before the lock downconvert.
3061 * At this point, any process waiting to destroy the
3062 * dentry_lock due to last ref count is stopped by the
3063 * OCFS2_LOCK_QUEUED flag.
3065 * We have two potential problems
3067 * 1) If we do the last reference drop on our dentry_lock (via dput)
3068 * we'll wind up in ocfs2_release_dentry_lock(), waiting on
3069 * the downconvert to finish. Instead we take an elevated
3070 * reference and push the drop until after we've completed our
3071 * unblock processing.
3073 * 2) There might be another process with a final reference,
3074 * waiting on us to finish processing. If this is the case, we
3075 * detect it and exit out - there are no more dentries anyway.
3077 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
3080 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3081 struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
3082 struct dentry *dentry;
3083 unsigned long flags;
3087 * This node is blocking another node from getting a read
3088 * lock. This happens when we've renamed within a
3089 * directory. We've forced the other nodes to d_delete(), but
3090 * we never actually dropped our lock because it's still
3091 * valid. The downconvert code will retain a PR for this node,
3092 * so there's no further work to do.
3094 if (blocking == DLM_LOCK_PR)
3095 return UNBLOCK_CONTINUE;
3098 * Mark this inode as potentially orphaned. The code in
3099 * ocfs2_delete_inode() will figure out whether it actually
3100 * needs to be freed or not.
3102 spin_lock(&oi->ip_lock);
3103 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
3104 spin_unlock(&oi->ip_lock);
3107 * Yuck. We need to make sure however that the check of
3108 * OCFS2_LOCK_FREEING and the extra reference are atomic with
3109 * respect to a reference decrement or the setting of that
3110 * flag. */
3112 spin_lock_irqsave(&lockres->l_lock, flags);
3113 spin_lock(&dentry_attach_lock);
3114 if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
3119 spin_unlock(&dentry_attach_lock);
3120 spin_unlock_irqrestore(&lockres->l_lock, flags);
3122 mlog(0, "extra_ref = %d\n", extra_ref);
3125 * We have a process waiting on us in ocfs2_dentry_iput(),
3126 * which means we can't have any more outstanding
3127 * aliases. There's no need to do any more work.
3130 return UNBLOCK_CONTINUE;
3132 spin_lock(&dentry_attach_lock);
3134 dentry = ocfs2_find_local_alias(dl->dl_inode,
3135 dl->dl_parent_blkno, 1);
3138 spin_unlock(&dentry_attach_lock);
3140 mlog(0, "d_delete(%.*s);\n", dentry->d_name.len,
3141 dentry->d_name.name);
3144 * The following dcache calls may do an
3145 * iput(). Normally we don't want that from the
3146 * downconverting thread, but in this case it's ok
3147 * because the requesting node already has an
3148 * exclusive lock on the inode, so it can't be queued
3149 * for a downconvert.
3154 spin_lock(&dentry_attach_lock);
3156 spin_unlock(&dentry_attach_lock);
3159 * If we are the last holder of this dentry lock, there is no
3160 * reason to downconvert so skip straight to the unlock.
3162 if (dl->dl_count == 1)
3163 return UNBLOCK_STOP_POST;
3165 return UNBLOCK_CONTINUE_POST;
3169 * This is the filesystem locking protocol. It provides the lock handling
3170 * hooks for the underlying DLM. It has a maximum version number.
3171 * The version number allows interoperability with systems running at
3172 * the same major number and an equal or smaller minor number.
3174 * Whenever the filesystem does new things with locks (adds or removes a
3175 * lock, orders them differently, does different things underneath a lock),
3176 * the version must be changed. The protocol is negotiated when joining
3177 * the dlm domain. A node may join the domain if its major version is
3178 * identical to all other nodes and its minor version is greater than
3179 * or equal to all other nodes. When its minor version is greater than
3180 * the other nodes, it will run at the minor version specified by the
3181 * other nodes.
3182 *
3183 * If a locking change is made that will not be compatible with older
3184 * versions, the major number must be increased and the minor version set
3185 * to zero. If a change merely adds a behavior that can be disabled when
3186 * speaking to older versions, the minor version must be increased. If a
3187 * change adds a fully backwards compatible change (eg, LVB changes that
3188 * are just ignored by older versions), the version does not need to be
3189 * changed.
3190 */
3191 static struct ocfs2_locking_protocol lproto = {
3193 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
3194 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
3196 .lp_lock_ast = ocfs2_locking_ast,
3197 .lp_blocking_ast = ocfs2_blocking_ast,
3198 .lp_unlock_ast = ocfs2_unlock_ast,
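/*
 * Illustrative sketch, not part of the original file, of the join rule
 * described above: a node may join only when its major version matches
 * the cluster's exactly and its minor version is at least the
 * cluster's; it then runs at the cluster's (smaller or equal) minor.
 * The predicate name is hypothetical.
 */
#if 0
static int example_may_join(u8 node_major, u8 node_minor,
			    u8 cluster_major, u8 cluster_minor)
{
	if (node_major != cluster_major)
		return 0;		/* incompatible locking protocol */
	if (node_minor < cluster_minor)
		return 0;		/* can't speak the running minor */
	return 1;			/* joins, running cluster_minor */
}
#endif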
3201 /* This interface isn't the final one, hence the less-than-perfect names */
3202 void dlmglue_init_stack(void)
3204 o2cb_get_stack(&lproto);
3207 void dlmglue_exit_stack(void)
3212 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3213 struct ocfs2_lock_res *lockres)
3216 struct ocfs2_unblock_ctl ctl = {0, 0,};
3217 unsigned long flags;
3219 /* Our reference to the lockres in this function can be
3220 * considered valid until we remove the OCFS2_LOCK_QUEUED
3221 * flag. */
3226 BUG_ON(!lockres->l_ops);
3228 mlog(0, "lockres %s blocked.\n", lockres->l_name);
3230 /* Detect whether a lock has been marked as going away while
3231 * the downconvert thread was processing other things. A lock can
3232 * still be marked with OCFS2_LOCK_FREEING after this check,
3233 * but short circuiting here will still save us some
3234 * performance. */
3235 spin_lock_irqsave(&lockres->l_lock, flags);
3236 if (lockres->l_flags & OCFS2_LOCK_FREEING)
3238 spin_unlock_irqrestore(&lockres->l_lock, flags);
3240 status = ocfs2_unblock_lock(osb, lockres, &ctl);
3244 spin_lock_irqsave(&lockres->l_lock, flags);
3246 if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
3247 lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
3249 ocfs2_schedule_blocked_lock(osb, lockres);
3251 mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name,
3252 ctl.requeue ? "yes" : "no");
3253 spin_unlock_irqrestore(&lockres->l_lock, flags);
3255 if (ctl.unblock_action != UNBLOCK_CONTINUE
3256 && lockres->l_ops->post_unlock)
3257 lockres->l_ops->post_unlock(osb, lockres);
3262 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
3263 struct ocfs2_lock_res *lockres)
3267 assert_spin_locked(&lockres->l_lock);
3269 if (lockres->l_flags & OCFS2_LOCK_FREEING) {
3270 /* Do not schedule a lock for downconvert when it's on
3271 * the way to destruction - any nodes wanting access
3272 * to the resource will get it soon. */
3273 mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n",
3274 lockres->l_name, lockres->l_flags);
3278 lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
3280 spin_lock(&osb->dc_task_lock);
3281 if (list_empty(&lockres->l_blocked_list)) {
3282 list_add_tail(&lockres->l_blocked_list,
3283 &osb->blocked_lock_list);
3284 osb->blocked_lock_count++;
3286 spin_unlock(&osb->dc_task_lock);
3291 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
3293 unsigned long processed;
3294 struct ocfs2_lock_res *lockres;
3298 spin_lock(&osb->dc_task_lock);
3299 /* grab this early so we know to try again if a state change and
3300 * wake happen part-way through our work */
3301 osb->dc_work_sequence = osb->dc_wake_sequence;
3303 processed = osb->blocked_lock_count;
3305 BUG_ON(list_empty(&osb->blocked_lock_list));
3307 lockres = list_entry(osb->blocked_lock_list.next,
3308 struct ocfs2_lock_res, l_blocked_list);
3309 list_del_init(&lockres->l_blocked_list);
3310 osb->blocked_lock_count--;
3311 spin_unlock(&osb->dc_task_lock);
3316 ocfs2_process_blocked_lock(osb, lockres);
3318 spin_lock(&osb->dc_task_lock);
3320 spin_unlock(&osb->dc_task_lock);
3325 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
3329 spin_lock(&osb->dc_task_lock);
3330 if (list_empty(&osb->blocked_lock_list))
3333 spin_unlock(&osb->dc_task_lock);
3337 static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
3339 int should_wake = 0;
3341 spin_lock(&osb->dc_task_lock);
3342 if (osb->dc_work_sequence != osb->dc_wake_sequence)
3344 spin_unlock(&osb->dc_task_lock);
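/*
 * Illustrative sketch, not part of the original file, of the
 * sequence-number handshake used by the wake/work pair above: wakers
 * bump one counter, the worker snapshots it before doing a pass, and
 * "should wake" is simply "the counters differ". A wakeup racing with
 * an in-progress pass leaves the counters unequal, so it is never
 * lost. All names here are hypothetical.
 */
#if 0
struct example_seq_wake {
	spinlock_t lock;
	wait_queue_head_t event;
	unsigned long wake_seq;		/* bumped by wakers */
	unsigned long work_seq;		/* snapshot taken by the worker */
};

static void example_wake(struct example_seq_wake *w)
{
	spin_lock(&w->lock);
	w->wake_seq++;
	spin_unlock(&w->lock);
	wake_up(&w->event);
}

static void example_worker_pass(struct example_seq_wake *w)
{
	spin_lock(&w->lock);
	/* Snapshot before working: a waker arriving mid-pass keeps the
	 * sequences unequal, forcing another pass. */
	w->work_seq = w->wake_seq;
	spin_unlock(&w->lock);

	/* ... drain the pending work list ... */
}
#endif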
3349 static int ocfs2_downconvert_thread(void *arg)
3352 struct ocfs2_super *osb = arg;
3354 /* only quit once we've been asked to stop and there is no more
3355 * work to do */
3356 while (!(kthread_should_stop() &&
3357 ocfs2_downconvert_thread_lists_empty(osb))) {
3359 wait_event_interruptible(osb->dc_event,
3360 ocfs2_downconvert_thread_should_wake(osb) ||
3361 kthread_should_stop());
3363 mlog(0, "downconvert_thread: awoken\n");
3365 ocfs2_downconvert_thread_do_work(osb);
3368 osb->dc_task = NULL;
3372 void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
3374 spin_lock(&osb->dc_task_lock);
3375 /* make sure the downconvert thread gets a swipe at whatever changes
3376 * the caller may have made to the blocked lock state */
3377 osb->dc_wake_sequence++;
3378 spin_unlock(&osb->dc_task_lock);
3379 wake_up(&osb->dc_event);