Merge branch 'master' into gfs2
author     Steven Whitehouse <swhiteho@redhat.com>
           Mon, 2 Oct 2006 12:45:08 +0000 (08:45 -0400)
committer  Steven Whitehouse <swhiteho@redhat.com>
           Mon, 2 Oct 2006 12:45:08 +0000 (08:45 -0400)
126 files changed:
CREDITS
Documentation/filesystems/gfs2.txt [new file with mode: 0644]
MAINTAINERS
fs/Kconfig
fs/Makefile
fs/configfs/item.c
fs/dlm/Kconfig [new file with mode: 0644]
fs/dlm/Makefile [new file with mode: 0644]
fs/dlm/ast.c [new file with mode: 0644]
fs/dlm/ast.h [new file with mode: 0644]
fs/dlm/config.c [new file with mode: 0644]
fs/dlm/config.h [new file with mode: 0644]
fs/dlm/debug_fs.c [new file with mode: 0644]
fs/dlm/dir.c [new file with mode: 0644]
fs/dlm/dir.h [new file with mode: 0644]
fs/dlm/dlm_internal.h [new file with mode: 0644]
fs/dlm/lock.c [new file with mode: 0644]
fs/dlm/lock.h [new file with mode: 0644]
fs/dlm/lockspace.c [new file with mode: 0644]
fs/dlm/lockspace.h [new file with mode: 0644]
fs/dlm/lowcomms.c [new file with mode: 0644]
fs/dlm/lowcomms.h [new file with mode: 0644]
fs/dlm/lvb_table.h [new file with mode: 0644]
fs/dlm/main.c [new file with mode: 0644]
fs/dlm/member.c [new file with mode: 0644]
fs/dlm/member.h [new file with mode: 0644]
fs/dlm/memory.c [new file with mode: 0644]
fs/dlm/memory.h [new file with mode: 0644]
fs/dlm/midcomms.c [new file with mode: 0644]
fs/dlm/midcomms.h [new file with mode: 0644]
fs/dlm/rcom.c [new file with mode: 0644]
fs/dlm/rcom.h [new file with mode: 0644]
fs/dlm/recover.c [new file with mode: 0644]
fs/dlm/recover.h [new file with mode: 0644]
fs/dlm/recoverd.c [new file with mode: 0644]
fs/dlm/recoverd.h [new file with mode: 0644]
fs/dlm/requestqueue.c [new file with mode: 0644]
fs/dlm/requestqueue.h [new file with mode: 0644]
fs/dlm/user.c [new file with mode: 0644]
fs/dlm/user.h [new file with mode: 0644]
fs/dlm/util.c [new file with mode: 0644]
fs/dlm/util.h [new file with mode: 0644]
fs/gfs2/Kconfig [new file with mode: 0644]
fs/gfs2/Makefile [new file with mode: 0644]
fs/gfs2/acl.c [new file with mode: 0644]
fs/gfs2/acl.h [new file with mode: 0644]
fs/gfs2/bmap.c [new file with mode: 0644]
fs/gfs2/bmap.h [new file with mode: 0644]
fs/gfs2/daemon.c [new file with mode: 0644]
fs/gfs2/daemon.h [new file with mode: 0644]
fs/gfs2/dir.c [new file with mode: 0644]
fs/gfs2/dir.h [new file with mode: 0644]
fs/gfs2/eaops.c [new file with mode: 0644]
fs/gfs2/eaops.h [new file with mode: 0644]
fs/gfs2/eattr.c [new file with mode: 0644]
fs/gfs2/eattr.h [new file with mode: 0644]
fs/gfs2/gfs2.h [new file with mode: 0644]
fs/gfs2/glock.c [new file with mode: 0644]
fs/gfs2/glock.h [new file with mode: 0644]
fs/gfs2/glops.c [new file with mode: 0644]
fs/gfs2/glops.h [new file with mode: 0644]
fs/gfs2/incore.h [new file with mode: 0644]
fs/gfs2/inode.c [new file with mode: 0644]
fs/gfs2/inode.h [new file with mode: 0644]
fs/gfs2/lm.c [new file with mode: 0644]
fs/gfs2/lm.h [new file with mode: 0644]
fs/gfs2/locking.c [new file with mode: 0644]
fs/gfs2/locking/dlm/Makefile [new file with mode: 0644]
fs/gfs2/locking/dlm/lock.c [new file with mode: 0644]
fs/gfs2/locking/dlm/lock_dlm.h [new file with mode: 0644]
fs/gfs2/locking/dlm/main.c [new file with mode: 0644]
fs/gfs2/locking/dlm/mount.c [new file with mode: 0644]
fs/gfs2/locking/dlm/plock.c [new file with mode: 0644]
fs/gfs2/locking/dlm/sysfs.c [new file with mode: 0644]
fs/gfs2/locking/dlm/thread.c [new file with mode: 0644]
fs/gfs2/locking/nolock/Makefile [new file with mode: 0644]
fs/gfs2/locking/nolock/main.c [new file with mode: 0644]
fs/gfs2/log.c [new file with mode: 0644]
fs/gfs2/log.h [new file with mode: 0644]
fs/gfs2/lops.c [new file with mode: 0644]
fs/gfs2/lops.h [new file with mode: 0644]
fs/gfs2/main.c [new file with mode: 0644]
fs/gfs2/meta_io.c [new file with mode: 0644]
fs/gfs2/meta_io.h [new file with mode: 0644]
fs/gfs2/mount.c [new file with mode: 0644]
fs/gfs2/mount.h [new file with mode: 0644]
fs/gfs2/ondisk.c [new file with mode: 0644]
fs/gfs2/ops_address.c [new file with mode: 0644]
fs/gfs2/ops_address.h [new file with mode: 0644]
fs/gfs2/ops_dentry.c [new file with mode: 0644]
fs/gfs2/ops_dentry.h [new file with mode: 0644]
fs/gfs2/ops_export.c [new file with mode: 0644]
fs/gfs2/ops_export.h [new file with mode: 0644]
fs/gfs2/ops_file.c [new file with mode: 0644]
fs/gfs2/ops_file.h [new file with mode: 0644]
fs/gfs2/ops_fstype.c [new file with mode: 0644]
fs/gfs2/ops_fstype.h [new file with mode: 0644]
fs/gfs2/ops_inode.c [new file with mode: 0644]
fs/gfs2/ops_inode.h [new file with mode: 0644]
fs/gfs2/ops_super.c [new file with mode: 0644]
fs/gfs2/ops_super.h [new file with mode: 0644]
fs/gfs2/ops_vm.c [new file with mode: 0644]
fs/gfs2/ops_vm.h [new file with mode: 0644]
fs/gfs2/quota.c [new file with mode: 0644]
fs/gfs2/quota.h [new file with mode: 0644]
fs/gfs2/recovery.c [new file with mode: 0644]
fs/gfs2/recovery.h [new file with mode: 0644]
fs/gfs2/rgrp.c [new file with mode: 0644]
fs/gfs2/rgrp.h [new file with mode: 0644]
fs/gfs2/super.c [new file with mode: 0644]
fs/gfs2/super.h [new file with mode: 0644]
fs/gfs2/sys.c [new file with mode: 0644]
fs/gfs2/sys.h [new file with mode: 0644]
fs/gfs2/trans.c [new file with mode: 0644]
fs/gfs2/trans.h [new file with mode: 0644]
fs/gfs2/util.c [new file with mode: 0644]
fs/gfs2/util.h [new file with mode: 0644]
include/linux/Kbuild
include/linux/dlm.h [new file with mode: 0644]
include/linux/dlm_device.h [new file with mode: 0644]
include/linux/gfs2_ondisk.h [new file with mode: 0644]
include/linux/iflags.h [new file with mode: 0644]
include/linux/lm_interface.h [new file with mode: 0644]
include/linux/lock_dlm_plock.h [new file with mode: 0644]
mm/filemap.c
mm/readahead.c

diff --git a/CREDITS b/CREDITS
index 66e8246..2aa8f6d 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -3550,11 +3550,11 @@ S: Fargo, North Dakota 58122
 S: USA
 
 N: Steven Whitehouse
-E: SteveW@ACM.org
+E: steve@chygwyn.com
 W: http://www.chygwyn.com/~steve
-D: Linux DECnet project: http://www.sucs.swan.ac.uk/~rohan/DECnet/index.html
+D: Linux DECnet project
 D: Minor debugging of other networking protocols.
-D: Misc bug fixes and filesystem development
+D: Misc bug fixes and GFS2 filesystem development
 
 N: Hans-Joachim Widmaier
 E: hjw@zvw.de
diff --git a/Documentation/filesystems/gfs2.txt b/Documentation/filesystems/gfs2.txt
new file mode 100644 (file)
index 0000000..593004b
--- /dev/null
@@ -0,0 +1,43 @@
+Global File System
+------------------
+
+http://sources.redhat.com/cluster/
+
+GFS is a cluster file system. It allows a cluster of computers to
+simultaneously use a block device that is shared between them (with FC,
+iSCSI, NBD, etc).  GFS reads and writes to the block device like a local
+file system, but also uses a lock module to allow the computers to coordinate
+their I/O so file system consistency is maintained.  One of the nifty
+features of GFS is perfect consistency -- changes made to the file system
+on one machine show up immediately on all other machines in the cluster.
+
+GFS uses interchangeable inter-node locking mechanisms.  Different lock
+modules can plug into GFS and each file system selects the appropriate
+lock module at mount time.  Lock modules include:
+
+  lock_nolock -- allows gfs to be used as a local file system
+
+  lock_dlm -- uses a distributed lock manager (dlm) for inter-node locking
+  The dlm is found at linux/fs/dlm/
+
+In addition to interfacing with an external locking manager, a gfs lock
+module is responsible for interacting with external cluster management
+systems.  Lock_dlm depends on user space cluster management systems found
+at the URL above.
+
+To use gfs as a local file system, no external clustering systems are
+needed; simply:
+
+  $ mkfs -t gfs2 -p lock_nolock -j 1 /dev/block_device
+  $ mount -t gfs2 /dev/block_device /dir
+
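+For a clustered file system, the steps are similar.  The names below are
+only examples, and the user space cluster software described above must
+already be running:
+
+  $ mkfs.gfs2 -p lock_dlm -t mycluster:mygfs2 -j 2 /dev/block_device
+  $ mount -t gfs2 /dev/block_device /dir
+
+Here -t names the lock table as clustername:fsname and -j creates one
+journal for each node that will mount the file system.
+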
+GFS2 is not on-disk compatible with previous versions of GFS.
+
+The following man pages can be found at the URL above:
+  gfs2_fsck    to repair a filesystem
+  gfs2_grow    to expand a filesystem online
+  gfs2_jadd    to add journals to a filesystem online
+  gfs2_tool    to manipulate, examine and tune a filesystem
+  gfs2_quota   to examine and change quota values in a filesystem
+  mount.gfs2   to help mount(8) mount a filesystem
+  mkfs.gfs2    to make a filesystem
diff --git a/MAINTAINERS b/MAINTAINERS
index f0cd5a3..7c238e4 100644 (file)
@@ -900,6 +900,16 @@ M: jack@suse.cz
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 
+DISTRIBUTED LOCK MANAGER
+P:     Patrick Caulfield
+M:     pcaulfie@redhat.com
+P:     David Teigland
+M:     teigland@redhat.com
+L:     cluster-devel@redhat.com
+W:     http://sources.redhat.com/cluster/
+T:     git kernel.org:/pub/scm/linux/kernel/git/steve/gfs-2.6.git
+S:     Supported
+
 DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER
 P:     Tobias Ringstrom
 M:     tori@unhappy.mine.nu
@@ -1168,6 +1178,14 @@ M:       khc@pm.waw.pl
 W:     http://www.kernel.org/pub/linux/utils/net/hdlc/
 S:     Maintained
 
+GFS2 FILE SYSTEM
+P:     Steven Whitehouse
+M:     swhiteho@redhat.com
+L:     cluster-devel@redhat.com
+W:     http://sources.redhat.com/cluster/
+T:     git kernel.org:/pub/scm/linux/kernel/git/steve/gfs-2.6.git
+S:     Supported
+
 GIGASET ISDN DRIVERS
 P:     Hansjoerg Lipp
 M:     hjlipp@web.de
diff --git a/fs/Kconfig b/fs/Kconfig
index 1453d2d..fa64867 100644 (file)
@@ -325,6 +325,7 @@ config FS_POSIX_ACL
        default n
 
 source "fs/xfs/Kconfig"
+source "fs/gfs2/Kconfig"
 
 config OCFS2_FS
        tristate "OCFS2 file system support"
@@ -1983,6 +1984,7 @@ endmenu
 endif
 
 source "fs/nls/Kconfig"
+source "fs/dlm/Kconfig"
 
 endmenu
 
diff --git a/fs/Makefile b/fs/Makefile
index 819b2a9..215f703 100644 (file)
@@ -57,6 +57,7 @@ obj-$(CONFIG_CONFIGFS_FS)     += configfs/
 obj-y                          += devpts/
 
 obj-$(CONFIG_PROFILING)                += dcookies.o
+obj-$(CONFIG_DLM)              += dlm/
  
 # Do not add any filesystems before this line
 obj-$(CONFIG_REISERFS_FS)      += reiserfs/
@@ -109,3 +110,4 @@ obj-$(CONFIG_HOSTFS)                += hostfs/
 obj-$(CONFIG_HPPFS)            += hppfs/
 obj-$(CONFIG_DEBUG_FS)         += debugfs/
 obj-$(CONFIG_OCFS2_FS)         += ocfs2/
+obj-$(CONFIG_GFS2_FS)           += gfs2/
diff --git a/fs/configfs/item.c b/fs/configfs/item.c
index e07485a..2442120 100644 (file)
@@ -224,4 +224,4 @@ EXPORT_SYMBOL(config_item_init);
 EXPORT_SYMBOL(config_group_init);
 EXPORT_SYMBOL(config_item_get);
 EXPORT_SYMBOL(config_item_put);
-
+EXPORT_SYMBOL(config_group_find_obj);
diff --git a/fs/dlm/Kconfig b/fs/dlm/Kconfig
new file mode 100644 (file)
index 0000000..490f85b
--- /dev/null
@@ -0,0 +1,21 @@
+menu "Distributed Lock Manager"
+       depends on INET && EXPERIMENTAL
+
+config DLM
+       tristate "Distributed Lock Manager (DLM)"
+       depends on IPV6 || IPV6=n
+       depends on IP_SCTP
+       select CONFIGFS_FS
+       help
+       A general purpose distributed lock manager for kernel or userspace
+       applications.
+
+config DLM_DEBUG
+       bool "DLM debugging"
+       depends on DLM
+       help
+       Under the debugfs mount point, the name of each lockspace will
+       appear as a file in the "dlm" directory.  The output is the
+       list of resources and locks the local node knows about.
+
+endmenu
diff --git a/fs/dlm/Makefile b/fs/dlm/Makefile
new file mode 100644 (file)
index 0000000..1832e02
--- /dev/null
@@ -0,0 +1,19 @@
+obj-$(CONFIG_DLM) +=           dlm.o
+dlm-y :=                       ast.o \
+                               config.o \
+                               dir.o \
+                               lock.o \
+                               lockspace.o \
+                               lowcomms.o \
+                               main.o \
+                               member.o \
+                               memory.o \
+                               midcomms.o \
+                               rcom.o \
+                               recover.o \
+                               recoverd.o \
+                               requestqueue.o \
+                               user.o \
+                               util.o
+dlm-$(CONFIG_DLM_DEBUG) +=     debug_fs.o
+
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
new file mode 100644 (file)
index 0000000..f91d39c
--- /dev/null
@@ -0,0 +1,173 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+#include "dlm_internal.h"
+#include "lock.h"
+#include "user.h"
+
+#define WAKE_ASTS  0
+
+static struct list_head                ast_queue;
+static spinlock_t              ast_queue_lock;
+static struct task_struct *    astd_task;
+static unsigned long           astd_wakeflags;
+static struct mutex            astd_running;
+
+
+void dlm_del_ast(struct dlm_lkb *lkb)
+{
+       spin_lock(&ast_queue_lock);
+       if (lkb->lkb_ast_type & (AST_COMP | AST_BAST))
+               list_del(&lkb->lkb_astqueue);
+       spin_unlock(&ast_queue_lock);
+}
+
+void dlm_add_ast(struct dlm_lkb *lkb, int type)
+{
+       if (lkb->lkb_flags & DLM_IFL_USER) {
+               dlm_user_add_ast(lkb, type);
+               return;
+       }
+       DLM_ASSERT(lkb->lkb_astaddr != DLM_FAKE_USER_AST, dlm_print_lkb(lkb););
+
+       spin_lock(&ast_queue_lock);
+       if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) {
+               kref_get(&lkb->lkb_ref);
+               list_add_tail(&lkb->lkb_astqueue, &ast_queue);
+       }
+       lkb->lkb_ast_type |= type;
+       spin_unlock(&ast_queue_lock);
+
+       set_bit(WAKE_ASTS, &astd_wakeflags);
+       wake_up_process(astd_task);
+}
+
+static void process_asts(void)
+{
+       struct dlm_ls *ls = NULL;
+       struct dlm_rsb *r = NULL;
+       struct dlm_lkb *lkb;
+       void (*cast) (long param);
+       void (*bast) (long param, int mode);
+       int type = 0, found, bmode;
+
+       for (;;) {
+               found = 0;
+               spin_lock(&ast_queue_lock);
+               list_for_each_entry(lkb, &ast_queue, lkb_astqueue) {
+                       r = lkb->lkb_resource;
+                       ls = r->res_ls;
+
+                       if (dlm_locking_stopped(ls))
+                               continue;
+
+                       list_del(&lkb->lkb_astqueue);
+                       type = lkb->lkb_ast_type;
+                       lkb->lkb_ast_type = 0;
+                       found = 1;
+                       break;
+               }
+               spin_unlock(&ast_queue_lock);
+
+               if (!found)
+                       break;
+
+               cast = lkb->lkb_astaddr;
+               bast = lkb->lkb_bastaddr;
+               bmode = lkb->lkb_bastmode;
+
+               if ((type & AST_COMP) && cast)
+                       cast(lkb->lkb_astparam);
+
+               /* FIXME: Is it safe to look at lkb_grmode here
+                  without doing a lock_rsb() ?
+                  Look at other checks in v1 to avoid basts. */
+
+               if ((type & AST_BAST) && bast)
+                       if (!dlm_modes_compat(lkb->lkb_grmode, bmode))
+                               bast(lkb->lkb_astparam, bmode);
+
+               /* this removes the reference added by dlm_add_ast
+                  and may result in the lkb being freed */
+               dlm_put_lkb(lkb);
+
+               schedule();
+       }
+}
+
+static inline int no_asts(void)
+{
+       int ret;
+
+       spin_lock(&ast_queue_lock);
+       ret = list_empty(&ast_queue);
+       spin_unlock(&ast_queue_lock);
+       return ret;
+}
+
+static int dlm_astd(void *data)
+{
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               if (!test_bit(WAKE_ASTS, &astd_wakeflags))
+                       schedule();
+               set_current_state(TASK_RUNNING);
+
+               mutex_lock(&astd_running);
+               if (test_and_clear_bit(WAKE_ASTS, &astd_wakeflags))
+                       process_asts();
+               mutex_unlock(&astd_running);
+       }
+       return 0;
+}
+
+void dlm_astd_wake(void)
+{
+       if (!no_asts()) {
+               set_bit(WAKE_ASTS, &astd_wakeflags);
+               wake_up_process(astd_task);
+       }
+}
+
+int dlm_astd_start(void)
+{
+       struct task_struct *p;
+       int error = 0;
+
+       INIT_LIST_HEAD(&ast_queue);
+       spin_lock_init(&ast_queue_lock);
+       mutex_init(&astd_running);
+
+       p = kthread_run(dlm_astd, NULL, "dlm_astd");
+       if (IS_ERR(p))
+               error = PTR_ERR(p);
+       else
+               astd_task = p;
+       return error;
+}
+
+void dlm_astd_stop(void)
+{
+       kthread_stop(astd_task);
+}
+
+void dlm_astd_suspend(void)
+{
+       mutex_lock(&astd_running);
+}
+
+void dlm_astd_resume(void)
+{
+       mutex_unlock(&astd_running);
+}
+
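The callback pointers that process_asts() invokes have fixed shapes: cast is
void (*)(long) and bast is void (*)(long, int).  A minimal caller-side sketch
(the struct and function names below are hypothetical; only the signatures
come from the code above):

#include <linux/completion.h>

/* Hypothetical lock context owned by the caller */
struct my_lock {
        struct completion done;         /* signalled by the completion ast */
        int blocked_mode;               /* mode another node has requested */
};

/* Runs in astd context when the lock request completes (AST_COMP) */
static void my_completion_ast(long param)
{
        struct my_lock *lk = (struct my_lock *)param;

        complete(&lk->done);
}

/* Runs when a request on another node is blocked by ours (AST_BAST) */
static void my_blocking_ast(long param, int mode)
{
        struct my_lock *lk = (struct my_lock *)param;

        lk->blocked_mode = mode;        /* caller should release or downconvert */
}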
diff --git a/fs/dlm/ast.h b/fs/dlm/ast.h
new file mode 100644 (file)
index 0000000..6ee276c
--- /dev/null
@@ -0,0 +1,26 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+#ifndef __ASTD_DOT_H__
+#define __ASTD_DOT_H__
+
+void dlm_add_ast(struct dlm_lkb *lkb, int type);
+void dlm_del_ast(struct dlm_lkb *lkb);
+
+void dlm_astd_wake(void);
+int dlm_astd_start(void);
+void dlm_astd_stop(void);
+void dlm_astd_suspend(void);
+void dlm_astd_resume(void);
+
+#endif
+
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
new file mode 100644 (file)
index 0000000..8855305
--- /dev/null
@@ -0,0 +1,789 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/configfs.h>
+#include <net/sock.h>
+
+#include "config.h"
+#include "lowcomms.h"
+
+/*
+ * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/nodeid
+ * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/weight
+ * /config/dlm/<cluster>/comms/<comm>/nodeid
+ * /config/dlm/<cluster>/comms/<comm>/local
+ * /config/dlm/<cluster>/comms/<comm>/addr
+ * The <cluster> level is useless, but I haven't figured out how to avoid it.
+ */
+
+static struct config_group *space_list;
+static struct config_group *comm_list;
+static struct comm *local_comm;
+
+struct clusters;
+struct cluster;
+struct spaces;
+struct space;
+struct comms;
+struct comm;
+struct nodes;
+struct node;
+
+static struct config_group *make_cluster(struct config_group *, const char *);
+static void drop_cluster(struct config_group *, struct config_item *);
+static void release_cluster(struct config_item *);
+static struct config_group *make_space(struct config_group *, const char *);
+static void drop_space(struct config_group *, struct config_item *);
+static void release_space(struct config_item *);
+static struct config_item *make_comm(struct config_group *, const char *);
+static void drop_comm(struct config_group *, struct config_item *);
+static void release_comm(struct config_item *);
+static struct config_item *make_node(struct config_group *, const char *);
+static void drop_node(struct config_group *, struct config_item *);
+static void release_node(struct config_item *);
+
+static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
+                        char *buf);
+static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
+                         const char *buf, size_t len);
+static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
+                        char *buf);
+static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
+                         const char *buf, size_t len);
+
+static ssize_t comm_nodeid_read(struct comm *cm, char *buf);
+static ssize_t comm_nodeid_write(struct comm *cm, const char *buf, size_t len);
+static ssize_t comm_local_read(struct comm *cm, char *buf);
+static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len);
+static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len);
+static ssize_t node_nodeid_read(struct node *nd, char *buf);
+static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len);
+static ssize_t node_weight_read(struct node *nd, char *buf);
+static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len);
+
+enum {
+       COMM_ATTR_NODEID = 0,
+       COMM_ATTR_LOCAL,
+       COMM_ATTR_ADDR,
+};
+
+struct comm_attribute {
+       struct configfs_attribute attr;
+       ssize_t (*show)(struct comm *, char *);
+       ssize_t (*store)(struct comm *, const char *, size_t);
+};
+
+static struct comm_attribute comm_attr_nodeid = {
+       .attr   = { .ca_owner = THIS_MODULE,
+                    .ca_name = "nodeid",
+                    .ca_mode = S_IRUGO | S_IWUSR },
+       .show   = comm_nodeid_read,
+       .store  = comm_nodeid_write,
+};
+
+static struct comm_attribute comm_attr_local = {
+       .attr   = { .ca_owner = THIS_MODULE,
+                    .ca_name = "local",
+                    .ca_mode = S_IRUGO | S_IWUSR },
+       .show   = comm_local_read,
+       .store  = comm_local_write,
+};
+
+static struct comm_attribute comm_attr_addr = {
+       .attr   = { .ca_owner = THIS_MODULE,
+                    .ca_name = "addr",
+                    .ca_mode = S_IRUGO | S_IWUSR },
+       .store  = comm_addr_write,
+};
+
+static struct configfs_attribute *comm_attrs[] = {
+       [COMM_ATTR_NODEID] = &comm_attr_nodeid.attr,
+       [COMM_ATTR_LOCAL] = &comm_attr_local.attr,
+       [COMM_ATTR_ADDR] = &comm_attr_addr.attr,
+       NULL,
+};
+
+enum {
+       NODE_ATTR_NODEID = 0,
+       NODE_ATTR_WEIGHT,
+};
+
+struct node_attribute {
+       struct configfs_attribute attr;
+       ssize_t (*show)(struct node *, char *);
+       ssize_t (*store)(struct node *, const char *, size_t);
+};
+
+static struct node_attribute node_attr_nodeid = {
+       .attr   = { .ca_owner = THIS_MODULE,
+                    .ca_name = "nodeid",
+                    .ca_mode = S_IRUGO | S_IWUSR },
+       .show   = node_nodeid_read,
+       .store  = node_nodeid_write,
+};
+
+static struct node_attribute node_attr_weight = {
+       .attr   = { .ca_owner = THIS_MODULE,
+                    .ca_name = "weight",
+                    .ca_mode = S_IRUGO | S_IWUSR },
+       .show   = node_weight_read,
+       .store  = node_weight_write,
+};
+
+static struct configfs_attribute *node_attrs[] = {
+       [NODE_ATTR_NODEID] = &node_attr_nodeid.attr,
+       [NODE_ATTR_WEIGHT] = &node_attr_weight.attr,
+       NULL,
+};
+
+struct clusters {
+       struct configfs_subsystem subsys;
+};
+
+struct cluster {
+       struct config_group group;
+};
+
+struct spaces {
+       struct config_group ss_group;
+};
+
+struct space {
+       struct config_group group;
+       struct list_head members;
+       struct mutex members_lock;
+       int members_count;
+};
+
+struct comms {
+       struct config_group cs_group;
+};
+
+struct comm {
+       struct config_item item;
+       int nodeid;
+       int local;
+       int addr_count;
+       struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
+};
+
+struct nodes {
+       struct config_group ns_group;
+};
+
+struct node {
+       struct config_item item;
+       struct list_head list; /* space->members */
+       int nodeid;
+       int weight;
+};
+
+static struct configfs_group_operations clusters_ops = {
+       .make_group = make_cluster,
+       .drop_item = drop_cluster,
+};
+
+static struct configfs_item_operations cluster_ops = {
+       .release = release_cluster,
+};
+
+static struct configfs_group_operations spaces_ops = {
+       .make_group = make_space,
+       .drop_item = drop_space,
+};
+
+static struct configfs_item_operations space_ops = {
+       .release = release_space,
+};
+
+static struct configfs_group_operations comms_ops = {
+       .make_item = make_comm,
+       .drop_item = drop_comm,
+};
+
+static struct configfs_item_operations comm_ops = {
+       .release = release_comm,
+       .show_attribute = show_comm,
+       .store_attribute = store_comm,
+};
+
+static struct configfs_group_operations nodes_ops = {
+       .make_item = make_node,
+       .drop_item = drop_node,
+};
+
+static struct configfs_item_operations node_ops = {
+       .release = release_node,
+       .show_attribute = show_node,
+       .store_attribute = store_node,
+};
+
+static struct config_item_type clusters_type = {
+       .ct_group_ops = &clusters_ops,
+       .ct_owner = THIS_MODULE,
+};
+
+static struct config_item_type cluster_type = {
+       .ct_item_ops = &cluster_ops,
+       .ct_owner = THIS_MODULE,
+};
+
+static struct config_item_type spaces_type = {
+       .ct_group_ops = &spaces_ops,
+       .ct_owner = THIS_MODULE,
+};
+
+static struct config_item_type space_type = {
+       .ct_item_ops = &space_ops,
+       .ct_owner = THIS_MODULE,
+};
+
+static struct config_item_type comms_type = {
+       .ct_group_ops = &comms_ops,
+       .ct_owner = THIS_MODULE,
+};
+
+static struct config_item_type comm_type = {
+       .ct_item_ops = &comm_ops,
+       .ct_attrs = comm_attrs,
+       .ct_owner = THIS_MODULE,
+};
+
+static struct config_item_type nodes_type = {
+       .ct_group_ops = &nodes_ops,
+       .ct_owner = THIS_MODULE,
+};
+
+static struct config_item_type node_type = {
+       .ct_item_ops = &node_ops,
+       .ct_attrs = node_attrs,
+       .ct_owner = THIS_MODULE,
+};
+
+static struct cluster *to_cluster(struct config_item *i)
+{
+       return i ? container_of(to_config_group(i), struct cluster, group):NULL;
+}
+
+static struct space *to_space(struct config_item *i)
+{
+       return i ? container_of(to_config_group(i), struct space, group) : NULL;
+}
+
+static struct comm *to_comm(struct config_item *i)
+{
+       return i ? container_of(i, struct comm, item) : NULL;
+}
+
+static struct node *to_node(struct config_item *i)
+{
+       return i ? container_of(i, struct node, item) : NULL;
+}
+
+static struct config_group *make_cluster(struct config_group *g,
+                                        const char *name)
+{
+       struct cluster *cl = NULL;
+       struct spaces *sps = NULL;
+       struct comms *cms = NULL;
+       void *gps = NULL;
+
+       cl = kzalloc(sizeof(struct cluster), GFP_KERNEL);
+       gps = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
+       sps = kzalloc(sizeof(struct spaces), GFP_KERNEL);
+       cms = kzalloc(sizeof(struct comms), GFP_KERNEL);
+
+       if (!cl || !gps || !sps || !cms)
+               goto fail;
+
+       config_group_init_type_name(&cl->group, name, &cluster_type);
+       config_group_init_type_name(&sps->ss_group, "spaces", &spaces_type);
+       config_group_init_type_name(&cms->cs_group, "comms", &comms_type);
+
+       cl->group.default_groups = gps;
+       cl->group.default_groups[0] = &sps->ss_group;
+       cl->group.default_groups[1] = &cms->cs_group;
+       cl->group.default_groups[2] = NULL;
+
+       space_list = &sps->ss_group;
+       comm_list = &cms->cs_group;
+       return &cl->group;
+
+ fail:
+       kfree(cl);
+       kfree(gps);
+       kfree(sps);
+       kfree(cms);
+       return NULL;
+}
+
+static void drop_cluster(struct config_group *g, struct config_item *i)
+{
+       struct cluster *cl = to_cluster(i);
+       struct config_item *tmp;
+       int j;
+
+       for (j = 0; cl->group.default_groups[j]; j++) {
+               tmp = &cl->group.default_groups[j]->cg_item;
+               cl->group.default_groups[j] = NULL;
+               config_item_put(tmp);
+       }
+
+       space_list = NULL;
+       comm_list = NULL;
+
+       config_item_put(i);
+}
+
+static void release_cluster(struct config_item *i)
+{
+       struct cluster *cl = to_cluster(i);
+       kfree(cl->group.default_groups);
+       kfree(cl);
+}
+
+static struct config_group *make_space(struct config_group *g, const char *name)
+{
+       struct space *sp = NULL;
+       struct nodes *nds = NULL;
+       void *gps = NULL;
+
+       sp = kzalloc(sizeof(struct space), GFP_KERNEL);
+       gps = kcalloc(2, sizeof(struct config_group *), GFP_KERNEL);
+       nds = kzalloc(sizeof(struct nodes), GFP_KERNEL);
+
+       if (!sp || !gps || !nds)
+               goto fail;
+
+       config_group_init_type_name(&sp->group, name, &space_type);
+       config_group_init_type_name(&nds->ns_group, "nodes", &nodes_type);
+
+       sp->group.default_groups = gps;
+       sp->group.default_groups[0] = &nds->ns_group;
+       sp->group.default_groups[1] = NULL;
+
+       INIT_LIST_HEAD(&sp->members);
+       mutex_init(&sp->members_lock);
+       sp->members_count = 0;
+       return &sp->group;
+
+ fail:
+       kfree(sp);
+       kfree(gps);
+       kfree(nds);
+       return NULL;
+}
+
+static void drop_space(struct config_group *g, struct config_item *i)
+{
+       struct space *sp = to_space(i);
+       struct config_item *tmp;
+       int j;
+
+       /* assert list_empty(&sp->members) */
+
+       for (j = 0; sp->group.default_groups[j]; j++) {
+               tmp = &sp->group.default_groups[j]->cg_item;
+               sp->group.default_groups[j] = NULL;
+               config_item_put(tmp);
+       }
+
+       config_item_put(i);
+}
+
+static void release_space(struct config_item *i)
+{
+       struct space *sp = to_space(i);
+       kfree(sp->group.default_groups);
+       kfree(sp);
+}
+
+static struct config_item *make_comm(struct config_group *g, const char *name)
+{
+       struct comm *cm;
+
+       cm = kzalloc(sizeof(struct comm), GFP_KERNEL);
+       if (!cm)
+               return NULL;
+
+       config_item_init_type_name(&cm->item, name, &comm_type);
+       cm->nodeid = -1;
+       cm->local = 0;
+       cm->addr_count = 0;
+       return &cm->item;
+}
+
+static void drop_comm(struct config_group *g, struct config_item *i)
+{
+       struct comm *cm = to_comm(i);
+       if (local_comm == cm)
+               local_comm = NULL;
+       dlm_lowcomms_close(cm->nodeid);
+       while (cm->addr_count--)
+               kfree(cm->addr[cm->addr_count]);
+       config_item_put(i);
+}
+
+static void release_comm(struct config_item *i)
+{
+       struct comm *cm = to_comm(i);
+       kfree(cm);
+}
+
+static struct config_item *make_node(struct config_group *g, const char *name)
+{
+       struct space *sp = to_space(g->cg_item.ci_parent);
+       struct node *nd;
+
+       nd = kzalloc(sizeof(struct node), GFP_KERNEL);
+       if (!nd)
+               return NULL;
+
+       config_item_init_type_name(&nd->item, name, &node_type);
+       nd->nodeid = -1;
+       nd->weight = 1;  /* default weight of 1 if none is set */
+
+       mutex_lock(&sp->members_lock);
+       list_add(&nd->list, &sp->members);
+       sp->members_count++;
+       mutex_unlock(&sp->members_lock);
+
+       return &nd->item;
+}
+
+static void drop_node(struct config_group *g, struct config_item *i)
+{
+       struct space *sp = to_space(g->cg_item.ci_parent);
+       struct node *nd = to_node(i);
+
+       mutex_lock(&sp->members_lock);
+       list_del(&nd->list);
+       sp->members_count--;
+       mutex_unlock(&sp->members_lock);
+
+       config_item_put(i);
+}
+
+static void release_node(struct config_item *i)
+{
+       struct node *nd = to_node(i);
+       kfree(nd);
+}
+
+static struct clusters clusters_root = {
+       .subsys = {
+               .su_group = {
+                       .cg_item = {
+                               .ci_namebuf = "dlm",
+                               .ci_type = &clusters_type,
+                       },
+               },
+       },
+};
+
+int dlm_config_init(void)
+{
+       config_group_init(&clusters_root.subsys.su_group);
+       init_MUTEX(&clusters_root.subsys.su_sem);
+       return configfs_register_subsystem(&clusters_root.subsys);
+}
+
+void dlm_config_exit(void)
+{
+       configfs_unregister_subsystem(&clusters_root.subsys);
+}
+
+/*
+ * Functions for user space to read/write attributes
+ */
+
+static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
+                        char *buf)
+{
+       struct comm *cm = to_comm(i);
+       struct comm_attribute *cma =
+                       container_of(a, struct comm_attribute, attr);
+       return cma->show ? cma->show(cm, buf) : 0;
+}
+
+static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
+                         const char *buf, size_t len)
+{
+       struct comm *cm = to_comm(i);
+       struct comm_attribute *cma =
+               container_of(a, struct comm_attribute, attr);
+       return cma->store ? cma->store(cm, buf, len) : -EINVAL;
+}
+
+static ssize_t comm_nodeid_read(struct comm *cm, char *buf)
+{
+       return sprintf(buf, "%d\n", cm->nodeid);
+}
+
+static ssize_t comm_nodeid_write(struct comm *cm, const char *buf, size_t len)
+{
+       cm->nodeid = simple_strtol(buf, NULL, 0);
+       return len;
+}
+
+static ssize_t comm_local_read(struct comm *cm, char *buf)
+{
+       return sprintf(buf, "%d\n", cm->local);
+}
+
+static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len)
+{
+       cm->local = simple_strtol(buf, NULL, 0);
+       if (cm->local && !local_comm)
+               local_comm = cm;
+       return len;
+}
+
+static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len)
+{
+       struct sockaddr_storage *addr;
+
+       if (len != sizeof(struct sockaddr_storage))
+               return -EINVAL;
+
+       if (cm->addr_count >= DLM_MAX_ADDR_COUNT)
+               return -ENOSPC;
+
+       addr = kzalloc(sizeof(*addr), GFP_KERNEL);
+       if (!addr)
+               return -ENOMEM;
+
+       memcpy(addr, buf, len);
+       cm->addr[cm->addr_count++] = addr;
+       return len;
+}
+
+static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
+                        char *buf)
+{
+       struct node *nd = to_node(i);
+       struct node_attribute *nda =
+                       container_of(a, struct node_attribute, attr);
+       return nda->show ? nda->show(nd, buf) : 0;
+}
+
+static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
+                         const char *buf, size_t len)
+{
+       struct node *nd = to_node(i);
+       struct node_attribute *nda =
+               container_of(a, struct node_attribute, attr);
+       return nda->store ? nda->store(nd, buf, len) : -EINVAL;
+}
+
+static ssize_t node_nodeid_read(struct node *nd, char *buf)
+{
+       return sprintf(buf, "%d\n", nd->nodeid);
+}
+
+static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len)
+{
+       nd->nodeid = simple_strtol(buf, NULL, 0);
+       return len;
+}
+
+static ssize_t node_weight_read(struct node *nd, char *buf)
+{
+       return sprintf(buf, "%d\n", nd->weight);
+}
+
+static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len)
+{
+       nd->weight = simple_strtol(buf, NULL, 0);
+       return len;
+}
+
+/*
+ * Functions for the dlm to get the info that's been configured
+ */
+
+static struct space *get_space(char *name)
+{
+       if (!space_list)
+               return NULL;
+       return to_space(config_group_find_obj(space_list, name));
+}
+
+static void put_space(struct space *sp)
+{
+       config_item_put(&sp->group.cg_item);
+}
+
+static struct comm *get_comm(int nodeid, struct sockaddr_storage *addr)
+{
+       struct config_item *i;
+       struct comm *cm = NULL;
+       int found = 0;
+
+       if (!comm_list)
+               return NULL;
+
+       down(&clusters_root.subsys.su_sem);
+
+       list_for_each_entry(i, &comm_list->cg_children, ci_entry) {
+               cm = to_comm(i);
+
+               if (nodeid) {
+                       if (cm->nodeid != nodeid)
+                               continue;
+                       found = 1;
+                       break;
+               } else {
+                       if (!cm->addr_count ||
+                           memcmp(cm->addr[0], addr, sizeof(*addr)))
+                               continue;
+                       found = 1;
+                       break;
+               }
+       }
+       up(&clusters_root.subsys.su_sem);
+
+       if (found)
+               config_item_get(i);
+       else
+               cm = NULL;
+       return cm;
+}
+
+static void put_comm(struct comm *cm)
+{
+       config_item_put(&cm->item);
+}
+
+/* caller must free mem */
+int dlm_nodeid_list(char *lsname, int **ids_out)
+{
+       struct space *sp;
+       struct node *nd;
+       int i = 0, rv = 0;
+       int *ids;
+
+       sp = get_space(lsname);
+       if (!sp)
+               return -EEXIST;
+
+       mutex_lock(&sp->members_lock);
+       if (!sp->members_count) {
+               rv = 0;
+               goto out;
+       }
+
+       ids = kcalloc(sp->members_count, sizeof(int), GFP_KERNEL);
+       if (!ids) {
+               rv = -ENOMEM;
+               goto out;
+       }
+
+       rv = sp->members_count;
+       list_for_each_entry(nd, &sp->members, list)
+               ids[i++] = nd->nodeid;
+
+       if (rv != i)
+               printk("bad nodeid count %d %d\n", rv, i);
+
+       *ids_out = ids;
+ out:
+       mutex_unlock(&sp->members_lock);
+       put_space(sp);
+       return rv;
+}
+
+int dlm_node_weight(char *lsname, int nodeid)
+{
+       struct space *sp;
+       struct node *nd;
+       int w = -EEXIST;
+
+       sp = get_space(lsname);
+       if (!sp)
+               goto out;
+
+       mutex_lock(&sp->members_lock);
+       list_for_each_entry(nd, &sp->members, list) {
+               if (nd->nodeid != nodeid)
+                       continue;
+               w = nd->weight;
+               break;
+       }
+       mutex_unlock(&sp->members_lock);
+       put_space(sp);
+ out:
+       return w;
+}
+
+int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr)
+{
+       struct comm *cm = get_comm(nodeid, NULL);
+       if (!cm)
+               return -EEXIST;
+       if (!cm->addr_count)
+               return -ENOENT;
+       memcpy(addr, cm->addr[0], sizeof(*addr));
+       put_comm(cm);
+       return 0;
+}
+
+int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
+{
+       struct comm *cm = get_comm(0, addr);
+       if (!cm)
+               return -EEXIST;
+       *nodeid = cm->nodeid;
+       put_comm(cm);
+       return 0;
+}
+
+int dlm_our_nodeid(void)
+{
+       return local_comm ? local_comm->nodeid : 0;
+}
+
+/* num 0 is first addr, num 1 is second addr */
+int dlm_our_addr(struct sockaddr_storage *addr, int num)
+{
+       if (!local_comm)
+               return -1;
+       if (num + 1 > local_comm->addr_count)
+               return -1;
+       memcpy(addr, local_comm->addr[num], sizeof(*addr));
+       return 0;
+}
+
+/* Config file defaults */
+#define DEFAULT_TCP_PORT       21064
+#define DEFAULT_BUFFER_SIZE     4096
+#define DEFAULT_RSBTBL_SIZE      256
+#define DEFAULT_LKBTBL_SIZE     1024
+#define DEFAULT_DIRTBL_SIZE      512
+#define DEFAULT_RECOVER_TIMER      5
+#define DEFAULT_TOSS_SECS         10
+#define DEFAULT_SCAN_SECS          5
+
+struct dlm_config_info dlm_config = {
+       .tcp_port = DEFAULT_TCP_PORT,
+       .buffer_size = DEFAULT_BUFFER_SIZE,
+       .rsbtbl_size = DEFAULT_RSBTBL_SIZE,
+       .lkbtbl_size = DEFAULT_LKBTBL_SIZE,
+       .dirtbl_size = DEFAULT_DIRTBL_SIZE,
+       .recover_timer = DEFAULT_RECOVER_TIMER,
+       .toss_secs = DEFAULT_TOSS_SECS,
+       .scan_secs = DEFAULT_SCAN_SECS
+};
+
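As an illustration of how the make_*() and store_*() handlers above are
driven from user space, here is a hypothetical sketch of a cluster manager
describing a one-node cluster.  The directory and node names, and the
/config mount point, are examples taken from the layout comment at the top
of this file:

#include <stdio.h>
#include <sys/stat.h>

static int set_attr(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");     /* store_comm()/store_node() run here */

        if (!f)
                return -1;
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        /* each mkdir drives make_cluster(), make_space(), make_node(),
           make_comm() respectively */
        mkdir("/config/dlm/mycluster", 0755);
        mkdir("/config/dlm/mycluster/spaces/myls", 0755);
        mkdir("/config/dlm/mycluster/spaces/myls/nodes/1", 0755);
        set_attr("/config/dlm/mycluster/spaces/myls/nodes/1/nodeid", "1");
        set_attr("/config/dlm/mycluster/spaces/myls/nodes/1/weight", "1");

        mkdir("/config/dlm/mycluster/comms/1", 0755);
        set_attr("/config/dlm/mycluster/comms/1/nodeid", "1");
        set_attr("/config/dlm/mycluster/comms/1/local", "1");
        /* "addr" takes a raw struct sockaddr_storage, not text
           (comm_addr_write() checks the length), so it is omitted here */
        return 0;
}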
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
new file mode 100644 (file)
index 0000000..9da7839
--- /dev/null
@@ -0,0 +1,42 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+#ifndef __CONFIG_DOT_H__
+#define __CONFIG_DOT_H__
+
+#define DLM_MAX_ADDR_COUNT 3
+
+struct dlm_config_info {
+       int tcp_port;
+       int buffer_size;
+       int rsbtbl_size;
+       int lkbtbl_size;
+       int dirtbl_size;
+       int recover_timer;
+       int toss_secs;
+       int scan_secs;
+};
+
+extern struct dlm_config_info dlm_config;
+
+int dlm_config_init(void);
+void dlm_config_exit(void);
+int dlm_node_weight(char *lsname, int nodeid);
+int dlm_nodeid_list(char *lsname, int **ids_out);
+int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr);
+int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid);
+int dlm_our_nodeid(void);
+int dlm_our_addr(struct sockaddr_storage *addr, int num);
+
+#endif                         /* __CONFIG_DOT_H__ */
+
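A short usage sketch for the query interface declared above.  The caller and
lockspace name are hypothetical; the contract (a negative return is an errno,
a positive return is the member count, and the caller frees the array) is
from config.c:

#include <linux/kernel.h>       /* printk */
#include <linux/slab.h>         /* kfree */
#include "config.h"

static int print_members(char *lsname)
{
        int *ids, i, count;

        count = dlm_nodeid_list(lsname, &ids);
        if (count < 0)
                return count;           /* e.g. -EEXIST: unknown lockspace */

        for (i = 0; i < count; i++)
                printk("nodeid %d weight %d\n",
                       ids[i], dlm_node_weight(lsname, ids[i]));
        if (count)
                kfree(ids);             /* "caller must free mem" */
        return 0;
}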
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
new file mode 100644 (file)
index 0000000..ca94a83
--- /dev/null
@@ -0,0 +1,387 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+
+#include "dlm_internal.h"
+
+#define DLM_DEBUG_BUF_LEN 4096
+static char debug_buf[DLM_DEBUG_BUF_LEN];
+static struct mutex debug_buf_lock;
+
+static struct dentry *dlm_root;
+
+struct rsb_iter {
+       int entry;
+       struct dlm_ls *ls;
+       struct list_head *next;
+       struct dlm_rsb *rsb;
+};
+
+/*
+ * dump all rsb's in the lockspace hash table
+ */
+
+static char *print_lockmode(int mode)
+{
+       switch (mode) {
+       case DLM_LOCK_IV:
+               return "--";
+       case DLM_LOCK_NL:
+               return "NL";
+       case DLM_LOCK_CR:
+               return "CR";
+       case DLM_LOCK_CW:
+               return "CW";
+       case DLM_LOCK_PR:
+               return "PR";
+       case DLM_LOCK_PW:
+               return "PW";
+       case DLM_LOCK_EX:
+               return "EX";
+       default:
+               return "??";
+       }
+}
+
+static void print_lock(struct seq_file *s, struct dlm_lkb *lkb,
+                      struct dlm_rsb *res)
+{
+       seq_printf(s, "%08x %s", lkb->lkb_id, print_lockmode(lkb->lkb_grmode));
+
+       if (lkb->lkb_status == DLM_LKSTS_CONVERT
+           || lkb->lkb_status == DLM_LKSTS_WAITING)
+               seq_printf(s, " (%s)", print_lockmode(lkb->lkb_rqmode));
+
+       if (lkb->lkb_nodeid) {
+               if (lkb->lkb_nodeid != res->res_nodeid)
+                       seq_printf(s, " Remote: %3d %08x", lkb->lkb_nodeid,
+                                  lkb->lkb_remid);
+               else
+                       seq_printf(s, " Master:     %08x", lkb->lkb_remid);
+       }
+
+       if (lkb->lkb_wait_type)
+               seq_printf(s, " wait_type: %d", lkb->lkb_wait_type);
+
+       seq_printf(s, "\n");
+}
+
+static int print_resource(struct dlm_rsb *res, struct seq_file *s)
+{
+       struct dlm_lkb *lkb;
+       int i, lvblen = res->res_ls->ls_lvblen, recover_list, root_list;
+
+       seq_printf(s, "\nResource %p Name (len=%d) \"", res, res->res_length);
+       for (i = 0; i < res->res_length; i++) {
+               if (isprint(res->res_name[i]))
+                       seq_printf(s, "%c", res->res_name[i]);
+               else
+                       seq_printf(s, "%c", '.');
+       }
+       if (res->res_nodeid > 0)
+               seq_printf(s, "\"  \nLocal Copy, Master is node %d\n",
+                          res->res_nodeid);
+       else if (res->res_nodeid == 0)
+               seq_printf(s, "\"  \nMaster Copy\n");
+       else if (res->res_nodeid == -1)
+               seq_printf(s, "\"  \nLooking up master (lkid %x)\n",
+                          res->res_first_lkid);
+       else
+               seq_printf(s, "\"  \nInvalid master %d\n", res->res_nodeid);
+
+       /* Print the LVB: */
+       if (res->res_lvbptr) {
+               seq_printf(s, "LVB: ");
+               for (i = 0; i < lvblen; i++) {
+                       if (i == lvblen / 2)
+                               seq_printf(s, "\n     ");
+                       seq_printf(s, "%02x ",
+                                  (unsigned char) res->res_lvbptr[i]);
+               }
+               if (rsb_flag(res, RSB_VALNOTVALID))
+                       seq_printf(s, " (INVALID)");
+               seq_printf(s, "\n");
+       }
+
+       root_list = !list_empty(&res->res_root_list);
+       recover_list = !list_empty(&res->res_recover_list);
+
+       if (root_list || recover_list) {
+               seq_printf(s, "Recovery: root %d recover %d flags %lx "
+                          "count %d\n", root_list, recover_list,
+                          res->res_flags, res->res_recover_locks_count);
+       }
+
+       /* Print the locks attached to this resource */
+       seq_printf(s, "Granted Queue\n");
+       list_for_each_entry(lkb, &res->res_grantqueue, lkb_statequeue)
+               print_lock(s, lkb, res);
+
+       seq_printf(s, "Conversion Queue\n");
+       list_for_each_entry(lkb, &res->res_convertqueue, lkb_statequeue)
+               print_lock(s, lkb, res);
+
+       seq_printf(s, "Waiting Queue\n");
+       list_for_each_entry(lkb, &res->res_waitqueue, lkb_statequeue)
+               print_lock(s, lkb, res);
+
+       if (list_empty(&res->res_lookup))
+               goto out;
+
+       seq_printf(s, "Lookup Queue\n");
+       list_for_each_entry(lkb, &res->res_lookup, lkb_rsb_lookup) {
+               seq_printf(s, "%08x %s", lkb->lkb_id,
+                          print_lockmode(lkb->lkb_rqmode));
+               if (lkb->lkb_wait_type)
+                       seq_printf(s, " wait_type: %d", lkb->lkb_wait_type);
+               seq_printf(s, "\n");
+       }
+ out:
+       return 0;
+}
+
+static int rsb_iter_next(struct rsb_iter *ri)
+{
+       struct dlm_ls *ls = ri->ls;
+       int i;
+
+       if (!ri->next) {
+ top:
+               /* Find the next non-empty hash bucket */
+               for (i = ri->entry; i < ls->ls_rsbtbl_size; i++) {
+                       read_lock(&ls->ls_rsbtbl[i].lock);
+                       if (!list_empty(&ls->ls_rsbtbl[i].list)) {
+                               ri->next = ls->ls_rsbtbl[i].list.next;
+                               read_unlock(&ls->ls_rsbtbl[i].lock);
+                               break;
+                       }
+                       read_unlock(&ls->ls_rsbtbl[i].lock);
+                }
+               ri->entry = i;
+
+               if (ri->entry >= ls->ls_rsbtbl_size)
+                       return 1;
+       } else {
+               i = ri->entry;
+               read_lock(&ls->ls_rsbtbl[i].lock);
+               ri->next = ri->next->next;
+               if (ri->next->next == ls->ls_rsbtbl[i].list.next) {
+                       /* End of list - move to next bucket */
+                       ri->next = NULL;
+                       ri->entry++;
+                       read_unlock(&ls->ls_rsbtbl[i].lock);
+                       goto top;
+                }
+               read_unlock(&ls->ls_rsbtbl[i].lock);
+       }
+       ri->rsb = list_entry(ri->next, struct dlm_rsb, res_hashchain);
+
+       return 0;
+}
+
+static void rsb_iter_free(struct rsb_iter *ri)
+{
+       kfree(ri);
+}
+
+static struct rsb_iter *rsb_iter_init(struct dlm_ls *ls)
+{
+       struct rsb_iter *ri;
+
+       ri = kmalloc(sizeof *ri, GFP_KERNEL);
+       if (!ri)
+               return NULL;
+
+       ri->ls = ls;
+       ri->entry = 0;
+       ri->next = NULL;
+
+       if (rsb_iter_next(ri)) {
+               rsb_iter_free(ri);
+               return NULL;
+       }
+
+       return ri;
+}
+
+static void *rsb_seq_start(struct seq_file *file, loff_t *pos)
+{
+       struct rsb_iter *ri;
+       loff_t n = *pos;
+
+       ri = rsb_iter_init(file->private);
+       if (!ri)
+               return NULL;
+
+       while (n--) {
+               if (rsb_iter_next(ri)) {
+                       rsb_iter_free(ri);
+                       return NULL;
+               }
+       }
+
+       return ri;
+}
+
+static void *rsb_seq_next(struct seq_file *file, void *iter_ptr, loff_t *pos)
+{
+       struct rsb_iter *ri = iter_ptr;
+
+       (*pos)++;
+
+       if (rsb_iter_next(ri)) {
+               rsb_iter_free(ri);
+               return NULL;
+       }
+
+       return ri;
+}
+
+static void rsb_seq_stop(struct seq_file *file, void *iter_ptr)
+{
+       /* nothing for now */
+}
+
+static int rsb_seq_show(struct seq_file *file, void *iter_ptr)
+{
+       struct rsb_iter *ri = iter_ptr;
+
+       print_resource(ri->rsb, file);
+
+       return 0;
+}
+
+static struct seq_operations rsb_seq_ops = {
+       .start = rsb_seq_start,
+       .next  = rsb_seq_next,
+       .stop  = rsb_seq_stop,
+       .show  = rsb_seq_show,
+};
+
+static int rsb_open(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq;
+       int ret;
+
+       ret = seq_open(file, &rsb_seq_ops);
+       if (ret)
+               return ret;
+
+       seq = file->private_data;
+       seq->private = inode->i_private;
+
+       return 0;
+}
+
+static struct file_operations rsb_fops = {
+       .owner   = THIS_MODULE,
+       .open    = rsb_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release
+};
+
+/*
+ * dump lkb's on the ls_waiters list
+ */
+
+static int waiters_open(struct inode *inode, struct file *file)
+{
+       file->private_data = inode->i_private;
+       return 0;
+}
+
+static ssize_t waiters_read(struct file *file, char __user *userbuf,
+                           size_t count, loff_t *ppos)
+{
+       struct dlm_ls *ls = file->private_data;
+       struct dlm_lkb *lkb;
+       size_t len = DLM_DEBUG_BUF_LEN, pos = 0, ret, rv;
+
+       mutex_lock(&debug_buf_lock);
+       mutex_lock(&ls->ls_waiters_mutex);
+       memset(debug_buf, 0, sizeof(debug_buf));
+
+       list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
+               ret = snprintf(debug_buf + pos, len - pos, "%x %d %d %s\n",
+                              lkb->lkb_id, lkb->lkb_wait_type,
+                              lkb->lkb_nodeid, lkb->lkb_resource->res_name);
+               if (ret >= len - pos)
+                       break;
+               pos += ret;
+       }
+       mutex_unlock(&ls->ls_waiters_mutex);
+
+       rv = simple_read_from_buffer(userbuf, count, ppos, debug_buf, pos);
+       mutex_unlock(&debug_buf_lock);
+       return rv;
+}
+
+static struct file_operations waiters_fops = {
+       .owner   = THIS_MODULE,
+       .open    = waiters_open,
+       .read    = waiters_read
+};
+
+int dlm_create_debug_file(struct dlm_ls *ls)
+{
+       char name[DLM_LOCKSPACE_LEN+8];
+
+       ls->ls_debug_rsb_dentry = debugfs_create_file(ls->ls_name,
+                                                     S_IFREG | S_IRUGO,
+                                                     dlm_root,
+                                                     ls,
+                                                     &rsb_fops);
+       if (!ls->ls_debug_rsb_dentry)
+               return -ENOMEM;
+
+       memset(name, 0, sizeof(name));
+       snprintf(name, DLM_LOCKSPACE_LEN+8, "%s_waiters", ls->ls_name);
+
+       ls->ls_debug_waiters_dentry = debugfs_create_file(name,
+                                                         S_IFREG | S_IRUGO,
+                                                         dlm_root,
+                                                         ls,
+                                                         &waiters_fops);
+       if (!ls->ls_debug_waiters_dentry) {
+               debugfs_remove(ls->ls_debug_rsb_dentry);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+void dlm_delete_debug_file(struct dlm_ls *ls)
+{
+       if (ls->ls_debug_rsb_dentry)
+               debugfs_remove(ls->ls_debug_rsb_dentry);
+       if (ls->ls_debug_waiters_dentry)
+               debugfs_remove(ls->ls_debug_waiters_dentry);
+}
+
+int dlm_register_debugfs(void)
+{
+       mutex_init(&debug_buf_lock);
+       dlm_root = debugfs_create_dir("dlm", NULL);
+       return dlm_root ? 0 : -ENOMEM;
+}
+
+void dlm_unregister_debugfs(void)
+{
+       debugfs_remove(dlm_root);
+}
+
diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
new file mode 100644 (file)
index 0000000..4675455
--- /dev/null
@@ -0,0 +1,423 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+#include "dlm_internal.h"
+#include "lockspace.h"
+#include "member.h"
+#include "lowcomms.h"
+#include "rcom.h"
+#include "config.h"
+#include "memory.h"
+#include "recover.h"
+#include "util.h"
+#include "lock.h"
+#include "dir.h"
+
+
+static void put_free_de(struct dlm_ls *ls, struct dlm_direntry *de)
+{
+       spin_lock(&ls->ls_recover_list_lock);
+       list_add(&de->list, &ls->ls_recover_list);
+       spin_unlock(&ls->ls_recover_list_lock);
+}
+
+static struct dlm_direntry *get_free_de(struct dlm_ls *ls, int len)
+{
+       int found = 0;
+       struct dlm_direntry *de;
+
+       spin_lock(&ls->ls_recover_list_lock);
+       list_for_each_entry(de, &ls->ls_recover_list, list) {
+               if (de->length == len) {
+                       list_del(&de->list);
+                       de->master_nodeid = 0;
+                       memset(de->name, 0, len);
+                       found = 1;
+                       break;
+               }
+       }
+       spin_unlock(&ls->ls_recover_list_lock);
+
+       if (!found)
+               de = allocate_direntry(ls, len);
+       return de;
+}
+
+void dlm_clear_free_entries(struct dlm_ls *ls)
+{
+       struct dlm_direntry *de;
+
+       spin_lock(&ls->ls_recover_list_lock);
+       while (!list_empty(&ls->ls_recover_list)) {
+               de = list_entry(ls->ls_recover_list.next, struct dlm_direntry,
+                               list);
+               list_del(&de->list);
+               free_direntry(de);
+       }
+       spin_unlock(&ls->ls_recover_list_lock);
+}
+
+/*
+ * We use the upper 16 bits of the hash value to select the directory node.
+ * Low bits are used for distribution of rsb's among hash buckets on each node.
+ *
+ * To give the exact range wanted (0 to num_nodes-1), we apply a modulus of
+ * num_nodes to the hash value.  This value in the desired range is used as an
+ * offset into the sorted list of nodeid's to give the particular nodeid.
+ */
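+
+/*
+ * Worked example: with four nodes of equal weight (so ls_total_weight
+ * is presumably 4) and hash = 0x8234abcd, (hash >> 16) = 0x8234 = 33332
+ * and 33332 % 4 = 0, so the first nodeid in the sorted node array is
+ * the directory node for this resource.
+ */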
+
+int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash)
+{
+       struct list_head *tmp;
+       struct dlm_member *memb = NULL;
+       uint32_t node, n = 0;
+       int nodeid;
+
+       if (ls->ls_num_nodes == 1) {
+               nodeid = dlm_our_nodeid();
+               goto out;
+       }
+
+       if (ls->ls_node_array) {
+               node = (hash >> 16) % ls->ls_total_weight;
+               nodeid = ls->ls_node_array[node];
+               goto out;
+       }
+
+       /* make_member_array() failed to kmalloc ls_node_array... */
+
+       node = (hash >> 16) % ls->ls_num_nodes;
+
+       list_for_each(tmp, &ls->ls_nodes) {
+               if (n++ != node)
+                       continue;
+               memb = list_entry(tmp, struct dlm_member, list);
+               break;
+       }
+
+       DLM_ASSERT(memb, printk("num_nodes=%u n=%u node=%u\n",
+                                ls->ls_num_nodes, n, node););
+       nodeid = memb->nodeid;
+ out:
+       return nodeid;
+}
+
+int dlm_dir_nodeid(struct dlm_rsb *r)
+{
+       return dlm_hash2nodeid(r->res_ls, r->res_hash);
+}
+
+static inline uint32_t dir_hash(struct dlm_ls *ls, char *name, int len)
+{
+       uint32_t val;
+
+       val = jhash(name, len, 0);
+       val &= (ls->ls_dirtbl_size - 1);
+
+       return val;
+}
+
+static void add_entry_to_hash(struct dlm_ls *ls, struct dlm_direntry *de)
+{
+       uint32_t bucket;
+
+       bucket = dir_hash(ls, de->name, de->length);
+       list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
+}
+
+static struct dlm_direntry *search_bucket(struct dlm_ls *ls, char *name,
+                                         int namelen, uint32_t bucket)
+{
+       struct dlm_direntry *de;
+
+       list_for_each_entry(de, &ls->ls_dirtbl[bucket].list, list) {
+               if (de->length == namelen && !memcmp(name, de->name, namelen))
+                       goto out;
+       }
+       de = NULL;
+ out:
+       return de;
+}
+
+void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen)
+{
+       struct dlm_direntry *de;
+       uint32_t bucket;
+
+       bucket = dir_hash(ls, name, namelen);
+
+       write_lock(&ls->ls_dirtbl[bucket].lock);
+
+       de = search_bucket(ls, name, namelen, bucket);
+
+       if (!de) {
+               log_error(ls, "remove fr %u none", nodeid);
+               goto out;
+       }
+
+       if (de->master_nodeid != nodeid) {
+               log_error(ls, "remove fr %u ID %u", nodeid, de->master_nodeid);
+               goto out;
+       }
+
+       list_del(&de->list);
+       free_direntry(de);
+ out:
+       write_unlock(&ls->ls_dirtbl[bucket].lock);
+}
+
+void dlm_dir_clear(struct dlm_ls *ls)
+{
+       struct list_head *head;
+       struct dlm_direntry *de;
+       int i;
+
+       DLM_ASSERT(list_empty(&ls->ls_recover_list), );
+
+       for (i = 0; i < ls->ls_dirtbl_size; i++) {
+               write_lock(&ls->ls_dirtbl[i].lock);
+               head = &ls->ls_dirtbl[i].list;
+               while (!list_empty(head)) {
+                       de = list_entry(head->next, struct dlm_direntry, list);
+                       list_del(&de->list);
+                       put_free_de(ls, de);
+               }
+               write_unlock(&ls->ls_dirtbl[i].lock);
+       }
+}
+
+int dlm_recover_directory(struct dlm_ls *ls)
+{
+       struct dlm_member *memb;
+       struct dlm_direntry *de;
+       char *b, *last_name = NULL;
+       int error = -ENOMEM, last_len, count = 0;
+       uint16_t namelen;
+
+       log_debug(ls, "dlm_recover_directory");
+
+       if (dlm_no_directory(ls))
+               goto out_status;
+
+       dlm_dir_clear(ls);
+
+       last_name = kmalloc(DLM_RESNAME_MAXLEN, GFP_KERNEL);
+       if (!last_name)
+               goto out;
+
+       list_for_each_entry(memb, &ls->ls_nodes, list) {
+               memset(last_name, 0, DLM_RESNAME_MAXLEN);
+               last_len = 0;
+
+               for (;;) {
+                       error = dlm_recovery_stopped(ls);
+                       if (error)
+                               goto out_free;
+
+                       error = dlm_rcom_names(ls, memb->nodeid,
+                                              last_name, last_len);
+                       if (error)
+                               goto out_free;
+
+                       schedule();
+
+                       /*
+                        * pick namelen/name pairs out of received buffer
+                        */
+
+                       b = ls->ls_recover_buf + sizeof(struct dlm_rcom);
+
+                       for (;;) {
+                               memcpy(&namelen, b, sizeof(uint16_t));
+                               namelen = be16_to_cpu(namelen);
+                               b += sizeof(uint16_t);
+
+                               /* namelen of 0xFFFF marks end of names for
+                                  this node; namelen of 0 marks end of the
+                                  buffer */
+
+                               if (namelen == 0xFFFF)
+                                       goto done;
+                               if (!namelen)
+                                       break;
+
+                               error = -ENOMEM;
+                               de = get_free_de(ls, namelen);
+                               if (!de)
+                                       goto out_free;
+
+                               de->master_nodeid = memb->nodeid;
+                               de->length = namelen;
+                               last_len = namelen;
+                               memcpy(de->name, b, namelen);
+                               memcpy(last_name, b, namelen);
+                               b += namelen;
+
+                               add_entry_to_hash(ls, de);
+                               count++;
+                       }
+               }
+         done:
+               ;
+       }
+
+ out_status:
+       error = 0;
+       dlm_set_recover_status(ls, DLM_RS_DIR);
+       log_debug(ls, "dlm_recover_directory %d entries", count);
+ out_free:
+       kfree(last_name);
+ out:
+       dlm_clear_free_entries(ls);
+       return error;
+}
+
+static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
+                    int namelen, int *r_nodeid)
+{
+       struct dlm_direntry *de, *tmp;
+       uint32_t bucket;
+
+       bucket = dir_hash(ls, name, namelen);
+
+       write_lock(&ls->ls_dirtbl[bucket].lock);
+       de = search_bucket(ls, name, namelen, bucket);
+       if (de) {
+               *r_nodeid = de->master_nodeid;
+               write_unlock(&ls->ls_dirtbl[bucket].lock);
+               if (*r_nodeid == nodeid)
+                       return -EEXIST;
+               return 0;
+       }
+
+       write_unlock(&ls->ls_dirtbl[bucket].lock);
+
+       de = allocate_direntry(ls, namelen);
+       if (!de)
+               return -ENOMEM;
+
+       de->master_nodeid = nodeid;
+       de->length = namelen;
+       memcpy(de->name, name, namelen);
+
+       write_lock(&ls->ls_dirtbl[bucket].lock);
+       tmp = search_bucket(ls, name, namelen, bucket);
+       if (tmp) {
+               free_direntry(de);
+               de = tmp;
+       } else {
+               list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
+       }
+       *r_nodeid = de->master_nodeid;
+       write_unlock(&ls->ls_dirtbl[bucket].lock);
+       return 0;
+}
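+
+/* get_entry() drops the bucket lock around allocate_direntry() because
+   the allocation may sleep; after retaking the lock it searches the
+   bucket again, since another lookup may have raced in and added the
+   same name.  In that case the fresh entry is freed and the existing
+   master_nodeid wins. */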
+
+int dlm_dir_lookup(struct dlm_ls *ls, int nodeid, char *name, int namelen,
+                  int *r_nodeid)
+{
+       return get_entry(ls, nodeid, name, namelen, r_nodeid);
+}
+
+/* Copy the names of master rsb's into the buffer provided.
+   Only select names whose dir node is the given nodeid. */
+
+void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
+                          char *outbuf, int outlen, int nodeid)
+{
+       struct list_head *list;
+       struct dlm_rsb *start_r = NULL, *r = NULL;
+       int offset = 0, start_namelen, error, dir_nodeid;
+       char *start_name;
+       uint16_t be_namelen;
+
+       /*
+        * Find the rsb where we left off (or start again)
+        */
+
+       start_namelen = inlen;
+       start_name = inbuf;
+
+       if (start_namelen > 1) {
+               /*
+                * We could also use a find_rsb_root() function here that
+                * searched the ls_root_list.
+                */
+               error = dlm_find_rsb(ls, start_name, start_namelen, R_MASTER,
+                                    &start_r);
+               DLM_ASSERT(!error && start_r,
+                          printk("error %d\n", error););
+               DLM_ASSERT(!list_empty(&start_r->res_root_list),
+                          dlm_print_rsb(start_r););
+               dlm_put_rsb(start_r);
+       }
+
+       /*
+        * Send rsb names for rsb's we're master of and whose directory node
+        * matches the requesting node.
+        */
+
+       down_read(&ls->ls_root_sem);
+       if (start_r)
+               list = start_r->res_root_list.next;
+       else
+               list = ls->ls_root_list.next;
+
+       for (offset = 0; list != &ls->ls_root_list; list = list->next) {
+               r = list_entry(list, struct dlm_rsb, res_root_list);
+               if (r->res_nodeid)
+                       continue;
+
+               dir_nodeid = dlm_dir_nodeid(r);
+               if (dir_nodeid != nodeid)
+                       continue;
+
+               /*
+                * The block ends when we can't fit the following in the
+                * remaining buffer space:
+                * namelen (uint16_t) +
+                * name (r->res_length) +
+                * end-of-block record 0x0000 (uint16_t)
+                */
+
+               if (offset + sizeof(uint16_t)*2 + r->res_length > outlen) {
+                       /* Write end-of-block record */
+                       be_namelen = 0;
+                       memcpy(outbuf + offset, &be_namelen, sizeof(uint16_t));
+                       offset += sizeof(uint16_t);
+                       goto out;
+               }
+
+               be_namelen = cpu_to_be16(r->res_length);
+               memcpy(outbuf + offset, &be_namelen, sizeof(uint16_t));
+               offset += sizeof(uint16_t);
+               memcpy(outbuf + offset, r->res_name, r->res_length);
+               offset += r->res_length;
+       }
+
+       /*
+        * If we've reached the end of the list (and there's room) write a
+        * terminating record.
+        */
+
+       if ((list == &ls->ls_root_list) &&
+           (offset + sizeof(uint16_t) <= outlen)) {
+               be_namelen = 0xFFFF;
+               memcpy(outbuf + offset, &be_namelen, sizeof(uint16_t));
+               offset += sizeof(uint16_t);
+       }
+
+ out:
+       up_read(&ls->ls_root_sem);
+}
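+
+/* The buffer filled here is what dlm_recover_directory() parses on the
+   requesting node: a sequence of (big-endian uint16_t namelen, name
+   bytes) records.  A namelen of 0x0000 means "end of this buffer, ask
+   again starting from last_name" and 0xFFFF means "no more names from
+   this node".  For example, names "a" and "bc" followed by the final
+   terminator would be laid out as 00 01 'a' 00 02 'b' 'c' ff ff. */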
+
diff --git a/fs/dlm/dir.h b/fs/dlm/dir.h
new file mode 100644 (file)
index 0000000..0b0eb12
--- /dev/null
@@ -0,0 +1,30 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+#ifndef __DIR_DOT_H__
+#define __DIR_DOT_H__
+
+
+int dlm_dir_nodeid(struct dlm_rsb *rsb);
+int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash);
+void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int len);
+void dlm_dir_clear(struct dlm_ls *ls);
+void dlm_clear_free_entries(struct dlm_ls *ls);
+int dlm_recover_directory(struct dlm_ls *ls);
+int dlm_dir_lookup(struct dlm_ls *ls, int nodeid, char *name, int namelen,
+       int *r_nodeid);
+void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
+       char *outbuf, int outlen, int nodeid);
+
+#endif                         /* __DIR_DOT_H__ */
+
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
new file mode 100644 (file)
index 0000000..1e5cd67
--- /dev/null
@@ -0,0 +1,543 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+#ifndef __DLM_INTERNAL_DOT_H__
+#define __DLM_INTERNAL_DOT_H__
+
+/*
+ * This is the main header file to be included in each DLM source file.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <linux/delay.h>
+#include <linux/socket.h>
+#include <linux/kthread.h>
+#include <linux/kobject.h>
+#include <linux/kref.h>
+#include <linux/kernel.h>
+#include <linux/jhash.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <asm/semaphore.h>
+#include <asm/uaccess.h>
+
+#include <linux/dlm.h>
+
+#define DLM_LOCKSPACE_LEN      64
+
+/* Size of the temp buffer midcomms allocates on the stack.
+   We try to make this large enough so most messages fit.
+   FIXME: should sctp make this unnecessary? */
+
+#define DLM_INBUF_LEN          148
+
+struct dlm_ls;
+struct dlm_lkb;
+struct dlm_rsb;
+struct dlm_member;
+struct dlm_lkbtable;
+struct dlm_rsbtable;
+struct dlm_dirtable;
+struct dlm_direntry;
+struct dlm_recover;
+struct dlm_header;
+struct dlm_message;
+struct dlm_rcom;
+struct dlm_mhandle;
+
+#define log_print(fmt, args...) \
+       printk(KERN_ERR "dlm: " fmt "\n", ##args)
+#define log_error(ls, fmt, args...) \
+       printk(KERN_ERR "dlm: %s: " fmt "\n", (ls)->ls_name, ##args)
+
+#define DLM_LOG_DEBUG
+#ifdef DLM_LOG_DEBUG
+#define log_debug(ls, fmt, args...) log_error(ls, fmt, ##args)
+#else
+#define log_debug(ls, fmt, args...)
+#endif
+
+#define DLM_ASSERT(x, do) \
+{ \
+  if (!(x)) \
+  { \
+    printk(KERN_ERR "\nDLM:  Assertion failed on line %d of file %s\n" \
+               "DLM:  assertion:  \"%s\"\n" \
+               "DLM:  time = %lu\n", \
+               __LINE__, __FILE__, #x, jiffies); \
+    {do} \
+    printk("\n"); \
+    BUG(); \
+    panic("DLM:  Record message above and reboot.\n"); \
+  } \
+}
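+
+/* Typical usage: the second argument is a statement executed before
+   BUG(), e.g.
+
+       DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););
+
+   or left empty when there is nothing useful to dump:
+
+       DLM_ASSERT(list_empty(&ls->ls_recover_list), );
+*/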
+
+#define DLM_FAKE_USER_AST ERR_PTR(-EINVAL)
+
+
+struct dlm_direntry {
+       struct list_head        list;
+       uint32_t                master_nodeid;
+       uint16_t                length;
+       char                    name[1];
+};
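+
+/* name[] above is a variable-length trailing array; allocate_direntry()
+   (in memory.c) presumably allocates sizeof(struct dlm_direntry) plus
+   the name length so the entry and its name live in one allocation. */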
+
+struct dlm_dirtable {
+       struct list_head        list;
+       rwlock_t                lock;
+};
+
+struct dlm_rsbtable {
+       struct list_head        list;
+       struct list_head        toss;
+       rwlock_t                lock;
+};
+
+struct dlm_lkbtable {
+       struct list_head        list;
+       rwlock_t                lock;
+       uint16_t                counter;
+};
+
+/*
+ * Lockspace member (per node in a ls)
+ */
+
+struct dlm_member {
+       struct list_head        list;
+       int                     nodeid;
+       int                     weight;
+};
+
+/*
+ * Save and manage recovery state for a lockspace.
+ */
+
+struct dlm_recover {
+       struct list_head        list;
+       int                     *nodeids;
+       int                     node_count;
+       uint64_t                seq;
+};
+
+/*
+ * Pass input args to second stage locking function.
+ */
+
+struct dlm_args {
+       uint32_t                flags;
+       void                    *astaddr;
+       long                    astparam;
+       void                    *bastaddr;
+       int                     mode;
+       struct dlm_lksb         *lksb;
+};
+
+
+/*
+ * Lock block
+ *
+ * A lock can be one of three types:
+ *
+ * local copy      lock is mastered locally
+ *                 (lkb_nodeid is zero and DLM_LKF_MSTCPY is not set)
+ * process copy    lock is mastered on a remote node
+ *                 (lkb_nodeid is non-zero and DLM_LKF_MSTCPY is not set)
+ * master copy     master node's copy of a lock owned by remote node
+ *                 (lkb_nodeid is non-zero and DLM_LKF_MSTCPY is set)
+ *
+ * lkb_exflags: a copy of the most recent flags arg provided to dlm_lock or
+ * dlm_unlock.  The dlm does not modify these or use any private flags in
+ * this field; it only contains DLM_LKF_ flags from dlm.h.  These flags
+ * are sent as-is to the remote master when the lock is remote.
+ *
+ * lkb_flags: internal dlm flags (DLM_IFL_ prefix) from dlm_internal.h.
+ * Some internal flags are shared between the master and process nodes;
+ * these shared flags are kept in the lower two bytes.  One of these
+ * flags set on the master copy will be propagated to the process copy
+ * and v.v.  Other internal flags are private to the master or process
+ * node (e.g. DLM_IFL_MSTCPY).  These are kept in the high two bytes.
+ *
+ * lkb_sbflags: status block flags.  These flags are copied directly into
+ * the caller's lksb.sb_flags prior to the dlm_lock/dlm_unlock completion
+ * ast.  All defined in dlm.h with DLM_SBF_ prefix.
+ *
+ * lkb_status: the lock status indicates which rsb queue the lock is
+ * on: grant, convert, or wait (DLM_LKSTS_WAITING/GRANTED/CONVERT).
+ *
+ * lkb_wait_type: the dlm message type (DLM_MSG_ prefix) for which a
+ * reply is needed.  Only set when the lkb is on the lockspace waiters
+ * list awaiting a reply from a remote node.
+ *
+ * lkb_nodeid: when the lkb is a local copy, nodeid is 0; when the lkb
+ * is a master copy, nodeid specifies the remote lock holder; when the
+ * lkb is a process copy, the nodeid specifies the lock master.
+ */
+
+/* lkb_ast_type */
+
+#define AST_COMP               1
+#define AST_BAST               2
+
+/* lkb_status */
+
+#define DLM_LKSTS_WAITING      1
+#define DLM_LKSTS_GRANTED      2
+#define DLM_LKSTS_CONVERT      3
+
+/* lkb_flags */
+
+#define DLM_IFL_MSTCPY         0x00010000
+#define DLM_IFL_RESEND         0x00020000
+#define DLM_IFL_DEAD           0x00040000
+#define DLM_IFL_USER           0x00000001
+#define DLM_IFL_ORPHAN         0x00000002
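+
+/* Per the lkb_flags comment above: DLM_IFL_USER and DLM_IFL_ORPHAN sit
+   in the low two bytes and are propagated between master and process
+   copies, while DLM_IFL_MSTCPY, DLM_IFL_RESEND and DLM_IFL_DEAD sit in
+   the high two bytes and stay private to one node. */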
+
+struct dlm_lkb {
+       struct dlm_rsb          *lkb_resource;  /* the rsb */
+       struct kref             lkb_ref;
+       int                     lkb_nodeid;     /* copied from rsb */
+       int                     lkb_ownpid;     /* pid of lock owner */
+       uint32_t                lkb_id;         /* our lock ID */
+       uint32_t                lkb_remid;      /* lock ID on remote partner */
+       uint32_t                lkb_exflags;    /* external flags from caller */
+       uint32_t                lkb_sbflags;    /* lksb flags */
+       uint32_t                lkb_flags;      /* internal flags */
+       uint32_t                lkb_lvbseq;     /* lvb sequence number */
+
+       int8_t                  lkb_status;     /* granted, waiting, convert */
+       int8_t                  lkb_rqmode;     /* requested lock mode */
+       int8_t                  lkb_grmode;     /* granted lock mode */
+       int8_t                  lkb_bastmode;   /* requested mode */
+       int8_t                  lkb_highbast;   /* highest mode bast sent for */
+
+       int8_t                  lkb_wait_type;  /* type of reply waiting for */
+       int8_t                  lkb_ast_type;   /* type of ast queued for */
+
+       struct list_head        lkb_idtbl_list; /* lockspace lkbtbl */
+       struct list_head        lkb_statequeue; /* rsb g/c/w list */
+       struct list_head        lkb_rsb_lookup; /* waiting for rsb lookup */
+       struct list_head        lkb_wait_reply; /* waiting for remote reply */
+       struct list_head        lkb_astqueue;   /* need ast to be sent */
+       struct list_head        lkb_ownqueue;   /* list of locks for a process */
+
+       char                    *lkb_lvbptr;
+       struct dlm_lksb         *lkb_lksb;      /* caller's status block */
+       void                    *lkb_astaddr;   /* caller's ast function */
+       void                    *lkb_bastaddr;  /* caller's bast function */
+       long                    lkb_astparam;   /* caller's ast arg */
+};
+
+
+struct dlm_rsb {
+       struct dlm_ls           *res_ls;        /* the lockspace */
+       struct kref             res_ref;
+       struct mutex            res_mutex;
+       unsigned long           res_flags;
+       int                     res_length;     /* length of rsb name */
+       int                     res_nodeid;
+       uint32_t                res_lvbseq;
+       uint32_t                res_hash;
+       uint32_t                res_bucket;     /* rsbtbl */
+       unsigned long           res_toss_time;
+       uint32_t                res_first_lkid;
+       struct list_head        res_lookup;     /* lkbs waiting on first */
+       struct list_head        res_hashchain;  /* rsbtbl */
+       struct list_head        res_grantqueue;
+       struct list_head        res_convertqueue;
+       struct list_head        res_waitqueue;
+
+       struct list_head        res_root_list;      /* used for recovery */
+       struct list_head        res_recover_list;   /* used for recovery */
+       int                     res_recover_locks_count;
+
+       char                    *res_lvbptr;
+       char                    res_name[1];
+};
+
+/* find_rsb() flags */
+
+#define R_MASTER               1       /* only return rsb if it's a master */
+#define R_CREATE               2       /* create/add rsb if not found */
+
+/* rsb_flags */
+
+enum rsb_flags {
+       RSB_MASTER_UNCERTAIN,
+       RSB_VALNOTVALID,
+       RSB_VALNOTVALID_PREV,
+       RSB_NEW_MASTER,
+       RSB_NEW_MASTER2,
+       RSB_RECOVER_CONVERT,
+       RSB_LOCKS_PURGED,
+};
+
+static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag)
+{
+       __set_bit(flag, &r->res_flags);
+}
+
+static inline void rsb_clear_flag(struct dlm_rsb *r, enum rsb_flags flag)
+{
+       __clear_bit(flag, &r->res_flags);
+}
+
+static inline int rsb_flag(struct dlm_rsb *r, enum rsb_flags flag)
+{
+       return test_bit(flag, &r->res_flags);
+}
+
+
+/* dlm_header is first element of all structs sent between nodes */
+
+#define DLM_HEADER_MAJOR       0x00020000
+#define DLM_HEADER_MINOR       0x00000001
+
+#define DLM_MSG                        1
+#define DLM_RCOM               2
+
+struct dlm_header {
+       uint32_t                h_version;
+       uint32_t                h_lockspace;
+       uint32_t                h_nodeid;       /* nodeid of sender */
+       uint16_t                h_length;
+       uint8_t                 h_cmd;          /* DLM_MSG, DLM_RCOM */
+       uint8_t                 h_pad;
+};
+
+
+#define DLM_MSG_REQUEST                1
+#define DLM_MSG_CONVERT                2
+#define DLM_MSG_UNLOCK         3
+#define DLM_MSG_CANCEL         4
+#define DLM_MSG_REQUEST_REPLY  5
+#define DLM_MSG_CONVERT_REPLY  6
+#define DLM_MSG_UNLOCK_REPLY   7
+#define DLM_MSG_CANCEL_REPLY   8
+#define DLM_MSG_GRANT          9
+#define DLM_MSG_BAST           10
+#define DLM_MSG_LOOKUP         11
+#define DLM_MSG_REMOVE         12
+#define DLM_MSG_LOOKUP_REPLY   13
+
+struct dlm_message {
+       struct dlm_header       m_header;
+       uint32_t                m_type;         /* DLM_MSG_ */
+       uint32_t                m_nodeid;
+       uint32_t                m_pid;
+       uint32_t                m_lkid;         /* lkid on sender */
+       uint32_t                m_remid;        /* lkid on receiver */
+       uint32_t                m_parent_lkid;
+       uint32_t                m_parent_remid;
+       uint32_t                m_exflags;
+       uint32_t                m_sbflags;
+       uint32_t                m_flags;
+       uint32_t                m_lvbseq;
+       uint32_t                m_hash;
+       int                     m_status;
+       int                     m_grmode;
+       int                     m_rqmode;
+       int                     m_bastmode;
+       int                     m_asts;
+       int                     m_result;       /* 0 or -EXXX */
+       char                    m_extra[0];     /* name or lvb */
+};
+
+
+#define DLM_RS_NODES           0x00000001
+#define DLM_RS_NODES_ALL       0x00000002
+#define DLM_RS_DIR             0x00000004
+#define DLM_RS_DIR_ALL         0x00000008
+#define DLM_RS_LOCKS           0x00000010
+#define DLM_RS_LOCKS_ALL       0x00000020
+#define DLM_RS_DONE            0x00000040
+#define DLM_RS_DONE_ALL                0x00000080
+
+#define DLM_RCOM_STATUS                1
+#define DLM_RCOM_NAMES         2
+#define DLM_RCOM_LOOKUP                3
+#define DLM_RCOM_LOCK          4
+#define DLM_RCOM_STATUS_REPLY  5
+#define DLM_RCOM_NAMES_REPLY   6
+#define DLM_RCOM_LOOKUP_REPLY  7
+#define DLM_RCOM_LOCK_REPLY    8
+
+struct dlm_rcom {
+       struct dlm_header       rc_header;
+       uint32_t                rc_type;        /* DLM_RCOM_ */
+       int                     rc_result;      /* multi-purpose */
+       uint64_t                rc_id;          /* match reply with request */
+       char                    rc_buf[0];
+};
+
+struct rcom_config {
+       uint32_t                rf_lvblen;
+       uint32_t                rf_lsflags;
+       uint64_t                rf_unused;
+};
+
+struct rcom_lock {
+       uint32_t                rl_ownpid;
+       uint32_t                rl_lkid;
+       uint32_t                rl_remid;
+       uint32_t                rl_parent_lkid;
+       uint32_t                rl_parent_remid;
+       uint32_t                rl_exflags;
+       uint32_t                rl_flags;
+       uint32_t                rl_lvbseq;
+       int                     rl_result;
+       int8_t                  rl_rqmode;
+       int8_t                  rl_grmode;
+       int8_t                  rl_status;
+       int8_t                  rl_asts;
+       uint16_t                rl_wait_type;
+       uint16_t                rl_namelen;
+       char                    rl_name[DLM_RESNAME_MAXLEN];
+       char                    rl_lvb[0];
+};
+
+struct dlm_ls {
+       struct list_head        ls_list;        /* list of lockspaces */
+       dlm_lockspace_t         *ls_local_handle;
+       uint32_t                ls_global_id;   /* global unique lockspace ID */
+       uint32_t                ls_exflags;
+       int                     ls_lvblen;
+       int                     ls_count;       /* reference count */
+       unsigned long           ls_flags;       /* LSFL_ */
+       struct kobject          ls_kobj;
+
+       struct dlm_rsbtable     *ls_rsbtbl;
+       uint32_t                ls_rsbtbl_size;
+
+       struct dlm_lkbtable     *ls_lkbtbl;
+       uint32_t                ls_lkbtbl_size;
+
+       struct dlm_dirtable     *ls_dirtbl;
+       uint32_t                ls_dirtbl_size;
+
+       struct mutex            ls_waiters_mutex;
+       struct list_head        ls_waiters;     /* lkbs needing a reply */
+
+       struct list_head        ls_nodes;       /* current nodes in ls */
+       struct list_head        ls_nodes_gone;  /* dead node list, recovery */
+       int                     ls_num_nodes;   /* number of nodes in ls */
+       int                     ls_low_nodeid;
+       int                     ls_total_weight;
+       int                     *ls_node_array;
+
+       struct dlm_rsb          ls_stub_rsb;    /* for returning errors */
+       struct dlm_lkb          ls_stub_lkb;    /* for returning errors */
+       struct dlm_message      ls_stub_ms;     /* for faking a reply */
+
+       struct dentry           *ls_debug_rsb_dentry; /* debugfs */
+       struct dentry           *ls_debug_waiters_dentry; /* debugfs */
+
+       wait_queue_head_t       ls_uevent_wait; /* user part of join/leave */
+       int                     ls_uevent_result;
+
+       struct miscdevice       ls_device;
+
+       /* recovery related */
+
+       struct timer_list       ls_timer;
+       struct task_struct      *ls_recoverd_task;
+       struct mutex            ls_recoverd_active;
+       spinlock_t              ls_recover_lock;
+       uint32_t                ls_recover_status; /* DLM_RS_ */
+       uint64_t                ls_recover_seq;
+       struct dlm_recover      *ls_recover_args;
+       struct rw_semaphore     ls_in_recovery; /* block local requests */
+       struct list_head        ls_requestqueue;/* queue remote requests */
+       struct mutex            ls_requestqueue_mutex;
+       char                    *ls_recover_buf;
+       int                     ls_recover_nodeid; /* for debugging */
+       uint64_t                ls_rcom_seq;
+       struct list_head        ls_recover_list;
+       spinlock_t              ls_recover_list_lock;
+       int                     ls_recover_list_count;
+       wait_queue_head_t       ls_wait_general;
+       struct mutex            ls_clear_proc_locks;
+
+       struct list_head        ls_root_list;   /* root resources */
+       struct rw_semaphore     ls_root_sem;    /* protect root_list */
+
+       int                     ls_namelen;
+       char                    ls_name[1];
+};
+
+#define LSFL_WORK              0
+#define LSFL_RUNNING           1
+#define LSFL_RECOVERY_STOP     2
+#define LSFL_RCOM_READY                3
+#define LSFL_UEVENT_WAIT       4
+
+/* much of this is just saving user space pointers associated with the
+   lock that we pass back to the user lib with an ast */
+
+struct dlm_user_args {
+       struct dlm_user_proc    *proc; /* each process that opens the lockspace
+                                         device has private data
+                                         (dlm_user_proc) on the struct file,
+                                         the process's locks point back to it*/
+       struct dlm_lksb         lksb;
+       int                     old_mode;
+       int                     update_user_lvb;
+       struct dlm_lksb __user  *user_lksb;
+       void __user             *castparam;
+       void __user             *castaddr;
+       void __user             *bastparam;
+       void __user             *bastaddr;
+};
+
+#define DLM_PROC_FLAGS_CLOSING 1
+#define DLM_PROC_FLAGS_COMPAT  2
+
+/* locks list is kept so we can remove all a process's locks when it
+   exits (or orphan those that are persistent) */
+
+struct dlm_user_proc {
+       dlm_lockspace_t         *lockspace;
+       unsigned long           flags; /* DLM_PROC_FLAGS */
+       struct list_head        asts;
+       spinlock_t              asts_spin;
+       struct list_head        locks;
+       spinlock_t              locks_spin;
+       wait_queue_head_t       wait;
+};
+
+static inline int dlm_locking_stopped(struct dlm_ls *ls)
+{
+       return !test_bit(LSFL_RUNNING, &ls->ls_flags);
+}
+
+static inline int dlm_recovery_stopped(struct dlm_ls *ls)
+{
+       return test_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
+}
+
+static inline int dlm_no_directory(struct dlm_ls *ls)
+{
+       return (ls->ls_exflags & DLM_LSFL_NODIR) ? 1 : 0;
+}
+
+#endif                         /* __DLM_INTERNAL_DOT_H__ */
+
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
new file mode 100644 (file)
index 0000000..3f2befa
--- /dev/null
@@ -0,0 +1,3871 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+/* Central locking logic has four stages:
+
+   dlm_lock()
+   dlm_unlock()
+
+   request_lock(ls, lkb)
+   convert_lock(ls, lkb)
+   unlock_lock(ls, lkb)
+   cancel_lock(ls, lkb)
+
+   _request_lock(r, lkb)
+   _convert_lock(r, lkb)
+   _unlock_lock(r, lkb)
+   _cancel_lock(r, lkb)
+
+   do_request(r, lkb)
+   do_convert(r, lkb)
+   do_unlock(r, lkb)
+   do_cancel(r, lkb)
+
+   Stage 1 (lock, unlock) is mainly about checking input args and
+   splitting into one of the four main operations:
+
+       dlm_lock          = request_lock
+       dlm_lock+CONVERT  = convert_lock
+       dlm_unlock        = unlock_lock
+       dlm_unlock+CANCEL = cancel_lock
+
+   Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
+   provided to the next stage.
+
+   Stage 3, _xxxx_lock(), determines if the operation is local or remote.
+   When remote, it calls send_xxxx(), when local it calls do_xxxx().
+
+   Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
+   given rsb and lkb and queues callbacks.
+
+   For remote operations, send_xxxx() results in the corresponding do_xxxx()
+   function being executed on the remote node.  The connecting send/receive
+   calls on local (L) and remote (R) nodes:
+
+   L: send_xxxx()              ->  R: receive_xxxx()
+                                   R: do_xxxx()
+   L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
+*/
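+
+/* For example, a dlm_lock() request on a resource mastered remotely
+   flows as request_lock() -> _request_lock() -> send_request() on the
+   local node, then receive_request() -> do_request() on the master,
+   and finally receive_request_reply() back on the requesting node. */
+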
+#include <linux/types.h>
+#include "dlm_internal.h"
+#include <linux/dlm_device.h>
+#include "memory.h"
+#include "lowcomms.h"
+#include "requestqueue.h"
+#include "util.h"
+#include "dir.h"
+#include "member.h"
+#include "lockspace.h"
+#include "ast.h"
+#include "lock.h"
+#include "rcom.h"
+#include "recover.h"
+#include "lvb_table.h"
+#include "user.h"
+#include "config.h"
+
+static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
+static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
+static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
+static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
+static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
+static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
+static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
+static int send_remove(struct dlm_rsb *r);
+static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
+static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
+                                   struct dlm_message *ms);
+static int receive_extralen(struct dlm_message *ms);
+
+/*
+ * Lock compatibility matrix - thanks Steve
+ * UN = Unlocked state. Not really a state, used as a flag
+ * PD = Padding. Used to make the matrix a nice power of two in size
+ * Other states are the same as the VMS DLM.
+ * Usage: matrix[grmode+1][rqmode+1]  (although m[rq+1][gr+1] is the same)
+ */
+
+static const int __dlm_compat_matrix[8][8] = {
+      /* UN NL CR CW PR PW EX PD */
+        {1, 1, 1, 1, 1, 1, 1, 0},       /* UN */
+        {1, 1, 1, 1, 1, 1, 1, 0},       /* NL */
+        {1, 1, 1, 1, 1, 1, 0, 0},       /* CR */
+        {1, 1, 1, 1, 0, 0, 0, 0},       /* CW */
+        {1, 1, 1, 0, 1, 0, 0, 0},       /* PR */
+        {1, 1, 1, 0, 0, 0, 0, 0},       /* PW */
+        {1, 1, 0, 0, 0, 0, 0, 0},       /* EX */
+        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
+};
+
+/*
+ * This defines the direction of transfer of LVB data.
+ * Granted mode is the row; requested mode is the column.
+ * Usage: matrix[grmode+1][rqmode+1]
+ * 1 = LVB is returned to the caller
+ * 0 = LVB is written to the resource
+ * -1 = nothing happens to the LVB
+ */
+
+const int dlm_lvb_operations[8][8] = {
+        /* UN   NL  CR  CW  PR  PW  EX  PD*/
+        {  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
+        {  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
+        {  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
+        {  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
+        {  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
+        {  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
+        {  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
+        {  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
+};
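+
+/* Example: converting a lock down from EX to NL gives
+   dlm_lvb_operations[DLM_LOCK_EX + 1][DLM_LOCK_NL + 1] == 0, so the
+   caller's LVB is written to the resource; converting up from PR to EX
+   gives 1, so the resource's LVB is returned to the caller. */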
+
+#define modes_compat(gr, rq) \
+       __dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
+
+int dlm_modes_compat(int mode1, int mode2)
+{
+       return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
+}
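+
+/* Example: dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_CW) reads
+   __dlm_compat_matrix[4][3] == 0 (not compatible), while
+   dlm_modes_compat(DLM_LOCK_CR, DLM_LOCK_PW) reads
+   __dlm_compat_matrix[2][5] == 1 (compatible). */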
+
+/*
+ * Compatibility matrix for conversions with QUECVT set.
+ * Granted mode is the row; requested mode is the column.
+ * Usage: matrix[grmode+1][rqmode+1]
+ */
+
+static const int __quecvt_compat_matrix[8][8] = {
+      /* UN NL CR CW PR PW EX PD */
+        {0, 0, 0, 0, 0, 0, 0, 0},       /* UN */
+        {0, 0, 1, 1, 1, 1, 1, 0},       /* NL */
+        {0, 0, 0, 1, 1, 1, 1, 0},       /* CR */
+        {0, 0, 0, 0, 1, 1, 1, 0},       /* CW */
+        {0, 0, 0, 1, 0, 1, 1, 0},       /* PR */
+        {0, 0, 0, 0, 0, 0, 1, 0},       /* PW */
+        {0, 0, 0, 0, 0, 0, 0, 0},       /* EX */
+        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
+};
+
+void dlm_print_lkb(struct dlm_lkb *lkb)
+{
+       printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x\n"
+              "     status %d rqmode %d grmode %d wait_type %d ast_type %d\n",
+              lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
+              lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
+              lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_ast_type);
+}
+
+void dlm_print_rsb(struct dlm_rsb *r)
+{
+       printk(KERN_ERR "rsb: nodeid %d flags %lx first %x rlc %d name %s\n",
+              r->res_nodeid, r->res_flags, r->res_first_lkid,
+              r->res_recover_locks_count, r->res_name);
+}
+
+void dlm_dump_rsb(struct dlm_rsb *r)
+{
+       struct dlm_lkb *lkb;
+
+       dlm_print_rsb(r);
+
+       printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
+              list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
+       printk(KERN_ERR "rsb lookup list\n");
+       list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
+               dlm_print_lkb(lkb);
+       printk(KERN_ERR "rsb grant queue:\n");
+       list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
+               dlm_print_lkb(lkb);
+       printk(KERN_ERR "rsb convert queue:\n");
+       list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
+               dlm_print_lkb(lkb);
+       printk(KERN_ERR "rsb wait queue:\n");
+       list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
+               dlm_print_lkb(lkb);
+}
+
+/* Threads cannot use the lockspace while it's being recovered */
+
+static inline void lock_recovery(struct dlm_ls *ls)
+{
+       down_read(&ls->ls_in_recovery);
+}
+
+static inline void unlock_recovery(struct dlm_ls *ls)
+{
+       up_read(&ls->ls_in_recovery);
+}
+
+static inline int lock_recovery_try(struct dlm_ls *ls)
+{
+       return down_read_trylock(&ls->ls_in_recovery);
+}
+
+static inline int can_be_queued(struct dlm_lkb *lkb)
+{
+       return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
+}
+
+static inline int force_blocking_asts(struct dlm_lkb *lkb)
+{
+       return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
+}
+
+static inline int is_demoted(struct dlm_lkb *lkb)
+{
+       return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
+}
+
+static inline int is_remote(struct dlm_rsb *r)
+{
+       DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
+       return !!r->res_nodeid;
+}
+
+static inline int is_process_copy(struct dlm_lkb *lkb)
+{
+       return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
+}
+
+static inline int is_master_copy(struct dlm_lkb *lkb)
+{
+       if (lkb->lkb_flags & DLM_IFL_MSTCPY)
+               DLM_ASSERT(lkb->lkb_nodeid, dlm_print_lkb(lkb););
+       return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
+}
+
+static inline int middle_conversion(struct dlm_lkb *lkb)
+{
+       if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
+           (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
+               return 1;
+       return 0;
+}
+
+static inline int down_conversion(struct dlm_lkb *lkb)
+{
+       return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
+}
+
+static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
+{
+       if (is_master_copy(lkb))
+               return;
+
+       DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););
+
+       lkb->lkb_lksb->sb_status = rv;
+       lkb->lkb_lksb->sb_flags = lkb->lkb_sbflags;
+
+       dlm_add_ast(lkb, AST_COMP);
+}
+
+static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
+{
+       if (is_master_copy(lkb))
+               send_bast(r, lkb, rqmode);
+       else {
+               lkb->lkb_bastmode = rqmode;
+               dlm_add_ast(lkb, AST_BAST);
+       }
+}
+
+/*
+ * Basic operations on rsb's and lkb's
+ */
+
+static struct dlm_rsb *create_rsb(struct dlm_ls *ls, char *name, int len)
+{
+       struct dlm_rsb *r;
+
+       r = allocate_rsb(ls, len);
+       if (!r)
+               return NULL;
+
+       r->res_ls = ls;
+       r->res_length = len;
+       memcpy(r->res_name, name, len);
+       mutex_init(&r->res_mutex);
+
+       INIT_LIST_HEAD(&r->res_lookup);
+       INIT_LIST_HEAD(&r->res_grantqueue);
+       INIT_LIST_HEAD(&r->res_convertqueue);
+       INIT_LIST_HEAD(&r->res_waitqueue);
+       INIT_LIST_HEAD(&r->res_root_list);
+       INIT_LIST_HEAD(&r->res_recover_list);
+
+       return r;
+}
+
+static int search_rsb_list(struct list_head *head, char *name, int len,
+                          unsigned int flags, struct dlm_rsb **r_ret)
+{
+       struct dlm_rsb *r;
+       int error = 0;
+
+       list_for_each_entry(r, head, res_hashchain) {
+               if (len == r->res_length && !memcmp(name, r->res_name, len))
+                       goto found;
+       }
+       return -EBADR;
+
+ found:
+       if (r->res_nodeid && (flags & R_MASTER))
+               error = -ENOTBLK;
+       *r_ret = r;
+       return error;
+}
+
+static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b,
+                      unsigned int flags, struct dlm_rsb **r_ret)
+{
+       struct dlm_rsb *r;
+       int error;
+
+       error = search_rsb_list(&ls->ls_rsbtbl[b].list, name, len, flags, &r);
+       if (!error) {
+               kref_get(&r->res_ref);
+               goto out;
+       }
+       error = search_rsb_list(&ls->ls_rsbtbl[b].toss, name, len, flags, &r);
+       if (error)
+               goto out;
+
+       list_move(&r->res_hashchain, &ls->ls_rsbtbl[b].list);
+
+       if (dlm_no_directory(ls))
+               goto out;
+
+       if (r->res_nodeid == -1) {
+               rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
+               r->res_first_lkid = 0;
+       } else if (r->res_nodeid > 0) {
+               rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
+               r->res_first_lkid = 0;
+       } else {
+               DLM_ASSERT(r->res_nodeid == 0, dlm_print_rsb(r););
+               DLM_ASSERT(!rsb_flag(r, RSB_MASTER_UNCERTAIN),);
+       }
+ out:
+       *r_ret = r;
+       return error;
+}
+
+static int search_rsb(struct dlm_ls *ls, char *name, int len, int b,
+                     unsigned int flags, struct dlm_rsb **r_ret)
+{
+       int error;
+       write_lock(&ls->ls_rsbtbl[b].lock);
+       error = _search_rsb(ls, name, len, b, flags, r_ret);
+       write_unlock(&ls->ls_rsbtbl[b].lock);
+       return error;
+}
+
+/*
+ * Find rsb in rsbtbl and potentially create/add one
+ *
+ * Delaying the release of rsb's has a similar benefit to applications keeping
+ * NL locks on an rsb, but without the guarantee that the cached master value
+ * will still be valid when the rsb is reused.  Apps aren't always smart enough
+ * to keep NL locks on an rsb that they may lock again shortly; this can lead
+ * to excessive master lookups and removals if we don't delay the release.
+ *
+ * Searching for an rsb means looking through both the normal list and toss
+ * list.  When found on the toss list the rsb is moved to the normal list with
+ * ref count of 1; when found on normal list the ref count is incremented.
+ */
+
+static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
+                   unsigned int flags, struct dlm_rsb **r_ret)
+{
+       struct dlm_rsb *r, *tmp;
+       uint32_t hash, bucket;
+       int error = 0;
+
+       if (dlm_no_directory(ls))
+               flags |= R_CREATE;
+
+       hash = jhash(name, namelen, 0);
+       bucket = hash & (ls->ls_rsbtbl_size - 1);
+
+       error = search_rsb(ls, name, namelen, bucket, flags, &r);
+       if (!error)
+               goto out;
+
+       if (error == -EBADR && !(flags & R_CREATE))
+               goto out;
+
+       /* the rsb was found but wasn't a master copy */
+       if (error == -ENOTBLK)
+               goto out;
+
+       error = -ENOMEM;
+       r = create_rsb(ls, name, namelen);
+       if (!r)
+               goto out;
+
+       r->res_hash = hash;
+       r->res_bucket = bucket;
+       r->res_nodeid = -1;
+       kref_init(&r->res_ref);
+
+       /* With no directory, the master can be set immediately */
+       if (dlm_no_directory(ls)) {
+               int nodeid = dlm_dir_nodeid(r);
+               if (nodeid == dlm_our_nodeid())
+                       nodeid = 0;
+               r->res_nodeid = nodeid;
+       }
+
+       write_lock(&ls->ls_rsbtbl[bucket].lock);
+       error = _search_rsb(ls, name, namelen, bucket, 0, &tmp);
+       if (!error) {
+               write_unlock(&ls->ls_rsbtbl[bucket].lock);
+               free_rsb(r);
+               r = tmp;
+               goto out;
+       }
+       list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
+       write_unlock(&ls->ls_rsbtbl[bucket].lock);
+       error = 0;
+ out:
+       *r_ret = r;
+       return error;
+}
+
+int dlm_find_rsb(struct dlm_ls *ls, char *name, int namelen,
+                unsigned int flags, struct dlm_rsb **r_ret)
+{
+       return find_rsb(ls, name, namelen, flags, r_ret);
+}
+
+/* This is only called to add a reference when the code already holds
+   a valid reference to the rsb, so there's no need for locking. */
+
+static inline void hold_rsb(struct dlm_rsb *r)
+{
+       kref_get(&r->res_ref);
+}
+
+void dlm_hold_rsb(struct dlm_rsb *r)
+{
+       hold_rsb(r);
+}
+
+static void toss_rsb(struct kref *kref)
+{
+       struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
+       struct dlm_ls *ls = r->res_ls;
+
+       DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
+       kref_init(&r->res_ref);
+       list_move(&r->res_hashchain, &ls->ls_rsbtbl[r->res_bucket].toss);
+       r->res_toss_time = jiffies;
+       if (r->res_lvbptr) {
+               free_lvb(r->res_lvbptr);
+               r->res_lvbptr = NULL;
+       }
+}
+
+/* When all references to the rsb are gone it's transferred to
+   the tossed list for later disposal. */
+
+static void put_rsb(struct dlm_rsb *r)
+{
+       struct dlm_ls *ls = r->res_ls;
+       uint32_t bucket = r->res_bucket;
+
+       write_lock(&ls->ls_rsbtbl[bucket].lock);
+       kref_put(&r->res_ref, toss_rsb);
+       write_unlock(&ls->ls_rsbtbl[bucket].lock);
+}
+
+void dlm_put_rsb(struct dlm_rsb *r)
+{
+       put_rsb(r);
+}
+
+/* See comment for unhold_lkb */
+
+static void unhold_rsb(struct dlm_rsb *r)
+{
+       int rv;
+       rv = kref_put(&r->res_ref, toss_rsb);
+       DLM_ASSERT(!rv, dlm_dump_rsb(r););
+}
+
+static void kill_rsb(struct kref *kref)
+{
+       struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
+
+       /* All work is done after the return from kref_put() so we
+          can release the write_lock before the remove and free. */
+
+       DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
+       DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
+       DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
+       DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
+       DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
+       DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
+}
+
+/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
+   The rsb must exist as long as any lkb's for it do. */
+
+static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       hold_rsb(r);
+       lkb->lkb_resource = r;
+}
+
+static void detach_lkb(struct dlm_lkb *lkb)
+{
+       if (lkb->lkb_resource) {
+               put_rsb(lkb->lkb_resource);
+               lkb->lkb_resource = NULL;
+       }
+}
+
+static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
+{
+       struct dlm_lkb *lkb, *tmp;
+       uint32_t lkid = 0;
+       uint16_t bucket;
+
+       lkb = allocate_lkb(ls);
+       if (!lkb)
+               return -ENOMEM;
+
+       lkb->lkb_nodeid = -1;
+       lkb->lkb_grmode = DLM_LOCK_IV;
+       kref_init(&lkb->lkb_ref);
+       INIT_LIST_HEAD(&lkb->lkb_ownqueue);
+
+       get_random_bytes(&bucket, sizeof(bucket));
+       bucket &= (ls->ls_lkbtbl_size - 1);
+
+       write_lock(&ls->ls_lkbtbl[bucket].lock);
+
+       /* counter can roll over so we must verify lkid is not in use */
+
+       while (lkid == 0) {
+               lkid = bucket | (ls->ls_lkbtbl[bucket].counter++ << 16);
+
+               list_for_each_entry(tmp, &ls->ls_lkbtbl[bucket].list,
+                                   lkb_idtbl_list) {
+                       if (tmp->lkb_id != lkid)
+                               continue;
+                       lkid = 0;
+                       break;
+               }
+       }
+
+       lkb->lkb_id = lkid;
+       list_add(&lkb->lkb_idtbl_list, &ls->ls_lkbtbl[bucket].list);
+       write_unlock(&ls->ls_lkbtbl[bucket].lock);
+
+       *lkb_ret = lkb;
+       return 0;
+}
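+
+/* A lock ID encodes its table bucket in the low 16 bits and the
+   bucket's rolling counter in the high 16 bits: lkid 0x00030102 lives
+   in bucket 0x0102 with counter value 3 (presumably the fourth ID
+   handed out there, counters starting at zero).  find_lkb() below only
+   needs lkid & 0xFFFF to pick the right table row. */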
+
+static struct dlm_lkb *__find_lkb(struct dlm_ls *ls, uint32_t lkid)
+{
+       uint16_t bucket = lkid & 0xFFFF;
+       struct dlm_lkb *lkb;
+
+       list_for_each_entry(lkb, &ls->ls_lkbtbl[bucket].list, lkb_idtbl_list) {
+               if (lkb->lkb_id == lkid)
+                       return lkb;
+       }
+       return NULL;
+}
+
+static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
+{
+       struct dlm_lkb *lkb;
+       uint16_t bucket = lkid & 0xFFFF;
+
+       if (bucket >= ls->ls_lkbtbl_size)
+               return -EBADSLT;
+
+       read_lock(&ls->ls_lkbtbl[bucket].lock);
+       lkb = __find_lkb(ls, lkid);
+       if (lkb)
+               kref_get(&lkb->lkb_ref);
+       read_unlock(&ls->ls_lkbtbl[bucket].lock);
+
+       *lkb_ret = lkb;
+       return lkb ? 0 : -ENOENT;
+}
+
+static void kill_lkb(struct kref *kref)
+{
+       struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);
+
+       /* All work is done after the return from kref_put() so we
+          can release the write_lock before the detach_lkb */
+
+       DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
+}
+
+/* __put_lkb() is used when an lkb may not have an rsb attached to
+   it so we need to provide the lockspace explicitly */
+
+static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
+{
+       uint16_t bucket = lkb->lkb_id & 0xFFFF;
+
+       write_lock(&ls->ls_lkbtbl[bucket].lock);
+       if (kref_put(&lkb->lkb_ref, kill_lkb)) {
+               list_del(&lkb->lkb_idtbl_list);
+               write_unlock(&ls->ls_lkbtbl[bucket].lock);
+
+               detach_lkb(lkb);
+
+               /* for local/process lkbs, lvbptr points to caller's lksb */
+               if (lkb->lkb_lvbptr && is_master_copy(lkb))
+                       free_lvb(lkb->lkb_lvbptr);
+               free_lkb(lkb);
+               return 1;
+       } else {
+               write_unlock(&ls->ls_lkbtbl[bucket].lock);
+               return 0;
+       }
+}
+
+int dlm_put_lkb(struct dlm_lkb *lkb)
+{
+       struct dlm_ls *ls;
+
+       DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
+       DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););
+
+       ls = lkb->lkb_resource->res_ls;
+       return __put_lkb(ls, lkb);
+}
+
+/* This is only called to add a reference when the code already holds
+   a valid reference to the lkb, so there's no need for locking. */
+
+static inline void hold_lkb(struct dlm_lkb *lkb)
+{
+       kref_get(&lkb->lkb_ref);
+}
+
+/* This is called when we need to remove a reference and are certain
+   it's not the last ref.  e.g. del_lkb is always called between a
+   find_lkb/put_lkb and is always the inverse of a previous add_lkb.
+   put_lkb would work fine, but would involve unnecessary locking */
+
+static inline void unhold_lkb(struct dlm_lkb *lkb)
+{
+       int rv;
+       rv = kref_put(&lkb->lkb_ref, kill_lkb);
+       DLM_ASSERT(!rv, dlm_print_lkb(lkb););
+}
+
+static void lkb_add_ordered(struct list_head *new, struct list_head *head,
+                           int mode)
+{
+       struct dlm_lkb *lkb = NULL;
+
+       list_for_each_entry(lkb, head, lkb_statequeue)
+               if (lkb->lkb_rqmode < mode)
+                       break;
+
+       if (!lkb)
+               list_add_tail(new, head);
+       else
+               __list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
+}
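+
+/* Note: list_for_each_entry() never leaves lkb NULL here; on an empty
+   list or a completed traversal the cursor ends at the head sentinel,
+   so the list_add_tail() branch is effectively dead.  The __list_add()
+   path still appends at the tail in that case because
+   &lkb->lkb_statequeue is then head itself. */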
+
+/* add/remove lkb to rsb's grant/convert/wait queue */
+
+static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
+{
+       kref_get(&lkb->lkb_ref);
+
+       DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
+
+       lkb->lkb_status = status;
+
+       switch (status) {
+       case DLM_LKSTS_WAITING:
+               if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
+                       list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
+               else
+                       list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
+               break;
+       case DLM_LKSTS_GRANTED:
+               /* convention says granted locks kept in order of grmode */
+               lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
+                               lkb->lkb_grmode);
+               break;
+       case DLM_LKSTS_CONVERT:
+               if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
+                       list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
+               else
+                       list_add_tail(&lkb->lkb_statequeue,
+                                     &r->res_convertqueue);
+               break;
+       default:
+               DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
+       }
+}
+
+static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       lkb->lkb_status = 0;
+       list_del(&lkb->lkb_statequeue);
+       unhold_lkb(lkb);
+}
+
+static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
+{
+       hold_lkb(lkb);
+       del_lkb(r, lkb);
+       add_lkb(r, lkb, sts);
+       unhold_lkb(lkb);
+}
+
+/* add/remove lkb from global waiters list of lkb's waiting for
+   a reply from a remote node */
+
+static void add_to_waiters(struct dlm_lkb *lkb, int mstype)
+{
+       struct dlm_ls *ls = lkb->lkb_resource->res_ls;
+
+       mutex_lock(&ls->ls_waiters_mutex);
+       if (lkb->lkb_wait_type) {
+               log_print("add_to_waiters error %d", lkb->lkb_wait_type);
+               goto out;
+       }
+       lkb->lkb_wait_type = mstype;
+       kref_get(&lkb->lkb_ref);
+       list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
+ out:
+       mutex_unlock(&ls->ls_waiters_mutex);
+}
+
+static int _remove_from_waiters(struct dlm_lkb *lkb)
+{
+       int error = 0;
+
+       if (!lkb->lkb_wait_type) {
+               log_print("remove_from_waiters error");
+               error = -EINVAL;
+               goto out;
+       }
+       lkb->lkb_wait_type = 0;
+       list_del(&lkb->lkb_wait_reply);
+       unhold_lkb(lkb);
+ out:
+       return error;
+}
+
+static int remove_from_waiters(struct dlm_lkb *lkb)
+{
+       struct dlm_ls *ls = lkb->lkb_resource->res_ls;
+       int error;
+
+       mutex_lock(&ls->ls_waiters_mutex);
+       error = _remove_from_waiters(lkb);
+       mutex_unlock(&ls->ls_waiters_mutex);
+       return error;
+}
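+
+/* Sketch, not in the patch: the waiters lifecycle these helpers implement.
+   Every operation that needs a remote reply parks its lkb on ls_waiters
+   before sending and unparks it when the matching reply arrives: */
+#if 0
+       add_to_waiters(lkb, DLM_MSG_REQUEST);   /* before send_request() */
+       /* ... DLM_MSG_REQUEST_REPLY arrives, receive_request_reply() ... */
+       remove_from_waiters(lkb);  /* wait_type cleared, extra ref dropped */
+#endif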
+
+static void dir_remove(struct dlm_rsb *r)
+{
+       int to_nodeid;
+
+       if (dlm_no_directory(r->res_ls))
+               return;
+
+       to_nodeid = dlm_dir_nodeid(r);
+       if (to_nodeid != dlm_our_nodeid())
+               send_remove(r);
+       else
+               dlm_dir_remove_entry(r->res_ls, to_nodeid,
+                                    r->res_name, r->res_length);
+}
+
+/* FIXME: shouldn't this be able to exit as soon as one non-due rsb is
+   found since they are in order of newest to oldest? */
+
+static int shrink_bucket(struct dlm_ls *ls, int b)
+{
+       struct dlm_rsb *r;
+       int count = 0, found;
+
+       for (;;) {
+               found = 0;
+               write_lock(&ls->ls_rsbtbl[b].lock);
+               list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
+                                           res_hashchain) {
+                       if (!time_after_eq(jiffies, r->res_toss_time +
+                                          dlm_config.toss_secs * HZ))
+                               continue;
+                       found = 1;
+                       break;
+               }
+
+               if (!found) {
+                       write_unlock(&ls->ls_rsbtbl[b].lock);
+                       break;
+               }
+
+               if (kref_put(&r->res_ref, kill_rsb)) {
+                       list_del(&r->res_hashchain);
+                       write_unlock(&ls->ls_rsbtbl[b].lock);
+
+                       if (is_master(r))
+                               dir_remove(r);
+                       free_rsb(r);
+                       count++;
+               } else {
+                       write_unlock(&ls->ls_rsbtbl[b].lock);
+                       log_error(ls, "tossed rsb in use %s", r->res_name);
+               }
+       }
+
+       return count;
+}
+
+void dlm_scan_rsbs(struct dlm_ls *ls)
+{
+       int i;
+
+       if (dlm_locking_stopped(ls))
+               return;
+
+       for (i = 0; i < ls->ls_rsbtbl_size; i++) {
+               shrink_bucket(ls, i);
+               cond_resched();
+       }
+}
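+
+/* Sketch, not in the patch: the aging test in shrink_bucket() in plain
+   user-space terms, with jiffies/HZ replaced by time(NULL) seconds.  An
+   rsb on the toss list becomes reclaimable once toss_secs have passed: */
+#if 0
+#include <stdio.h>
+#include <time.h>
+
+struct toy_rsb {
+       time_t toss_time;       /* when the rsb went onto the toss list */
+};
+
+static int toss_due(const struct toy_rsb *r, int toss_secs)
+{
+       /* mirrors time_after_eq(jiffies, res_toss_time + toss_secs * HZ) */
+       return time(NULL) >= r->toss_time + toss_secs;
+}
+
+int main(void)
+{
+       struct toy_rsb r = { .toss_time = time(NULL) - 15 };
+
+       printf("due with toss_secs=10: %d\n", toss_due(&r, 10));  /* 1 */
+       printf("due with toss_secs=30: %d\n", toss_due(&r, 30));  /* 0 */
+       return 0;
+}
+#endif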
+
+/* lkb is master or local copy */
+
+static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       int b, len = r->res_ls->ls_lvblen;
+
+       /* b=1 lvb returned to caller
+          b=0 lvb written to rsb or invalidated
+          b=-1 do nothing */
+
+       b =  dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
+
+       if (b == 1) {
+               if (!lkb->lkb_lvbptr)
+                       return;
+
+               if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
+                       return;
+
+               if (!r->res_lvbptr)
+                       return;
+
+               memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
+               lkb->lkb_lvbseq = r->res_lvbseq;
+
+       } else if (b == 0) {
+               if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
+                       rsb_set_flag(r, RSB_VALNOTVALID);
+                       return;
+               }
+
+               if (!lkb->lkb_lvbptr)
+                       return;
+
+               if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
+                       return;
+
+               if (!r->res_lvbptr)
+                       r->res_lvbptr = allocate_lvb(r->res_ls);
+
+               if (!r->res_lvbptr)
+                       return;
+
+               memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
+               r->res_lvbseq++;
+               lkb->lkb_lvbseq = r->res_lvbseq;
+               rsb_clear_flag(r, RSB_VALNOTVALID);
+       }
+
+       if (rsb_flag(r, RSB_VALNOTVALID))
+               lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
+}
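+
+/* Sketch, not in the patch: how the table lookup above is indexed.  Lock
+   modes run from DLM_LOCK_IV == -1 up to DLM_LOCK_EX == 5, so the "+1"
+   maps them onto row/column 0 and up of dlm_lvb_operations (defined in
+   lvb_table.h).  Toy excerpt with invented values for the demo: */
+#if 0
+       static const int toy_ops[2][2] = {
+               /* rqmode:    IV  NL */
+               /* gr IV */ { -1,  1 },
+               /* gr NL */ { -1, -1 },
+       };
+       int grmode = DLM_LOCK_IV;       /* -1, a new request */
+       int rqmode = DLM_LOCK_NL;       /*  0 */
+       int b = toy_ops[grmode + 1][rqmode + 1];        /* row 0, column 1 */
+#endif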
+
+static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       if (lkb->lkb_grmode < DLM_LOCK_PW)
+               return;
+
+       if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
+               rsb_set_flag(r, RSB_VALNOTVALID);
+               return;
+       }
+
+       if (!lkb->lkb_lvbptr)
+               return;
+
+       if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
+               return;
+
+       if (!r->res_lvbptr)
+               r->res_lvbptr = allocate_lvb(r->res_ls);
+
+       if (!r->res_lvbptr)
+               return;
+
+       memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
+       r->res_lvbseq++;
+       rsb_clear_flag(r, RSB_VALNOTVALID);
+}
+
+/* lkb is process copy (pc) */
+
+static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
+                           struct dlm_message *ms)
+{
+       int b;
+
+       if (!lkb->lkb_lvbptr)
+               return;
+
+       if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
+               return;
+
+       b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
+       if (b == 1) {
+               int len = receive_extralen(ms);
+               memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
+               lkb->lkb_lvbseq = ms->m_lvbseq;
+       }
+}
+
+/* Manipulate lkb's on rsb's convert/granted/waiting queues
+   remove_lock -- used for unlock, removes lkb from granted
+   revert_lock -- used for cancel, moves lkb from convert to granted
+   grant_lock  -- used for request and convert, adds lkb to granted or
+                  moves lkb from convert or waiting to granted
+
+   Each of these is used for master or local copy lkb's.  There is
+   also a _pc() variation used to make the corresponding change on
+   a process copy (pc) lkb. */
+
+static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       del_lkb(r, lkb);
+       lkb->lkb_grmode = DLM_LOCK_IV;
+       /* this unhold undoes the original ref from create_lkb()
+          so this leads to the lkb being freed */
+       unhold_lkb(lkb);
+}
+
+static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       set_lvb_unlock(r, lkb);
+       _remove_lock(r, lkb);
+}
+
+static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       _remove_lock(r, lkb);
+}
+
+static void revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       lkb->lkb_rqmode = DLM_LOCK_IV;
+
+       switch (lkb->lkb_status) {
+       case DLM_LKSTS_GRANTED:
+               break;
+       case DLM_LKSTS_CONVERT:
+               move_lkb(r, lkb, DLM_LKSTS_GRANTED);
+               break;
+       case DLM_LKSTS_WAITING:
+               del_lkb(r, lkb);
+               lkb->lkb_grmode = DLM_LOCK_IV;
+               /* this unhold undoes the original ref from create_lkb()
+                  so this leads to the lkb being freed */
+               unhold_lkb(lkb);
+               break;
+       default:
+               log_print("invalid status for revert %d", lkb->lkb_status);
+       }
+}
+
+static void revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       revert_lock(r, lkb);
+}
+
+static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       if (lkb->lkb_grmode != lkb->lkb_rqmode) {
+               lkb->lkb_grmode = lkb->lkb_rqmode;
+               if (lkb->lkb_status)
+                       move_lkb(r, lkb, DLM_LKSTS_GRANTED);
+               else
+                       add_lkb(r, lkb, DLM_LKSTS_GRANTED);
+       }
+
+       lkb->lkb_rqmode = DLM_LOCK_IV;
+}
+
+static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       set_lvb_lock(r, lkb);
+       _grant_lock(r, lkb);
+       lkb->lkb_highbast = 0;
+}
+
+static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
+                         struct dlm_message *ms)
+{
+       set_lvb_lock_pc(r, lkb, ms);
+       _grant_lock(r, lkb);
+}
+
+/* called by grant_pending_locks() which means an async grant message must
+   be sent to the requesting node in addition to granting the lock if the
+   lkb belongs to a remote node. */
+
+static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       grant_lock(r, lkb);
+       if (is_master_copy(lkb))
+               send_grant(r, lkb);
+       else
+               queue_cast(r, lkb, 0);
+}
+
+static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
+{
+       struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
+                                          lkb_statequeue);
+       if (lkb->lkb_id == first->lkb_id)
+               return 1;
+
+       return 0;
+}
+
+/* Check if the given lkb conflicts with another lkb on the queue. */
+
+static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
+{
+       struct dlm_lkb *this;
+
+       list_for_each_entry(this, head, lkb_statequeue) {
+               if (this == lkb)
+                       continue;
+               if (!modes_compat(this, lkb))
+                       return 1;
+       }
+       return 0;
+}
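+
+/* Sketch, not in the patch: modes_compat() consults a compatibility
+   matrix; the standard VMS/DLM table for NL..EX looks like the toy
+   user-space model below (1 = can coexist on the grant queue): */
+#if 0
+#include <stdio.h>
+
+enum { NL, CR, CW, PR, PW, EX };
+
+static const int compat[6][6] = {
+       /*         NL CR CW PR PW EX */
+       /* NL */ {  1, 1, 1, 1, 1, 1 },
+       /* CR */ {  1, 1, 1, 1, 1, 0 },
+       /* CW */ {  1, 1, 1, 0, 0, 0 },
+       /* PR */ {  1, 1, 0, 1, 0, 0 },
+       /* PW */ {  1, 1, 0, 0, 0, 0 },
+       /* EX */ {  1, 0, 0, 0, 0, 0 },
+};
+
+int main(void)
+{
+       printf("PR vs PR: %d\n", compat[PR][PR]);  /* 1, shared readers */
+       printf("PR vs PW: %d\n", compat[PR][PW]);  /* 0, writer conflicts */
+       return 0;
+}
+#endif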
+
+/*
+ * "A conversion deadlock arises with a pair of lock requests in the converting
+ * queue for one resource.  The granted mode of each lock blocks the requested
+ * mode of the other lock."
+ *
+ * Part 2: if the granted mode of lkb is preventing the first lkb in the
+ * convert queue from being granted, then demote lkb (set grmode to NL).
+ * This second form requires that we check for conv-deadlk even when
+ * now == 0 in _can_be_granted().
+ *
+ * Example:
+ * Granted Queue: empty
+ * Convert Queue: NL->EX (first lock)
+ *                PR->EX (second lock)
+ *
+ * The first lock can't be granted because of the granted mode of the second
+ * lock and the second lock can't be granted because it's not first in the
+ * list.  We demote the granted mode of the second lock (the lkb passed to this
+ * function).
+ *
+ * After the resolution, the "grant pending" function needs to go back and try
+ * to grant locks on the convert queue again since the first lock can now be
+ * granted.
+ */
+
+static int conversion_deadlock_detect(struct dlm_rsb *rsb, struct dlm_lkb *lkb)
+{
+       struct dlm_lkb *this, *first = NULL, *self = NULL;
+
+       list_for_each_entry(this, &rsb->res_convertqueue, lkb_statequeue) {
+               if (!first)
+                       first = this;
+               if (this == lkb) {
+                       self = lkb;
+                       continue;
+               }
+
+               if (!modes_compat(this, lkb) && !modes_compat(lkb, this))
+                       return 1;
+       }
+
+       /* if lkb is on the convert queue and is preventing the first
+          from being granted, then there's deadlock and we demote lkb.
+          multiple converting locks may need to do this before the first
+          converting lock can be granted. */
+
+       if (self && self != first) {
+               if (!modes_compat(lkb, first) &&
+                   !queue_conflict(&rsb->res_grantqueue, first))
+                       return 1;
+       }
+
+       return 0;
+}
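+
+/* Sketch, not in the patch: the worked example from the comment above,
+   expressed with the toy compat[][] table from the earlier sketch.
+   Convert queue holds A (NL->EX, first) and B (PR->EX, second): */
+#if 0
+static int demo_conv_deadlock(void)
+{
+       int a_rq = EX;                  /* A requests EX */
+       int b_gr = PR;                  /* B still holds PR */
+
+       if (!compat[b_gr][a_rq])        /* A blocked by B's granted mode */
+               b_gr = NL;              /* demote B, DLM_SBF_DEMOTED */
+
+       return compat[b_gr][a_rq];      /* 1: A can now be granted */
+}
+#endif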
+
+/*
+ * Return 1 if the lock can be granted, 0 otherwise.
+ * Also detect and resolve conversion deadlocks.
+ *
+ * lkb is the lock to be granted
+ *
+ * now is 1 if the function is being called in the context of the
+ * immediate request; it is 0 if called later, after the lock has been
+ * queued.
+ *
+ * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
+ */
+
+static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
+{
+       int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
+
+       /*
+        * 6-10: Version 5.4 introduced an option to address the phenomenon of
+        * a new request for a NL mode lock being blocked.
+        *
+        * 6-11: If the optional EXPEDITE flag is used with the new NL mode
+        * request, then it would be granted.  In essence, the use of this flag
+        * tells the Lock Manager to expedite this request by not considering
+        * what may be in the CONVERTING or WAITING queues...  As of this
+        * writing, the EXPEDITE flag can be used only with new requests for NL
+        * mode locks.  This flag is not valid for conversion requests.
+        *
+        * A shortcut.  Earlier checks return an error if EXPEDITE is used in a
+        * conversion or used with a non-NL requested mode.  We also know an
+        * EXPEDITE request is always granted immediately, so now must always
+        * be 1.  The full condition to grant an expedite request: (now &&
+        * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
+        * therefore be shortened to just checking the flag.
+        */
+
+       if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
+               return 1;
+
+       /*
+        * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
+        * added to the remaining conditions.
+        */
+
+       if (queue_conflict(&r->res_grantqueue, lkb))
+               goto out;
+
+       /*
+        * 6-3: By default, a conversion request is immediately granted if the
+        * requested mode is compatible with the modes of all other granted
+        * locks
+        */
+
+       if (queue_conflict(&r->res_convertqueue, lkb))
+               goto out;
+
+       /*
+        * 6-5: But the default algorithm for deciding whether to grant or
+        * queue conversion requests does not by itself guarantee that such
+        * requests are serviced on a "first come first serve" basis.  This, in
+        * turn, can lead to a phenomenon known as "indefinite postponement".
+        *
+        * 6-7: This issue is dealt with by using the optional QUECVT flag with
+        * the system service employed to request a lock conversion.  This flag
+        * forces certain conversion requests to be queued, even if they are
+        * compatible with the granted modes of other locks on the same
+        * resource.  Thus, the use of this flag results in conversion requests
+        * being ordered on a "first come first serve" basis.
+        *
+        * DCT: This condition is all about new conversions being able to occur
+        * "in place" while the lock remains on the granted queue (assuming
+        * nothing else conflicts.)  IOW if QUECVT isn't set, a conversion
+        * doesn't _have_ to go onto the convert queue where it's processed in
+        * order.  The "now" variable is necessary to distinguish converts
+        * being received and processed for the first time now, because once a
+        * convert is moved to the conversion queue the condition below applies
+        * requiring fifo granting.
+        */
+
+       if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
+               return 1;
+
+       /*
+        * The NOORDER flag is set to avoid the standard vms rules on grant
+        * order.
+        */
+
+       if (lkb->lkb_exflags & DLM_LKF_NOORDER)
+               return 1;
+
+       /*
+        * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
+        * granted until all other conversion requests ahead of it are granted
+        * and/or canceled.
+        */
+
+       if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
+               return 1;
+
+       /*
+        * 6-4: By default, a new request is immediately granted only if all
+        * three of the following conditions are satisfied when the request is
+        * issued:
+        * - The queue of ungranted conversion requests for the resource is
+        *   empty.
+        * - The queue of ungranted new requests for the resource is empty.
+        * - The mode of the new request is compatible with the most
+        *   restrictive mode of all granted locks on the resource.
+        */
+
+       if (now && !conv && list_empty(&r->res_convertqueue) &&
+           list_empty(&r->res_waitqueue))
+               return 1;
+
+       /*
+        * 6-4: Once a lock request is in the queue of ungranted new requests,
+        * it cannot be granted until the queue of ungranted conversion
+        * requests is empty, all ungranted new requests ahead of it are
+        * granted and/or canceled, and it is compatible with the granted mode
+        * of the most restrictive lock granted on the resource.
+        */
+
+       if (!now && !conv && list_empty(&r->res_convertqueue) &&
+           first_in_list(lkb, &r->res_waitqueue))
+               return 1;
+
+ out:
+       /*
+        * The following, enabled by CONVDEADLK, departs from VMS.
+        */
+
+       if (conv && (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) &&
+           conversion_deadlock_detect(r, lkb)) {
+               lkb->lkb_grmode = DLM_LOCK_NL;
+               lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
+       }
+
+       return 0;
+}
+
+/*
+ * The ALTPR and ALTCW flags aren't traditional lock manager flags, but are a
+ * simple way to provide a big optimization to applications that can use them.
+ */
+
+static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
+{
+       uint32_t flags = lkb->lkb_exflags;
+       int rv;
+       int8_t alt = 0, rqmode = lkb->lkb_rqmode;
+
+       rv = _can_be_granted(r, lkb, now);
+       if (rv)
+               goto out;
+
+       if (lkb->lkb_sbflags & DLM_SBF_DEMOTED)
+               goto out;
+
+       if (rqmode != DLM_LOCK_PR && flags & DLM_LKF_ALTPR)
+               alt = DLM_LOCK_PR;
+       else if (rqmode != DLM_LOCK_CW && flags & DLM_LKF_ALTCW)
+               alt = DLM_LOCK_CW;
+
+       if (alt) {
+               lkb->lkb_rqmode = alt;
+               rv = _can_be_granted(r, lkb, now);
+               if (rv)
+                       lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
+               else
+                       lkb->lkb_rqmode = rqmode;
+       }
+ out:
+       return rv;
+}
+
+static int grant_pending_convert(struct dlm_rsb *r, int high)
+{
+       struct dlm_lkb *lkb, *s;
+       int hi, demoted, quit, grant_restart, demote_restart;
+
+       quit = 0;
+ restart:
+       grant_restart = 0;
+       demote_restart = 0;
+       hi = DLM_LOCK_IV;
+
+       list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
+               demoted = is_demoted(lkb);
+               if (can_be_granted(r, lkb, 0)) {
+                       grant_lock_pending(r, lkb);
+                       grant_restart = 1;
+               } else {
+                       hi = max_t(int, lkb->lkb_rqmode, hi);
+                       if (!demoted && is_demoted(lkb))
+                               demote_restart = 1;
+               }
+       }
+
+       if (grant_restart)
+               goto restart;
+       if (demote_restart && !quit) {
+               quit = 1;
+               goto restart;
+       }
+
+       return max_t(int, high, hi);
+}
+
+static int grant_pending_wait(struct dlm_rsb *r, int high)
+{
+       struct dlm_lkb *lkb, *s;
+
+       list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
+               if (can_be_granted(r, lkb, 0))
+                       grant_lock_pending(r, lkb);
+               else
+                       high = max_t(int, lkb->lkb_rqmode, high);
+       }
+
+       return high;
+}
+
+static void grant_pending_locks(struct dlm_rsb *r)
+{
+       struct dlm_lkb *lkb, *s;
+       int high = DLM_LOCK_IV;
+
+       DLM_ASSERT(is_master(r), dlm_dump_rsb(r););
+
+       high = grant_pending_convert(r, high);
+       high = grant_pending_wait(r, high);
+
+       if (high == DLM_LOCK_IV)
+               return;
+
+       /*
+        * If there are locks left on the wait/convert queue then send blocking
+        * ASTs to granted locks based on the largest requested mode (high)
+        * found above. FIXME: highbast < high comparison not valid for PR/CW.
+        */
+
+       list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
+               if (lkb->lkb_bastaddr && (lkb->lkb_highbast < high) &&
+                   !__dlm_compat_matrix[lkb->lkb_grmode+1][high+1]) {
+                       queue_bast(r, lkb, high);
+                       lkb->lkb_highbast = high;
+               }
+       }
+}
+
+static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
+                           struct dlm_lkb *lkb)
+{
+       struct dlm_lkb *gr;
+
+       list_for_each_entry(gr, head, lkb_statequeue) {
+               if (gr->lkb_bastaddr &&
+                   gr->lkb_highbast < lkb->lkb_rqmode &&
+                   !modes_compat(gr, lkb)) {
+                       queue_bast(r, gr, lkb->lkb_rqmode);
+                       gr->lkb_highbast = lkb->lkb_rqmode;
+               }
+       }
+}
+
+static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       send_bast_queue(r, &r->res_grantqueue, lkb);
+}
+
+static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       send_bast_queue(r, &r->res_grantqueue, lkb);
+       send_bast_queue(r, &r->res_convertqueue, lkb);
+}
+
+/* set_master(r, lkb) -- set the master nodeid of a resource
+
+   The purpose of this function is to set the nodeid field in the given
+   lkb using the nodeid field in the given rsb.  If the rsb's nodeid is
+   known, it can just be copied to the lkb and the function will return
+   0.  If the rsb's nodeid is _not_ known, it needs to be looked up
+   before it can be copied to the lkb.
+
+   When the rsb nodeid is being looked up remotely, the initial lkb
+   causing the lookup is kept on the ls_waiters list waiting for the
+   lookup reply.  Other lkb's waiting for the same rsb lookup are kept
+   on the rsb's res_lookup list until the master is verified.
+
+   Return values:
+   0: nodeid is set in rsb/lkb and the caller should go ahead and use it
+   1: the rsb master is not available and the lkb has been placed on
+      a wait queue
+*/
+
+static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       struct dlm_ls *ls = r->res_ls;
+       int error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();
+
+       if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
+               rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
+               r->res_first_lkid = lkb->lkb_id;
+               lkb->lkb_nodeid = r->res_nodeid;
+               return 0;
+       }
+
+       if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
+               list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
+               return 1;
+       }
+
+       if (r->res_nodeid == 0) {
+               lkb->lkb_nodeid = 0;
+               return 0;
+       }
+
+       if (r->res_nodeid > 0) {
+               lkb->lkb_nodeid = r->res_nodeid;
+               return 0;
+       }
+
+       DLM_ASSERT(r->res_nodeid == -1, dlm_dump_rsb(r););
+
+       dir_nodeid = dlm_dir_nodeid(r);
+
+       if (dir_nodeid != our_nodeid) {
+               r->res_first_lkid = lkb->lkb_id;
+               send_lookup(r, lkb);
+               return 1;
+       }
+
+       for (;;) {
+               /* It's possible for dlm_scand to remove an old rsb for
+                  this same resource from the toss list, us to create
+                  a new one, look up the master locally, and find it
+                  already exists just before dlm_scand does the
+                  dir_remove() on the previous rsb. */
+
+               error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
+                                      r->res_length, &ret_nodeid);
+               if (!error)
+                       break;
+               log_debug(ls, "dir_lookup error %d %s", error, r->res_name);
+               schedule();
+       }
+
+       if (ret_nodeid == our_nodeid) {
+               r->res_first_lkid = 0;
+               r->res_nodeid = 0;
+               lkb->lkb_nodeid = 0;
+       } else {
+               r->res_first_lkid = lkb->lkb_id;
+               r->res_nodeid = ret_nodeid;
+               lkb->lkb_nodeid = ret_nodeid;
+       }
+       return 0;
+}
+
+static void process_lookup_list(struct dlm_rsb *r)
+{
+       struct dlm_lkb *lkb, *safe;
+
+       list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
+               list_del(&lkb->lkb_rsb_lookup);
+               _request_lock(r, lkb);
+               schedule();
+       }
+}
+
+/* confirm_master -- confirm (or deny) an rsb's master nodeid */
+
+static void confirm_master(struct dlm_rsb *r, int error)
+{
+       struct dlm_lkb *lkb;
+
+       if (!r->res_first_lkid)
+               return;
+
+       switch (error) {
+       case 0:
+       case -EINPROGRESS:
+               r->res_first_lkid = 0;
+               process_lookup_list(r);
+               break;
+
+       case -EAGAIN:
+               /* the remote master didn't queue our NOQUEUE request;
+                  make a waiting lkb the first_lkid */
+
+               r->res_first_lkid = 0;
+
+               if (!list_empty(&r->res_lookup)) {
+                       lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
+                                        lkb_rsb_lookup);
+                       list_del(&lkb->lkb_rsb_lookup);
+                       r->res_first_lkid = lkb->lkb_id;
+                       _request_lock(r, lkb);
+               } else
+                       r->res_nodeid = -1;
+               break;
+
+       default:
+               log_error(r->res_ls, "confirm_master unknown error %d", error);
+       }
+}
+
+static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
+                        int namelen, uint32_t parent_lkid, void *ast,
+                        void *astarg, void *bast, struct dlm_args *args)
+{
+       int rv = -EINVAL;
+
+       /* check for invalid arg usage */
+
+       if (mode < 0 || mode > DLM_LOCK_EX)
+               goto out;
+
+       if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
+               goto out;
+
+       if (flags & DLM_LKF_CANCEL)
+               goto out;
+
+       if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
+               goto out;
+
+       if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
+               goto out;
+
+       if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
+               goto out;
+
+       if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
+               goto out;
+
+       if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
+               goto out;
+
+       if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
+               goto out;
+
+       if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
+               goto out;
+
+       if (!ast || !lksb)
+               goto out;
+
+       if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
+               goto out;
+
+       /* parent/child locks not yet supported */
+       if (parent_lkid)
+               goto out;
+
+       if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
+               goto out;
+
+       /* these args will be copied to the lkb in validate_lock_args,
+          it cannot be done now because when converting locks, fields in
+          an active lkb cannot be modified before locking the rsb */
+
+       args->flags = flags;
+       args->astaddr = ast;
+       args->astparam = (long) astarg;
+       args->bastaddr = bast;
+       args->mode = mode;
+       args->lksb = lksb;
+       rv = 0;
+ out:
+       return rv;
+}
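+
+/* Sketch, not in the patch: a few concrete cases of the validation above
+   (lksb, ast, bast and args stand for any valid caller-supplied values): */
+#if 0
+       /* -EINVAL: EXPEDITE only applies to new NL requests */
+       set_lock_args(DLM_LOCK_PR, &lksb, DLM_LKF_EXPEDITE, 4, 0,
+                     ast, NULL, bast, &args);
+       /* -EINVAL: QUECVT is meaningless without CONVERT */
+       set_lock_args(DLM_LOCK_EX, &lksb, DLM_LKF_QUECVT, 4, 0,
+                     ast, NULL, bast, &args);
+       /* 0: a plain new EX request */
+       set_lock_args(DLM_LOCK_EX, &lksb, 0, 4, 0, ast, NULL, bast, &args);
+#endif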
+
+static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
+{
+       if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
+                     DLM_LKF_FORCEUNLOCK))
+               return -EINVAL;
+
+       args->flags = flags;
+       args->astparam = (long) astarg;
+       return 0;
+}
+
+static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+                             struct dlm_args *args)
+{
+       int rv = -EINVAL;
+
+       if (args->flags & DLM_LKF_CONVERT) {
+               if (lkb->lkb_flags & DLM_IFL_MSTCPY)
+                       goto out;
+
+               if (args->flags & DLM_LKF_QUECVT &&
+                   !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
+                       goto out;
+
+               rv = -EBUSY;
+               if (lkb->lkb_status != DLM_LKSTS_GRANTED)
+                       goto out;
+
+               if (lkb->lkb_wait_type)
+                       goto out;
+       }
+
+       lkb->lkb_exflags = args->flags;
+       lkb->lkb_sbflags = 0;
+       lkb->lkb_astaddr = args->astaddr;
+       lkb->lkb_astparam = args->astparam;
+       lkb->lkb_bastaddr = args->bastaddr;
+       lkb->lkb_rqmode = args->mode;
+       lkb->lkb_lksb = args->lksb;
+       lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
+       lkb->lkb_ownpid = (int) current->pid;
+       rv = 0;
+ out:
+       return rv;
+}
+
+static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
+{
+       int rv = -EINVAL;
+
+       if (lkb->lkb_flags & DLM_IFL_MSTCPY)
+               goto out;
+
+       if (args->flags & DLM_LKF_FORCEUNLOCK)
+               goto out_ok;
+
+       if (args->flags & DLM_LKF_CANCEL &&
+           lkb->lkb_status == DLM_LKSTS_GRANTED)
+               goto out;
+
+       if (!(args->flags & DLM_LKF_CANCEL) &&
+           lkb->lkb_status != DLM_LKSTS_GRANTED)
+               goto out;
+
+       rv = -EBUSY;
+       if (lkb->lkb_wait_type)
+               goto out;
+
+ out_ok:
+       lkb->lkb_exflags = args->flags;
+       lkb->lkb_sbflags = 0;
+       lkb->lkb_astparam = args->astparam;
+
+       rv = 0;
+ out:
+       return rv;
+}
+
+/*
+ * Four stage 4 varieties:
+ * do_request(), do_convert(), do_unlock(), do_cancel()
+ * These are called on the master node for the given lock and
+ * from the central locking logic.
+ */
+
+static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       int error = 0;
+
+       if (can_be_granted(r, lkb, 1)) {
+               grant_lock(r, lkb);
+               queue_cast(r, lkb, 0);
+               goto out;
+       }
+
+       if (can_be_queued(lkb)) {
+               error = -EINPROGRESS;
+               add_lkb(r, lkb, DLM_LKSTS_WAITING);
+               send_blocking_asts(r, lkb);
+               goto out;
+       }
+
+       error = -EAGAIN;
+       if (force_blocking_asts(lkb))
+               send_blocking_asts_all(r, lkb);
+       queue_cast(r, lkb, -EAGAIN);
+
+ out:
+       return error;
+}
+
+static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       int error = 0;
+
+       /* changing an existing lock may allow others to be granted */
+
+       if (can_be_granted(r, lkb, 1)) {
+               grant_lock(r, lkb);
+               queue_cast(r, lkb, 0);
+               grant_pending_locks(r);
+               goto out;
+       }
+
+       if (can_be_queued(lkb)) {
+               if (is_demoted(lkb))
+                       grant_pending_locks(r);
+               error = -EINPROGRESS;
+               del_lkb(r, lkb);
+               add_lkb(r, lkb, DLM_LKSTS_CONVERT);
+               send_blocking_asts(r, lkb);
+               goto out;
+       }
+
+       error = -EAGAIN;
+       if (force_blocking_asts(lkb))
+               send_blocking_asts_all(r, lkb);
+       queue_cast(r, lkb, -EAGAIN);
+
+ out:
+       return error;
+}
+
+static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       remove_lock(r, lkb);
+       queue_cast(r, lkb, -DLM_EUNLOCK);
+       grant_pending_locks(r);
+       return -DLM_EUNLOCK;
+}
+
+/* FIXME: if revert_lock() finds that the lkb is granted, we should
+   skip the queue_cast(ECANCEL).  It indicates that the request/convert
+   completed (and queued a normal ast) just before the cancel; we don't
+   want to clobber the sb_result for the normal ast with ECANCEL. */
+static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       revert_lock(r, lkb);
+       queue_cast(r, lkb, -DLM_ECANCEL);
+       grant_pending_locks(r);
+       return -DLM_ECANCEL;
+}
+
+/*
+ * Four stage 3 varieties:
+ * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
+ */
+
+/* add a new lkb to a possibly new rsb, called by requesting process */
+
+static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       int error;
+
+       /* set_master: sets lkb nodeid from r */
+
+       error = set_master(r, lkb);
+       if (error < 0)
+               goto out;
+       if (error) {
+               error = 0;
+               goto out;
+       }
+
+       if (is_remote(r))
+               /* receive_request() calls do_request() on remote node */
+               error = send_request(r, lkb);
+       else
+               error = do_request(r, lkb);
+ out:
+       return error;
+}
+
+/* change some property of an existing lkb, e.g. mode */
+
+static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       int error;
+
+       if (is_remote(r))
+               /* receive_convert() calls do_convert() on remote node */
+               error = send_convert(r, lkb);
+       else
+               error = do_convert(r, lkb);
+
+       return error;
+}
+
+/* remove an existing lkb from the granted queue */
+
+static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       int error;
+
+       if (is_remote(r))
+               /* receive_unlock() calls do_unlock() on remote node */
+               error = send_unlock(r, lkb);
+       else
+               error = do_unlock(r, lkb);
+
+       return error;
+}
+
+/* remove an existing lkb from the convert or wait queue */
+
+static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       int error;
+
+       if (is_remote(r))
+               /* receive_cancel() calls do_cancel() on remote node */
+               error = send_cancel(r, lkb);
+       else
+               error = do_cancel(r, lkb);
+
+       return error;
+}
+
+/*
+ * Four stage 2 varieties:
+ * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
+ */
+
+static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
+                       int len, struct dlm_args *args)
+{
+       struct dlm_rsb *r;
+       int error;
+
+       error = validate_lock_args(ls, lkb, args);
+       if (error)
+               goto out;
+
+       error = find_rsb(ls, name, len, R_CREATE, &r);
+       if (error)
+               goto out;
+
+       lock_rsb(r);
+
+       attach_lkb(r, lkb);
+       lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
+
+       error = _request_lock(r, lkb);
+
+       unlock_rsb(r);
+       put_rsb(r);
+
+ out:
+       return error;
+}
+
+static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
+                       struct dlm_args *args)
+{
+       struct dlm_rsb *r;
+       int error;
+
+       r = lkb->lkb_resource;
+
+       hold_rsb(r);
+       lock_rsb(r);
+
+       error = validate_lock_args(ls, lkb, args);
+       if (error)
+               goto out;
+
+       error = _convert_lock(r, lkb);
+ out:
+       unlock_rsb(r);
+       put_rsb(r);
+       return error;
+}
+
+static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
+                      struct dlm_args *args)
+{
+       struct dlm_rsb *r;
+       int error;
+
+       r = lkb->lkb_resource;
+
+       hold_rsb(r);
+       lock_rsb(r);
+
+       error = validate_unlock_args(lkb, args);
+       if (error)
+               goto out;
+
+       error = _unlock_lock(r, lkb);
+ out:
+       unlock_rsb(r);
+       put_rsb(r);
+       return error;
+}
+
+static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
+                      struct dlm_args *args)
+{
+       struct dlm_rsb *r;
+       int error;
+
+       r = lkb->lkb_resource;
+
+       hold_rsb(r);
+       lock_rsb(r);
+
+       error = validate_unlock_args(lkb, args);
+       if (error)
+               goto out;
+
+       error = _cancel_lock(r, lkb);
+ out:
+       unlock_rsb(r);
+       put_rsb(r);
+       return error;
+}
+
+/*
+ * Two stage 1 varieties:  dlm_lock() and dlm_unlock()
+ */
+
+int dlm_lock(dlm_lockspace_t *lockspace,
+            int mode,
+            struct dlm_lksb *lksb,
+            uint32_t flags,
+            void *name,
+            unsigned int namelen,
+            uint32_t parent_lkid,
+            void (*ast) (void *astarg),
+            void *astarg,
+            void (*bast) (void *astarg, int mode))
+{
+       struct dlm_ls *ls;
+       struct dlm_lkb *lkb;
+       struct dlm_args args;
+       int error, convert = flags & DLM_LKF_CONVERT;
+
+       ls = dlm_find_lockspace_local(lockspace);
+       if (!ls)
+               return -EINVAL;
+
+       lock_recovery(ls);
+
+       if (convert)
+               error = find_lkb(ls, lksb->sb_lkid, &lkb);
+       else
+               error = create_lkb(ls, &lkb);
+
+       if (error)
+               goto out;
+
+       error = set_lock_args(mode, lksb, flags, namelen, parent_lkid, ast,
+                             astarg, bast, &args);
+       if (error)
+               goto out_put;
+
+       if (convert)
+               error = convert_lock(ls, lkb, &args);
+       else
+               error = request_lock(ls, lkb, name, namelen, &args);
+
+       if (error == -EINPROGRESS)
+               error = 0;
+ out_put:
+       if (convert || error)
+               __put_lkb(ls, lkb);
+       if (error == -EAGAIN)
+               error = 0;
+ out:
+       unlock_recovery(ls);
+       dlm_put_lockspace(ls);
+       return error;
+}
+
+int dlm_unlock(dlm_lockspace_t *lockspace,
+              uint32_t lkid,
+              uint32_t flags,
+              struct dlm_lksb *lksb,
+              void *astarg)
+{
+       struct dlm_ls *ls;
+       struct dlm_lkb *lkb;
+       struct dlm_args args;
+       int error;
+
+       ls = dlm_find_lockspace_local(lockspace);
+       if (!ls)
+               return -EINVAL;
+
+       lock_recovery(ls);
+
+       error = find_lkb(ls, lkid, &lkb);
+       if (error)
+               goto out;
+
+       error = set_unlock_args(flags, astarg, &args);
+       if (error)
+               goto out_put;
+
+       if (flags & DLM_LKF_CANCEL)
+               error = cancel_lock(ls, lkb, &args);
+       else
+               error = unlock_lock(ls, lkb, &args);
+
+       if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
+               error = 0;
+ out_put:
+       dlm_put_lkb(lkb);
+ out:
+       unlock_recovery(ls);
+       dlm_put_lockspace(ls);
+       return error;
+}
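+
+/* Usage sketch, not in the patch: taking and dropping a lock with the two
+   entry points above from kernel code.  "my_ls" is assumed to come from
+   dlm_new_lockspace(); waiting for the ast is hand-waved: */
+#if 0
+static struct dlm_lksb my_lksb;
+static int my_done;
+
+static void my_ast(void *astarg)
+{
+       my_done = 1;    /* completion ast; result is in my_lksb.sb_status */
+}
+
+static void my_bast(void *astarg, int mode)
+{
+       /* another node wants an incompatible "mode"; release when we can */
+}
+
+static int demo(dlm_lockspace_t *my_ls)
+{
+       int error;
+
+       error = dlm_lock(my_ls, DLM_LOCK_EX, &my_lksb, 0, "demo_res", 8,
+                        0, my_ast, NULL, my_bast);
+       if (error)
+               return error;
+       /* ... wait until my_done, check my_lksb.sb_status ... */
+       return dlm_unlock(my_ls, my_lksb.sb_lkid, 0, &my_lksb, NULL);
+}
+#endif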
+
+/*
+ * send/receive routines for remote operations and replies
+ *
+ * send_args
+ * send_common
+ * send_request                        receive_request
+ * send_convert                        receive_convert
+ * send_unlock                 receive_unlock
+ * send_cancel                 receive_cancel
+ * send_grant                  receive_grant
+ * send_bast                   receive_bast
+ * send_lookup                 receive_lookup
+ * send_remove                 receive_remove
+ *
+ *                             send_common_reply
+ * receive_request_reply       send_request_reply
+ * receive_convert_reply       send_convert_reply
+ * receive_unlock_reply                send_unlock_reply
+ * receive_cancel_reply                send_cancel_reply
+ * receive_lookup_reply                send_lookup_reply
+ */
+
+static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
+                         int to_nodeid, int mstype,
+                         struct dlm_message **ms_ret,
+                         struct dlm_mhandle **mh_ret)
+{
+       struct dlm_message *ms;
+       struct dlm_mhandle *mh;
+       char *mb;
+       int mb_len = sizeof(struct dlm_message);
+
+       switch (mstype) {
+       case DLM_MSG_REQUEST:
+       case DLM_MSG_LOOKUP:
+       case DLM_MSG_REMOVE:
+               mb_len += r->res_length;
+               break;
+       case DLM_MSG_CONVERT:
+       case DLM_MSG_UNLOCK:
+       case DLM_MSG_REQUEST_REPLY:
+       case DLM_MSG_CONVERT_REPLY:
+       case DLM_MSG_GRANT:
+               if (lkb && lkb->lkb_lvbptr)
+                       mb_len += r->res_ls->ls_lvblen;
+               break;
+       }
+
+       /* get_buffer gives us a message handle (mh) that we need to
+          pass into lowcomms_commit and a message buffer (mb) that we
+          write our data into */
+
+       mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_KERNEL, &mb);
+       if (!mh)
+               return -ENOBUFS;
+
+       memset(mb, 0, mb_len);
+
+       ms = (struct dlm_message *) mb;
+
+       ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+       ms->m_header.h_lockspace = r->res_ls->ls_global_id;
+       ms->m_header.h_nodeid = dlm_our_nodeid();
+       ms->m_header.h_length = mb_len;
+       ms->m_header.h_cmd = DLM_MSG;
+
+       ms->m_type = mstype;
+
+       *mh_ret = mh;
+       *ms_ret = ms;
+       return 0;
+}
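+
+/* Sketch, not in the patch: the variable-length tail.  A message carrying
+   a resource name or an lvb is allocated with extra bytes after the fixed
+   struct; receive_extralen() below recovers the tail size from h_length: */
+#if 0
+       int mb_len = sizeof(struct dlm_message) + r->res_length;
+       /* ... on the receiving side ... */
+       int extralen = ms->m_header.h_length - sizeof(struct dlm_message);
+       /* extralen == r->res_length */
+#endif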
+
+/* further lowcomms enhancements or alternate implementations may make
+   the return value from this function useful at some point */
+
+static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
+{
+       dlm_message_out(ms);
+       dlm_lowcomms_commit_buffer(mh);
+       return 0;
+}
+
+static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
+                     struct dlm_message *ms)
+{
+       ms->m_nodeid   = lkb->lkb_nodeid;
+       ms->m_pid      = lkb->lkb_ownpid;
+       ms->m_lkid     = lkb->lkb_id;
+       ms->m_remid    = lkb->lkb_remid;
+       ms->m_exflags  = lkb->lkb_exflags;
+       ms->m_sbflags  = lkb->lkb_sbflags;
+       ms->m_flags    = lkb->lkb_flags;
+       ms->m_lvbseq   = lkb->lkb_lvbseq;
+       ms->m_status   = lkb->lkb_status;
+       ms->m_grmode   = lkb->lkb_grmode;
+       ms->m_rqmode   = lkb->lkb_rqmode;
+       ms->m_hash     = r->res_hash;
+
+       /* m_result and m_bastmode are set from function args,
+          not from lkb fields */
+
+       if (lkb->lkb_bastaddr)
+               ms->m_asts |= AST_BAST;
+       if (lkb->lkb_astaddr)
+               ms->m_asts |= AST_COMP;
+
+       if (ms->m_type == DLM_MSG_REQUEST || ms->m_type == DLM_MSG_LOOKUP)
+               memcpy(ms->m_extra, r->res_name, r->res_length);
+       else if (lkb->lkb_lvbptr)
+               memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
+}
+
+static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
+{
+       struct dlm_message *ms;
+       struct dlm_mhandle *mh;
+       int to_nodeid, error;
+
+       add_to_waiters(lkb, mstype);
+
+       to_nodeid = r->res_nodeid;
+
+       error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
+       if (error)
+               goto fail;
+
+       send_args(r, lkb, ms);
+
+       error = send_message(mh, ms);
+       if (error)
+               goto fail;
+       return 0;
+
+ fail:
+       remove_from_waiters(lkb);
+       return error;
+}
+
+static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       return send_common(r, lkb, DLM_MSG_REQUEST);
+}
+
+static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       int error;
+
+       error = send_common(r, lkb, DLM_MSG_CONVERT);
+
+       /* down conversions go without a reply from the master */
+       if (!error && down_conversion(lkb)) {
+               remove_from_waiters(lkb);
+               r->res_ls->ls_stub_ms.m_result = 0;
+               r->res_ls->ls_stub_ms.m_flags = lkb->lkb_flags;
+               __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
+       }
+
+       return error;
+}
+
+/* FIXME: if this lkb is the only lock we hold on the rsb, then set
+   MASTER_UNCERTAIN to force the next request on the rsb to confirm
+   that the master is still correct. */
+
+static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       return send_common(r, lkb, DLM_MSG_UNLOCK);
+}
+
+static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       return send_common(r, lkb, DLM_MSG_CANCEL);
+}
+
+static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       struct dlm_message *ms;
+       struct dlm_mhandle *mh;
+       int to_nodeid, error;
+
+       to_nodeid = lkb->lkb_nodeid;
+
+       error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
+       if (error)
+               goto out;
+
+       send_args(r, lkb, ms);
+
+       ms->m_result = 0;
+
+       error = send_message(mh, ms);
+ out:
+       return error;
+}
+
+static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
+{
+       struct dlm_message *ms;
+       struct dlm_mhandle *mh;
+       int to_nodeid, error;
+
+       to_nodeid = lkb->lkb_nodeid;
+
+       error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
+       if (error)
+               goto out;
+
+       send_args(r, lkb, ms);
+
+       ms->m_bastmode = mode;
+
+       error = send_message(mh, ms);
+ out:
+       return error;
+}
+
+static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
+{
+       struct dlm_message *ms;
+       struct dlm_mhandle *mh;
+       int to_nodeid, error;
+
+       add_to_waiters(lkb, DLM_MSG_LOOKUP);
+
+       to_nodeid = dlm_dir_nodeid(r);
+
+       error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
+       if (error)
+               goto fail;
+
+       send_args(r, lkb, ms);
+
+       error = send_message(mh, ms);
+       if (error)
+               goto fail;
+       return 0;
+
+ fail:
+       remove_from_waiters(lkb);
+       return error;
+}
+
+static int send_remove(struct dlm_rsb *r)
+{
+       struct dlm_message *ms;
+       struct dlm_mhandle *mh;
+       int to_nodeid, error;
+
+       to_nodeid = dlm_dir_nodeid(r);
+
+       error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
+       if (error)
+               goto out;
+
+       memcpy(ms->m_extra, r->res_name, r->res_length);
+       ms->m_hash = r->res_hash;
+
+       error = send_message(mh, ms);
+ out:
+       return error;
+}
+
+static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
+                            int mstype, int rv)
+{
+       struct dlm_message *ms;
+       struct dlm_mhandle *mh;
+       int to_nodeid, error;
+
+       to_nodeid = lkb->lkb_nodeid;
+
+       error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
+       if (error)
+               goto out;
+
+       send_args(r, lkb, ms);
+
+       ms->m_result = rv;
+
+       error = send_message(mh, ms);
+ out:
+       return error;
+}
+
+static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
+{
+       return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
+}
+
+static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
+{
+       return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
+}
+
+static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
+{
+       return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
+}
+
+static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
+{
+       return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
+}
+
+static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
+                            int ret_nodeid, int rv)
+{
+       struct dlm_rsb *r = &ls->ls_stub_rsb;
+       struct dlm_message *ms;
+       struct dlm_mhandle *mh;
+       int error, nodeid = ms_in->m_header.h_nodeid;
+
+       error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
+       if (error)
+               goto out;
+
+       ms->m_lkid = ms_in->m_lkid;
+       ms->m_result = rv;
+       ms->m_nodeid = ret_nodeid;
+
+       error = send_message(mh, ms);
+ out:
+       return error;
+}
+
+/* which args we save from a received message depends heavily on the type
+   of message, unlike the send side where we can safely send everything about
+   the lkb for any type of message */
+
+static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
+{
+       lkb->lkb_exflags = ms->m_exflags;
+       lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
+                        (ms->m_flags & 0x0000FFFF);
+}
+
+static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
+{
+       lkb->lkb_sbflags = ms->m_sbflags;
+       lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
+                        (ms->m_flags & 0x0000FFFF);
+}
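+
+/* Sketch, not in the patch: the flag split above.  Only the low 16 bits
+   of lkb_flags travel in a message; the high 16 bits (e.g. DLM_IFL_MSTCPY)
+   stay node-local.  Toy values: */
+#if 0
+       uint32_t local = 0x00010000;    /* node-local half of lkb_flags */
+       uint32_t wire  = 0x00000002;    /* low half received in m_flags */
+       uint32_t merged = (local & 0xFFFF0000) | (wire & 0x0000FFFF);
+       /* merged == 0x00010002: local half kept, wire half adopted */
+#endif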
+
+static int receive_extralen(struct dlm_message *ms)
+{
+       return (ms->m_header.h_length - sizeof(struct dlm_message));
+}
+
+static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
+                      struct dlm_message *ms)
+{
+       int len;
+
+       if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
+               if (!lkb->lkb_lvbptr)
+                       lkb->lkb_lvbptr = allocate_lvb(ls);
+               if (!lkb->lkb_lvbptr)
+                       return -ENOMEM;
+               len = receive_extralen(ms);
+               memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
+       }
+       return 0;
+}
+
+static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+                               struct dlm_message *ms)
+{
+       lkb->lkb_nodeid = ms->m_header.h_nodeid;
+       lkb->lkb_ownpid = ms->m_pid;
+       lkb->lkb_remid = ms->m_lkid;
+       lkb->lkb_grmode = DLM_LOCK_IV;
+       lkb->lkb_rqmode = ms->m_rqmode;
+       lkb->lkb_bastaddr = (void *) (long) (ms->m_asts & AST_BAST);
+       lkb->lkb_astaddr = (void *) (long) (ms->m_asts & AST_COMP);
+
+       DLM_ASSERT(is_master_copy(lkb), dlm_print_lkb(lkb););
+
+       if (receive_lvb(ls, lkb, ms))
+               return -ENOMEM;
+
+       return 0;
+}
+
+static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+                               struct dlm_message *ms)
+{
+       if (lkb->lkb_nodeid != ms->m_header.h_nodeid) {
+               log_error(ls, "convert_args nodeid %d %d lkid %x %x",
+                         lkb->lkb_nodeid, ms->m_header.h_nodeid,
+                         lkb->lkb_id, lkb->lkb_remid);
+               return -EINVAL;
+       }
+
+       if (!is_master_copy(lkb))
+               return -EINVAL;
+
+       if (lkb->lkb_status != DLM_LKSTS_GRANTED)
+               return -EBUSY;
+
+       if (receive_lvb(ls, lkb, ms))
+               return -ENOMEM;
+
+       lkb->lkb_rqmode = ms->m_rqmode;
+       lkb->lkb_lvbseq = ms->m_lvbseq;
+
+       return 0;
+}
+
+static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+                              struct dlm_message *ms)
+{
+       if (!is_master_copy(lkb))
+               return -EINVAL;
+       if (receive_lvb(ls, lkb, ms))
+               return -ENOMEM;
+       return 0;
+}
+
+/* We fill in the stub-lkb fields with the info that send_xxxx_reply()
+   uses to send a reply and that the remote end uses to process the reply. */
+
+static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
+{
+       struct dlm_lkb *lkb = &ls->ls_stub_lkb;
+       lkb->lkb_nodeid = ms->m_header.h_nodeid;
+       lkb->lkb_remid = ms->m_lkid;
+}
+
+static void receive_request(struct dlm_ls *ls, struct dlm_message *ms)
+{
+       struct dlm_lkb *lkb;
+       struct dlm_rsb *r;
+       int error, namelen;
+
+       error = create_lkb(ls, &lkb);
+       if (error)
+               goto fail;
+
+       receive_flags(lkb, ms);
+       lkb->lkb_flags |= DLM_IFL_MSTCPY;
+       error = receive_request_args(ls, lkb, ms);
+       if (error) {
+               __put_lkb(ls, lkb);
+               goto fail;
+       }
+
+       namelen = receive_extralen(ms);
+
+       error = find_rsb(ls, ms->m_extra, namelen, R_MASTER, &r);
+       if (error) {
+               __put_lkb(ls, lkb);
+               goto fail;
+       }
+
+       lock_rsb(r);
+
+       attach_lkb(r, lkb);
+       error = do_request(r, lkb);
+       send_request_reply(r, lkb, error);
+
+       unlock_rsb(r);
+       put_rsb(r);
+
+       if (error == -EINPROGRESS)
+               error = 0;
+       if (error)
+               dlm_put_lkb(lkb);
+       return;
+
+ fail:
+       setup_stub_lkb(ls, ms);
+       send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
+}
+
+static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
+{
+       struct dlm_lkb *lkb;
+       struct dlm_rsb *r;
+       int error, reply = 1;
+
+       error = find_lkb(ls, ms->m_remid, &lkb);
+       if (error)
+               goto fail;
+
+       r = lkb->lkb_resource;
+
+       hold_rsb(r);
+       lock_rsb(r);
+
+       receive_flags(lkb, ms);
+       error = receive_convert_args(ls, lkb, ms);
+       if (error)
+               goto out;
+       reply = !down_conversion(lkb);
+
+       error = do_convert(r, lkb);
+ out:
+       if (reply)
+               send_convert_reply(r, lkb, error);
+
+       unlock_rsb(r);
+       put_rsb(r);
+       dlm_put_lkb(lkb);
+       return;
+
+ fail:
+       setup_stub_lkb(ls, ms);
+       send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
+}
+
+static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
+{
+       struct dlm_lkb *lkb;
+       struct dlm_rsb *r;
+       int error;
+
+       error = find_lkb(ls, ms->m_remid, &lkb);
+       if (error)
+               goto fail;
+
+       r = lkb->lkb_resource;
+
+       hold_rsb(r);
+       lock_rsb(r);
+
+       receive_flags(lkb, ms);
+       error = receive_unlock_args(ls, lkb, ms);
+       if (error)
+               goto out;
+
+       error = do_unlock(r, lkb);
+ out:
+       send_unlock_reply(r, lkb, error);
+
+       unlock_rsb(r);
+       put_rsb(r);
+       dlm_put_lkb(lkb);
+       return;
+
+ fail:
+       setup_stub_lkb(ls, ms);
+       send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
+}
+
+static void receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
+{
+       struct dlm_lkb *lkb;
+       struct dlm_rsb *r;
+       int error;
+
+       error = find_lkb(ls, ms->m_remid, &lkb);
+       if (error)
+               goto fail;
+
+       receive_flags(lkb, ms);
+
+       r = lkb->lkb_resource;
+
+       hold_rsb(r);
+       lock_rsb(r);
+
+       error = do_cancel(r, lkb);
+       send_cancel_reply(r, lkb, error);
+
+       unlock_rsb(r);
+       put_rsb(r);
+       dlm_put_lkb(lkb);
+       return;
+
+ fail:
+       setup_stub_lkb(ls, ms);
+       send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
+}
+
+static void receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
+{
+       struct dlm_lkb *lkb;
+       struct dlm_rsb *r;
+       int error;
+
+       error = find_lkb(ls, ms->m_remid, &lkb);
+       if (error) {
+               log_error(ls, "receive_grant no lkb");
+               return;
+       }
+       DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
+
+       r = lkb->lkb_resource;
+
+       hold_rsb(r);
+       lock_rsb(r);
+
+       receive_flags_reply(lkb, ms);
+       grant_lock_pc(r, lkb, ms);
+       queue_cast(r, lkb, 0);
+
+       unlock_rsb(r);
+       put_rsb(r);
+       dlm_put_lkb(lkb);
+}
+
+static void receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
+{
+       struct dlm_lkb *lkb;
+       struct dlm_rsb *r;
+       int error;
+
+       error = find_lkb(ls, ms->m_remid, &lkb);
+       if (error) {
+               log_error(ls, "receive_bast no lkb");
+               return;
+       }
+       DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
+
+       r = lkb->lkb_resource;
+
+       hold_rsb(r);
+       lock_rsb(r);
+
+       queue_bast(r, lkb, ms->m_bastmode);
+
+       unlock_rsb(r);
+       put_rsb(r);
+       dlm_put_lkb(lkb);
+}
+
+static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
+{
+       int len, error, ret_nodeid, dir_nodeid, from_nodeid, our_nodeid;
+
+       from_nodeid = ms->m_header.h_nodeid;
+       our_nodeid = dlm_our_nodeid();
+
+       len = receive_extralen(ms);
+
+       dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
+       if (dir_nodeid != our_nodeid) {
+               log_error(ls, "lookup dir_nodeid %d from %d",
+                         dir_nodeid, from_nodeid);
+               error = -EINVAL;
+               ret_nodeid = -1;
+               goto out;
+       }
+
+       error = dlm_dir_lookup(ls, from_nodeid, ms->m_extra, len, &ret_nodeid);
+
+       /* Optimization: we're master so treat lookup as a request */
+       if (!error && ret_nodeid == our_nodeid) {
+               receive_request(ls, ms);
+               return;
+       }
+ out:
+       send_lookup_reply(ls, ms, ret_nodeid, error);
+}
+
+static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
+{
+       int len, dir_nodeid, from_nodeid;
+
+       from_nodeid = ms->m_header.h_nodeid;
+
+       len = receive_extralen(ms);
+
+       dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
+       if (dir_nodeid != dlm_our_nodeid()) {
+               log_error(ls, "remove dir entry dir_nodeid %d from %d",
+                         dir_nodeid, from_nodeid);
+               return;
+       }
+
+       dlm_dir_remove_entry(ls, from_nodeid, ms->m_extra, len);
+}
+
+static void receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
+{
+       struct dlm_lkb *lkb;
+       struct dlm_rsb *r;
+       int error, mstype;
+
+       error = find_lkb(ls, ms->m_remid, &lkb);
+       if (error) {
+               log_error(ls, "receive_request_reply no lkb");
+               return;
+       }
+       DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
+
+       mstype = lkb->lkb_wait_type;
+       error = remove_from_waiters(lkb);
+       if (error) {
+               log_error(ls, "receive_request_reply not on waiters");
+               goto out;
+       }
+
+       /* this is the value returned from do_request() on the master */
+       error = ms->m_result;
+
+       r = lkb->lkb_resource;
+       hold_rsb(r);
+       lock_rsb(r);
+
+       /* Optimization: the dir node was also the master, so it took our
+          lookup as a request and sent request reply instead of lookup reply */
+       if (mstype == DLM_MSG_LOOKUP) {
+               r->res_nodeid = ms->m_header.h_nodeid;
+               lkb->lkb_nodeid = r->res_nodeid;
+       }
+
+       switch (error) {
+       case -EAGAIN:
+               /* request would block (be queued) on remote master;
+                  the unhold undoes the original ref from create_lkb()
+                  so it leads to the lkb being freed */
+               queue_cast(r, lkb, -EAGAIN);
+               confirm_master(r, -EAGAIN);
+               unhold_lkb(lkb);
+               break;
+
+       case -EINPROGRESS:
+       case 0:
+               /* request was queued or granted on remote master */
+               receive_flags_reply(lkb, ms);
+               lkb->lkb_remid = ms->m_lkid;
+               if (error)
+                       add_lkb(r, lkb, DLM_LKSTS_WAITING);
+               else {
+                       grant_lock_pc(r, lkb, ms);
+                       queue_cast(r, lkb, 0);
+               }
+               confirm_master(r, error);
+               break;
+
+       case -EBADR:
+       case -ENOTBLK:
+               /* find_rsb failed to find rsb or rsb wasn't master */
+               r->res_nodeid = -1;
+               lkb->lkb_nodeid = -1;
+               _request_lock(r, lkb);
+               break;
+
+       default:
+               log_error(ls, "receive_request_reply error %d", error);
+       }
+
+       unlock_rsb(r);
+       put_rsb(r);
+ out:
+       dlm_put_lkb(lkb);
+}
+
+static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
+                                   struct dlm_message *ms)
+{
+       int error = ms->m_result;
+
+       /* this is the value returned from do_convert() on the master */
+
+       switch (error) {
+       case -EAGAIN:
+               /* convert would block (be queued) on remote master */
+               queue_cast(r, lkb, -EAGAIN);
+               break;
+
+       case -EINPROGRESS:
+               /* convert was queued on remote master */
+               del_lkb(r, lkb);
+               add_lkb(r, lkb, DLM_LKSTS_CONVERT);
+               break;
+
+       case 0:
+               /* convert was granted on remote master */
+               receive_flags_reply(lkb, ms);
+               grant_lock_pc(r, lkb, ms);
+               queue_cast(r, lkb, 0);
+               break;
+
+       default:
+               log_error(r->res_ls, "receive_convert_reply error %d", error);
+       }
+}
+
+static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
+{
+       struct dlm_rsb *r = lkb->lkb_resource;
+
+       hold_rsb(r);
+       lock_rsb(r);
+
+       __receive_convert_reply(r, lkb, ms);
+
+       unlock_rsb(r);
+       put_rsb(r);
+}
+
+static void receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
+{
+       struct dlm_lkb *lkb;
+       int error;
+
+       error = find_lkb(ls, ms->m_remid, &lkb);
+       if (error) {
+               log_error(ls, "receive_convert_reply no lkb");
+               return;
+       }
+       DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
+
+       error = remove_from_waiters(lkb);
+       if (error) {
+               log_error(ls, "receive_convert_reply not on waiters");
+               goto out;
+       }
+
+       _receive_convert_reply(lkb, ms);
+ out:
+       dlm_put_lkb(lkb);
+}
+
+static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
+{
+       struct dlm_rsb *r = lkb->lkb_resource;
+       int error = ms->m_result;
+
+       hold_rsb(r);
+       lock_rsb(r);
+
+       /* this is the value returned from do_unlock() on the master */
+
+       switch (error) {
+       case -DLM_EUNLOCK:
+               receive_flags_reply(lkb, ms);
+               remove_lock_pc(r, lkb);
+               queue_cast(r, lkb, -DLM_EUNLOCK);
+               break;
+       default:
+               log_error(r->res_ls, "receive_unlock_reply error %d", error);
+       }
+
+       unlock_rsb(r);
+       put_rsb(r);
+}
+
+static void receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
+{
+       struct dlm_lkb *lkb;
+       int error;
+
+       error = find_lkb(ls, ms->m_remid, &lkb);
+       if (error) {
+               log_error(ls, "receive_unlock_reply no lkb");
+               return;
+       }
+       DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
+
+       error = remove_from_waiters(lkb);
+       if (error) {
+               log_error(ls, "receive_unlock_reply not on waiters");
+               goto out;
+       }
+
+       _receive_unlock_reply(lkb, ms);
+ out:
+       dlm_put_lkb(lkb);
+}
+
+static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
+{
+       struct dlm_rsb *r = lkb->lkb_resource;
+       int error = ms->m_result;
+
+       hold_rsb(r);
+       lock_rsb(r);
+
+       /* this is the value returned from do_cancel() on the master */
+
+       switch (error) {
+       case -DLM_ECANCEL:
+               receive_flags_reply(lkb, ms);
+               revert_lock_pc(r, lkb);
+               queue_cast(r, lkb, -DLM_ECANCEL);
+               break;
+       default:
+               log_error(r->res_ls, "receive_cancel_reply error %d", error);
+       }
+
+       unlock_rsb(r);
+       put_rsb(r);
+}
+
+static void receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
+{
+       struct dlm_lkb *lkb;
+       int error;
+
+       error = find_lkb(ls, ms->m_remid, &lkb);
+       if (error) {
+               log_error(ls, "receive_cancel_reply no lkb");
+               return;
+       }
+       DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
+
+       error = remove_from_waiters(lkb);
+       if (error) {
+               log_error(ls, "receive_cancel_reply not on waiters");
+               goto out;
+       }
+
+       _receive_cancel_reply(lkb, ms);
+ out:
+       dlm_put_lkb(lkb);
+}
+
+static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
+{
+       struct dlm_lkb *lkb;
+       struct dlm_rsb *r;
+       int error, ret_nodeid;
+
+       error = find_lkb(ls, ms->m_lkid, &lkb);
+       if (error) {
+               log_error(ls, "receive_lookup_reply no lkb");
+               return;
+       }
+
+       error = remove_from_waiters(lkb);
+       if (error) {
+               log_error(ls, "receive_lookup_reply not on waiters");
+               goto out;
+       }
+
+       /* this is the value returned by dlm_dir_lookup() on the dir node.
+          FIXME: will a non-zero error ever be returned? */
+       error = ms->m_result;
+
+       r = lkb->lkb_resource;
+       hold_rsb(r);
+       lock_rsb(r);
+
+       ret_nodeid = ms->m_nodeid;
+       if (ret_nodeid == dlm_our_nodeid()) {
+               r->res_nodeid = 0;
+               ret_nodeid = 0;
+               r->res_first_lkid = 0;
+       } else {
+               /* set_master() will copy res_nodeid to lkb_nodeid */
+               r->res_nodeid = ret_nodeid;
+       }
+
+       _request_lock(r, lkb);
+
+       if (!ret_nodeid)
+               process_lookup_list(r);
+
+       unlock_rsb(r);
+       put_rsb(r);
+ out:
+       dlm_put_lkb(lkb);
+}
+
+int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
+{
+       struct dlm_message *ms = (struct dlm_message *) hd;
+       struct dlm_ls *ls;
+       int error;
+
+       if (!recovery)
+               dlm_message_in(ms);
+
+       ls = dlm_find_lockspace_global(hd->h_lockspace);
+       if (!ls) {
+               log_print("drop message %d from %d for unknown lockspace %d",
+                         ms->m_type, nodeid, hd->h_lockspace);
+               return -EINVAL;
+       }
+
+       /* recovery may have just ended leaving a bunch of backed-up requests
+          in the requestqueue; wait while dlm_recoverd clears them */
+
+       if (!recovery)
+               dlm_wait_requestqueue(ls);
+
+       /* recovery may have just started while there were a bunch of
+          in-flight requests -- save them in requestqueue to be processed
+          after recovery.  we can't let dlm_recvd block on the recovery
+          lock.  if dlm_recoverd is calling this function to clear the
+          requestqueue, it needs to be interrupted (-EINTR) if another
+          recovery operation is starting. */
+
+       while (1) {
+               if (dlm_locking_stopped(ls)) {
+                       if (!recovery)
+                               dlm_add_requestqueue(ls, nodeid, hd);
+                       error = -EINTR;
+                       goto out;
+               }
+
+               if (lock_recovery_try(ls))
+                       break;
+               schedule();
+       }
+
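+       /* lock_recovery_try() succeeded above, so recovery is held off
+          while this message is processed */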
+       switch (ms->m_type) {
+
+       /* messages sent to a master node */
+
+       case DLM_MSG_REQUEST:
+               receive_request(ls, ms);
+               break;
+
+       case DLM_MSG_CONVERT:
+               receive_convert(ls, ms);
+               break;
+
+       case DLM_MSG_UNLOCK:
+               receive_unlock(ls, ms);
+               break;
+
+       case DLM_MSG_CANCEL:
+               receive_cancel(ls, ms);
+               break;
+
+       /* messages sent from a master node (replies to above) */
+
+       case DLM_MSG_REQUEST_REPLY:
+               receive_request_reply(ls, ms);
+               break;
+
+       case DLM_MSG_CONVERT_REPLY:
+               receive_convert_reply(ls, ms);
+               break;
+
+       case DLM_MSG_UNLOCK_REPLY:
+               receive_unlock_reply(ls, ms);
+               break;
+
+       case DLM_MSG_CANCEL_REPLY:
+               receive_cancel_reply(ls, ms);
+               break;
+
+       /* messages sent from a master node (only two types of async msg) */
+
+       case DLM_MSG_GRANT:
+               receive_grant(ls, ms);
+               break;
+
+       case DLM_MSG_BAST:
+               receive_bast(ls, ms);
+               break;
+
+       /* messages sent to a dir node */
+
+       case DLM_MSG_LOOKUP:
+               receive_lookup(ls, ms);
+               break;
+
+       case DLM_MSG_REMOVE:
+               receive_remove(ls, ms);
+               break;
+
+       /* messages sent from a dir node (remove has no reply) */
+
+       case DLM_MSG_LOOKUP_REPLY:
+               receive_lookup_reply(ls, ms);
+               break;
+
+       default:
+               log_error(ls, "unknown message type %d", ms->m_type);
+       }
+
+       unlock_recovery(ls);
+ out:
+       dlm_put_lockspace(ls);
+       dlm_astd_wake();
+       return 0;
+}
+
+
+/*
+ * Recovery related
+ */
+
+static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb)
+{
+       if (middle_conversion(lkb)) {
+               hold_lkb(lkb);
+               ls->ls_stub_ms.m_result = -EINPROGRESS;
+               _remove_from_waiters(lkb);
+               _receive_convert_reply(lkb, &ls->ls_stub_ms);
+
+               /* Same special case as in receive_rcom_lock_args() */
+               lkb->lkb_grmode = DLM_LOCK_IV;
+               rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
+               unhold_lkb(lkb);
+
+       } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
+               lkb->lkb_flags |= DLM_IFL_RESEND;
+       }
+
+       /* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
+          conversions are async; there's no reply from the remote master */
+}
+
+/* A waiting lkb needs recovery if the master node has failed, or
+   the master node is changing (only when no directory is used) */
+
+static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb)
+{
+       if (dlm_is_removed(ls, lkb->lkb_nodeid))
+               return 1;
+
+       if (!dlm_no_directory(ls))
+               return 0;
+
+       if (dlm_dir_nodeid(lkb->lkb_resource) != lkb->lkb_nodeid)
+               return 1;
+
+       return 0;
+}
+
+/* Recovery for locks that are waiting for replies from nodes that are now
+   gone.  We can just complete unlocks and cancels by faking a reply from the
+   dead node.  Requests and up-conversions we flag to be resent after
+   recovery.  Down-conversions can just be completed with a fake reply like
+   unlocks.  Conversions between PR and CW need special attention. */
+
+void dlm_recover_waiters_pre(struct dlm_ls *ls)
+{
+       struct dlm_lkb *lkb, *safe;
+
+       mutex_lock(&ls->ls_waiters_mutex);
+
+       list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
+               log_debug(ls, "pre recover waiter lkid %x type %d flags %x",
+                         lkb->lkb_id, lkb->lkb_wait_type, lkb->lkb_flags);
+
+               /* all outstanding lookups, regardless of destination, will
+                  be resent after recovery is done */
+
+               if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
+                       lkb->lkb_flags |= DLM_IFL_RESEND;
+                       continue;
+               }
+
+               if (!waiter_needs_recovery(ls, lkb))
+                       continue;
+
+               switch (lkb->lkb_wait_type) {
+
+               case DLM_MSG_REQUEST:
+                       lkb->lkb_flags |= DLM_IFL_RESEND;
+                       break;
+
+               case DLM_MSG_CONVERT:
+                       recover_convert_waiter(ls, lkb);
+                       break;
+
+               case DLM_MSG_UNLOCK:
+                       hold_lkb(lkb);
+                       ls->ls_stub_ms.m_result = -DLM_EUNLOCK;
+                       _remove_from_waiters(lkb);
+                       _receive_unlock_reply(lkb, &ls->ls_stub_ms);
+                       dlm_put_lkb(lkb);
+                       break;
+
+               case DLM_MSG_CANCEL:
+                       hold_lkb(lkb);
+                       ls->ls_stub_ms.m_result = -DLM_ECANCEL;
+                       _remove_from_waiters(lkb);
+                       _receive_cancel_reply(lkb, &ls->ls_stub_ms);
+                       dlm_put_lkb(lkb);
+                       break;
+
+               default:
+                       log_error(ls, "invalid lkb wait_type %d",
+                                 lkb->lkb_wait_type);
+               }
+               schedule();
+       }
+       mutex_unlock(&ls->ls_waiters_mutex);
+}
+
+static int remove_resend_waiter(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
+{
+       struct dlm_lkb *lkb;
+       int rv = 0;
+
+       mutex_lock(&ls->ls_waiters_mutex);
+       list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
+               if (lkb->lkb_flags & DLM_IFL_RESEND) {
+                       rv = lkb->lkb_wait_type;
+                       _remove_from_waiters(lkb);
+                       lkb->lkb_flags &= ~DLM_IFL_RESEND;
+                       break;
+               }
+       }
+       mutex_unlock(&ls->ls_waiters_mutex);
+
+       if (!rv)
+               lkb = NULL;
+       *lkb_ret = lkb;
+       return rv;
+}
+
+/* Deal with lookups and lkb's marked RESEND from _pre.  We may now be the
+   master or dir-node for r.  Processing the lkb may result in it being placed
+   back on waiters. */
+
+int dlm_recover_waiters_post(struct dlm_ls *ls)
+{
+       struct dlm_lkb *lkb;
+       struct dlm_rsb *r;
+       int error = 0, mstype;
+
+       while (1) {
+               if (dlm_locking_stopped(ls)) {
+                       log_debug(ls, "recover_waiters_post aborted");
+                       error = -EINTR;
+                       break;
+               }
+
+               mstype = remove_resend_waiter(ls, &lkb);
+               if (!mstype)
+                       break;
+
+               r = lkb->lkb_resource;
+
+               log_debug(ls, "recover_waiters_post %x type %d flags %x %s",
+                         lkb->lkb_id, mstype, lkb->lkb_flags, r->res_name);
+
+               switch (mstype) {
+
+               case DLM_MSG_LOOKUP:
+                       hold_rsb(r);
+                       lock_rsb(r);
+                       _request_lock(r, lkb);
+                       if (is_master(r))
+                               confirm_master(r, 0);
+                       unlock_rsb(r);
+                       put_rsb(r);
+                       break;
+
+               case DLM_MSG_REQUEST:
+                       hold_rsb(r);
+                       lock_rsb(r);
+                       _request_lock(r, lkb);
+                       if (is_master(r))
+                               confirm_master(r, 0);
+                       unlock_rsb(r);
+                       put_rsb(r);
+                       break;
+
+               case DLM_MSG_CONVERT:
+                       hold_rsb(r);
+                       lock_rsb(r);
+                       _convert_lock(r, lkb);
+                       unlock_rsb(r);
+                       put_rsb(r);
+                       break;
+
+               default:
+                       log_error(ls, "recover_waiters_post type %d", mstype);
+               }
+       }
+
+       return error;
+}
+
+static void purge_queue(struct dlm_rsb *r, struct list_head *queue,
+                       int (*test)(struct dlm_ls *ls, struct dlm_lkb *lkb))
+{
+       struct dlm_ls *ls = r->res_ls;
+       struct dlm_lkb *lkb, *safe;
+
+       list_for_each_entry_safe(lkb, safe, queue, lkb_statequeue) {
+               if (test(ls, lkb)) {
+                       rsb_set_flag(r, RSB_LOCKS_PURGED);
+                       del_lkb(r, lkb);
+                       /* this put should free the lkb */
+                       if (!dlm_put_lkb(lkb))
+                               log_error(ls, "purged lkb not released");
+               }
+       }
+}
+
+static int purge_dead_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
+{
+       return (is_master_copy(lkb) && dlm_is_removed(ls, lkb->lkb_nodeid));
+}
+
+static int purge_mstcpy_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
+{
+       return is_master_copy(lkb);
+}
+
+static void purge_dead_locks(struct dlm_rsb *r)
+{
+       purge_queue(r, &r->res_grantqueue, &purge_dead_test);
+       purge_queue(r, &r->res_convertqueue, &purge_dead_test);
+       purge_queue(r, &r->res_waitqueue, &purge_dead_test);
+}
+
+void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
+{
+       purge_queue(r, &r->res_grantqueue, &purge_mstcpy_test);
+       purge_queue(r, &r->res_convertqueue, &purge_mstcpy_test);
+       purge_queue(r, &r->res_waitqueue, &purge_mstcpy_test);
+}
+
+/* Get rid of locks held by nodes that are gone. */
+
+int dlm_purge_locks(struct dlm_ls *ls)
+{
+       struct dlm_rsb *r;
+
+       log_debug(ls, "dlm_purge_locks");
+
+       down_write(&ls->ls_root_sem);
+       list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
+               hold_rsb(r);
+               lock_rsb(r);
+               if (is_master(r))
+                       purge_dead_locks(r);
+               unlock_rsb(r);
+               unhold_rsb(r);
+
+               schedule();
+       }
+       up_write(&ls->ls_root_sem);
+
+       return 0;
+}
+
+static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
+{
+       struct dlm_rsb *r, *r_ret = NULL;
+
+       read_lock(&ls->ls_rsbtbl[bucket].lock);
+       list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list, res_hashchain) {
+               if (!rsb_flag(r, RSB_LOCKS_PURGED))
+                       continue;
+               hold_rsb(r);
+               rsb_clear_flag(r, RSB_LOCKS_PURGED);
+               r_ret = r;
+               break;
+       }
+       read_unlock(&ls->ls_rsbtbl[bucket].lock);
+       return r_ret;
+}
+
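+/* after dead-node locks have been purged, revisit each rsb that was
+   marked RSB_LOCKS_PURGED; if we are its master, grant whatever can
+   now be granted */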
+void dlm_grant_after_purge(struct dlm_ls *ls)
+{
+       struct dlm_rsb *r;
+       int bucket = 0;
+
+       while (1) {
+               r = find_purged_rsb(ls, bucket);
+               if (!r) {
+                       if (bucket == ls->ls_rsbtbl_size - 1)
+                               break;
+                       bucket++;
+                       continue;
+               }
+               lock_rsb(r);
+               if (is_master(r)) {
+                       grant_pending_locks(r);
+                       confirm_master(r, 0);
+               }
+               unlock_rsb(r);
+               put_rsb(r);
+               schedule();
+       }
+}
+
+static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
+                                        uint32_t remid)
+{
+       struct dlm_lkb *lkb;
+
+       list_for_each_entry(lkb, head, lkb_statequeue) {
+               if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
+                       return lkb;
+       }
+       return NULL;
+}
+
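+/* look on all three rsb queues (grant, convert, wait) for an lkb that
+   matches the remote nodeid/lkid, i.e. one rebuilt by an earlier,
+   possibly aborted, recovery */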
+static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
+                                   uint32_t remid)
+{
+       struct dlm_lkb *lkb;
+
+       lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
+       if (lkb)
+               return lkb;
+       lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
+       if (lkb)
+               return lkb;
+       lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
+       if (lkb)
+               return lkb;
+       return NULL;
+}
+
+static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+                                 struct dlm_rsb *r, struct dlm_rcom *rc)
+{
+       struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
+       int lvblen;
+
+       lkb->lkb_nodeid = rc->rc_header.h_nodeid;
+       lkb->lkb_ownpid = rl->rl_ownpid;
+       lkb->lkb_remid = rl->rl_lkid;
+       lkb->lkb_exflags = rl->rl_exflags;
+       lkb->lkb_flags = rl->rl_flags & 0x0000FFFF;
+       lkb->lkb_flags |= DLM_IFL_MSTCPY;
+       lkb->lkb_lvbseq = rl->rl_lvbseq;
+       lkb->lkb_rqmode = rl->rl_rqmode;
+       lkb->lkb_grmode = rl->rl_grmode;
+       /* don't set lkb_status because add_lkb wants to itself */
+
+       lkb->lkb_bastaddr = (void *) (long) (rl->rl_asts & AST_BAST);
+       lkb->lkb_astaddr = (void *) (long) (rl->rl_asts & AST_COMP);
+
+       if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
+               lkb->lkb_lvbptr = allocate_lvb(ls);
+               if (!lkb->lkb_lvbptr)
+                       return -ENOMEM;
+               lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
+                        sizeof(struct rcom_lock);
+               memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
+       }
+
+       /* Conversions between PR and CW (middle modes) need special handling.
+          The real granted mode of these converting locks cannot be determined
+          until all locks have been rebuilt on the rsb (recover_conversion) */
+
+       if (rl->rl_wait_type == DLM_MSG_CONVERT && middle_conversion(lkb)) {
+               rl->rl_status = DLM_LKSTS_CONVERT;
+               lkb->lkb_grmode = DLM_LOCK_IV;
+               rsb_set_flag(r, RSB_RECOVER_CONVERT);
+       }
+
+       return 0;
+}
+
+/* This lkb may have been recovered in a previous aborted recovery so we need
+   to check if the rsb already has an lkb with the given remote nodeid/lkid.
+   If so we just send back a standard reply.  If not, we create a new lkb with
+   the given values and send back our lkid.  We send back our lkid by sending
+   back the rcom_lock struct we got but with the remid field filled in. */
+
+int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
+{
+       struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
+       struct dlm_rsb *r;
+       struct dlm_lkb *lkb;
+       int error;
+
+       if (rl->rl_parent_lkid) {
+               error = -EOPNOTSUPP;
+               goto out;
+       }
+
+       error = find_rsb(ls, rl->rl_name, rl->rl_namelen, R_MASTER, &r);
+       if (error)
+               goto out;
+
+       lock_rsb(r);
+
+       lkb = search_remid(r, rc->rc_header.h_nodeid, rl->rl_lkid);
+       if (lkb) {
+               error = -EEXIST;
+               goto out_remid;
+       }
+
+       error = create_lkb(ls, &lkb);
+       if (error)
+               goto out_unlock;
+
+       error = receive_rcom_lock_args(ls, lkb, r, rc);
+       if (error) {
+               __put_lkb(ls, lkb);
+               goto out_unlock;
+       }
+
+       attach_lkb(r, lkb);
+       add_lkb(r, lkb, rl->rl_status);
+       error = 0;
+
+ out_remid:
+       /* this is the new value returned to the lock holder for
+          saving in its process-copy lkb */
+       rl->rl_remid = lkb->lkb_id;
+
+ out_unlock:
+       unlock_rsb(r);
+       put_rsb(r);
+ out:
+       if (error)
+               log_print("recover_master_copy %d %x", error, rl->rl_lkid);
+       rl->rl_result = error;
+       return error;
+}
+
+int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
+{
+       struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
+       struct dlm_rsb *r;
+       struct dlm_lkb *lkb;
+       int error;
+
+       error = find_lkb(ls, rl->rl_lkid, &lkb);
+       if (error) {
+               log_error(ls, "recover_process_copy no lkid %x", rl->rl_lkid);
+               return error;
+       }
+
+       DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
+
+       error = rl->rl_result;
+
+       r = lkb->lkb_resource;
+       hold_rsb(r);
+       lock_rsb(r);
+
+       switch (error) {
+       case -EEXIST:
+               log_debug(ls, "master copy exists %x", lkb->lkb_id);
+               /* fall through */
+       case 0:
+               lkb->lkb_remid = rl->rl_remid;
+               break;
+       default:
+               log_error(ls, "dlm_recover_process_copy unknown error %d %x",
+                         error, lkb->lkb_id);
+       }
+
+       /* an ack for dlm_recover_locks() which waits for replies for
+          all the locks it sends to new masters */
+       dlm_recovered_lock(r);
+
+       unlock_rsb(r);
+       put_rsb(r);
+       dlm_put_lkb(lkb);
+
+       return 0;
+}
+
+int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
+                    int mode, uint32_t flags, void *name, unsigned int namelen,
+                    uint32_t parent_lkid)
+{
+       struct dlm_lkb *lkb;
+       struct dlm_args args;
+       int error;
+
+       lock_recovery(ls);
+
+       error = create_lkb(ls, &lkb);
+       if (error) {
+               kfree(ua);
+               goto out;
+       }
+
+       if (flags & DLM_LKF_VALBLK) {
+               ua->lksb.sb_lvbptr = kmalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
+               if (!ua->lksb.sb_lvbptr) {
+                       kfree(ua);
+                       __put_lkb(ls, lkb);
+                       error = -ENOMEM;
+                       goto out;
+               }
+       }
+
+       /* After ua is attached to lkb it will be freed by free_lkb().
+          When DLM_IFL_USER is set, the dlm knows that this is a userspace
+          lock and that lkb_astparam is the dlm_user_args structure. */
+
+       error = set_lock_args(mode, &ua->lksb, flags, namelen, parent_lkid,
+                             DLM_FAKE_USER_AST, ua, DLM_FAKE_USER_AST, &args);
+       lkb->lkb_flags |= DLM_IFL_USER;
+       ua->old_mode = DLM_LOCK_IV;
+
+       if (error) {
+               __put_lkb(ls, lkb);
+               goto out;
+       }
+
+       error = request_lock(ls, lkb, name, namelen, &args);
+
+       switch (error) {
+       case 0:
+               break;
+       case -EINPROGRESS:
+               error = 0;
+               break;
+       case -EAGAIN:
+               error = 0;
+               /* fall through */
+       default:
+               __put_lkb(ls, lkb);
+               goto out;
+       }
+
+       /* add this new lkb to the per-process list of locks */
+       spin_lock(&ua->proc->locks_spin);
+       kref_get(&lkb->lkb_ref);
+       list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
+       spin_unlock(&ua->proc->locks_spin);
+ out:
+       unlock_recovery(ls);
+       return error;
+}
+
+int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
+                    int mode, uint32_t flags, uint32_t lkid, char *lvb_in)
+{
+       struct dlm_lkb *lkb;
+       struct dlm_args args;
+       struct dlm_user_args *ua;
+       int error;
+
+       lock_recovery(ls);
+
+       error = find_lkb(ls, lkid, &lkb);
+       if (error)
+               goto out;
+
+       /* user can change the params on its lock when it converts it, or
+          add an lvb that didn't exist before */
+
+       ua = (struct dlm_user_args *)lkb->lkb_astparam;
+
+       if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
+               ua->lksb.sb_lvbptr = kmalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
+               if (!ua->lksb.sb_lvbptr) {
+                       error = -ENOMEM;
+                       goto out_put;
+               }
+       }
+       if (lvb_in && ua->lksb.sb_lvbptr)
+               memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
+
+       ua->castparam = ua_tmp->castparam;
+       ua->castaddr = ua_tmp->castaddr;
+       ua->bastparam = ua_tmp->bastparam;
+       ua->bastaddr = ua_tmp->bastaddr;
+       ua->user_lksb = ua_tmp->user_lksb;
+       ua->old_mode = lkb->lkb_grmode;
+
+       error = set_lock_args(mode, &ua->lksb, flags, 0, 0, DLM_FAKE_USER_AST,
+                             ua, DLM_FAKE_USER_AST, &args);
+       if (error)
+               goto out_put;
+
+       error = convert_lock(ls, lkb, &args);
+
+       if (error == -EINPROGRESS || error == -EAGAIN)
+               error = 0;
+ out_put:
+       dlm_put_lkb(lkb);
+ out:
+       unlock_recovery(ls);
+       kfree(ua_tmp);
+       return error;
+}
+
+int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
+                   uint32_t flags, uint32_t lkid, char *lvb_in)
+{
+       struct dlm_lkb *lkb;
+       struct dlm_args args;
+       struct dlm_user_args *ua;
+       int error;
+
+       lock_recovery(ls);
+
+       error = find_lkb(ls, lkid, &lkb);
+       if (error)
+               goto out;
+
+       ua = (struct dlm_user_args *)lkb->lkb_astparam;
+
+       if (lvb_in && ua->lksb.sb_lvbptr)
+               memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
+       ua->castparam = ua_tmp->castparam;
+       ua->user_lksb = ua_tmp->user_lksb;
+
+       error = set_unlock_args(flags, ua, &args);
+       if (error)
+               goto out_put;
+
+       error = unlock_lock(ls, lkb, &args);
+
+       if (error == -DLM_EUNLOCK)
+               error = 0;
+       if (error)
+               goto out_put;
+
+       spin_lock(&ua->proc->locks_spin);
+       list_del_init(&lkb->lkb_ownqueue);
+       spin_unlock(&ua->proc->locks_spin);
+
+       /* this removes the reference for the proc->locks list added by
+          dlm_user_request */
+       unhold_lkb(lkb);
+ out_put:
+       dlm_put_lkb(lkb);
+ out:
+       unlock_recovery(ls);
+       return error;
+}
+
+int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
+                   uint32_t flags, uint32_t lkid)
+{
+       struct dlm_lkb *lkb;
+       struct dlm_args args;
+       struct dlm_user_args *ua;
+       int error;
+
+       lock_recovery(ls);
+
+       error = find_lkb(ls, lkid, &lkb);
+       if (error)
+               goto out;
+
+       ua = (struct dlm_user_args *)lkb->lkb_astparam;
+       ua->castparam = ua_tmp->castparam;
+       ua->user_lksb = ua_tmp->user_lksb;
+
+       error = set_unlock_args(flags, ua, &args);
+       if (error)
+               goto out_put;
+
+       error = cancel_lock(ls, lkb, &args);
+
+       if (error == -DLM_ECANCEL)
+               error = 0;
+       if (error)
+               goto out_put;
+
+       /* this lkb was removed from the WAITING queue */
+       if (lkb->lkb_grmode == DLM_LOCK_IV) {
+               spin_lock(&ua->proc->locks_spin);
+               list_del_init(&lkb->lkb_ownqueue);
+               spin_unlock(&ua->proc->locks_spin);
+               unhold_lkb(lkb);
+       }
+ out_put:
+       dlm_put_lkb(lkb);
+ out:
+       unlock_recovery(ls);
+       return error;
+}
+
+static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
+{
+       struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam;
+
+       if (ua->lksb.sb_lvbptr)
+               kfree(ua->lksb.sb_lvbptr);
+       kfree(ua);
+       lkb->lkb_astparam = (long)NULL;
+
+       /* TODO: propagate to master if needed */
+       return 0;
+}
+
+/* The force flag allows the unlock to go ahead even if the lkb isn't granted.
+   Regardless of what rsb queue the lock is on, it's removed and freed. */
+
+static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
+{
+       struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam;
+       struct dlm_args args;
+       int error;
+
+       /* FIXME: we need to handle the case where the lkb is in limbo
+          while the rsb is being looked up, currently we assert in
+          _unlock_lock/is_remote because rsb nodeid is -1. */
+
+       set_unlock_args(DLM_LKF_FORCEUNLOCK, ua, &args);
+
+       error = unlock_lock(ls, lkb, &args);
+       if (error == -DLM_EUNLOCK)
+               error = 0;
+       return error;
+}
+
+/* The ls_clear_proc_locks mutex protects against dlm_user_add_asts() which
+   1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
+   which we clear here. */
+
+/* proc CLOSING flag is set so no more device_reads should look at proc->asts
+   list, and no more device_writes should add lkb's to proc->locks list; so we
+   shouldn't need to take asts_spin or locks_spin here.  this assumes that
+   device reads/writes/closes are serialized -- FIXME: we may need to serialize
+   them ourselves. */
+
+void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
+{
+       struct dlm_lkb *lkb, *safe;
+
+       lock_recovery(ls);
+       mutex_lock(&ls->ls_clear_proc_locks);
+
+       list_for_each_entry_safe(lkb, safe, &proc->locks, lkb_ownqueue) {
+               if (lkb->lkb_ast_type) {
+                       list_del(&lkb->lkb_astqueue);
+                       unhold_lkb(lkb);
+               }
+
+               list_del_init(&lkb->lkb_ownqueue);
+
+               if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) {
+                       lkb->lkb_flags |= DLM_IFL_ORPHAN;
+                       orphan_proc_lock(ls, lkb);
+               } else {
+                       lkb->lkb_flags |= DLM_IFL_DEAD;
+                       unlock_proc_lock(ls, lkb);
+               }
+
+               /* this removes the reference for the proc->locks list
+                  added by dlm_user_request, it may result in the lkb
+                  being freed */
+
+               dlm_put_lkb(lkb);
+       }
+       mutex_unlock(&ls->ls_clear_proc_locks);
+       unlock_recovery(ls);
+}
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
new file mode 100644 (file)
index 0000000..0843a30
--- /dev/null
@@ -0,0 +1,62 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+#ifndef __LOCK_DOT_H__
+#define __LOCK_DOT_H__
+
+void dlm_print_rsb(struct dlm_rsb *r);
+void dlm_dump_rsb(struct dlm_rsb *r);
+void dlm_print_lkb(struct dlm_lkb *lkb);
+int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery);
+int dlm_modes_compat(int mode1, int mode2);
+int dlm_find_rsb(struct dlm_ls *ls, char *name, int namelen,
+       unsigned int flags, struct dlm_rsb **r_ret);
+void dlm_put_rsb(struct dlm_rsb *r);
+void dlm_hold_rsb(struct dlm_rsb *r);
+int dlm_put_lkb(struct dlm_lkb *lkb);
+void dlm_scan_rsbs(struct dlm_ls *ls);
+
+int dlm_purge_locks(struct dlm_ls *ls);
+void dlm_purge_mstcpy_locks(struct dlm_rsb *r);
+void dlm_grant_after_purge(struct dlm_ls *ls);
+int dlm_recover_waiters_post(struct dlm_ls *ls);
+void dlm_recover_waiters_pre(struct dlm_ls *ls);
+int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc);
+int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc);
+
+int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, int mode,
+       uint32_t flags, void *name, unsigned int namelen, uint32_t parent_lkid);
+int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
+       int mode, uint32_t flags, uint32_t lkid, char *lvb_in);
+int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
+       uint32_t flags, uint32_t lkid, char *lvb_in);
+int dlm_user_cancel(struct dlm_ls *ls,  struct dlm_user_args *ua_tmp,
+       uint32_t flags, uint32_t lkid);
+void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc);
+
+static inline int is_master(struct dlm_rsb *r)
+{
+       return !r->res_nodeid;
+}
+
+static inline void lock_rsb(struct dlm_rsb *r)
+{
+       mutex_lock(&r->res_mutex);
+}
+
+static inline void unlock_rsb(struct dlm_rsb *r)
+{
+       mutex_unlock(&r->res_mutex);
+}
+
+#endif
+
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
new file mode 100644 (file)
index 0000000..109333c
--- /dev/null
@@ -0,0 +1,717 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+#include "dlm_internal.h"
+#include "lockspace.h"
+#include "member.h"
+#include "recoverd.h"
+#include "ast.h"
+#include "dir.h"
+#include "lowcomms.h"
+#include "config.h"
+#include "memory.h"
+#include "lock.h"
+#include "recover.h"
+
+#ifdef CONFIG_DLM_DEBUG
+int dlm_create_debug_file(struct dlm_ls *ls);
+void dlm_delete_debug_file(struct dlm_ls *ls);
+#else
+static inline int dlm_create_debug_file(struct dlm_ls *ls) { return 0; }
+static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
+#endif
+
+static int                     ls_count;
+static struct mutex            ls_lock;
+static struct list_head                lslist;
+static spinlock_t              lslist_lock;
+static struct task_struct *    scand_task;
+
+
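+/* Lockspace sysfs attributes.  Based on the kset registration below,
+   these should appear under /sys/kernel/dlm/<lockspace>/ (path assumed,
+   not verified here), e.g. a hypothetical lockspace "example" could be
+   stopped and restarted from userspace with:
+
+       echo 0 > /sys/kernel/dlm/example/control
+       echo 1 > /sys/kernel/dlm/example/control
+*/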
+static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
+{
+       ssize_t ret = len;
+       int n = simple_strtol(buf, NULL, 0);
+
+       switch (n) {
+       case 0:
+               dlm_ls_stop(ls);
+               break;
+       case 1:
+               dlm_ls_start(ls);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+       return ret;
+}
+
+static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
+{
+       ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
+       set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
+       wake_up(&ls->ls_uevent_wait);
+       return len;
+}
+
+static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
+}
+
+static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
+{
+       ls->ls_global_id = simple_strtoul(buf, NULL, 0);
+       return len;
+}
+
+static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
+{
+       uint32_t status = dlm_recover_status(ls);
+       return snprintf(buf, PAGE_SIZE, "%x\n", status);
+}
+
+static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
+}
+
+struct dlm_attr {
+       struct attribute attr;
+       ssize_t (*show)(struct dlm_ls *, char *);
+       ssize_t (*store)(struct dlm_ls *, const char *, size_t);
+};
+
+static struct dlm_attr dlm_attr_control = {
+       .attr  = {.name = "control", .mode = S_IWUSR},
+       .store = dlm_control_store
+};
+
+static struct dlm_attr dlm_attr_event = {
+       .attr  = {.name = "event_done", .mode = S_IWUSR},
+       .store = dlm_event_store
+};
+
+static struct dlm_attr dlm_attr_id = {
+       .attr  = {.name = "id", .mode = S_IRUGO | S_IWUSR},
+       .show  = dlm_id_show,
+       .store = dlm_id_store
+};
+
+static struct dlm_attr dlm_attr_recover_status = {
+       .attr  = {.name = "recover_status", .mode = S_IRUGO},
+       .show  = dlm_recover_status_show
+};
+
+static struct dlm_attr dlm_attr_recover_nodeid = {
+       .attr  = {.name = "recover_nodeid", .mode = S_IRUGO},
+       .show  = dlm_recover_nodeid_show
+};
+
+static struct attribute *dlm_attrs[] = {
+       &dlm_attr_control.attr,
+       &dlm_attr_event.attr,
+       &dlm_attr_id.attr,
+       &dlm_attr_recover_status.attr,
+       &dlm_attr_recover_nodeid.attr,
+       NULL,
+};
+
+static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
+                            char *buf)
+{
+       struct dlm_ls *ls  = container_of(kobj, struct dlm_ls, ls_kobj);
+       struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
+       return a->show ? a->show(ls, buf) : 0;
+}
+
+static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
+                             const char *buf, size_t len)
+{
+       struct dlm_ls *ls  = container_of(kobj, struct dlm_ls, ls_kobj);
+       struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
+       return a->store ? a->store(ls, buf, len) : len;
+}
+
+static struct sysfs_ops dlm_attr_ops = {
+       .show  = dlm_attr_show,
+       .store = dlm_attr_store,
+};
+
+static struct kobj_type dlm_ktype = {
+       .default_attrs = dlm_attrs,
+       .sysfs_ops     = &dlm_attr_ops,
+};
+
+static struct kset dlm_kset = {
+       .subsys = &kernel_subsys,
+       .kobj   = {.name = "dlm",},
+       .ktype  = &dlm_ktype,
+};
+
+static int kobject_setup(struct dlm_ls *ls)
+{
+       char lsname[DLM_LOCKSPACE_LEN];
+       int error;
+
+       memset(lsname, 0, DLM_LOCKSPACE_LEN);
+       snprintf(lsname, DLM_LOCKSPACE_LEN, "%s", ls->ls_name);
+
+       error = kobject_set_name(&ls->ls_kobj, "%s", lsname);
+       if (error)
+               return error;
+
+       ls->ls_kobj.kset = &dlm_kset;
+       ls->ls_kobj.ktype = &dlm_ktype;
+       return 0;
+}
+
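+/* notify userspace (ONLINE/OFFLINE uevent) and sleep until it reports
+   back by writing a result to the "event_done" attribute, which sets
+   LSFL_UEVENT_WAIT and wakes us */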
+static int do_uevent(struct dlm_ls *ls, int in)
+{
+       int error;
+
+       if (in)
+               kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
+       else
+               kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
+
+       error = wait_event_interruptible(ls->ls_uevent_wait,
+                       test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
+       if (error)
+               goto out;
+
+       error = ls->ls_uevent_result;
+ out:
+       return error;
+}
+
+
+int dlm_lockspace_init(void)
+{
+       int error;
+
+       ls_count = 0;
+       mutex_init(&ls_lock);
+       INIT_LIST_HEAD(&lslist);
+       spin_lock_init(&lslist_lock);
+
+       error = kset_register(&dlm_kset);
+       if (error)
+               printk("dlm_lockspace_init: cannot register kset %d\n", error);
+       return error;
+}
+
+void dlm_lockspace_exit(void)
+{
+       kset_unregister(&dlm_kset);
+}
+
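+/* kernel thread that periodically calls dlm_scan_rsbs() on every
+   lockspace (once every scan_secs seconds) */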
+static int dlm_scand(void *data)
+{
+       struct dlm_ls *ls;
+
+       while (!kthread_should_stop()) {
+               list_for_each_entry(ls, &lslist, ls_list)
+                       dlm_scan_rsbs(ls);
+               schedule_timeout_interruptible(dlm_config.scan_secs * HZ);
+       }
+       return 0;
+}
+
+static int dlm_scand_start(void)
+{
+       struct task_struct *p;
+       int error = 0;
+
+       p = kthread_run(dlm_scand, NULL, "dlm_scand");
+       if (IS_ERR(p))
+               error = PTR_ERR(p);
+       else
+               scand_task = p;
+       return error;
+}
+
+static void dlm_scand_stop(void)
+{
+       kthread_stop(scand_task);
+}
+
+static struct dlm_ls *dlm_find_lockspace_name(char *name, int namelen)
+{
+       struct dlm_ls *ls;
+
+       spin_lock(&lslist_lock);
+
+       list_for_each_entry(ls, &lslist, ls_list) {
+               if (ls->ls_namelen == namelen &&
+                   memcmp(ls->ls_name, name, namelen) == 0)
+                       goto out;
+       }
+       ls = NULL;
+ out:
+       spin_unlock(&lslist_lock);
+       return ls;
+}
+
+struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
+{
+       struct dlm_ls *ls;
+
+       spin_lock(&lslist_lock);
+
+       list_for_each_entry(ls, &lslist, ls_list) {
+               if (ls->ls_global_id == id) {
+                       ls->ls_count++;
+                       goto out;
+               }
+       }
+       ls = NULL;
+ out:
+       spin_unlock(&lslist_lock);
+       return ls;
+}
+
+struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
+{
+       struct dlm_ls *ls;
+
+       spin_lock(&lslist_lock);
+       list_for_each_entry(ls, &lslist, ls_list) {
+               if (ls->ls_local_handle == lockspace) {
+                       ls->ls_count++;
+                       goto out;
+               }
+       }
+       ls = NULL;
+ out:
+       spin_unlock(&lslist_lock);
+       return ls;
+}
+
+struct dlm_ls *dlm_find_lockspace_device(int minor)
+{
+       struct dlm_ls *ls;
+
+       spin_lock(&lslist_lock);
+       list_for_each_entry(ls, &lslist, ls_list) {
+               if (ls->ls_device.minor == minor) {
+                       ls->ls_count++;
+                       goto out;
+               }
+       }
+       ls = NULL;
+ out:
+       spin_unlock(&lslist_lock);
+       return ls;
+}
+
+void dlm_put_lockspace(struct dlm_ls *ls)
+{
+       spin_lock(&lslist_lock);
+       ls->ls_count--;
+       spin_unlock(&lslist_lock);
+}
+
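+/* wait (sleeping one second per pass) until all references taken by
+   dlm_find_lockspace_*() have been put, then unlink the lockspace */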
+static void remove_lockspace(struct dlm_ls *ls)
+{
+       for (;;) {
+               spin_lock(&lslist_lock);
+               if (ls->ls_count == 0) {
+                       list_del(&ls->ls_list);
+                       spin_unlock(&lslist_lock);
+                       return;
+               }
+               spin_unlock(&lslist_lock);
+               ssleep(1);
+       }
+}
+
+static int threads_start(void)
+{
+       int error;
+
+       /* Thread which processes lock requests for all lockspaces */
+       error = dlm_astd_start();
+       if (error) {
+               log_print("cannot start dlm_astd thread %d", error);
+               goto fail;
+       }
+
+       error = dlm_scand_start();
+       if (error) {
+               log_print("cannot start dlm_scand thread %d", error);
+               goto astd_fail;
+       }
+
+       /* Thread for sending/receiving messages for all lockspaces */
+       error = dlm_lowcomms_start();
+       if (error) {
+               log_print("cannot start dlm lowcomms %d", error);
+               goto scand_fail;
+       }
+
+       return 0;
+
+ scand_fail:
+       dlm_scand_stop();
+ astd_fail:
+       dlm_astd_stop();
+ fail:
+       return error;
+}
+
+static void threads_stop(void)
+{
+       dlm_scand_stop();
+       dlm_lowcomms_stop();
+       dlm_astd_stop();
+}
+
+static int new_lockspace(char *name, int namelen, void **lockspace,
+                        uint32_t flags, int lvblen)
+{
+       struct dlm_ls *ls;
+       int i, size, error = -ENOMEM;
+
+       if (namelen > DLM_LOCKSPACE_LEN)
+               return -EINVAL;
+
+       if (!lvblen || (lvblen % 8))
+               return -EINVAL;
+
+       if (!try_module_get(THIS_MODULE))
+               return -EINVAL;
+
+       ls = dlm_find_lockspace_name(name, namelen);
+       if (ls) {
+               *lockspace = ls;
+               module_put(THIS_MODULE);
+               return -EEXIST;
+       }
+
+       ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
+       if (!ls)
+               goto out;
+       memcpy(ls->ls_name, name, namelen);
+       ls->ls_namelen = namelen;
+       ls->ls_exflags = flags;
+       ls->ls_lvblen = lvblen;
+       ls->ls_count = 0;
+       ls->ls_flags = 0;
+
+       size = dlm_config.rsbtbl_size;
+       ls->ls_rsbtbl_size = size;
+
+       ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
+       if (!ls->ls_rsbtbl)
+               goto out_lsfree;
+       for (i = 0; i < size; i++) {
+               INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
+               INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
+               rwlock_init(&ls->ls_rsbtbl[i].lock);
+       }
+
+       size = dlm_config.lkbtbl_size;
+       ls->ls_lkbtbl_size = size;
+
+       ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
+       if (!ls->ls_lkbtbl)
+               goto out_rsbfree;
+       for (i = 0; i < size; i++) {
+               INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
+               rwlock_init(&ls->ls_lkbtbl[i].lock);
+               ls->ls_lkbtbl[i].counter = 1;
+       }
+
+       size = dlm_config.dirtbl_size;
+       ls->ls_dirtbl_size = size;
+
+       ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
+       if (!ls->ls_dirtbl)
+               goto out_lkbfree;
+       for (i = 0; i < size; i++) {
+               INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
+               rwlock_init(&ls->ls_dirtbl[i].lock);
+       }
+
+       INIT_LIST_HEAD(&ls->ls_waiters);
+       mutex_init(&ls->ls_waiters_mutex);
+
+       INIT_LIST_HEAD(&ls->ls_nodes);
+       INIT_LIST_HEAD(&ls->ls_nodes_gone);
+       ls->ls_num_nodes = 0;
+       ls->ls_low_nodeid = 0;
+       ls->ls_total_weight = 0;
+       ls->ls_node_array = NULL;
+
+       memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
+       ls->ls_stub_rsb.res_ls = ls;
+
+       ls->ls_debug_rsb_dentry = NULL;
+       ls->ls_debug_waiters_dentry = NULL;
+
+       init_waitqueue_head(&ls->ls_uevent_wait);
+       ls->ls_uevent_result = 0;
+
+       ls->ls_recoverd_task = NULL;
+       mutex_init(&ls->ls_recoverd_active);
+       spin_lock_init(&ls->ls_recover_lock);
+       ls->ls_recover_status = 0;
+       ls->ls_recover_seq = 0;
+       ls->ls_recover_args = NULL;
+       init_rwsem(&ls->ls_in_recovery);
+       INIT_LIST_HEAD(&ls->ls_requestqueue);
+       mutex_init(&ls->ls_requestqueue_mutex);
+       mutex_init(&ls->ls_clear_proc_locks);
+
+       ls->ls_recover_buf = kmalloc(dlm_config.buffer_size, GFP_KERNEL);
+       if (!ls->ls_recover_buf)
+               goto out_dirfree;
+
+       INIT_LIST_HEAD(&ls->ls_recover_list);
+       spin_lock_init(&ls->ls_recover_list_lock);
+       ls->ls_recover_list_count = 0;
+       ls->ls_local_handle = ls;
+       init_waitqueue_head(&ls->ls_wait_general);
+       INIT_LIST_HEAD(&ls->ls_root_list);
+       init_rwsem(&ls->ls_root_sem);
+
+       down_write(&ls->ls_in_recovery);
+
+       spin_lock(&lslist_lock);
+       list_add(&ls->ls_list, &lslist);
+       spin_unlock(&lslist_lock);
+
+       /* needs to find ls in lslist */
+       error = dlm_recoverd_start(ls);
+       if (error) {
+               log_error(ls, "can't start dlm_recoverd %d", error);
+               goto out_rcomfree;
+       }
+
+       dlm_create_debug_file(ls);
+
+       error = kobject_setup(ls);
+       if (error)
+               goto out_del;
+
+       error = kobject_register(&ls->ls_kobj);
+       if (error)
+               goto out_del;
+
+       error = do_uevent(ls, 1);
+       if (error)
+               goto out_unreg;
+
+       *lockspace = ls;
+       return 0;
+
+ out_unreg:
+       kobject_unregister(&ls->ls_kobj);
+ out_del:
+       dlm_delete_debug_file(ls);
+       dlm_recoverd_stop(ls);
+ out_rcomfree:
+       spin_lock(&lslist_lock);
+       list_del(&ls->ls_list);
+       spin_unlock(&lslist_lock);
+       kfree(ls->ls_recover_buf);
+ out_dirfree:
+       kfree(ls->ls_dirtbl);
+ out_lkbfree:
+       kfree(ls->ls_lkbtbl);
+ out_rsbfree:
+       kfree(ls->ls_rsbtbl);
+ out_lsfree:
+       kfree(ls);
+ out:
+       module_put(THIS_MODULE);
+       return error;
+}
+
+int dlm_new_lockspace(char *name, int namelen, void **lockspace,
+                     uint32_t flags, int lvblen)
+{
+       int error = 0;
+
+       mutex_lock(&ls_lock);
+       if (!ls_count)
+               error = threads_start();
+       if (error)
+               goto out;
+
+       error = new_lockspace(name, namelen, lockspace, flags, lvblen);
+       if (!error)
+               ls_count++;
+ out:
+       mutex_unlock(&ls_lock);
+       return error;
+}
+
+/* Return 1 if the lockspace still has active remote locks,
+ *        2 if the lockspace still has active local locks.
+ */
+static int lockspace_busy(struct dlm_ls *ls)
+{
+       int i, lkb_found = 0;
+       struct dlm_lkb *lkb;
+
+       /* NOTE: We check the lkb table here rather than the resource table.
+          This is because there may be LKBs queued as ASTs that have been
+          unlinked from their RSBs and are pending deletion once the AST has
+          been delivered */
+
+       for (i = 0; i < ls->ls_lkbtbl_size; i++) {
+               read_lock(&ls->ls_lkbtbl[i].lock);
+               if (!list_empty(&ls->ls_lkbtbl[i].list)) {
+                       lkb_found = 1;
+                       list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
+                                           lkb_idtbl_list) {
+                               if (!lkb->lkb_nodeid) {
+                                       read_unlock(&ls->ls_lkbtbl[i].lock);
+                                       return 2;
+                               }
+                       }
+               }
+               read_unlock(&ls->ls_lkbtbl[i].lock);
+       }
+       return lkb_found;
+}
+
+static int release_lockspace(struct dlm_ls *ls, int force)
+{
+       struct dlm_lkb *lkb;
+       struct dlm_rsb *rsb;
+       struct list_head *head;
+       int i;
+       int busy = lockspace_busy(ls);
+
+       if (busy > force)
+               return -EBUSY;
+
+       if (force < 3)
+               do_uevent(ls, 0);
+
+       dlm_recoverd_stop(ls);
+
+       remove_lockspace(ls);
+
+       dlm_delete_debug_file(ls);
+
+       dlm_astd_suspend();
+
+       kfree(ls->ls_recover_buf);
+
+       /*
+        * Free direntry structs.
+        */
+
+       dlm_dir_clear(ls);
+       kfree(ls->ls_dirtbl);
+
+       /*
+        * Free all lkb's on lkbtbl[] lists.
+        */
+
+       for (i = 0; i < ls->ls_lkbtbl_size; i++) {
+               head = &ls->ls_lkbtbl[i].list;
+               while (!list_empty(head)) {
+                       lkb = list_entry(head->next, struct dlm_lkb,
+                                        lkb_idtbl_list);
+
+                       list_del(&lkb->lkb_idtbl_list);
+
+                       dlm_del_ast(lkb);
+
+                       if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
+                               free_lvb(lkb->lkb_lvbptr);
+
+                       free_lkb(lkb);
+               }
+       }
+       dlm_astd_resume();
+
+       kfree(ls->ls_lkbtbl);
+
+       /*
+        * Free all rsb's on rsbtbl[] lists
+        */
+
+       for (i = 0; i < ls->ls_rsbtbl_size; i++) {
+               head = &ls->ls_rsbtbl[i].list;
+               while (!list_empty(head)) {
+                       rsb = list_entry(head->next, struct dlm_rsb,
+                                        res_hashchain);
+
+                       list_del(&rsb->res_hashchain);
+                       free_rsb(rsb);
+               }
+
+               head = &ls->ls_rsbtbl[i].toss;
+               while (!list_empty(head)) {
+                       rsb = list_entry(head->next, struct dlm_rsb,
+                                        res_hashchain);
+                       list_del(&rsb->res_hashchain);
+                       free_rsb(rsb);
+               }
+       }
+
+       kfree(ls->ls_rsbtbl);
+
+       /*
+        * Free structures on any other lists
+        */
+
+       kfree(ls->ls_recover_args);
+       dlm_clear_free_entries(ls);
+       dlm_clear_members(ls);
+       dlm_clear_members_gone(ls);
+       kfree(ls->ls_node_array);
+       kobject_unregister(&ls->ls_kobj);
+       kfree(ls);
+
+       mutex_lock(&ls_lock);
+       ls_count--;
+       if (!ls_count)
+               threads_stop();
+       mutex_unlock(&ls_lock);
+
+       module_put(THIS_MODULE);
+       return 0;
+}
+
+/*
+ * Called when a system has released all its locks and is not going to use the
+ * lockspace any longer.  We free everything we're managing for this lockspace.
+ * Remaining nodes will go through the recovery process as if we'd died.  The
+ * lockspace must continue to function as usual, participating in recoveries,
+ * until this returns.
+ *
+ * Force has 4 possible values:
+ * 0 - don't destroy lockspace if it has any LKBs
+ * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
+ * 2 - destroy lockspace regardless of LKBs
+ * 3 - destroy lockspace as part of a forced shutdown
+ */
+
+int dlm_release_lockspace(void *lockspace, int force)
+{
+       struct dlm_ls *ls;
+
+       ls = dlm_find_lockspace_local(lockspace);
+       if (!ls)
+               return -EINVAL;
+       dlm_put_lockspace(ls);
+       return release_lockspace(ls, force);
+}
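+
+/*
+ * Example (illustrative only, not part of this patch): a typical caller
+ * tries a normal release first and escalates only if locks remain:
+ *
+ *	error = dlm_release_lockspace(lockspace, 0);
+ *	if (error == -EBUSY)
+ *		error = dlm_release_lockspace(lockspace, 2);
+ */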
+
diff --git a/fs/dlm/lockspace.h b/fs/dlm/lockspace.h
new file mode 100644 (file)
index 0000000..891eabb
--- /dev/null
@@ -0,0 +1,25 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+#ifndef __LOCKSPACE_DOT_H__
+#define __LOCKSPACE_DOT_H__
+
+int dlm_lockspace_init(void);
+void dlm_lockspace_exit(void);
+struct dlm_ls *dlm_find_lockspace_global(uint32_t id);
+struct dlm_ls *dlm_find_lockspace_local(void *id);
+struct dlm_ls *dlm_find_lockspace_device(int minor);
+void dlm_put_lockspace(struct dlm_ls *ls);
+
+#endif                         /* __LOCKSPACE_DOT_H__ */
+
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
new file mode 100644 (file)
index 0000000..23f5ce1
--- /dev/null
@@ -0,0 +1,1238 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+/*
+ * lowcomms.c
+ *
+ * This is the "low-level" comms layer.
+ *
+ * It is responsible for sending/receiving messages
+ * from other nodes in the cluster.
+ *
+ * Cluster nodes are referred to by their nodeids. nodeids are
+ * simply 32 bit numbers to the locking module - if they need to
+ * be expanded for the cluster infrastructure then that is its
+ * responsibility. It is this layer's responsibility to resolve
+ * these into IP addresses or whatever it needs for inter-node
+ * communication.
+ *
+ * The comms level is two kernel threads that deal mainly with
+ * the receiving of messages from other nodes and passing them
+ * up to the mid-level comms layer (which understands the
+ * message format) for execution by the locking core, and
+ * a send thread which does all the setting up of connections
+ * to remote nodes and the sending of data. Threads are not allowed
+ * to send their own data because it may cause them to wait in times
+ * of high load. Also, this way, the sending thread can collect together
+ * messages bound for one node and send them in one block.
+ *
+ * I don't see any problem with the recv thread executing the locking
+ * code on behalf of remote processes as the locking code is
+ * short, efficient and never (well, hardly ever) waits.
+ *
+ */
+
+#include <asm/ioctls.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/sctp/user.h>
+#include <linux/pagemap.h>
+#include <linux/socket.h>
+#include <linux/idr.h>
+
+#include "dlm_internal.h"
+#include "lowcomms.h"
+#include "config.h"
+#include "midcomms.h"
+
+static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
+static int                     dlm_local_count;
+static int                     dlm_local_nodeid;
+
+/* One of these per connected node */
+
+#define NI_INIT_PENDING 1
+#define NI_WRITE_PENDING 2
+
+struct nodeinfo {
+       spinlock_t              lock;
+       sctp_assoc_t            assoc_id;
+       unsigned long           flags;
+       struct list_head        write_list; /* nodes with pending writes */
+       struct list_head        writequeue; /* outgoing writequeue_entries */
+       spinlock_t              writequeue_lock;
+       int                     nodeid;
+};
+
+static DEFINE_IDR(nodeinfo_idr);
+static struct rw_semaphore     nodeinfo_lock;
+static int                     max_nodeid;
+
+struct cbuf {
+       unsigned                base;
+       unsigned                len;
+       unsigned                mask;
+};
+
+/* Just the one of these, now. But this struct keeps
+   the connection-specific variables together */
+
+#define CF_READ_PENDING 1
+
+struct connection {
+       struct socket          *sock;
+       unsigned long           flags;
+       struct page            *rx_page;
+       atomic_t                waiting_requests;
+       struct cbuf             cb;
+       int                     eagain_flag;
+};
+
+/* An entry waiting to be sent */
+
+struct writequeue_entry {
+       struct list_head        list;
+       struct page            *page;
+       int                     offset;
+       int                     len;
+       int                     end;
+       int                     users;
+       struct nodeinfo        *ni;
+};
+
+#define CBUF_ADD(cb, n) do { (cb)->len += (n); } while(0)
+#define CBUF_EMPTY(cb) ((cb)->len == 0)
+#define CBUF_MAY_ADD(cb, n) (((cb)->len + (n)) < ((cb)->mask + 1))
+#define CBUF_DATA(cb) (((cb)->base + (cb)->len) & (cb)->mask)
+
+#define CBUF_INIT(cb, size) \
+do { \
+       (cb)->base = (cb)->len = 0; \
+       (cb)->mask = ((size)-1); \
+} while(0)
+
+#define CBUF_EAT(cb, n) \
+do { \
+       (cb)->len  -= (n); \
+       (cb)->base += (n); \
+       (cb)->base &= (cb)->mask; \
+} while(0)
+
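+/* Worked example of the circular buffer above (illustrative): with a 4096
+   byte buffer, CBUF_INIT(cb, 4096) gives mask 0xfff; after CBUF_ADD(cb, 100)
+   and CBUF_EAT(cb, 60), base is 60, len is 40 and CBUF_DATA(cb) - the offset
+   at which new data lands - is 100. */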
+
+/* List of nodes which have writes pending */
+static struct list_head write_nodes;
+static spinlock_t write_nodes_lock;
+
+/* Maximum number of incoming messages to process before
+ * doing a schedule()
+ */
+#define MAX_RX_MSG_COUNT 25
+
+/* Manage daemons */
+static struct task_struct *recv_task;
+static struct task_struct *send_task;
+static wait_queue_head_t lowcomms_recv_wait;
+static atomic_t accepting;
+
+/* The SCTP connection */
+static struct connection sctp_con;
+
+
+static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
+{
+       struct sockaddr_storage addr;
+       int error;
+
+       if (!dlm_local_count)
+               return -1;
+
+       error = dlm_nodeid_to_addr(nodeid, &addr);
+       if (error)
+               return error;
+
+       if (dlm_local_addr[0]->ss_family == AF_INET) {
+               struct sockaddr_in *in4  = (struct sockaddr_in *) &addr;
+               struct sockaddr_in *ret4 = (struct sockaddr_in *) retaddr;
+               ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
+       } else {
+               struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &addr;
+               struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
+               memcpy(&ret6->sin6_addr, &in6->sin6_addr,
+                      sizeof(in6->sin6_addr));
+       }
+
+       return 0;
+}
+
+static struct nodeinfo *nodeid2nodeinfo(int nodeid, int alloc)
+{
+       struct nodeinfo *ni;
+       int r;
+       int n;
+
+       down_read(&nodeinfo_lock);
+       ni = idr_find(&nodeinfo_idr, nodeid);
+       up_read(&nodeinfo_lock);
+
+       if (!ni && alloc) {
+               down_write(&nodeinfo_lock);
+
+               ni = idr_find(&nodeinfo_idr, nodeid);
+               if (ni)
+                       goto out_up;
+
+               r = idr_pre_get(&nodeinfo_idr, alloc);
+               if (!r)
+                       goto out_up;
+
+               ni = kmalloc(sizeof(struct nodeinfo), alloc);
+               if (!ni)
+                       goto out_up;
+
+               r = idr_get_new_above(&nodeinfo_idr, ni, nodeid, &n);
+               if (r) {
+                       kfree(ni);
+                       ni = NULL;
+                       goto out_up;
+               }
+               if (n != nodeid) {
+                       idr_remove(&nodeinfo_idr, n);
+                       kfree(ni);
+                       ni = NULL;
+                       goto out_up;
+               }
+               memset(ni, 0, sizeof(struct nodeinfo));
+               spin_lock_init(&ni->lock);
+               INIT_LIST_HEAD(&ni->writequeue);
+               spin_lock_init(&ni->writequeue_lock);
+               ni->nodeid = nodeid;
+
+               if (nodeid > max_nodeid)
+                       max_nodeid = nodeid;
+       out_up:
+               up_write(&nodeinfo_lock);
+       }
+
+       return ni;
+}
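+
+/*
+ * Note: nodeid2nodeinfo() uses a check/alloc/recheck pattern - an entry is
+ * looked up under the read lock first and, on a miss with alloc set, looked
+ * up again under the write lock before being inserted, so two racing
+ * callers cannot both allocate a nodeinfo for the same nodeid.
+ */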
+
+/* Don't call this too often: it does a linear search of all nodeids
+   up to max_nodeid */
+static struct nodeinfo *assoc2nodeinfo(sctp_assoc_t assoc)
+{
+       int i;
+       struct nodeinfo *ni;
+
+       for (i=1; i<=max_nodeid; i++) {
+               ni = nodeid2nodeinfo(i, 0);
+               if (ni && ni->assoc_id == assoc)
+                       return ni;
+       }
+       return NULL;
+}
+
+/* Data or notification available on socket */
+static void lowcomms_data_ready(struct sock *sk, int count_unused)
+{
+       atomic_inc(&sctp_con.waiting_requests);
+       if (test_and_set_bit(CF_READ_PENDING, &sctp_con.flags))
+               return;
+
+       wake_up_interruptible(&lowcomms_recv_wait);
+}
+
+
+/* Add the port number to an IP6 or 4 sockaddr and return the address length.
+   Also pad out the struct with zeros to make comparisons meaningful */
+
+static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
+                         int *addr_len)
+{
+       struct sockaddr_in *local4_addr;
+       struct sockaddr_in6 *local6_addr;
+
+       if (!dlm_local_count)
+               return;
+
+       if (!port) {
+               if (dlm_local_addr[0]->ss_family == AF_INET) {
+                       local4_addr = (struct sockaddr_in *)dlm_local_addr[0];
+                       port = be16_to_cpu(local4_addr->sin_port);
+               } else {
+                       local6_addr = (struct sockaddr_in6 *)dlm_local_addr[0];
+                       port = be16_to_cpu(local6_addr->sin6_port);
+               }
+       }
+
+       saddr->ss_family = dlm_local_addr[0]->ss_family;
+       if (dlm_local_addr[0]->ss_family == AF_INET) {
+               struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
+               in4_addr->sin_port = cpu_to_be16(port);
+               memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
+               memset(in4_addr+1, 0, sizeof(struct sockaddr_storage) -
+                                     sizeof(struct sockaddr_in));
+               *addr_len = sizeof(struct sockaddr_in);
+       } else {
+               struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
+               in6_addr->sin6_port = cpu_to_be16(port);
+               memset(in6_addr+1, 0, sizeof(struct sockaddr_storage) -
+                                     sizeof(struct sockaddr_in6));
+               *addr_len = sizeof(struct sockaddr_in6);
+       }
+}
+
+/* Close the connection and tidy up */
+static void close_connection(void)
+{
+       if (sctp_con.sock) {
+               sock_release(sctp_con.sock);
+               sctp_con.sock = NULL;
+       }
+
+       if (sctp_con.rx_page) {
+               __free_page(sctp_con.rx_page);
+               sctp_con.rx_page = NULL;
+       }
+}
+
+/* We only send shutdown messages to nodes that are not part of the cluster */
+static void send_shutdown(sctp_assoc_t associd)
+{
+       static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
+       struct msghdr outmessage;
+       struct cmsghdr *cmsg;
+       struct sctp_sndrcvinfo *sinfo;
+       int ret;
+
+       outmessage.msg_name = NULL;
+       outmessage.msg_namelen = 0;
+       outmessage.msg_control = outcmsg;
+       outmessage.msg_controllen = sizeof(outcmsg);
+       outmessage.msg_flags = MSG_EOR;
+
+       cmsg = CMSG_FIRSTHDR(&outmessage);
+       cmsg->cmsg_level = IPPROTO_SCTP;
+       cmsg->cmsg_type = SCTP_SNDRCV;
+       cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
+       outmessage.msg_controllen = cmsg->cmsg_len;
+       sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
+       memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
+
+       sinfo->sinfo_flags |= MSG_EOF;
+       sinfo->sinfo_assoc_id = associd;
+
+       ret = kernel_sendmsg(sctp_con.sock, &outmessage, NULL, 0, 0);
+
+       if (ret != 0)
+               log_print("send EOF to node failed: %d", ret);
+}
+
+
+/* INIT failed but we don't know which node...
+   restart INIT on all pending nodes */
+static void init_failed(void)
+{
+       int i;
+       struct nodeinfo *ni;
+
+       for (i=1; i<=max_nodeid; i++) {
+               ni = nodeid2nodeinfo(i, 0);
+               if (!ni)
+                       continue;
+
+               if (test_and_clear_bit(NI_INIT_PENDING, &ni->flags)) {
+                       ni->assoc_id = 0;
+                       if (!test_and_set_bit(NI_WRITE_PENDING, &ni->flags)) {
+                               spin_lock_bh(&write_nodes_lock);
+                               list_add_tail(&ni->write_list, &write_nodes);
+                               spin_unlock_bh(&write_nodes_lock);
+                       }
+               }
+       }
+       wake_up_process(send_task);
+}
+
+/* Something happened to an association */
+static void process_sctp_notification(struct msghdr *msg, char *buf)
+{
+       union sctp_notification *sn = (union sctp_notification *)buf;
+
+       if (sn->sn_header.sn_type == SCTP_ASSOC_CHANGE) {
+               switch (sn->sn_assoc_change.sac_state) {
+
+               case SCTP_COMM_UP:
+               case SCTP_RESTART:
+               {
+                       /* Check that the new node is in the lockspace */
+                       struct sctp_prim prim;
+                       mm_segment_t fs;
+                       int nodeid;
+                       int prim_len, ret;
+                       int addr_len;
+                       struct nodeinfo *ni;
+
+                       /* This seems to happen when we receive a connection
+                        * too early; it happens, but we always seem to get a
+                        * real message too, see receive_from_sock */
+
+                       if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) {
+                               log_print("COMM_UP for invalid assoc ID %d",
+                                        (int)sn->sn_assoc_change.sac_assoc_id);
+                               init_failed();
+                               return;
+                       }
+                       memset(&prim, 0, sizeof(struct sctp_prim));
+                       prim_len = sizeof(struct sctp_prim);
+                       prim.ssp_assoc_id = sn->sn_assoc_change.sac_assoc_id;
+
+                       fs = get_fs();
+                       set_fs(get_ds());
+                       ret = sctp_con.sock->ops->getsockopt(sctp_con.sock,
+                                               IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
+                                               (char*)&prim, &prim_len);
+                       set_fs(fs);
+                       if (ret < 0) {
+                               struct nodeinfo *ni;
+
+                               log_print("getsockopt/sctp_primary_addr on "
+                                         "new assoc %d failed : %d",
+                                   (int)sn->sn_assoc_change.sac_assoc_id, ret);
+
+                               /* Retry INIT later */
+                               ni = assoc2nodeinfo(sn->sn_assoc_change.sac_assoc_id);
+                               if (ni)
+                                       clear_bit(NI_INIT_PENDING, &ni->flags);
+                               return;
+                       }
+                       make_sockaddr(&prim.ssp_addr, 0, &addr_len);
+                       if (dlm_addr_to_nodeid(&prim.ssp_addr, &nodeid)) {
+                               log_print("reject connect from unknown addr");
+                               send_shutdown(prim.ssp_assoc_id);
+                               return;
+                       }
+
+                       ni = nodeid2nodeinfo(nodeid, GFP_KERNEL);
+                       if (!ni)
+                               return;
+
+                       /* Save the assoc ID */
+                       spin_lock(&ni->lock);
+                       ni->assoc_id = sn->sn_assoc_change.sac_assoc_id;
+                       spin_unlock(&ni->lock);
+
+                       log_print("got new/restarted association %d nodeid %d",
+                              (int)sn->sn_assoc_change.sac_assoc_id, nodeid);
+
+                       /* Send any pending writes */
+                       clear_bit(NI_INIT_PENDING, &ni->flags);
+                       if (!test_and_set_bit(NI_WRITE_PENDING, &ni->flags)) {
+                               spin_lock_bh(&write_nodes_lock);
+                               list_add_tail(&ni->write_list, &write_nodes);
+                               spin_unlock_bh(&write_nodes_lock);
+                       }
+                       wake_up_process(send_task);
+               }
+               break;
+
+               case SCTP_COMM_LOST:
+               case SCTP_SHUTDOWN_COMP:
+               {
+                       struct nodeinfo *ni;
+
+                       ni = assoc2nodeinfo(sn->sn_assoc_change.sac_assoc_id);
+                       if (ni) {
+                               spin_lock(&ni->lock);
+                               ni->assoc_id = 0;
+                               spin_unlock(&ni->lock);
+                       }
+               }
+               break;
+
+               /* We don't know which INIT failed, so clear the PENDING flag
+                * on all nodes.  If assoc_id is zero the INIT will then be
+                * retried */
+
+               case SCTP_CANT_STR_ASSOC:
+               {
+                       log_print("Can't start SCTP association - retrying");
+                       init_failed();
+               }
+               break;
+
+               default:
+                       log_print("unexpected SCTP assoc change id=%d state=%d",
+                                 (int)sn->sn_assoc_change.sac_assoc_id,
+                                 sn->sn_assoc_change.sac_state);
+               }
+       }
+}
+
+/* Data received from remote end */
+static int receive_from_sock(void)
+{
+       int ret = 0;
+       struct msghdr msg;
+       struct kvec iov[2];
+       unsigned len;
+       int r, nvec = 1;
+       struct sctp_sndrcvinfo *sinfo;
+       struct cmsghdr *cmsg;
+       struct nodeinfo *ni;
+
+       /* These two are marginally too big for stack allocation, but this
+        * function is (currently) only called by dlm_recvd so static should be
+        * OK.
+        */
+       static struct sockaddr_storage msgname;
+       static char incmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
+
+       if (sctp_con.sock == NULL)
+               goto out;
+
+       if (sctp_con.rx_page == NULL) {
+               /*
+                * This doesn't need to be atomic, but I think it should
+                * improve performance if it is.
+                */
+               sctp_con.rx_page = alloc_page(GFP_ATOMIC);
+               if (sctp_con.rx_page == NULL)
+                       goto out_resched;
+               CBUF_INIT(&sctp_con.cb, PAGE_CACHE_SIZE);
+       }
+
+       memset(&incmsg, 0, sizeof(incmsg));
+       memset(&msgname, 0, sizeof(msgname));
+
+       msg.msg_name = &msgname;
+       msg.msg_namelen = sizeof(msgname);
+       msg.msg_flags = 0;
+       msg.msg_control = incmsg;
+       msg.msg_controllen = sizeof(incmsg);
+
+       /* I don't see why this circular buffer stuff is necessary for SCTP
+        * which is a packet-based protocol, but the whole thing breaks under
+        * load without it! The overhead is minimal (and is in the TCP lowcomms
+        * anyway, of course) so I'll leave it in until I can figure out what's
+        * really happening.
+        */
+
+       /*
+        * iov[0] is the bit of the circular buffer between the current end
+        * point (cb.base + cb.len) and the end of the buffer.
+        */
+       iov[0].iov_len = sctp_con.cb.base - CBUF_DATA(&sctp_con.cb);
+       iov[0].iov_base = page_address(sctp_con.rx_page) +
+                         CBUF_DATA(&sctp_con.cb);
+       iov[1].iov_len = 0;
+
+       /*
+        * iov[1] is the bit of the circular buffer between the start of the
+        * buffer and the start of the currently used section (cb.base)
+        */
+       if (CBUF_DATA(&sctp_con.cb) >= sctp_con.cb.base) {
+               iov[0].iov_len = PAGE_CACHE_SIZE - CBUF_DATA(&sctp_con.cb);
+               iov[1].iov_len = sctp_con.cb.base;
+               iov[1].iov_base = page_address(sctp_con.rx_page);
+               nvec = 2;
+       }
+       len = iov[0].iov_len + iov[1].iov_len;
+
+       /* pass the actual iovec count so a wrapped read fills both segments */
+       r = ret = kernel_recvmsg(sctp_con.sock, &msg, iov, nvec, len,
+                                MSG_NOSIGNAL | MSG_DONTWAIT);
+       if (ret <= 0)
+               goto out_close;
+
+       msg.msg_control = incmsg;
+       msg.msg_controllen = sizeof(incmsg);
+       cmsg = CMSG_FIRSTHDR(&msg);
+       sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
+
+       if (msg.msg_flags & MSG_NOTIFICATION) {
+               process_sctp_notification(&msg, page_address(sctp_con.rx_page));
+               return 0;
+       }
+
+       /* Is this a new association ? */
+       ni = nodeid2nodeinfo(le32_to_cpu(sinfo->sinfo_ppid), GFP_KERNEL);
+       if (ni) {
+               ni->assoc_id = sinfo->sinfo_assoc_id;
+               if (test_and_clear_bit(NI_INIT_PENDING, &ni->flags)) {
+
+                       if (!test_and_set_bit(NI_WRITE_PENDING, &ni->flags)) {
+                               spin_lock_bh(&write_nodes_lock);
+                               list_add_tail(&ni->write_list, &write_nodes);
+                               spin_unlock_bh(&write_nodes_lock);
+                       }
+                       wake_up_process(send_task);
+               }
+       }
+
+       /* INIT sends a message with length of 1 - ignore it */
+       if (r == 1)
+               return 0;
+
+       CBUF_ADD(&sctp_con.cb, ret);
+       ret = dlm_process_incoming_buffer(cpu_to_le32(sinfo->sinfo_ppid),
+                                         page_address(sctp_con.rx_page),
+                                         sctp_con.cb.base, sctp_con.cb.len,
+                                         PAGE_CACHE_SIZE);
+       if (ret < 0)
+               goto out_close;
+       CBUF_EAT(&sctp_con.cb, ret);
+
+      out:
+       ret = 0;
+       goto out_ret;
+
+      out_resched:
+       lowcomms_data_ready(sctp_con.sock->sk, 0);
+       ret = 0;
+       schedule();
+       goto out_ret;
+
+      out_close:
+       if (ret != -EAGAIN)
+               log_print("error reading from sctp socket: %d", ret);
+      out_ret:
+       return ret;
+}
+
+/* Bind to an IP address. SCTP allows multiple addresses so it can do
+   multi-homing */
+static int add_bind_addr(struct sockaddr_storage *addr, int addr_len, int num)
+{
+       mm_segment_t fs;
+       int result = 0;
+
+       fs = get_fs();
+       set_fs(get_ds());
+       if (num == 1)
+               result = sctp_con.sock->ops->bind(sctp_con.sock,
+                                       (struct sockaddr *) addr, addr_len);
+       else
+               result = sctp_con.sock->ops->setsockopt(sctp_con.sock, SOL_SCTP,
+                               SCTP_SOCKOPT_BINDX_ADD, (char *)addr, addr_len);
+       set_fs(fs);
+
+       if (result < 0)
+               log_print("Can't bind to port %d addr number %d",
+                         dlm_config.tcp_port, num);
+
+       return result;
+}
+
+static void init_local(void)
+{
+       struct sockaddr_storage sas, *addr;
+       int i;
+
+       dlm_local_nodeid = dlm_our_nodeid();
+
+       for (i = 0; i < DLM_MAX_ADDR_COUNT - 1; i++) {
+               if (dlm_our_addr(&sas, i))
+                       break;
+
+               addr = kmalloc(sizeof(*addr), GFP_KERNEL);
+               if (!addr)
+                       break;
+               memcpy(addr, &sas, sizeof(*addr));
+               dlm_local_addr[dlm_local_count++] = addr;
+       }
+}
+
+/* Initialise SCTP socket and bind to all interfaces */
+static int init_sock(void)
+{
+       mm_segment_t fs;
+       struct socket *sock = NULL;
+       struct sockaddr_storage localaddr;
+       struct sctp_event_subscribe subscribe;
+       int result = -EINVAL, num = 1, i, addr_len;
+
+       if (!dlm_local_count) {
+               init_local();
+               if (!dlm_local_count) {
+                       log_print("no local IP address has been set");
+                       goto out;
+               }
+       }
+
+       result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_SEQPACKET,
+                                 IPPROTO_SCTP, &sock);
+       if (result < 0) {
+               log_print("Can't create comms socket, check SCTP is loaded");
+               goto out;
+       }
+
+       /* Listen for events */
+       memset(&subscribe, 0, sizeof(subscribe));
+       subscribe.sctp_data_io_event = 1;
+       subscribe.sctp_association_event = 1;
+       subscribe.sctp_send_failure_event = 1;
+       subscribe.sctp_shutdown_event = 1;
+       subscribe.sctp_partial_delivery_event = 1;
+
+       fs = get_fs();
+       set_fs(get_ds());
+       result = sock->ops->setsockopt(sock, SOL_SCTP, SCTP_EVENTS,
+                                      (char *)&subscribe, sizeof(subscribe));
+       set_fs(fs);
+
+       if (result < 0) {
+               log_print("Failed to set SCTP_EVENTS on socket: result=%d",
+                         result);
+               goto create_delsock;
+       }
+
+       /* Init con struct */
+       sock->sk->sk_user_data = &sctp_con;
+       sctp_con.sock = sock;
+       sctp_con.sock->sk->sk_data_ready = lowcomms_data_ready;
+
+       /* Bind to all interfaces. */
+       for (i = 0; i < dlm_local_count; i++) {
+               memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
+               make_sockaddr(&localaddr, dlm_config.tcp_port, &addr_len);
+
+               result = add_bind_addr(&localaddr, addr_len, num);
+               if (result)
+                       goto create_delsock;
+               ++num;
+       }
+
+       result = sock->ops->listen(sock, 5);
+       if (result < 0) {
+               log_print("Can't set socket listening");
+               goto create_delsock;
+       }
+
+       return 0;
+
+ create_delsock:
+       sock_release(sock);
+       sctp_con.sock = NULL;
+ out:
+       return result;
+}
+
+
+static struct writequeue_entry *new_writequeue_entry(int allocation)
+{
+       struct writequeue_entry *entry;
+
+       entry = kmalloc(sizeof(struct writequeue_entry), allocation);
+       if (!entry)
+               return NULL;
+
+       entry->page = alloc_page(allocation);
+       if (!entry->page) {
+               kfree(entry);
+               return NULL;
+       }
+
+       entry->offset = 0;
+       entry->len = 0;
+       entry->end = 0;
+       entry->users = 0;
+
+       return entry;
+}
+
+void *dlm_lowcomms_get_buffer(int nodeid, int len, int allocation, char **ppc)
+{
+       struct writequeue_entry *e;
+       int offset = 0;
+       int users = 0;
+       struct nodeinfo *ni;
+
+       if (!atomic_read(&accepting))
+               return NULL;
+
+       ni = nodeid2nodeinfo(nodeid, allocation);
+       if (!ni)
+               return NULL;
+
+       spin_lock(&ni->writequeue_lock);
+       e = list_entry(ni->writequeue.prev, struct writequeue_entry, list);
+       if (((struct list_head *) e == &ni->writequeue) ||
+           (PAGE_CACHE_SIZE - e->end < len)) {
+               e = NULL;
+       } else {
+               offset = e->end;
+               e->end += len;
+               users = e->users++;
+       }
+       spin_unlock(&ni->writequeue_lock);
+
+       if (e) {
+             got_one:
+               if (users == 0)
+                       kmap(e->page);
+               *ppc = page_address(e->page) + offset;
+               return e;
+       }
+
+       e = new_writequeue_entry(allocation);
+       if (e) {
+               spin_lock(&ni->writequeue_lock);
+               offset = e->end;
+               e->end += len;
+               e->ni = ni;
+               users = e->users++;
+               list_add_tail(&e->list, &ni->writequeue);
+               spin_unlock(&ni->writequeue_lock);
+               goto got_one;
+       }
+       return NULL;
+}
+
+void dlm_lowcomms_commit_buffer(void *arg)
+{
+       struct writequeue_entry *e = (struct writequeue_entry *) arg;
+       int users;
+       struct nodeinfo *ni = e->ni;
+
+       if (!atomic_read(&accepting))
+               return;
+
+       spin_lock(&ni->writequeue_lock);
+       users = --e->users;
+       if (users)
+               goto out;
+       e->len = e->end - e->offset;
+       kunmap(e->page);
+       spin_unlock(&ni->writequeue_lock);
+
+       if (!test_and_set_bit(NI_WRITE_PENDING, &ni->flags)) {
+               spin_lock_bh(&write_nodes_lock);
+               list_add_tail(&ni->write_list, &write_nodes);
+               spin_unlock_bh(&write_nodes_lock);
+               wake_up_process(send_task);
+       }
+       return;
+
+      out:
+       spin_unlock(&ni->writequeue_lock);
+       return;
+}
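+
+/*
+ * Illustrative pairing of the two functions above (not part of this patch):
+ * a caller reserves space, writes its message into the returned pointer,
+ * then commits, which queues the node for dlm_sendd:
+ *
+ *	char *mb;
+ *	void *mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_KERNEL, &mb);
+ *	if (!mh)
+ *		return -ENOBUFS;
+ *	memcpy(mb, msg, mb_len);
+ *	dlm_lowcomms_commit_buffer(mh);
+ */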
+
+static void free_entry(struct writequeue_entry *e)
+{
+       __free_page(e->page);
+       kfree(e);
+}
+
+/* Initiate an SCTP association. In theory we could just use sendmsg() on
+   the first IP address and it should work, but this allows us to set up the
+   association before sending any valuable data that we can't afford to lose.
+   It also keeps the send path clean as it can then always use the
+   association ID */
+static void initiate_association(int nodeid)
+{
+       struct sockaddr_storage rem_addr;
+       static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
+       struct msghdr outmessage;
+       struct cmsghdr *cmsg;
+       struct sctp_sndrcvinfo *sinfo;
+       int ret;
+       int addrlen;
+       char buf[1];
+       struct kvec iov[1];
+       struct nodeinfo *ni;
+
+       log_print("Initiating association with node %d", nodeid);
+
+       ni = nodeid2nodeinfo(nodeid, GFP_KERNEL);
+       if (!ni)
+               return;
+
+       if (nodeid_to_addr(nodeid, (struct sockaddr *)&rem_addr)) {
+               log_print("no address for nodeid %d", nodeid);
+               return;
+       }
+
+       make_sockaddr(&rem_addr, dlm_config.tcp_port, &addrlen);
+
+       outmessage.msg_name = &rem_addr;
+       outmessage.msg_namelen = addrlen;
+       outmessage.msg_control = outcmsg;
+       outmessage.msg_controllen = sizeof(outcmsg);
+       outmessage.msg_flags = MSG_EOR;
+
+       iov[0].iov_base = buf;
+       iov[0].iov_len = 1;
+
+       /* Real INIT messages seem to cause trouble. Just send a 1 byte message
+          we can afford to lose */
+       cmsg = CMSG_FIRSTHDR(&outmessage);
+       cmsg->cmsg_level = IPPROTO_SCTP;
+       cmsg->cmsg_type = SCTP_SNDRCV;
+       cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
+       sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
+       memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
+       sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid);
+
+       outmessage.msg_controllen = cmsg->cmsg_len;
+       ret = kernel_sendmsg(sctp_con.sock, &outmessage, iov, 1, 1);
+       if (ret < 0) {
+               log_print("send INIT to node failed: %d", ret);
+               /* Try again later */
+               clear_bit(NI_INIT_PENDING, &ni->flags);
+       }
+}
+
+/* Send a message */
+static int send_to_sock(struct nodeinfo *ni)
+{
+       int ret = 0;
+       struct writequeue_entry *e;
+       int len, offset;
+       struct msghdr outmsg;
+       static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
+       struct cmsghdr *cmsg;
+       struct sctp_sndrcvinfo *sinfo;
+       struct kvec iov;
+
+       /* See if we need to init an association before we start
+          sending precious messages */
+       spin_lock(&ni->lock);
+       if (!ni->assoc_id && !test_and_set_bit(NI_INIT_PENDING, &ni->flags)) {
+               spin_unlock(&ni->lock);
+               initiate_association(ni->nodeid);
+               return 0;
+       }
+       spin_unlock(&ni->lock);
+
+       outmsg.msg_name = NULL; /* We use assoc_id */
+       outmsg.msg_namelen = 0;
+       outmsg.msg_control = outcmsg;
+       outmsg.msg_controllen = sizeof(outcmsg);
+       outmsg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL | MSG_EOR;
+
+       cmsg = CMSG_FIRSTHDR(&outmsg);
+       cmsg->cmsg_level = IPPROTO_SCTP;
+       cmsg->cmsg_type = SCTP_SNDRCV;
+       cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
+       sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
+       memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
+       sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid);
+       sinfo->sinfo_assoc_id = ni->assoc_id;
+       outmsg.msg_controllen = cmsg->cmsg_len;
+
+       spin_lock(&ni->writequeue_lock);
+       for (;;) {
+               if (list_empty(&ni->writequeue))
+                       break;
+               e = list_entry(ni->writequeue.next, struct writequeue_entry,
+                              list);
+               len = e->len;
+               offset = e->offset;
+               BUG_ON(len == 0 && e->users == 0);
+               spin_unlock(&ni->writequeue_lock);
+               kmap(e->page);
+
+               ret = 0;
+               if (len) {
+                       iov.iov_base = page_address(e->page)+offset;
+                       iov.iov_len = len;
+
+                       ret = kernel_sendmsg(sctp_con.sock, &outmsg, &iov, 1,
+                                            len);
+                       if (ret == -EAGAIN) {
+                               sctp_con.eagain_flag = 1;
+                               goto out;
+                       } else if (ret < 0)
+                               goto send_error;
+               } else {
+                       /* Don't starve people filling buffers */
+                       schedule();
+               }
+
+               spin_lock(&ni->writequeue_lock);
+               e->offset += ret;
+               e->len -= ret;
+
+               if (e->len == 0 && e->users == 0) {
+                       list_del(&e->list);
+                       free_entry(e);
+                       continue;
+               }
+       }
+       spin_unlock(&ni->writequeue_lock);
+ out:
+       return ret;
+
+ send_error:
+       log_print("Error sending to node %d %d", ni->nodeid, ret);
+       spin_lock(&ni->lock);
+       if (!test_and_set_bit(NI_INIT_PENDING, &ni->flags)) {
+               ni->assoc_id = 0;
+               spin_unlock(&ni->lock);
+               initiate_association(ni->nodeid);
+       } else
+               spin_unlock(&ni->lock);
+
+       return ret;
+}
+
+/* Try to send any messages that are pending */
+static void process_output_queue(void)
+{
+       struct list_head *list;
+       struct list_head *temp;
+
+       spin_lock_bh(&write_nodes_lock);
+       list_for_each_safe(list, temp, &write_nodes) {
+               struct nodeinfo *ni =
+                   list_entry(list, struct nodeinfo, write_list);
+               clear_bit(NI_WRITE_PENDING, &ni->flags);
+               list_del(&ni->write_list);
+
+               spin_unlock_bh(&write_nodes_lock);
+
+               send_to_sock(ni);
+               spin_lock_bh(&write_nodes_lock);
+       }
+       spin_unlock_bh(&write_nodes_lock);
+}
+
+/* Called after we've had -EAGAIN and been woken up */
+static void refill_write_queue(void)
+{
+       int i;
+
+       for (i=1; i<=max_nodeid; i++) {
+               struct nodeinfo *ni = nodeid2nodeinfo(i, 0);
+
+               if (ni) {
+                       if (!test_and_set_bit(NI_WRITE_PENDING, &ni->flags)) {
+                               spin_lock_bh(&write_nodes_lock);
+                               list_add_tail(&ni->write_list, &write_nodes);
+                               spin_unlock_bh(&write_nodes_lock);
+                       }
+               }
+       }
+}
+
+static void clean_one_writequeue(struct nodeinfo *ni)
+{
+       struct list_head *list;
+       struct list_head *temp;
+
+       spin_lock(&ni->writequeue_lock);
+       list_for_each_safe(list, temp, &ni->writequeue) {
+               struct writequeue_entry *e =
+                       list_entry(list, struct writequeue_entry, list);
+               list_del(&e->list);
+               free_entry(e);
+       }
+       spin_unlock(&ni->writequeue_lock);
+}
+
+static void clean_writequeues(void)
+{
+       int i;
+
+       for (i=1; i<=max_nodeid; i++) {
+               struct nodeinfo *ni = nodeid2nodeinfo(i, 0);
+               if (ni)
+                       clean_one_writequeue(ni);
+       }
+}
+
+
+static void dealloc_nodeinfo(void)
+{
+       int i;
+
+       for (i=1; i<=max_nodeid; i++) {
+               struct nodeinfo *ni = nodeid2nodeinfo(i, 0);
+               if (ni) {
+                       idr_remove(&nodeinfo_idr, i);
+                       kfree(ni);
+               }
+       }
+}
+
+int dlm_lowcomms_close(int nodeid)
+{
+       struct nodeinfo *ni;
+
+       ni = nodeid2nodeinfo(nodeid, 0);
+       if (!ni)
+               return -1;
+
+       spin_lock(&ni->lock);
+       if (ni->assoc_id) {
+               ni->assoc_id = 0;
+               /* Don't send shutdown here, sctp will just queue it
+                  till the node comes back up! */
+       }
+       spin_unlock(&ni->lock);
+
+       clean_one_writequeue(ni);
+       clear_bit(NI_INIT_PENDING, &ni->flags);
+       return 0;
+}
+
+static int write_list_empty(void)
+{
+       int status;
+
+       spin_lock_bh(&write_nodes_lock);
+       status = list_empty(&write_nodes);
+       spin_unlock_bh(&write_nodes_lock);
+
+       return status;
+}
+
+static int dlm_recvd(void *data)
+{
+       DECLARE_WAITQUEUE(wait, current);
+
+       while (!kthread_should_stop()) {
+               int count = 0;
+
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue(&lowcomms_recv_wait, &wait);
+               if (!test_bit(CF_READ_PENDING, &sctp_con.flags))
+                       schedule();
+               remove_wait_queue(&lowcomms_recv_wait, &wait);
+               set_current_state(TASK_RUNNING);
+
+               if (test_and_clear_bit(CF_READ_PENDING, &sctp_con.flags)) {
+                       int ret;
+
+                       do {
+                               ret = receive_from_sock();
+
+                               /* Don't starve out everyone else */
+                               if (++count >= MAX_RX_MSG_COUNT) {
+                                       schedule();
+                                       count = 0;
+                               }
+                       } while (!kthread_should_stop() && ret >= 0);
+               }
+               schedule();
+       }
+
+       return 0;
+}
+
+static int dlm_sendd(void *data)
+{
+       DECLARE_WAITQUEUE(wait, current);
+
+       add_wait_queue(sctp_con.sock->sk->sk_sleep, &wait);
+
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               if (write_list_empty())
+                       schedule();
+               set_current_state(TASK_RUNNING);
+
+               if (sctp_con.eagain_flag) {
+                       sctp_con.eagain_flag = 0;
+                       refill_write_queue();
+               }
+               process_output_queue();
+       }
+
+       remove_wait_queue(sctp_con.sock->sk->sk_sleep, &wait);
+
+       return 0;
+}
+
+static void daemons_stop(void)
+{
+       kthread_stop(recv_task);
+       kthread_stop(send_task);
+}
+
+static int daemons_start(void)
+{
+       struct task_struct *p;
+       int error;
+
+       p = kthread_run(dlm_recvd, NULL, "dlm_recvd");
+       if (IS_ERR(p)) {
+               error = PTR_ERR(p);
+               log_print("can't start dlm_recvd %d", error);
+               return error;
+       }
+       recv_task = p;
+
+       p = kthread_run(dlm_sendd, NULL, "dlm_sendd");
+       if (IS_ERR(p)) {
+               error = PTR_ERR(p);
+               log_print("can't start dlm_sendd %d", error);
+               kthread_stop(recv_task);
+               return error;
+       }
+       send_task = p;
+
+       return 0;
+}
+
+/*
+ * This is quite likely to sleep...
+ */
+int dlm_lowcomms_start(void)
+{
+       int error;
+
+       error = init_sock();
+       if (error)
+               goto fail_sock;
+       error = daemons_start();
+       if (error)
+               goto fail_sock;
+       atomic_set(&accepting, 1);
+       return 0;
+
+ fail_sock:
+       close_connection();
+       return error;
+}
+
+/* Set all the activity flags to prevent any socket activity. */
+
+void dlm_lowcomms_stop(void)
+{
+       atomic_set(&accepting, 0);
+       sctp_con.flags = 0x7;
+       daemons_stop();
+       clean_writequeues();
+       close_connection();
+       dealloc_nodeinfo();
+       max_nodeid = 0;
+}
+
+int dlm_lowcomms_init(void)
+{
+       init_waitqueue_head(&lowcomms_recv_wait);
+       spin_lock_init(&write_nodes_lock);
+       INIT_LIST_HEAD(&write_nodes);
+       init_rwsem(&nodeinfo_lock);
+       return 0;
+}
+
+void dlm_lowcomms_exit(void)
+{
+       int i;
+
+       for (i = 0; i < dlm_local_count; i++)
+               kfree(dlm_local_addr[i]);
+       dlm_local_count = 0;
+       dlm_local_nodeid = 0;
+}
+
diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
new file mode 100644 (file)
index 0000000..6c04bb0
--- /dev/null
@@ -0,0 +1,26 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+#ifndef __LOWCOMMS_DOT_H__
+#define __LOWCOMMS_DOT_H__
+
+int dlm_lowcomms_init(void);
+void dlm_lowcomms_exit(void);
+int dlm_lowcomms_start(void);
+void dlm_lowcomms_stop(void);
+int dlm_lowcomms_close(int nodeid);
+void *dlm_lowcomms_get_buffer(int nodeid, int len, int allocation, char **ppc);
+void dlm_lowcomms_commit_buffer(void *mh);
+
+#endif                         /* __LOWCOMMS_DOT_H__ */
+
diff --git a/fs/dlm/lvb_table.h b/fs/dlm/lvb_table.h
new file mode 100644 (file)
index 0000000..cc3e92f
--- /dev/null
@@ -0,0 +1,18 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+#ifndef __LVB_TABLE_DOT_H__
+#define __LVB_TABLE_DOT_H__
+
+extern const int dlm_lvb_operations[8][8];
+
+#endif
diff --git a/fs/dlm/main.c b/fs/dlm/main.c
new file mode 100644 (file)
index 0000000..a8da8dc
--- /dev/null
@@ -0,0 +1,97 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+#include "dlm_internal.h"
+#include "lockspace.h"
+#include "lock.h"
+#include "user.h"
+#include "memory.h"
+#include "lowcomms.h"
+#include "config.h"
+
+#ifdef CONFIG_DLM_DEBUG
+int dlm_register_debugfs(void);
+void dlm_unregister_debugfs(void);
+#else
+static inline int dlm_register_debugfs(void) { return 0; }
+static inline void dlm_unregister_debugfs(void) { }
+#endif
+
+static int __init init_dlm(void)
+{
+       int error;
+
+       error = dlm_memory_init();
+       if (error)
+               goto out;
+
+       error = dlm_lockspace_init();
+       if (error)
+               goto out_mem;
+
+       error = dlm_config_init();
+       if (error)
+               goto out_lockspace;
+
+       error = dlm_register_debugfs();
+       if (error)
+               goto out_config;
+
+       error = dlm_lowcomms_init();
+       if (error)
+               goto out_debug;
+
+       error = dlm_user_init();
+       if (error)
+               goto out_lowcomms;
+
+       printk("DLM (built %s %s) installed\n", __DATE__, __TIME__);
+
+       return 0;
+
+ out_lowcomms:
+       dlm_lowcomms_exit();
+ out_debug:
+       dlm_unregister_debugfs();
+ out_config:
+       dlm_config_exit();
+ out_lockspace:
+       dlm_lockspace_exit();
+ out_mem:
+       dlm_memory_exit();
+ out:
+       return error;
+}
+
+static void __exit exit_dlm(void)
+{
+       dlm_user_exit();
+       dlm_lowcomms_exit();
+       dlm_config_exit();
+       dlm_memory_exit();
+       dlm_lockspace_exit();
+       dlm_unregister_debugfs();
+}
+
+module_init(init_dlm);
+module_exit(exit_dlm);
+
+MODULE_DESCRIPTION("Distributed Lock Manager");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL_GPL(dlm_new_lockspace);
+EXPORT_SYMBOL_GPL(dlm_release_lockspace);
+EXPORT_SYMBOL_GPL(dlm_lock);
+EXPORT_SYMBOL_GPL(dlm_unlock);
+
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
new file mode 100644 (file)
index 0000000..a3f7de7
--- /dev/null
@@ -0,0 +1,327 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+#include "dlm_internal.h"
+#include "lockspace.h"
+#include "member.h"
+#include "recoverd.h"
+#include "recover.h"
+#include "rcom.h"
+#include "config.h"
+
+/*
+ * Following called by dlm_recoverd thread
+ */
+
+static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new)
+{
+       struct dlm_member *memb = NULL;
+       struct list_head *tmp;
+       struct list_head *newlist = &new->list;
+       struct list_head *head = &ls->ls_nodes;
+
+       list_for_each(tmp, head) {
+               memb = list_entry(tmp, struct dlm_member, list);
+               if (new->nodeid < memb->nodeid)
+                       break;
+       }
+
+       if (!memb)
+               list_add_tail(newlist, head);
+       else
+               /* list_add_tail() inserts newlist immediately before tmp,
+                  the first member with a higher nodeid (or the list head
+                  if there is none) */
+               list_add_tail(newlist, tmp);
+}
+
+static int dlm_add_member(struct dlm_ls *ls, int nodeid)
+{
+       struct dlm_member *memb;
+       int w;
+
+       memb = kzalloc(sizeof(struct dlm_member), GFP_KERNEL);
+       if (!memb)
+               return -ENOMEM;
+
+       w = dlm_node_weight(ls->ls_name, nodeid);
+       if (w < 0) {
+               kfree(memb);
+               return w;
+       }
+
+       memb->nodeid = nodeid;
+       memb->weight = w;
+       add_ordered_member(ls, memb);
+       ls->ls_num_nodes++;
+       return 0;
+}
+
+static void dlm_remove_member(struct dlm_ls *ls, struct dlm_member *memb)
+{
+       list_move(&memb->list, &ls->ls_nodes_gone);
+       ls->ls_num_nodes--;
+}
+
+static int dlm_is_member(struct dlm_ls *ls, int nodeid)
+{
+       struct dlm_member *memb;
+
+       list_for_each_entry(memb, &ls->ls_nodes, list) {
+               if (memb->nodeid == nodeid)
+                       return 1;
+       }
+       return 0;
+}
+
+int dlm_is_removed(struct dlm_ls *ls, int nodeid)
+{
+       struct dlm_member *memb;
+
+       list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
+               if (memb->nodeid == nodeid)
+                       return 1;
+       }
+       return 0;
+}
+
+static void clear_memb_list(struct list_head *head)
+{
+       struct dlm_member *memb;
+
+       while (!list_empty(head)) {
+               memb = list_entry(head->next, struct dlm_member, list);
+               list_del(&memb->list);
+               kfree(memb);
+       }
+}
+
+void dlm_clear_members(struct dlm_ls *ls)
+{
+       clear_memb_list(&ls->ls_nodes);
+       ls->ls_num_nodes = 0;
+}
+
+void dlm_clear_members_gone(struct dlm_ls *ls)
+{
+       clear_memb_list(&ls->ls_nodes_gone);
+}
+
+static void make_member_array(struct dlm_ls *ls)
+{
+       struct dlm_member *memb;
+       int i, w, x = 0, total = 0, all_zero = 0, *array;
+
+       kfree(ls->ls_node_array);
+       ls->ls_node_array = NULL;
+
+       list_for_each_entry(memb, &ls->ls_nodes, list) {
+               if (memb->weight)
+                       total += memb->weight;
+       }
+
+       /* all nodes revert to weight of 1 if all have weight 0 */
+
+       if (!total) {
+               total = ls->ls_num_nodes;
+               all_zero = 1;
+       }
+
+       ls->ls_total_weight = total;
+
+       array = kmalloc(sizeof(int) * total, GFP_KERNEL);
+       if (!array)
+               return;
+
+       list_for_each_entry(memb, &ls->ls_nodes, list) {
+               if (!all_zero && !memb->weight)
+                       continue;
+
+               if (all_zero)
+                       w = 1;
+               else
+                       w = memb->weight;
+
+               DLM_ASSERT(x < total, printk("total %d x %d\n", total, x););
+
+               for (i = 0; i < w; i++)
+                       array[x++] = memb->nodeid;
+       }
+
+       ls->ls_node_array = array;
+}
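+
+/*
+ * Example (illustrative): members with weights 2, 0 and 1 yield a node
+ * array of total_weight 3 in which the weight-2 node appears twice and the
+ * weight-0 node not at all; if every member has weight 0, each one appears
+ * exactly once.
+ */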
+
+/* send a status request to all members just to establish comms connections */
+
+static int ping_members(struct dlm_ls *ls)
+{
+       struct dlm_member *memb;
+       int error = 0;
+
+       list_for_each_entry(memb, &ls->ls_nodes, list) {
+               error = dlm_recovery_stopped(ls);
+               if (error)
+                       break;
+               error = dlm_rcom_status(ls, memb->nodeid);
+               if (error)
+                       break;
+       }
+       if (error)
+               log_debug(ls, "ping_members aborted %d last nodeid %d",
+                         error, ls->ls_recover_nodeid);
+       return error;
+}
+
+int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
+{
+       struct dlm_member *memb,&nbs