1 /******************************************************************************
2 *******************************************************************************
4 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5 ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
7 ** This copyrighted material is made available to anyone wishing to use,
8 ** modify, copy, or redistribute it subject to the terms and conditions
9 ** of the GNU General Public License v.2.
11 *******************************************************************************
12 ******************************************************************************/
17 * This is the userland interface to the DLM.
19 * The locking is done via a misc char device (find the
20 * registered minor number in /proc/misc).
22 * User code should not use this interface directly but
23 * call the library routines in libdlm.a instead.
27 #include <linux/miscdevice.h>
28 #include <linux/init.h>
29 #include <linux/wait.h>
30 #include <linux/module.h>
31 #include <linux/file.h>
33 #include <linux/poll.h>
34 #include <linux/signal.h>
35 #include <linux/spinlock.h>
36 #include <linux/idr.h>
38 #include <linux/dlm.h>
39 #include <linux/dlm_device.h>
41 #include "lvb_table.h"
/* Forward-declared fops for per-lockspace misc devices (defined near EOF). */
43 static struct file_operations _dlm_fops;
/* Prefix used when building the misc device name: "dlm_<lockspace>". */
44 static const char *name_prefix="dlm";
/* All registered userland lockspaces; protected by user_ls_lock. */
45 static struct list_head user_ls_list;
46 static struct mutex user_ls_lock;
48 /* Lock infos are stored in here indexed by lock ID */
49 static DEFINE_IDR(lockinfo_idr);
/* rwlock guarding lockinfo_idr (readers: lookups; writers: add/remove). */
50 static rwlock_t lockinfo_lock;
52 /* Flags in li_flags */
53 #define LI_FLAG_COMPLETE 1
54 #define LI_FLAG_FIRSTLOCK 2
55 #define LI_FLAG_PERSISTENT 3
56 #define LI_FLAG_ONLIST 4
58 /* flags in ls_flags*/
59 #define LS_FLAG_DELETED 1
60 #define LS_FLAG_AUTOFREE 2
/* Magic value ("SYS$") used to validate lock_info structs from user lkids. */
63 #define LOCKINFO_MAGIC 0x53595324
/* Per-lock bookkeeping (struct lock_info): one per active lock held via
   this interface.  NOTE(review): the struct's opening line is outside this
   view; members below are its body. */
70 struct dlm_lksb li_lksb;
71 wait_queue_head_t li_waitq;
72 unsigned long li_flags;
/* User-space AST/BAST routine addresses and params; opaque to the kernel,
   passed back verbatim in dlm_lock_result. */
73 void __user *li_castparam;
74 void __user *li_castaddr;
75 void __user *li_bastparam;
76 void __user *li_bastaddr;
/* Pending BAST info staged during a conversion; activated on success. */
77 void __user *li_pend_bastparam;
78 void __user *li_pend_bastaddr;
79 struct list_head li_ownerqueue;
80 struct file_info *li_file;
81 struct dlm_lksb __user *li_user_lksb;
/* Held locked while the initial dlm_lock() is in flight so the AST
   routine can wait for it to finish (see ast_routine/do_user_unlock). */
82 struct semaphore li_firstlock;
85 /* A queued AST no less */
/* Result payload copied to userland by dlm_read(). */
87 struct dlm_lock_result result;
88 struct list_head list;
90 uint32_t progress; /* How much has been read */
93 /* One of these per userland lockspace */
99 /* Passed into misc_register() */
100 struct miscdevice ls_miscinfo;
/* Link on user_ls_list; protected by user_ls_lock. */
101 struct list_head ls_list;
104 /* misc_device info for the control device */
105 static struct miscdevice ctl_device;
108 * Stuff we hang off the file struct.
109 * The first two are to cope with unlocking all the
110 * locks held by a process when it dies.
113 struct list_head fi_li_list; /* List of active lock_infos */
114 spinlock_t fi_li_lock;
115 struct list_head fi_ast_list; /* Queue of ASTs to be delivered */
116 spinlock_t fi_ast_lock;
/* Readers of the AST queue (dlm_read/dlm_poll) sleep here. */
117 wait_queue_head_t fi_wait;
118 struct user_ls *fi_ls;
119 atomic_t fi_refcnt; /* Number of users */
120 unsigned long fi_flags; /* Bit 1 means the device is open */
124 /* get and put ops for file_info.
125 Actually I don't really like "get" and "put", but everyone
126 else seems to use them and I can't think of anything
127 nicer at the moment */
/* Take a reference on a file_info. */
128 static void get_file_info(struct file_info *f)
130 atomic_inc(&f->fi_refcnt);
/* Drop a reference; the last put frees the struct (free is outside view). */
133 static void put_file_info(struct file_info *f)
135 if (atomic_dec_and_test(&f->fi_refcnt))
/* Tear down a lock_info: drop its file reference, unhook it from the
   lockid IDR, free its LVB (if any) and release the module reference
   taken by allocate_lockinfo(). */
139 static void release_lockinfo(struct lock_info *li)
141 put_file_info(li->li_file);
143 write_lock(&lockinfo_lock);
144 idr_remove(&lockinfo_idr, li->li_lksb.sb_lkid);
145 write_unlock(&lockinfo_lock);
/* kfree(NULL) is safe, but the original guards explicitly. */
147 if (li->li_lksb.sb_lvbptr)
148 kfree(li->li_lksb.sb_lvbptr);
/* Balances try_module_get() in allocate_lockinfo(). */
151 module_put(THIS_MODULE);
/* Look up a lock_info by its lock ID under the read lock.
   Returns NULL if the lockid is unknown.  NOTE(review): no reference is
   taken here, so the caller relies on external lifetime guarantees. */
154 static struct lock_info *get_lockinfo(uint32_t lockid)
156 struct lock_info *li;
158 read_lock(&lockinfo_lock);
159 li = idr_find(&lockinfo_idr, lockid);
160 read_unlock(&lockinfo_lock);
/* Insert a lock_info into the IDR keyed by its DLM-assigned lkid.
   Fails if the id is already present or if the IDR cannot hand back
   exactly that id.  NOTE(review): idr_pre_get() is called with GFP_KERNEL
   while holding lockinfo_lock (a spinning rwlock) — a known sleeping-in-
   atomic hazard in this era of the IDR API; flagged, not fixed, since
   several lines of this function are not visible here. */
165 static int add_lockinfo(struct lock_info *li)
171 write_lock(&lockinfo_lock);
/* Refuse duplicates: the lkid must not already be mapped. */
173 if (idr_find(&lockinfo_idr, li->li_lksb.sb_lkid))
177 r = idr_pre_get(&lockinfo_idr, GFP_KERNEL);
181 r = idr_get_new_above(&lockinfo_idr, li, li->li_lksb.sb_lkid, &n);
/* The IDR may allocate an id above the requested one; only an exact
   match is usable, so back out anything else. */
185 if (n != li->li_lksb.sb_lkid) {
186 idr_remove(&lockinfo_idr, n);
193 write_unlock(&lockinfo_lock);
/* Walk user_ls_list for the lockspace bound to a misc minor number.
   Caller must hold user_ls_lock (hence the __ prefix). */
199 static struct user_ls *__find_lockspace(int minor)
201 struct user_ls *lsinfo;
203 list_for_each_entry(lsinfo, &user_ls_list, ls_list) {
204 if (lsinfo->ls_miscinfo.minor == minor)
210 /* Find a lockspace struct given the device minor number */
/* Locked wrapper around __find_lockspace(). */
211 static struct user_ls *find_lockspace(int minor)
213 struct user_ls *lsinfo;
215 mutex_lock(&user_ls_lock);
216 lsinfo = __find_lockspace(minor);
217 mutex_unlock(&user_ls_lock);
/* Publish a new lockspace on the global list. */
222 static void add_lockspace_to_list(struct user_ls *lsinfo)
224 mutex_lock(&user_ls_lock);
225 list_add(&lsinfo->ls_list, &user_ls_list);
226 mutex_unlock(&user_ls_lock);
229 /* Register a lockspace with the DLM and create a misc
230 device for userland to access it */
/* On success *ls points at the new user_ls (already on user_ls_list).
   Error paths visible here free the name and release the lockspace;
   some intermediate lines are outside this view. */
231 static int register_lockspace(char *name, struct user_ls **ls, int flags)
233 struct user_ls *newls;
/* "+2": one for the '_' separator, one for the trailing NUL. */
237 namelen = strlen(name)+strlen(name_prefix)+2;
239 newls = kzalloc(sizeof(struct user_ls), GFP_KERNEL);
243 newls->ls_miscinfo.name = kzalloc(namelen, GFP_KERNEL);
244 if (!newls->ls_miscinfo.name) {
/* Create the kernel-side lockspace first ... */
249 status = dlm_new_lockspace(name, strlen(name), &newls->ls_lockspace, 0,
252 kfree(newls->ls_miscinfo.name);
/* ... then build the device name "dlm_<name>" and register the device. */
257 snprintf((char*)newls->ls_miscinfo.name, namelen, "%s_%s",
260 newls->ls_miscinfo.fops = &_dlm_fops;
261 newls->ls_miscinfo.minor = MISC_DYNAMIC_MINOR;
263 status = misc_register(&newls->ls_miscinfo);
265 printk(KERN_ERR "dlm: misc register failed for %s\n", name);
266 dlm_release_lockspace(newls->ls_lockspace, 0);
267 kfree(newls->ls_miscinfo.name);
/* AUTOFREE lockspaces are destroyed automatically on last close. */
272 if (flags & DLM_USER_LSFLG_AUTOFREE)
273 set_bit(LS_FLAG_AUTOFREE, &newls->ls_flags);
275 add_lockspace_to_list(newls);
280 /* Called with the user_ls_lock mutex held */
/* Release the DLM lockspace and deregister its misc device, then mark
   the user_ls deleted.  The struct itself is only freed here when no
   opens remain; otherwise dlm_close() frees it on last close. */
281 static int unregister_lockspace(struct user_ls *lsinfo, int force)
285 status = dlm_release_lockspace(lsinfo->ls_lockspace, force);
289 status = misc_deregister(&lsinfo->ls_miscinfo);
293 list_del(&lsinfo->ls_list);
/* Readers (dlm_read) use this to return EOF after deletion. */
294 set_bit(LS_FLAG_DELETED, &lsinfo->ls_flags);
295 lsinfo->ls_lockspace = NULL;
296 if (atomic_read(&lsinfo->ls_refcnt) == 0) {
297 kfree(lsinfo->ls_miscinfo.name);
304 /* Add it to userland's AST queue */
/* Snapshot the lock's current lksb plus the user's AST routine/param into
   an ast_info and queue it on the owning file, then wake any reader.
   NOTE(review): the kzalloc failure check is outside this view. */
305 static void add_to_astqueue(struct lock_info *li, void *astaddr, void *astparam,
308 struct ast_info *ast = kzalloc(sizeof(struct ast_info), GFP_KERNEL);
312 ast->result.user_astparam = astparam;
313 ast->result.user_astaddr = astaddr;
314 ast->result.user_lksb = li->li_user_lksb;
315 memcpy(&ast->result.lksb, &li->li_lksb, sizeof(struct dlm_lksb));
316 ast->lvb_updated = lvb_updated;
318 spin_lock(&li->li_file->fi_ast_lock);
319 list_add_tail(&ast->list, &li->li_file->fi_ast_list);
320 spin_unlock(&li->li_file->fi_ast_lock);
/* Wake dlm_read()/dlm_poll() waiters. */
321 wake_up_interruptible(&li->li_file->fi_wait);
/* Kernel blocking-AST callback: forward to userland only if the caller
   registered a blocking AST address (param is the lock_info, may be NULL
   for locks the kernel completed after the owner went away). */
324 static void bast_routine(void *param, int mode)
326 struct lock_info *li = param;
328 if (li && li->li_bastaddr)
329 add_to_astqueue(li, li->li_bastaddr, li->li_bastparam, 0);
333 * This is the kernel's AST routine.
334 * All lock, unlock & query operations complete here.
335 * The only synchronous ops are those done during device close.
337 static void ast_routine(void *param)
339 struct lock_info *li = param;
341 /* Param may be NULL if a persistent lock is unlocked by someone else */
345 /* If this is a successful conversion then activate the blocking ast
346 * args from the conversion request */
347 if (!test_bit(LI_FLAG_FIRSTLOCK, &li->li_flags) &&
348 li->li_lksb.sb_status == 0) {
350 li->li_bastparam = li->li_pend_bastparam;
351 li->li_bastaddr = li->li_pend_bastaddr;
352 li->li_pend_bastaddr = NULL;
355 /* If it's an async request then post data to the user's AST queue. */
356 if (li->li_castaddr) {
359 /* See if the lvb has been updated */
360 if (dlm_lvb_operations[li->li_grmode+1][li->li_rqmode+1] == 1)
/* On success the requested mode becomes the granted mode. */
363 if (li->li_lksb.sb_status == 0)
364 li->li_grmode = li->li_rqmode;
366 /* Only queue AST if the device is still open */
367 if (test_bit(1, &li->li_file->fi_flags))
368 add_to_astqueue(li, li->li_castaddr, li->li_castparam,
371 /* If it's a new lock operation that failed, then
372 * remove it from the owner queue and free the
375 if (test_and_clear_bit(LI_FLAG_FIRSTLOCK, &li->li_flags) &&
376 li->li_lksb.sb_status != 0) {
378 /* Wait till dlm_lock() has finished */
/* down/up pair: do_user_lock() holds li_firstlock until its
   bookkeeping is done; this rendezvous avoids freeing li early. */
379 down(&li->li_firstlock);
380 up(&li->li_firstlock);
382 spin_lock(&li->li_file->fi_li_lock);
383 list_del(&li->li_ownerqueue);
384 clear_bit(LI_FLAG_ONLIST, &li->li_flags);
385 spin_unlock(&li->li_file->fi_li_lock);
386 release_lockinfo(li);
389 /* Free unlocks & queries */
390 if (li->li_lksb.sb_status == -DLM_EUNLOCK ||
391 li->li_cmd == DLM_USER_QUERY) {
392 release_lockinfo(li);
395 /* Synchronous request, just wake up the caller */
396 set_bit(LI_FLAG_COMPLETE, &li->li_flags);
397 wake_up_interruptible(&li->li_waitq);
402 * Wait for the lock op to complete and return the status.
/* Sleeps (interruptibly) until ast_routine() sets LI_FLAG_COMPLETE,
   then returns the DLM status from the lksb. */
404 static int wait_for_ast(struct lock_info *li)
406 /* Wait for the AST routine to complete */
407 set_task_state(current, TASK_INTERRUPTIBLE);
408 while (!test_bit(LI_FLAG_COMPLETE, &li->li_flags))
411 set_task_state(current, TASK_RUNNING);
413 return li->li_lksb.sb_status;
417 /* Open on control device */
/* The control device carries no per-open state: dlm_write() uses a NULL
   private_data to distinguish control commands from lockspace commands. */
418 static int dlm_ctl_open(struct inode *inode, struct file *file)
420 file->private_data = NULL;
424 /* Close on control device */
425 static int dlm_ctl_close(struct inode *inode, struct file *file)
430 /* Open on lockspace device */
/* Allocate a per-open file_info, bind it to the lockspace found via the
   device minor, and mark the open flag so ASTs get delivered. */
431 static int dlm_open(struct inode *inode, struct file *file)
434 struct user_ls *lsinfo;
436 lsinfo = find_lockspace(iminor(inode));
440 f = kzalloc(sizeof(struct file_info), GFP_KERNEL);
/* Count this open against the lockspace (balanced in dlm_close). */
444 atomic_inc(&lsinfo->ls_refcnt);
445 INIT_LIST_HEAD(&f->fi_li_list);
446 INIT_LIST_HEAD(&f->fi_ast_list);
447 spin_lock_init(&f->fi_li_lock);
448 spin_lock_init(&f->fi_ast_lock);
449 init_waitqueue_head(&f->fi_wait);
/* Bit 1 of fi_flags == "device open"; checked before queuing ASTs. */
453 set_bit(1, &f->fi_flags);
455 file->private_data = f;
460 /* Check the user's version matches ours */
/* Reject requests whose major differs, or whose minor is newer than the
   kernel's, logging both versions for diagnosis. */
461 static int check_version(struct dlm_write_request *req)
463 if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
464 (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
465 req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
467 printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
468 "user (%d.%d.%d) kernel (%d.%d.%d)\n",
474 DLM_DEVICE_VERSION_MAJOR,
475 DLM_DEVICE_VERSION_MINOR,
476 DLM_DEVICE_VERSION_PATCH);
482 /* Close on lockspace device */
/* Final cleanup for one open of a lockspace device: stop AST delivery,
   force-unlock (or orphan) every lock this file still owns, then drop
   the lockspace reference, possibly freeing/auto-destroying it. */
483 static int dlm_close(struct inode *inode, struct file *file)
485 struct file_info *f = file->private_data;
487 struct lock_info *old_li, *safe;
490 struct user_ls *lsinfo;
491 DECLARE_WAITQUEUE(wq, current);
493 lsinfo = find_lockspace(iminor(inode));
497 /* Mark this closed so that ASTs will not be delivered any more */
498 clear_bit(1, &f->fi_flags);
500 /* Block signals while we are doing this */
501 sigfillset(&allsigs);
502 sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
504 /* We use our own lock_info struct here, so that any
505 * outstanding "real" ASTs will be delivered with the
506 * corresponding "real" params, thus freeing the lock_info
507 * that belongs the lock. This catches the corner case where
508 * a lock is BUSY when we try to unlock it here
510 memset(&li, 0, sizeof(li));
511 clear_bit(LI_FLAG_COMPLETE, &li.li_flags);
512 init_waitqueue_head(&li.li_waitq);
513 add_wait_queue(&li.li_waitq, &wq);
516 * Free any outstanding locks, they are on the
517 * list in LIFO order so there should be no problems
518 * about unlocking parents before children.
520 list_for_each_entry_safe(old_li, safe, &f->fi_li_list, li_ownerqueue) {
524 /* Don't unlock persistent locks, just mark them orphaned */
525 if (test_bit(LI_FLAG_PERSISTENT, &old_li->li_flags)) {
526 list_del(&old_li->li_ownerqueue);
528 /* Update master copy */
529 /* TODO: Check locking core updates the local and
530 remote ORPHAN flags */
531 li.li_lksb.sb_lkid = old_li->li_lksb.sb_lkid;
/* Same-mode convert with DLM_LKF_ORPHAN flags the lock
   as orphaned on the master without releasing it. */
532 status = dlm_lock(f->fi_ls->ls_lockspace,
533 old_li->li_grmode, &li.li_lksb,
534 DLM_LKF_CONVERT|DLM_LKF_ORPHAN,
535 NULL, 0, 0, ast_routine, NULL,
538 printk("dlm: Error orphaning lock %x: %d\n",
539 old_li->li_lksb.sb_lkid, status);
541 /* But tidy our references in it */
542 release_lockinfo(old_li);
546 clear_bit(LI_FLAG_COMPLETE, &li.li_flags);
548 flags = DLM_LKF_FORCEUNLOCK;
/* Invalidate the value block when dropping a write-mode lock. */
549 if (old_li->li_grmode >= DLM_LOCK_PW)
550 flags |= DLM_LKF_IVVALBLK;
552 status = dlm_unlock(f->fi_ls->ls_lockspace,
553 old_li->li_lksb.sb_lkid, flags,
556 /* Must wait for it to complete as the next lock could be its
561 /* Unlock succeeded, free the lock_info struct. */
563 release_lockinfo(old_li);
566 remove_wait_queue(&li.li_waitq, &wq);
569 * If this is the last reference to the lockspace
570 * then free the struct. If it's an AUTOFREE lockspace
571 * then free the whole thing.
573 mutex_lock(&user_ls_lock);
574 if (atomic_dec_and_test(&lsinfo->ls_refcnt)) {
576 if (lsinfo->ls_lockspace) {
577 if (test_bit(LS_FLAG_AUTOFREE, &lsinfo->ls_flags)) {
578 unregister_lockspace(lsinfo, 1);
/* Lockspace already unregistered elsewhere: just free the name. */
581 kfree(lsinfo->ls_miscinfo.name);
585 mutex_unlock(&user_ls_lock);
588 /* Restore signals */
589 sigprocmask(SIG_SETMASK, &tmpsig, NULL);
/* DLM_USER_CREATE_LOCKSPACE handler: privileged; on success the misc
   minor number is returned so userland can open the new device. */
595 static int do_user_create_lockspace(struct file_info *fi, uint8_t cmd,
596 struct dlm_lspace_params *kparams)
599 struct user_ls *lsinfo;
601 if (!capable(CAP_SYS_ADMIN))
604 status = register_lockspace(kparams->name, &lsinfo, kparams->flags);
606 /* If it succeeded then return the minor number */
608 status = lsinfo->ls_miscinfo.minor;
/* DLM_USER_REMOVE_LOCKSPACE handler: privileged; looks the lockspace up
   by minor under user_ls_lock and unregisters it (optionally forced). */
613 static int do_user_remove_lockspace(struct file_info *fi, uint8_t cmd,
614 struct dlm_lspace_params *kparams)
618 struct user_ls *lsinfo;
620 if (!capable(CAP_SYS_ADMIN))
623 mutex_lock(&user_ls_lock)
624 lsinfo = __find_lockspace(kparams->minor);
626 mutex_unlock(&user_ls_lock);
630 if (kparams->flags & DLM_USER_LSFLG_FORCEFREE)
633 status = unregister_lockspace(lsinfo, force);
634 mutex_unlock(&user_ls_lock);
639 /* Read call, might block if no ASTs are waiting.
640 * It will only ever return one message at a time, regardless
641 * of how many are pending.
643 static ssize_t dlm_read(struct file *file, char __user *buffer, size_t count,
646 struct file_info *fi = file->private_data;
647 struct ast_info *ast;
650 DECLARE_WAITQUEUE(wait, current);
/* The buffer must at least hold the fixed header. */
652 if (count < sizeof(struct dlm_lock_result))
655 spin_lock(&fi->fi_ast_lock);
656 if (list_empty(&fi->fi_ast_list)) {
659 * Return EOF if the lockspace been deleted.
661 if (test_bit(LS_FLAG_DELETED, &fi->fi_ls->ls_flags))
664 if (file->f_flags & O_NONBLOCK) {
665 spin_unlock(&fi->fi_ast_lock);
/* Classic sleep loop: drop the lock while scheduling, re-take
   it before re-testing the queue. */
669 add_wait_queue(&fi->fi_wait, &wait);
672 set_current_state(TASK_INTERRUPTIBLE);
673 if (list_empty(&fi->fi_ast_list) &&
674 !signal_pending(current)) {
676 spin_unlock(&fi->fi_ast_lock);
678 spin_lock(&fi->fi_ast_lock);
682 current->state = TASK_RUNNING;
683 remove_wait_queue(&fi->fi_wait, &wait);
685 if (signal_pending(current)) {
686 spin_unlock(&fi->fi_ast_lock);
/* Dequeue exactly one AST message. */
691 ast = list_entry(fi->fi_ast_list.next, struct ast_info, list);
692 list_del(&ast->list);
693 spin_unlock(&fi->fi_ast_lock);
695 /* Work out the size of the returned data */
696 data_size = sizeof(struct dlm_lock_result);
697 if (ast->lvb_updated && ast->result.lksb.sb_lvbptr)
698 data_size += DLM_USER_LVB_LEN;
700 offset = sizeof(struct dlm_lock_result);
702 /* Room for the extended data ? */
703 if (count >= data_size) {
705 if (ast->lvb_updated && ast->result.lksb.sb_lvbptr) {
706 if (copy_to_user(buffer+offset,
707 ast->result.lksb.sb_lvbptr,
710 ast->result.lvb_offset = offset;
711 offset += DLM_USER_LVB_LEN;
715 ast->result.length = data_size;
716 /* Copy the header now it has all the offsets in it */
717 if (copy_to_user(buffer, &ast->result, sizeof(struct dlm_lock_result)))
720 /* If we only returned a header and there's more to come then put it
722 if (count < data_size) {
/* Re-queue at the head so the remainder is read next time. */
723 spin_lock(&fi->fi_ast_lock);
724 list_add(&ast->list, &fi->fi_ast_list);
725 spin_unlock(&fi->fi_ast_lock);
/* poll/select support: readable whenever the AST queue is non-empty. */
731 static unsigned int dlm_poll(struct file *file, poll_table *wait)
733 struct file_info *fi = file->private_data;
735 poll_wait(file, &fi->fi_wait, wait);
737 spin_lock(&fi->fi_ast_lock);
738 if (!list_empty(&fi->fi_ast_list)) {
739 spin_unlock(&fi->fi_ast_lock);
740 return POLLIN | POLLRDNORM;
743 spin_unlock(&fi->fi_ast_lock);
/* Allocate and initialise a lock_info for a new request.  Takes a module
   reference (released in release_lockinfo) so the module cannot unload
   while locks are outstanding.  Returns NULL on failure. */
747 static struct lock_info *allocate_lockinfo(struct file_info *fi, uint8_t cmd,
748 struct dlm_lock_params *kparams)
750 struct lock_info *li;
752 if (!try_module_get(THIS_MODULE))
755 li = kzalloc(sizeof(struct lock_info), GFP_KERNEL);
/* Magic lets later lookups validate a user-supplied lkid. */
757 li->li_magic = LOCKINFO_MAGIC;
763 li->li_pend_bastparam = NULL;
764 li->li_pend_bastaddr = NULL;
765 li->li_castaddr = NULL;
766 li->li_castparam = NULL;
767 li->li_lksb.sb_lvbptr = NULL;
768 li->li_bastaddr = kparams->bastaddr;
769 li->li_bastparam = kparams->bastparam;
/* DLM_USER_LOCK handler: validate params, find or create the lock_info
   (new lock, conversion, or adoption of an orphaned persistent lock),
   copy in any value block, and issue the asynchronous dlm_lock().
   Returns the lkid on success so userland has it immediately. */
776 static int do_user_lock(struct file_info *fi, uint8_t cmd,
777 struct dlm_lock_params *kparams)
779 struct lock_info *li;
783 * Validate things that we need to have correct.
/* A completion AST address is mandatory for async operation. */
785 if (!kparams->castaddr)
791 /* Persistent child locks are not available yet */
792 if ((kparams->flags & DLM_LKF_PERSISTENT) && kparams->parent)
795 /* For conversions, there should already be a lockinfo struct,
796 unless we are adopting an orphaned persistent lock */
797 if (kparams->flags & DLM_LKF_CONVERT) {
799 li = get_lockinfo(kparams->lkid);
801 /* If this is a persistent lock we will have to create a
803 if (!li && (kparams->flags & DLM_LKF_PERSISTENT)) {
804 li = allocate_lockinfo(fi, cmd, kparams);
808 li->li_lksb.sb_lkid = kparams->lkid;
809 li->li_castaddr = kparams->castaddr;
810 li->li_castparam = kparams->castparam;
812 /* OK, this isn't exactly a FIRSTLOCK but it is the
813 first time we've used this lockinfo, and if things
814 fail we want rid of it */
815 init_MUTEX_LOCKED(&li->li_firstlock);
816 set_bit(LI_FLAG_FIRSTLOCK, &li->li_flags);
819 /* TODO: do a query to get the current state ?? */
/* A user-supplied lkid that maps to something else is rejected. */
824 if (li->li_magic != LOCKINFO_MAGIC)
827 /* For conversions don't overwrite the current blocking AST
829 a) if a blocking AST fires before the conversion is queued
830 it runs the current handler
831 b) if the conversion is cancelled, the original blocking AST
832 declaration is active
833 The pend_ info is made active when the conversion
836 li->li_pend_bastaddr = kparams->bastaddr;
837 li->li_pend_bastparam = kparams->bastparam;
/* Not a conversion: brand-new lock. */
839 li = allocate_lockinfo(fi, cmd, kparams);
843 /* semaphore to allow us to complete our work before
844 the AST routine runs. In fact we only need (and use) this
845 when the initial lock fails */
846 init_MUTEX_LOCKED(&li->li_firstlock);
847 set_bit(LI_FLAG_FIRSTLOCK, &li->li_flags);
850 li->li_user_lksb = kparams->lksb;
851 li->li_castaddr = kparams->castaddr;
852 li->li_castparam = kparams->castparam;
853 li->li_lksb.sb_lkid = kparams->lkid;
854 li->li_rqmode = kparams->mode;
855 if (kparams->flags & DLM_LKF_PERSISTENT)
856 set_bit(LI_FLAG_PERSISTENT, &li->li_flags);
858 /* Copy in the value block */
859 if (kparams->flags & DLM_LKF_VALBLK) {
860 if (!li->li_lksb.sb_lvbptr) {
861 li->li_lksb.sb_lvbptr = kmalloc(DLM_USER_LVB_LEN,
863 if (!li->li_lksb.sb_lvbptr) {
869 memcpy(li->li_lksb.sb_lvbptr, kparams->lvb, DLM_USER_LVB_LEN);
873 status = dlm_lock(fi->fi_ls->ls_lockspace,
874 kparams->mode, &li->li_lksb,
876 kparams->name, kparams->namelen,
/* Only install a BAST callback if userland registered one. */
880 (li->li_pend_bastaddr || li->li_bastaddr) ?
882 kparams->range.ra_end ? &kparams->range : NULL);
886 /* If it succeeded (this far) with a new lock then keep track of
887 it on the file's lockinfo list */
888 if (!status && test_bit(LI_FLAG_FIRSTLOCK, &li->li_flags)) {
890 spin_lock(&fi->fi_li_lock);
891 list_add(&li->li_ownerqueue, &fi->fi_li_list);
892 set_bit(LI_FLAG_ONLIST, &li->li_flags);
893 spin_unlock(&fi->fi_li_lock);
894 if (add_lockinfo(li))
895 printk(KERN_WARNING "Add lockinfo failed\n");
/* Let a waiting ast_routine() proceed (see its down/up pair). */
897 up(&li->li_firstlock);
900 /* Return the lockid as the user needs it /now/ */
901 return li->li_lksb.sb_lkid;
/* Failure path: only free a lock_info we created on this call. */
904 if (test_bit(LI_FLAG_FIRSTLOCK, &li->li_flags))
905 release_lockinfo(li);
/* DLM_USER_UNLOCK handler: resolve (or synthesise) the lock_info for the
   given lkid, then issue dlm_unlock().  A plain unlock removes the lock
   from the owner queue; a conversion-cancel leaves it in place. */
910 static int do_user_unlock(struct file_info *fi, uint8_t cmd,
911 struct dlm_lock_params *kparams)
913 struct lock_info *li;
915 int convert_cancel = 0;
917 li = get_lockinfo(kparams->lkid);
/* Unknown lkid (e.g. unlocking an orphan): build a fresh lock_info
   so the completion AST has somewhere to land. */
919 li = allocate_lockinfo(fi, cmd, kparams);
922 spin_lock(&fi->fi_li_lock);
923 list_add(&li->li_ownerqueue, &fi->fi_li_list);
924 set_bit(LI_FLAG_ONLIST, &li->li_flags);
925 spin_unlock(&fi->fi_li_lock);
928 if (li->li_magic != LOCKINFO_MAGIC)
931 li->li_user_lksb = kparams->lksb;
932 li->li_castparam = kparams->castparam;
935 /* Cancelling a conversion doesn't remove the lock...*/
936 if (kparams->flags & DLM_LKF_CANCEL && li->li_grmode != -1)
939 /* Wait until dlm_lock() has completed */
940 if (!test_bit(LI_FLAG_ONLIST, &li->li_flags)) {
941 down(&li->li_firstlock);
942 up(&li->li_firstlock);
945 /* dlm_unlock() passes a 0 for castaddr which means don't overwrite
946 the existing li_castaddr as that's the completion routine for
947 unlocks. dlm_unlock_wait() specifies a new AST routine to be
948 executed when the unlock completes. */
949 if (kparams->castaddr)
950 li->li_castaddr = kparams->castaddr;
952 /* Use existing lksb & astparams */
953 status = dlm_unlock(fi->fi_ls->ls_lockspace,
955 kparams->flags, &li->li_lksb, li);
/* A real unlock (not a cancelled conversion) leaves the owner queue;
   the lock_info itself is freed later by ast_routine(). */
957 if (!status && !convert_cancel) {
958 spin_lock(&fi->fi_li_lock);
959 list_del(&li->li_ownerqueue);
960 clear_bit(LI_FLAG_ONLIST, &li->li_flags);
961 spin_unlock(&fi->fi_li_lock);
967 /* Write call, submit a locking request */
/* Copy the request in, check the ABI version, block signals for the
   duration, then dispatch on cmd.  Lock/unlock need an open lockspace
   device (fi != NULL); lockspace create/remove must come through the
   control device (fi == NULL). */
968 static ssize_t dlm_write(struct file *file, const char __user *buffer,
969 size_t count, loff_t *ppos)
971 struct file_info *fi = file->private_data;
972 struct dlm_write_request *kparams;
977 /* -1 because lock name is optional */
978 if (count < sizeof(struct dlm_write_request)-1)
981 /* Has the lockspace been deleted */
982 if (fi && test_bit(LS_FLAG_DELETED, &fi->fi_ls->ls_flags))
/* count is bounded only by the earlier checks outside this view;
   NOTE(review): confirm an upper bound exists before kmalloc. */
985 kparams = kmalloc(count, GFP_KERNEL);
990 /* Get the command info */
991 if (copy_from_user(kparams, buffer, count))
995 if (check_version(kparams))
998 /* Block signals while we are doing this */
999 sigfillset(&allsigs);
1000 sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
1003 switch (kparams->cmd)
1006 if (!fi) goto out_sig;
1007 status = do_user_lock(fi, kparams->cmd, &kparams->i.lock);
1010 case DLM_USER_UNLOCK:
1011 if (!fi) goto out_sig;
1012 status = do_user_unlock(fi, kparams->cmd, &kparams->i.lock);
1015 case DLM_USER_CREATE_LOCKSPACE:
1016 if (fi) goto out_sig;
1017 status = do_user_create_lockspace(fi, kparams->cmd,
1018 &kparams->i.lspace);
1021 case DLM_USER_REMOVE_LOCKSPACE:
1022 if (fi) goto out_sig;
1023 status = do_user_remove_lockspace(fi, kparams->cmd,
1024 &kparams->i.lspace);
1027 printk("Unknown command passed to DLM device : %d\n",
1033 /* Restore signals */
1034 sigprocmask(SIG_SETMASK, &tmpsig, NULL);
1035 recalc_sigpending();
/* fops for per-lockspace devices (read/write/poll entries are outside
   this view). */
1045 static struct file_operations _dlm_fops = {
1047 .release = dlm_close,
1051 .owner = THIS_MODULE,
/* fops for the dlm-control device: open/close only; commands arrive
   via write(). */
1054 static struct file_operations _dlm_ctl_fops = {
1055 .open = dlm_ctl_open,
1056 .release = dlm_ctl_close,
1058 .owner = THIS_MODULE,
1062 * Create control device
/* Module init: set up the global lockspace list and locks, then register
   the "dlm-control" misc device through which lockspaces are managed. */
1064 static int __init dlm_device_init(void)
1068 INIT_LIST_HEAD(&user_ls_list);
1069 mutex_init(&user_ls_lock);
1070 rwlock_init(&lockinfo_lock);
1072 ctl_device.name = "dlm-control";
1073 ctl_device.fops = &_dlm_ctl_fops;
1074 ctl_device.minor = MISC_DYNAMIC_MINOR;
1076 r = misc_register(&ctl_device);
1078 printk(KERN_ERR "dlm: misc_register failed for control dev\n");
/* Module exit: remove the control device. */
1085 static void __exit dlm_device_exit(void)
1087 misc_deregister(&ctl_device);
1090 MODULE_DESCRIPTION("Distributed Lock Manager device interface");
1091 MODULE_AUTHOR("Red Hat, Inc.");
1092 MODULE_LICENSE("GPL");
1094 module_init(dlm_device_init);
1095 module_exit(dlm_device_exit);