1 /******************************************************************************
2 *******************************************************************************
4 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5 ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
7 ** This copyrighted material is made available to anyone wishing to use,
8 ** modify, copy, or redistribute it subject to the terms and conditions
9 ** of the GNU General Public License v.2.
11 *******************************************************************************
12 ******************************************************************************/
17 * This is the userland interface to the DLM.
19 * The locking is done via a misc char device (find the
20 * registered minor number in /proc/misc).
22 * User code should not use this interface directly but
23 * call the library routines in libdlm.a instead.
27 #include <linux/miscdevice.h>
28 #include <linux/init.h>
29 #include <linux/wait.h>
30 #include <linux/module.h>
31 #include <linux/file.h>
33 #include <linux/poll.h>
34 #include <linux/signal.h>
35 #include <linux/spinlock.h>
36 #include <linux/idr.h>
38 #include <linux/dlm.h>
39 #include <linux/dlm_device.h>
41 #include "lvb_table.h"
/* File operations for per-lockspace misc devices; defined near end of file */
43 static struct file_operations _dlm_fops;
/* Prefix used when building misc device names ("dlm_<lockspace>") */
44 static const char *name_prefix="dlm";
/* All registered userland lockspaces, protected by user_ls_lock */
45 static struct list_head user_ls_list;
46 static struct mutex user_ls_lock;
48 /* Lock infos are stored in here indexed by lock ID */
49 static DEFINE_IDR(lockinfo_idr);
/* Guards lockinfo_idr: read lock for lookup, write lock for add/remove */
50 static rwlock_t lockinfo_lock;
52 /* Flags in li_flags */
/* AST has run; synchronous waiter may proceed (see wait_for_ast()) */
53 #define LI_FLAG_COMPLETE 1
/* First lock request on this lock_info; cleared once it completes */
54 #define LI_FLAG_FIRSTLOCK 2
/* Lock survives process exit as an orphan (DLM_LKF_PERSISTENT) */
55 #define LI_FLAG_PERSISTENT 3
/* lock_info is on the owning file's fi_li_list */
56 #define LI_FLAG_ONLIST 4
58 /* Flags in ls_flags */
/* Lockspace has been unregistered; readers get EOF */
59 #define LS_FLAG_DELETED 1
/* Free the lockspace automatically when the last user closes it */
60 #define LS_FLAG_AUTOFREE 2
/* Magic value ("SYS$") stamped into lock_info to detect bad lock IDs */
63 #define LOCKINFO_MAGIC 0x53595324
/* NOTE(review): these are the fields of struct lock_info (one per lock held
 * via this interface); the struct's opening/closing lines are not visible in
 * this chunk -- confirm against the full file. */
/* Kernel-side lock status block passed to dlm_lock()/dlm_unlock() */
70 struct dlm_lksb li_lksb;
/* Wakes synchronous waiters when LI_FLAG_COMPLETE is set */
71 wait_queue_head_t li_waitq;
72 unsigned long li_flags;
/* Userland completion-AST routine and its argument (opaque user pointers) */
73 void __user *li_castparam;
74 void __user *li_castaddr;
/* Userland blocking-AST routine and its argument */
75 void __user *li_bastparam;
76 void __user *li_bastaddr;
/* Blocking-AST info staged during a conversion; promoted by ast_routine()
 * only when the conversion succeeds */
77 void __user *li_pend_bastparam;
78 void __user *li_pend_bastaddr;
/* Linkage on the owning file's fi_li_list */
79 struct list_head li_ownerqueue;
80 struct file_info *li_file;
/* Userland's lksb, copied back when results are read */
81 struct dlm_lksb __user *li_user_lksb;
/* Held locked until the initial dlm_lock() call has returned, so the AST
 * cannot free the lock_info underneath do_user_lock() */
82 struct semaphore li_firstlock;
85 /* A queued AST, waiting to be read by userland via dlm_read() */
/* NOTE(review): struct ast_info's opening/closing lines are not visible in
 * this chunk. */
/* The result record copied out to the reader */
87 struct dlm_lock_result result;
/* Linkage on file_info.fi_ast_list */
88 struct list_head list;
90 uint32_t progress; /* How much has been read */
93 /* One of these per userland lockspace */
/* NOTE(review): struct user_ls's opening/closing lines and some fields
 * (ls_lockspace, ls_refcnt, ls_flags are referenced elsewhere) are not
 * visible in this chunk. */
99 /* Passed into misc_register() */
100 struct miscdevice ls_miscinfo;
/* Linkage on the global user_ls_list */
101 struct list_head ls_list;
104 /* misc_device info for the control device */
105 static struct miscdevice ctl_device;
108 * Stuff we hang off the file struct.
109 * The first two are to cope with unlocking all the
110 * locks held by a process when it dies.
/* NOTE(review): struct file_info's opening/closing lines are not visible in
 * this chunk. */
113 struct list_head fi_li_list; /* List of active lock_infos */
114 spinlock_t fi_li_lock;
115 struct list_head fi_ast_list; /* Queue of ASTs to be delivered */
116 spinlock_t fi_ast_lock;
/* Readers/pollers sleep here until an AST is queued */
117 wait_queue_head_t fi_wait;
118 struct user_ls *fi_ls;
119 atomic_t fi_refcnt; /* Number of users */
120 unsigned long fi_flags; /* Bit 1 means the device is open */
124 /* get and put ops for file_info.
125 Actually I don't really like "get" and "put", but everyone
126 else seems to use them and I can't think of anything
127 nicer at the moment */
128 static void get_file_info(struct file_info *f)
130 atomic_inc(&f->fi_refcnt);
/* Drop a reference to a file_info; the final put frees the struct.
 * NOTE(review): the statement freeing it is not visible in this chunk --
 * confirm against the full file. */
133 static void put_file_info(struct file_info *f)
135 if (atomic_dec_and_test(&f->fi_refcnt))
/* Tear down a lock_info: drop the owning file's reference, remove it from
 * the global lockid IDR, free any kernel LVB copy, and release the module
 * reference taken in allocate_lockinfo().
 * NOTE(review): the line freeing the lock_info itself is not visible in
 * this chunk -- confirm against the full file. */
139 static void release_lockinfo(struct lock_info *li)
141 put_file_info(li->li_file);
143 write_lock(&lockinfo_lock);
144 idr_remove(&lockinfo_idr, li->li_lksb.sb_lkid);
145 write_unlock(&lockinfo_lock);
/* The NULL check is redundant (kfree(NULL) is a no-op) but harmless */
147 if (li->li_lksb.sb_lvbptr)
148 kfree(li->li_lksb.sb_lvbptr);
/* Pairs with try_module_get() in allocate_lockinfo() */
151 module_put(THIS_MODULE);
/* Look up a lock_info by lock ID under the IDR read lock.
 * NOTE(review): the return statement is not visible in this chunk;
 * callers treat a NULL result as "no such lock". */
154 static struct lock_info *get_lockinfo(uint32_t lockid)
156 struct lock_info *li;
158 read_lock(&lockinfo_lock);
159 li = idr_find(&lockinfo_idr, lockid);
160 read_unlock(&lockinfo_lock);
/* Insert a lock_info into the global IDR, keyed by its DLM-assigned lock ID.
 * Fails if the ID is already present or if the IDR hands back a different
 * ID than requested. */
165 static int add_lockinfo(struct lock_info *li)
171 write_lock(&lockinfo_lock);
/* Reject duplicates: the lock ID must not already be mapped */
173 if (idr_find(&lockinfo_idr, li->li_lksb.sb_lkid))
/* NOTE(review): GFP_KERNEL allocation while holding a write_lock can
 * sleep in atomic context -- worth confirming/fixing in the full file */
177 r = idr_pre_get(&lockinfo_idr, GFP_KERNEL);
181 r = idr_get_new_above(&lockinfo_idr, li, li->li_lksb.sb_lkid, &n);
/* The IDR must give us exactly sb_lkid; back out anything else */
185 if (n != li->li_lksb.sb_lkid) {
186 idr_remove(&lockinfo_idr, n);
193 write_unlock(&lockinfo_lock);
/* Walk user_ls_list for the lockspace with the given misc minor number.
 * Caller must hold user_ls_lock (see find_lockspace() and
 * do_user_remove_lockspace()). */
199 static struct user_ls *__find_lockspace(int minor)
201 struct user_ls *lsinfo;
203 list_for_each_entry(lsinfo, &user_ls_list, ls_list) {
204 if (lsinfo->ls_miscinfo.minor == minor)
210 /* Find a lockspace struct given the device minor number */
/* Locked wrapper around __find_lockspace().
 * NOTE(review): the return statement is not visible in this chunk. */
211 static struct user_ls *find_lockspace(int minor)
213 struct user_ls *lsinfo;
215 mutex_lock(&user_ls_lock);
216 lsinfo = __find_lockspace(minor);
217 mutex_unlock(&user_ls_lock);
222 static void add_lockspace_to_list(struct user_ls *lsinfo)
224 mutex_lock(&user_ls_lock);
225 list_add(&lsinfo->ls_list, &user_ls_list);
226 mutex_unlock(&user_ls_lock);
229 /* Register a lockspace with the DLM and create a misc
230 device for userland to access it.
Returns 0 on success; on failure all partial allocations are undone. */
231 static int register_lockspace(char *name, struct user_ls **ls, int flags)
233 struct user_ls *newls;
/* "+2" covers the '_' separator and the trailing NUL */
237 namelen = strlen(name)+strlen(name_prefix)+2;
239 newls = kzalloc(sizeof(struct user_ls), GFP_KERNEL);
243 newls->ls_miscinfo.name = kzalloc(namelen, GFP_KERNEL);
244 if (!newls->ls_miscinfo.name) {
249 status = dlm_new_lockspace(name, strlen(name), &newls->ls_lockspace, 0,
/* Error path: free the name buffer allocated above */
252 kfree(newls->ls_miscinfo.name);
/* Device name is "<prefix>_<lockspace name>", e.g. "dlm_foo" */
257 snprintf((char*)newls->ls_miscinfo.name, namelen, "%s_%s",
260 newls->ls_miscinfo.fops = &_dlm_fops;
261 newls->ls_miscinfo.minor = MISC_DYNAMIC_MINOR;
263 status = misc_register(&newls->ls_miscinfo);
265 printk(KERN_ERR "dlm: misc register failed for %s\n", name);
/* Error path: release the DLM lockspace created above */
266 dlm_release_lockspace(newls->ls_lockspace, 0);
267 kfree(newls->ls_miscinfo.name);
272 if (flags & DLM_USER_LSFLG_AUTOFREE)
273 set_bit(LS_FLAG_AUTOFREE, &newls->ls_flags);
275 add_lockspace_to_list(newls);
280 /* Called with the user_ls_lock mutex held */
/* Release the DLM lockspace and its misc device, unlink it from the global
 * list, and free the struct only when no open files still reference it
 * (otherwise LS_FLAG_DELETED defers the free to the last close). */
281 static int unregister_lockspace(struct user_ls *lsinfo, int force)
285 status = dlm_release_lockspace(lsinfo->ls_lockspace, force);
289 status = misc_deregister(&lsinfo->ls_miscinfo);
293 list_del(&lsinfo->ls_list);
294 set_bit(LS_FLAG_DELETED, &lsinfo->ls_flags);
295 lsinfo->ls_lockspace = NULL;
296 if (atomic_read(&lsinfo->ls_refcnt) == 0) {
297 kfree(lsinfo->ls_miscinfo.name);
304 /* Add it to userland's AST queue */
/* Build an ast_info describing a completion or blocking AST, snapshot the
 * current lksb into it, queue it on the owning file's AST list and wake any
 * reader/poller.
 * NOTE(review): the allocation-failure check for 'ast' is not visible in
 * this chunk -- confirm against the full file. */
305 static void add_to_astqueue(struct lock_info *li, void *astaddr, void *astparam,
308 struct ast_info *ast = kzalloc(sizeof(struct ast_info), GFP_KERNEL);
312 ast->result.user_astparam = astparam;
313 ast->result.user_astaddr = astaddr;
314 ast->result.user_lksb = li->li_user_lksb;
/* Snapshot: the lksb may change again before userland reads this entry */
315 memcpy(&ast->result.lksb, &li->li_lksb, sizeof(struct dlm_lksb));
316 ast->lvb_updated = lvb_updated;
318 spin_lock(&li->li_file->fi_ast_lock);
319 list_add_tail(&ast->list, &li->li_file->fi_ast_list);
320 spin_unlock(&li->li_file->fi_ast_lock);
321 wake_up_interruptible(&li->li_file->fi_wait);
324 static void bast_routine(void *param, int mode)
326 struct lock_info *li = param;
328 if (li && li->li_bastaddr)
329 add_to_astqueue(li, li->li_bastaddr, li->li_bastparam, 0);
333 * This is the kernel's AST routine.
334 * All lock, unlock & query operations complete here.
335 * The only synchronous ops are those done during device close.
337 static void ast_routine(void *param)
339 struct lock_info *li = param;
341 /* Param may be NULL if a persistent lock is unlocked by someone else */
345 /* If this is a successful conversion then activate the blocking ast
346 * args from the conversion request */
347 if (!test_bit(LI_FLAG_FIRSTLOCK, &li->li_flags) &&
348 li->li_lksb.sb_status == 0) {
/* Promote the staged (pend_) blocking-AST info set by do_user_lock() */
350 li->li_bastparam = li->li_pend_bastparam;
351 li->li_bastaddr = li->li_pend_bastaddr;
352 li->li_pend_bastaddr = NULL;
355 /* If it's an async request then post data to the user's AST queue. */
356 if (li->li_castaddr) {
359 /* See if the lvb has been updated */
360 if (dlm_lvb_operations[li->li_grmode+1][li->li_rqmode+1] == 1)
/* Record the newly granted mode on success */
363 if (li->li_lksb.sb_status == 0)
364 li->li_grmode = li->li_rqmode;
366 /* Only queue AST if the device is still open */
367 if (test_bit(1, &li->li_file->fi_flags))
368 add_to_astqueue(li, li->li_castaddr, li->li_castparam,
371 /* If it's a new lock operation that failed, then
372 * remove it from the owner queue and free the
373 * lock_info. */
375 if (test_and_clear_bit(LI_FLAG_FIRSTLOCK, &li->li_flags) &&
376 li->li_lksb.sb_status != 0) {
378 /* Wait till dlm_lock() has finished */
/* down/up pair: li_firstlock is released by do_user_lock() once the
 * initial dlm_lock() call has returned */
379 down(&li->li_firstlock);
380 up(&li->li_firstlock);
382 spin_lock(&li->li_file->fi_li_lock);
383 list_del(&li->li_ownerqueue);
384 clear_bit(LI_FLAG_ONLIST, &li->li_flags);
385 spin_unlock(&li->li_file->fi_li_lock);
386 release_lockinfo(li);
389 /* Free unlocks & queries */
390 if (li->li_lksb.sb_status == -DLM_EUNLOCK ||
391 li->li_cmd == DLM_USER_QUERY) {
392 release_lockinfo(li);
395 /* Synchronous request, just wake up the caller */
396 set_bit(LI_FLAG_COMPLETE, &li->li_flags);
397 wake_up_interruptible(&li->li_waitq);
402 * Wait for the lock op to complete and return the status.
404 static int wait_for_ast(struct lock_info *li)
406 /* Wait for the AST routine to complete */
407 set_task_state(current, TASK_INTERRUPTIBLE);
408 while (!test_bit(LI_FLAG_COMPLETE, &li->li_flags))
411 set_task_state(current, TASK_RUNNING);
413 return li->li_lksb.sb_status;
417 /* Open on control device */
418 static int dlm_ctl_open(struct inode *inode, struct file *file)
420 file->private_data = NULL;
424 /* Close on control device */
/* NOTE(review): the body is not visible in this chunk; presumably it just
 * returns success -- confirm against the full file. */
425 static int dlm_ctl_close(struct inode *inode, struct file *file)
430 /* Open on lockspace device */
/* Allocate the per-open file_info, take a reference on the lockspace, and
 * mark the device open (bit 1 of fi_flags) so ASTs get delivered. */
431 static int dlm_open(struct inode *inode, struct file *file)
434 struct user_ls *lsinfo;
436 lsinfo = find_lockspace(iminor(inode));
440 f = kzalloc(sizeof(struct file_info), GFP_KERNEL);
444 atomic_inc(&lsinfo->ls_refcnt);
445 INIT_LIST_HEAD(&f->fi_li_list);
446 INIT_LIST_HEAD(&f->fi_ast_list);
447 spin_lock_init(&f->fi_li_lock);
448 spin_lock_init(&f->fi_ast_lock);
449 init_waitqueue_head(&f->fi_wait);
/* Bit 1 set == device open; cleared in dlm_close() to stop AST delivery */
453 set_bit(1, &f->fi_flags);
455 file->private_data = f;
460 /* Check the user's version matches ours */
/* Mismatch if the major version differs, or the user's minor version is
 * newer than ours.  (The "version[0] == MAJOR" sub-condition is redundant:
 * when it is evaluated the majors are already known equal -- harmless.) */
461 static int check_version(struct dlm_write_request *req)
463 if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
464 (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
465 req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
467 printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
468 "user (%d.%d.%d) kernel (%d.%d.%d)\n",
474 DLM_DEVICE_VERSION_MAJOR,
475 DLM_DEVICE_VERSION_MINOR,
476 DLM_DEVICE_VERSION_PATCH);
482 /* Close on lockspace device */
/* Release everything the process held: orphan persistent locks, force-unlock
 * the rest (waiting for each unlock so parents outlive children), then drop
 * the lockspace reference, freeing it if deleted or AUTOFREE. */
483 static int dlm_close(struct inode *inode, struct file *file)
485 struct file_info *f = file->private_data;
487 struct lock_info *old_li, *safe;
490 struct user_ls *lsinfo;
491 DECLARE_WAITQUEUE(wq, current);
493 lsinfo = find_lockspace(iminor(inode));
497 /* Mark this closed so that ASTs will not be delivered any more */
498 clear_bit(1, &f->fi_flags);
500 /* Block signals while we are doing this */
501 sigfillset(&allsigs);
502 sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
504 /* We use our own lock_info struct here, so that any
505 * outstanding "real" ASTs will be delivered with the
506 * corresponding "real" params, thus freeing the lock_info
507 * that belongs to the lock. This catches the corner case where
508 * a lock is BUSY when we try to unlock it here
510 memset(&li, 0, sizeof(li));
511 clear_bit(LI_FLAG_COMPLETE, &li.li_flags);
512 init_waitqueue_head(&li.li_waitq);
513 add_wait_queue(&li.li_waitq, &wq);
516 * Free any outstanding locks, they are on the
517 * list in LIFO order so there should be no problems
518 * about unlocking parents before children.
520 list_for_each_entry_safe(old_li, safe, &f->fi_li_list, li_ownerqueue) {
524 /* Don't unlock persistent locks, just mark them orphaned */
525 if (test_bit(LI_FLAG_PERSISTENT, &old_li->li_flags)) {
526 list_del(&old_li->li_ownerqueue);
528 /* Update master copy */
529 /* TODO: Check locking core updates the local and
530 remote ORPHAN flags */
531 li.li_lksb.sb_lkid = old_li->li_lksb.sb_lkid;
/* Null-conversion carrying DLM_LKF_ORPHAN to flag the lock orphaned */
532 status = dlm_lock(f->fi_ls->ls_lockspace,
533 old_li->li_grmode, &li.li_lksb,
534 DLM_LKF_CONVERT|DLM_LKF_ORPHAN,
535 NULL, 0, 0, ast_routine, NULL, NULL);
537 printk("dlm: Error orphaning lock %x: %d\n",
538 old_li->li_lksb.sb_lkid, status);
540 /* But tidy our references in it */
541 release_lockinfo(old_li);
545 clear_bit(LI_FLAG_COMPLETE, &li.li_flags);
547 flags = DLM_LKF_FORCEUNLOCK;
/* Invalidate the value block if the lock could have written it */
548 if (old_li->li_grmode >= DLM_LOCK_PW)
549 flags |= DLM_LKF_IVVALBLK;
551 status = dlm_unlock(f->fi_ls->ls_lockspace,
552 old_li->li_lksb.sb_lkid, flags,
555 /* Must wait for it to complete as the next lock could be its
556 * parent */
560 /* Unlock succeeded, free the lock_info struct. */
562 release_lockinfo(old_li);
565 remove_wait_queue(&li.li_waitq, &wq);
568 * If this is the last reference to the lockspace
569 * then free the struct. If it's an AUTOFREE lockspace
570 * then free the whole thing.
572 mutex_lock(&user_ls_lock);
573 if (atomic_dec_and_test(&lsinfo->ls_refcnt)) {
575 if (lsinfo->ls_lockspace) {
576 if (test_bit(LS_FLAG_AUTOFREE, &lsinfo->ls_flags)) {
577 unregister_lockspace(lsinfo, 1);
/* Lockspace already deleted (LS_FLAG_DELETED): free the deferred name */
580 kfree(lsinfo->ls_miscinfo.name);
584 mutex_unlock(&user_ls_lock);
587 /* Restore signals */
588 sigprocmask(SIG_SETMASK, &tmpsig, NULL);
/* Control-device command: create a lockspace (root only).  On success the
 * new misc device's minor number is the return value, so userland can open
 * the right /dev node. */
594 static int do_user_create_lockspace(struct file_info *fi, uint8_t cmd,
595 struct dlm_lspace_params *kparams)
598 struct user_ls *lsinfo;
600 if (!capable(CAP_SYS_ADMIN))
603 status = register_lockspace(kparams->name, &lsinfo, kparams->flags);
605 /* If it succeeded then return the minor number */
607 status = lsinfo->ls_miscinfo.minor;
/* Control-device command: remove the lockspace with the given minor number
 * (root only).  DLM_USER_LSFLG_FORCEFREE forces removal even with locks
 * outstanding. */
612 static int do_user_remove_lockspace(struct file_info *fi, uint8_t cmd,
613 struct dlm_lspace_params *kparams)
617 struct user_ls *lsinfo;
619 if (!capable(CAP_SYS_ADMIN))
622 mutex_lock(&user_ls_lock);
623 lsinfo = __find_lockspace(kparams->minor);
/* Not found: drop the mutex and bail out */
625 mutex_unlock(&user_ls_lock);
629 if (kparams->flags & DLM_USER_LSFLG_FORCEFREE)
/* unregister_lockspace() requires user_ls_lock held -- still held here */
632 status = unregister_lockspace(lsinfo, force);
633 mutex_unlock(&user_ls_lock);
638 /* Read call, might block if no ASTs are waiting.
639 * It will only ever return one message at a time, regardless
640 * of how many are pending.
642 static ssize_t dlm_read(struct file *file, char __user *buffer, size_t count,
645 struct file_info *fi = file->private_data;
646 struct ast_info *ast;
649 DECLARE_WAITQUEUE(wait, current);
/* The buffer must at least hold the fixed-size result header */
651 if (count < sizeof(struct dlm_lock_result))
654 spin_lock(&fi->fi_ast_lock);
655 if (list_empty(&fi->fi_ast_list)) {
658 * Return EOF if the lockspace has been deleted.
660 if (test_bit(LS_FLAG_DELETED, &fi->fi_ls->ls_flags))
663 if (file->f_flags & O_NONBLOCK) {
664 spin_unlock(&fi->fi_ast_lock);
668 add_wait_queue(&fi->fi_wait, &wait);
/* Sleep until an AST arrives or a signal is pending; the ast_lock is
 * dropped across the schedule and retaken afterwards */
671 set_current_state(TASK_INTERRUPTIBLE);
672 if (list_empty(&fi->fi_ast_list) &&
673 !signal_pending(current)) {
675 spin_unlock(&fi->fi_ast_lock);
677 spin_lock(&fi->fi_ast_lock);
681 current->state = TASK_RUNNING;
682 remove_wait_queue(&fi->fi_wait, &wait);
684 if (signal_pending(current)) {
685 spin_unlock(&fi->fi_ast_lock);
/* Dequeue the oldest pending AST */
690 ast = list_entry(fi->fi_ast_list.next, struct ast_info, list);
691 list_del(&ast->list);
692 spin_unlock(&fi->fi_ast_lock);
694 /* Work out the size of the returned data */
695 data_size = sizeof(struct dlm_lock_result);
696 if (ast->lvb_updated && ast->result.lksb.sb_lvbptr)
697 data_size += DLM_USER_LVB_LEN;
699 offset = sizeof(struct dlm_lock_result);
701 /* Room for the extended data ? */
702 if (count >= data_size) {
704 if (ast->lvb_updated && ast->result.lksb.sb_lvbptr) {
705 if (copy_to_user(buffer+offset,
706 ast->result.lksb.sb_lvbptr,
709 ast->result.lvb_offset = offset;
710 offset += DLM_USER_LVB_LEN;
714 ast->result.length = data_size;
715 /* Copy the header now it has all the offsets in it */
716 if (copy_to_user(buffer, &ast->result, sizeof(struct dlm_lock_result)))
719 /* If we only returned a header and there's more to come then put it
720 * back on the queue for the next read */
721 if (count < data_size) {
722 spin_lock(&fi->fi_ast_lock);
723 list_add(&ast->list, &fi->fi_ast_list);
724 spin_unlock(&fi->fi_ast_lock);
730 static unsigned int dlm_poll(struct file *file, poll_table *wait)
732 struct file_info *fi = file->private_data;
734 poll_wait(file, &fi->fi_wait, wait);
736 spin_lock(&fi->fi_ast_lock);
737 if (!list_empty(&fi->fi_ast_list)) {
738 spin_unlock(&fi->fi_ast_lock);
739 return POLLIN | POLLRDNORM;
742 spin_unlock(&fi->fi_ast_lock);
/* Allocate and initialise a lock_info for a new lock request.  Takes a
 * module reference (released by release_lockinfo()).
 * NOTE(review): the allocation-failure handling, the li_file/li_cmd setup
 * and the return statement are not visible in this chunk. */
746 static struct lock_info *allocate_lockinfo(struct file_info *fi, uint8_t cmd,
747 struct dlm_lock_params *kparams)
749 struct lock_info *li;
751 if (!try_module_get(THIS_MODULE))
754 li = kzalloc(sizeof(struct lock_info), GFP_KERNEL);
/* Stamp the magic so later lkid lookups can be sanity-checked */
756 li->li_magic = LOCKINFO_MAGIC;
/* The explicit NULL assignments below are redundant after kzalloc()
 * but make the initial state obvious */
762 li->li_pend_bastparam = NULL;
763 li->li_pend_bastaddr = NULL;
764 li->li_castaddr = NULL;
765 li->li_castparam = NULL;
766 li->li_lksb.sb_lvbptr = NULL;
767 li->li_bastaddr = kparams->bastaddr;
768 li->li_bastparam = kparams->bastparam;
/* Handle a DLM_USER_LOCK request: new lock, conversion, or adoption of an
 * orphaned persistent lock.  On success returns the lock ID (userland needs
 * it immediately); on failure a new lock_info is torn down again. */
775 static int do_user_lock(struct file_info *fi, uint8_t cmd,
776 struct dlm_lock_params *kparams)
778 struct lock_info *li;
782 * Validate things that we need to have correct.
784 if (!kparams->castaddr)
790 /* Persistent child locks are not available yet */
791 if ((kparams->flags & DLM_LKF_PERSISTENT) && kparams->parent)
794 /* For conversions, there should already be a lockinfo struct,
795 unless we are adopting an orphaned persistent lock */
796 if (kparams->flags & DLM_LKF_CONVERT) {
798 li = get_lockinfo(kparams->lkid);
800 /* If this is a persistent lock we will have to create a
801 lockinfo again */
802 if (!li && (kparams->flags & DLM_LKF_PERSISTENT)) {
803 li = allocate_lockinfo(fi, cmd, kparams);
807 li->li_lksb.sb_lkid = kparams->lkid;
808 li->li_castaddr = kparams->castaddr;
809 li->li_castparam = kparams->castparam;
811 /* OK, this isn't exactly a FIRSTLOCK but it is the
812 first time we've used this lockinfo, and if things
813 fail we want rid of it */
814 init_MUTEX_LOCKED(&li->li_firstlock);
815 set_bit(LI_FLAG_FIRSTLOCK, &li->li_flags);
818 /* TODO: do a query to get the current state ?? */
/* A found lkid that doesn't carry the magic is a bogus user lock ID */
823 if (li->li_magic != LOCKINFO_MAGIC)
826 /* For conversions don't overwrite the current blocking AST
827 info; the reasons are:
828 a) if a blocking AST fires before the conversion is queued
829 it runs the current handler
830 b) if the conversion is cancelled, the original blocking AST
831 declaration is active
832 The pend_ info is made active when the conversion
833 completes (see ast_routine()). */
835 li->li_pend_bastaddr = kparams->bastaddr;
836 li->li_pend_bastparam = kparams->bastparam;
/* Not a conversion: brand-new lock */
838 li = allocate_lockinfo(fi, cmd, kparams);
842 /* semaphore to allow us to complete our work before
843 the AST routine runs. In fact we only need (and use) this
844 when the initial lock fails */
845 init_MUTEX_LOCKED(&li->li_firstlock);
846 set_bit(LI_FLAG_FIRSTLOCK, &li->li_flags);
849 li->li_user_lksb = kparams->lksb;
850 li->li_castaddr = kparams->castaddr;
851 li->li_castparam = kparams->castparam;
852 li->li_lksb.sb_lkid = kparams->lkid;
853 li->li_rqmode = kparams->mode;
854 if (kparams->flags & DLM_LKF_PERSISTENT)
855 set_bit(LI_FLAG_PERSISTENT, &li->li_flags);
857 /* Copy in the value block */
858 if (kparams->flags & DLM_LKF_VALBLK) {
/* Allocate the kernel-side LVB buffer lazily, first use only */
859 if (!li->li_lksb.sb_lvbptr) {
860 li->li_lksb.sb_lvbptr = kmalloc(DLM_USER_LVB_LEN,
862 if (!li->li_lksb.sb_lvbptr) {
868 memcpy(li->li_lksb.sb_lvbptr, kparams->lvb, DLM_USER_LVB_LEN);
872 status = dlm_lock(fi->fi_ls->ls_lockspace,
873 kparams->mode, &li->li_lksb,
875 kparams->name, kparams->namelen,
/* Only install a bast callback if userland registered a handler */
879 (li->li_pend_bastaddr || li->li_bastaddr) ?
880 bast_routine : NULL);
884 /* If it succeeded (this far) with a new lock then keep track of
885 it on the file's lockinfo list */
886 if (!status && test_bit(LI_FLAG_FIRSTLOCK, &li->li_flags)) {
888 spin_lock(&fi->fi_li_lock);
889 list_add(&li->li_ownerqueue, &fi->fi_li_list);
890 set_bit(LI_FLAG_ONLIST, &li->li_flags);
891 spin_unlock(&fi->fi_li_lock);
892 if (add_lockinfo(li))
893 printk(KERN_WARNING "Add lockinfo failed\n");
/* Release li_firstlock so ast_routine() may proceed/free on failure */
895 up(&li->li_firstlock);
898 /* Return the lockid as the user needs it /now/ */
899 return li->li_lksb.sb_lkid;
/* Error path: only free lock_infos we created in this call */
902 if (test_bit(LI_FLAG_FIRSTLOCK, &li->li_flags))
903 release_lockinfo(li);
/* Handle a DLM_USER_UNLOCK request (also covers conversion cancel).  If no
 * lock_info exists for the lkid (e.g. a persistent lock from a previous
 * owner), one is created so the unlock has somewhere to complete into. */
908 static int do_user_unlock(struct file_info *fi, uint8_t cmd,
909 struct dlm_lock_params *kparams)
911 struct lock_info *li;
913 int convert_cancel = 0;
915 li = get_lockinfo(kparams->lkid);
/* Unknown lkid: fabricate a lock_info and put it on our owner list */
917 li = allocate_lockinfo(fi, cmd, kparams);
920 spin_lock(&fi->fi_li_lock);
921 list_add(&li->li_ownerqueue, &fi->fi_li_list);
922 set_bit(LI_FLAG_ONLIST, &li->li_flags);
923 spin_unlock(&fi->fi_li_lock);
/* Sanity-check that the lkid really refers to one of our locks */
926 if (li->li_magic != LOCKINFO_MAGIC)
929 li->li_user_lksb = kparams->lksb;
930 li->li_castparam = kparams->castparam;
933 /* Cancelling a conversion doesn't remove the lock...*/
934 if (kparams->flags & DLM_LKF_CANCEL && li->li_grmode != -1)
937 /* Wait until dlm_lock() has completed */
938 if (!test_bit(LI_FLAG_ONLIST, &li->li_flags)) {
939 down(&li->li_firstlock);
940 up(&li->li_firstlock);
943 /* dlm_unlock() passes a 0 for castaddr which means don't overwrite
944 the existing li_castaddr as that's the completion routine for
945 unlocks. dlm_unlock_wait() specifies a new AST routine to be
946 executed when the unlock completes. */
947 if (kparams->castaddr)
948 li->li_castaddr = kparams->castaddr;
950 /* Use existing lksb & astparams */
951 status = dlm_unlock(fi->fi_ls->ls_lockspace,
953 kparams->flags, &li->li_lksb, li);
/* A real unlock (not a conversion cancel) leaves the owner list */
955 if (!status && !convert_cancel) {
956 spin_lock(&fi->fi_li_lock);
957 list_del(&li->li_ownerqueue);
958 clear_bit(LI_FLAG_ONLIST, &li->li_flags);
959 spin_unlock(&fi->fi_li_lock);
965 /* Write call, submit a locking request */
/* Each write carries one dlm_write_request.  Lock/unlock commands are only
 * valid on a lockspace device (fi != NULL); lockspace create/remove only on
 * the control device (fi == NULL). */
966 static ssize_t dlm_write(struct file *file, const char __user *buffer,
967 size_t count, loff_t *ppos)
969 struct file_info *fi = file->private_data;
970 struct dlm_write_request *kparams;
975 /* -1 because lock name is optional */
976 if (count < sizeof(struct dlm_write_request)-1)
979 /* Has the lockspace been deleted */
980 if (fi && test_bit(LS_FLAG_DELETED, &fi->fi_ls->ls_flags))
/* NOTE(review): 'count' is user-controlled; an upper bound on the
 * allocation size is not visible in this chunk -- confirm one exists */
983 kparams = kmalloc(count, GFP_KERNEL);
988 /* Get the command info */
989 if (copy_from_user(kparams, buffer, count))
993 if (check_version(kparams))
996 /* Block signals while we are doing this */
997 sigfillset(&allsigs);
998 sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
1001 switch (kparams->cmd)
1004 if (!fi) goto out_sig;
1005 status = do_user_lock(fi, kparams->cmd, &kparams->i.lock);
1008 case DLM_USER_UNLOCK:
1009 if (!fi) goto out_sig;
1010 status = do_user_unlock(fi, kparams->cmd, &kparams->i.lock);
1013 case DLM_USER_CREATE_LOCKSPACE:
/* Lockspace management is only allowed on the control device */
1014 if (fi) goto out_sig;
1015 status = do_user_create_lockspace(fi, kparams->cmd,
1016 &kparams->i.lspace);
1019 case DLM_USER_REMOVE_LOCKSPACE:
1020 if (fi) goto out_sig;
1021 status = do_user_remove_lockspace(fi, kparams->cmd,
1022 &kparams->i.lspace);
1025 printk("Unknown command passed to DLM device : %d\n",
1031 /* Restore signals */
1032 sigprocmask(SIG_SETMASK, &tmpsig, NULL);
1033 recalc_sigpending();
/* fops for per-lockspace devices.
 * NOTE(review): the .open/.read/.write/.poll initialisers are not visible
 * in this chunk -- confirm against the full file. */
1043 static struct file_operations _dlm_fops = {
1045 .release = dlm_close,
1049 .owner = THIS_MODULE,
/* fops for the dlm-control device (lockspace create/remove via dlm_write).
 * NOTE(review): the .write initialiser is not visible in this chunk. */
1052 static struct file_operations _dlm_ctl_fops = {
1053 .open = dlm_ctl_open,
1054 .release = dlm_ctl_close,
1056 .owner = THIS_MODULE,
1060 * Create control device
1062 static int __init dlm_device_init(void)
/* Initialise the global lockspace list and its locks before any device
 * can be opened */
1066 INIT_LIST_HEAD(&user_ls_list);
1067 mutex_init(&user_ls_lock);
1068 rwlock_init(&lockinfo_lock);
1070 ctl_device.name = "dlm-control";
1071 ctl_device.fops = &_dlm_ctl_fops;
1072 ctl_device.minor = MISC_DYNAMIC_MINOR;
1074 r = misc_register(&ctl_device);
1076 printk(KERN_ERR "dlm: misc_register failed for control dev\n");
1083 static void __exit dlm_device_exit(void)
1085 misc_deregister(&ctl_device);
/* Module metadata and init/exit hookup */
1088 MODULE_DESCRIPTION("Distributed Lock Manager device interface");
1089 MODULE_AUTHOR("Red Hat, Inc.");
1090 MODULE_LICENSE("GPL");
1092 module_init(dlm_device_init);
1093 module_exit(dlm_device_exit);