[DLM] fix new_lockspace error exit [5/6]
[pandora-kernel.git] / fs / dlm / lockspace.c
1 /******************************************************************************
2 *******************************************************************************
3 **
4 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
5 **  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
6 **
7 **  This copyrighted material is made available to anyone wishing to use,
8 **  modify, copy, or redistribute it subject to the terms and conditions
9 **  of the GNU General Public License v.2.
10 **
11 *******************************************************************************
12 ******************************************************************************/
13
14 #include "dlm_internal.h"
15 #include "lockspace.h"
16 #include "member.h"
17 #include "recoverd.h"
18 #include "ast.h"
19 #include "dir.h"
20 #include "lowcomms.h"
21 #include "config.h"
22 #include "memory.h"
23 #include "lock.h"
24 #include "recover.h"
25 #include "requestqueue.h"
26
#ifdef CONFIG_DLM_DEBUG
int dlm_create_debug_file(struct dlm_ls *ls);
void dlm_delete_debug_file(struct dlm_ls *ls);
#else
/* Debugfs support compiled out: stub the hooks away. */
static inline int dlm_create_debug_file(struct dlm_ls *ls) { return 0; }
static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
#endif

/* Module-wide state shared by all lockspaces. */
static int                      ls_count;	/* live lockspaces; guarded by ls_lock */
static struct mutex             ls_lock;	/* serializes lockspace create/release */
static struct list_head         lslist;		/* all lockspaces; guarded by lslist_lock */
static spinlock_t               lslist_lock;
static struct task_struct *     scand_task;	/* the single dlm_scand thread */
40
41
42 static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
43 {
44         ssize_t ret = len;
45         int n = simple_strtol(buf, NULL, 0);
46
47         ls = dlm_find_lockspace_local(ls->ls_local_handle);
48         if (!ls)
49                 return -EINVAL;
50
51         switch (n) {
52         case 0:
53                 dlm_ls_stop(ls);
54                 break;
55         case 1:
56                 dlm_ls_start(ls);
57                 break;
58         default:
59                 ret = -EINVAL;
60         }
61         dlm_put_lockspace(ls);
62         return ret;
63 }
64
static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	/* sysfs "event_done" write handler: userspace reports the result of
	   a join/leave uevent here.  Record the result first, then raise the
	   flag and wake the waiter in do_uevent(). */
	ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
	set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
	wake_up(&ls->ls_uevent_wait);
	return len;
}
72
73 static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
74 {
75         return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
76 }
77
78 static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
79 {
80         ls->ls_global_id = simple_strtoul(buf, NULL, 0);
81         return len;
82 }
83
84 static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
85 {
86         uint32_t status = dlm_recover_status(ls);
87         return snprintf(buf, PAGE_SIZE, "%x\n", status);
88 }
89
90 static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
91 {
92         return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
93 }
94
/* One sysfs attribute of a lockspace, with optional show/store handlers. */
struct dlm_attr {
	struct attribute attr;
	ssize_t (*show)(struct dlm_ls *, char *);
	ssize_t (*store)(struct dlm_ls *, const char *, size_t);
};

/* "control": write-only; 0 stops the lockspace, 1 starts it. */
static struct dlm_attr dlm_attr_control = {
	.attr  = {.name = "control", .mode = S_IWUSR},
	.store = dlm_control_store
};

/* "event_done": write-only; userspace reports the join/leave result. */
static struct dlm_attr dlm_attr_event = {
	.attr  = {.name = "event_done", .mode = S_IWUSR},
	.store = dlm_event_store
};

/* "id": read/write global lockspace id. */
static struct dlm_attr dlm_attr_id = {
	.attr  = {.name = "id", .mode = S_IRUGO | S_IWUSR},
	.show  = dlm_id_show,
	.store = dlm_id_store
};

/* "recover_status": read-only recovery state bits (hex). */
static struct dlm_attr dlm_attr_recover_status = {
	.attr  = {.name = "recover_status", .mode = S_IRUGO},
	.show  = dlm_recover_status_show
};

/* "recover_nodeid": read-only nodeid currently being recovered. */
static struct dlm_attr dlm_attr_recover_nodeid = {
	.attr  = {.name = "recover_nodeid", .mode = S_IRUGO},
	.show  = dlm_recover_nodeid_show
};

/* NULL-terminated table installed as the kobject's default attributes. */
static struct attribute *dlm_attrs[] = {
	&dlm_attr_control.attr,
	&dlm_attr_event.attr,
	&dlm_attr_id.attr,
	&dlm_attr_recover_status.attr,
	&dlm_attr_recover_nodeid.attr,
	NULL,
};
135
136 static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
137                              char *buf)
138 {
139         struct dlm_ls *ls  = container_of(kobj, struct dlm_ls, ls_kobj);
140         struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
141         return a->show ? a->show(ls, buf) : 0;
142 }
143
144 static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
145                               const char *buf, size_t len)
146 {
147         struct dlm_ls *ls  = container_of(kobj, struct dlm_ls, ls_kobj);
148         struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
149         return a->store ? a->store(ls, buf, len) : len;
150 }
151
static void lockspace_kobj_release(struct kobject *k)
{
	/* Final kobject put: free the lockspace structure itself.  The
	   error paths in new_lockspace() rely on this once the kobject
	   has been registered. */
	struct dlm_ls *ls  = container_of(k, struct dlm_ls, ls_kobj);
	kfree(ls);
}
157
/* Route generic sysfs show/store calls to the dlm_attr handlers above. */
static struct sysfs_ops dlm_attr_ops = {
	.show  = dlm_attr_show,
	.store = dlm_attr_store,
};

/* kobject type for a lockspace; the release hook frees struct dlm_ls. */
static struct kobj_type dlm_ktype = {
	.default_attrs = dlm_attrs,
	.sysfs_ops     = &dlm_attr_ops,
	.release       = lockspace_kobj_release,
};

/* Parent kset: every lockspace kobject appears under the "dlm" directory. */
static struct kset dlm_kset = {
	.kobj   = {.name = "dlm",},
	.ktype  = &dlm_ktype,
};
173
174 static int kobject_setup(struct dlm_ls *ls)
175 {
176         char lsname[DLM_LOCKSPACE_LEN];
177         int error;
178
179         memset(lsname, 0, DLM_LOCKSPACE_LEN);
180         snprintf(lsname, DLM_LOCKSPACE_LEN, "%s", ls->ls_name);
181
182         error = kobject_set_name(&ls->ls_kobj, "%s", lsname);
183         if (error)
184                 return error;
185
186         ls->ls_kobj.kset = &dlm_kset;
187         ls->ls_kobj.ktype = &dlm_ktype;
188         return 0;
189 }
190
/* Notify userspace that the lockspace is joining (in=1, "online") or
   leaving (in=0, "offline") and wait for userspace to report the outcome
   by writing to the event_done sysfs file (dlm_event_store() sets
   LSFL_UEVENT_WAIT and wakes us).  Returns the result userspace wrote,
   or -ERESTARTSYS if the wait was interrupted by a signal. */
static int do_uevent(struct dlm_ls *ls, int in)
{
	int error;

	if (in)
		kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
	else
		kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);

	error = wait_event_interruptible(ls->ls_uevent_wait,
			test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
	if (error)
		goto out;

	/* ls_uevent_result was filled in by dlm_event_store(). */
	error = ls->ls_uevent_result;
 out:
	return error;
}
209
210
211 int dlm_lockspace_init(void)
212 {
213         int error;
214
215         ls_count = 0;
216         mutex_init(&ls_lock);
217         INIT_LIST_HEAD(&lslist);
218         spin_lock_init(&lslist_lock);
219
220         kobj_set_kset_s(&dlm_kset, kernel_subsys);
221         error = kset_register(&dlm_kset);
222         if (error)
223                 printk("dlm_lockspace_init: cannot register kset %d\n", error);
224         return error;
225 }
226
void dlm_lockspace_exit(void)
{
	/* Undo dlm_lockspace_init(): drop the "dlm" kset. */
	kset_unregister(&dlm_kset);
}
231
/* Single background thread shared by all lockspaces: periodically scan
   each lockspace (dlm_scan_rsbs, dlm_scan_timeout), skipping any that is
   currently in recovery, then sleep ci_scan_secs between passes. */
static int dlm_scand(void *data)
{
	struct dlm_ls *ls;

	while (!kthread_should_stop()) {
		/* NOTE(review): lslist is walked here without taking
		   lslist_lock; presumably safe because additions use
		   list_add and removal paths quiesce first — confirm
		   against new_lockspace()/remove_lockspace(). */
		list_for_each_entry(ls, &lslist, ls_list) {
			if (dlm_lock_recovery_try(ls)) {
				dlm_scan_rsbs(ls);
				dlm_scan_timeout(ls);
				dlm_unlock_recovery(ls);
			}
		}
		schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ);
	}
	return 0;
}
248
249 static int dlm_scand_start(void)
250 {
251         struct task_struct *p;
252         int error = 0;
253
254         p = kthread_run(dlm_scand, NULL, "dlm_scand");
255         if (IS_ERR(p))
256                 error = PTR_ERR(p);
257         else
258                 scand_task = p;
259         return error;
260 }
261
static void dlm_scand_stop(void)
{
	/* Synchronously stop the thread created by dlm_scand_start(). */
	kthread_stop(scand_task);
}
266
267 static struct dlm_ls *dlm_find_lockspace_name(char *name, int namelen)
268 {
269         struct dlm_ls *ls;
270
271         spin_lock(&lslist_lock);
272
273         list_for_each_entry(ls, &lslist, ls_list) {
274                 if (ls->ls_namelen == namelen &&
275                     memcmp(ls->ls_name, name, namelen) == 0)
276                         goto out;
277         }
278         ls = NULL;
279  out:
280         spin_unlock(&lslist_lock);
281         return ls;
282 }
283
284 struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
285 {
286         struct dlm_ls *ls;
287
288         spin_lock(&lslist_lock);
289
290         list_for_each_entry(ls, &lslist, ls_list) {
291                 if (ls->ls_global_id == id) {
292                         ls->ls_count++;
293                         goto out;
294                 }
295         }
296         ls = NULL;
297  out:
298         spin_unlock(&lslist_lock);
299         return ls;
300 }
301
302 struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
303 {
304         struct dlm_ls *ls;
305
306         spin_lock(&lslist_lock);
307         list_for_each_entry(ls, &lslist, ls_list) {
308                 if (ls->ls_local_handle == lockspace) {
309                         ls->ls_count++;
310                         goto out;
311                 }
312         }
313         ls = NULL;
314  out:
315         spin_unlock(&lslist_lock);
316         return ls;
317 }
318
319 struct dlm_ls *dlm_find_lockspace_device(int minor)
320 {
321         struct dlm_ls *ls;
322
323         spin_lock(&lslist_lock);
324         list_for_each_entry(ls, &lslist, ls_list) {
325                 if (ls->ls_device.minor == minor) {
326                         ls->ls_count++;
327                         goto out;
328                 }
329         }
330         ls = NULL;
331  out:
332         spin_unlock(&lslist_lock);
333         return ls;
334 }
335
void dlm_put_lockspace(struct dlm_ls *ls)
{
	/* Drop a reference taken by one of the dlm_find_lockspace_*
	   lookups; remove_lockspace() waits for ls_count to hit zero. */
	spin_lock(&lslist_lock);
	ls->ls_count--;
	spin_unlock(&lslist_lock);
}
342
/* Unlink the lockspace from lslist once all references are gone,
   polling once per second while holders finish.  Only runs on the
   release path, so the ssleep(1) latency is acceptable. */
static void remove_lockspace(struct dlm_ls *ls)
{
	for (;;) {
		spin_lock(&lslist_lock);
		if (ls->ls_count == 0) {
			list_del(&ls->ls_list);
			spin_unlock(&lslist_lock);
			return;
		}
		spin_unlock(&lslist_lock);
		ssleep(1);
	}
}
356
/* Start the module-wide helper threads needed before the first lockspace
   can operate; on failure, unwind whatever was already started. */
static int threads_start(void)
{
	int error;

	/* Thread which processes lock requests for all lockspaces */
	error = dlm_astd_start();
	if (error) {
		log_print("cannot start dlm_astd thread %d", error);
		goto fail;
	}

	error = dlm_scand_start();
	if (error) {
		log_print("cannot start dlm_scand thread %d", error);
		goto astd_fail;
	}

	/* Thread for sending/receiving messages for all lockspaces */
	error = dlm_lowcomms_start();
	if (error) {
		log_print("cannot start dlm lowcomms %d", error);
		goto scand_fail;
	}

	return 0;

 scand_fail:
	dlm_scand_stop();
 astd_fail:
	dlm_astd_stop();
 fail:
	return error;
}
390
/* Stop the helper threads started by threads_start(); called when the
   last lockspace is released (and from dlm_new_lockspace error paths). */
static void threads_stop(void)
{
	dlm_scand_stop();
	dlm_lowcomms_stop();
	dlm_astd_stop();
}
397
/* Create and register one lockspace.  On success, *lockspace holds the
   new struct dlm_ls.  Resources are acquired in order (ls, rsbtbl,
   lkbtbl, dirtbl, recover_buf, lslist entry, recoverd, kobject) and the
   error labels unwind them in reverse.  Once the kobject is registered
   (do_unreg set), its release hook owns freeing of ls itself. */
static int new_lockspace(char *name, int namelen, void **lockspace,
			 uint32_t flags, int lvblen)
{
	struct dlm_ls *ls;
	int i, size, error = -ENOMEM;
	int do_unreg = 0;

	if (namelen > DLM_LOCKSPACE_LEN)
		return -EINVAL;

	/* Lock value blocks must be a non-zero multiple of 8 bytes. */
	if (!lvblen || (lvblen % 8))
		return -EINVAL;

	/* Hold a module reference for the lifetime of the lockspace. */
	if (!try_module_get(THIS_MODULE))
		return -EINVAL;

	/* An existing lockspace of the same name is returned as -EEXIST. */
	ls = dlm_find_lockspace_name(name, namelen);
	if (ls) {
		*lockspace = ls;
		module_put(THIS_MODULE);
		return -EEXIST;
	}

	/* The name is stored in the trailing ls_name array. */
	ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
	if (!ls)
		goto out;
	memcpy(ls->ls_name, name, namelen);
	ls->ls_namelen = namelen;
	ls->ls_lvblen = lvblen;
	ls->ls_count = 0;
	ls->ls_flags = 0;

	/* ls_exflags are forced to match among nodes, and we don't
	   need to require all nodes to have TIMEWARN active */
	if (flags & DLM_LSFL_TIMEWARN)
		set_bit(LSFL_TIMEWARN, &ls->ls_flags);
	ls->ls_exflags = (flags & ~DLM_LSFL_TIMEWARN);

	/* Resource (rsb) hash table, with active and toss chains. */
	size = dlm_config.ci_rsbtbl_size;
	ls->ls_rsbtbl_size = size;

	ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
	if (!ls->ls_rsbtbl)
		goto out_lsfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
		rwlock_init(&ls->ls_rsbtbl[i].lock);
	}

	/* Lock (lkb) id hash table. */
	size = dlm_config.ci_lkbtbl_size;
	ls->ls_lkbtbl_size = size;

	ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
	if (!ls->ls_lkbtbl)
		goto out_rsbfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
		rwlock_init(&ls->ls_lkbtbl[i].lock);
		ls->ls_lkbtbl[i].counter = 1;
	}

	/* Resource directory hash table. */
	size = dlm_config.ci_dirtbl_size;
	ls->ls_dirtbl_size = size;

	ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
	if (!ls->ls_dirtbl)
		goto out_lkbfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
		rwlock_init(&ls->ls_dirtbl[i].lock);
	}

	INIT_LIST_HEAD(&ls->ls_waiters);
	mutex_init(&ls->ls_waiters_mutex);
	INIT_LIST_HEAD(&ls->ls_orphans);
	mutex_init(&ls->ls_orphans_mutex);
	INIT_LIST_HEAD(&ls->ls_timeout);
	mutex_init(&ls->ls_timeout_mutex);

	INIT_LIST_HEAD(&ls->ls_nodes);
	INIT_LIST_HEAD(&ls->ls_nodes_gone);
	ls->ls_num_nodes = 0;
	ls->ls_low_nodeid = 0;
	ls->ls_total_weight = 0;
	ls->ls_node_array = NULL;

	memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
	ls->ls_stub_rsb.res_ls = ls;

	ls->ls_debug_rsb_dentry = NULL;
	ls->ls_debug_waiters_dentry = NULL;

	init_waitqueue_head(&ls->ls_uevent_wait);
	ls->ls_uevent_result = 0;

	ls->ls_recoverd_task = NULL;
	mutex_init(&ls->ls_recoverd_active);
	spin_lock_init(&ls->ls_recover_lock);
	spin_lock_init(&ls->ls_rcom_spin);
	get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
	ls->ls_recover_status = 0;
	ls->ls_recover_seq = 0;
	ls->ls_recover_args = NULL;
	init_rwsem(&ls->ls_in_recovery);
	INIT_LIST_HEAD(&ls->ls_requestqueue);
	mutex_init(&ls->ls_requestqueue_mutex);
	mutex_init(&ls->ls_clear_proc_locks);

	ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL);
	if (!ls->ls_recover_buf)
		goto out_dirfree;

	INIT_LIST_HEAD(&ls->ls_recover_list);
	spin_lock_init(&ls->ls_recover_list_lock);
	ls->ls_recover_list_count = 0;
	/* The local handle is simply the ls pointer itself; see
	   dlm_find_lockspace_local(). */
	ls->ls_local_handle = ls;
	init_waitqueue_head(&ls->ls_wait_general);
	INIT_LIST_HEAD(&ls->ls_root_list);
	init_rwsem(&ls->ls_root_sem);

	/* Hold the lockspace "in recovery" until the first start. */
	down_write(&ls->ls_in_recovery);

	spin_lock(&lslist_lock);
	list_add(&ls->ls_list, &lslist);
	spin_unlock(&lslist_lock);

	/* needs to find ls in lslist */
	error = dlm_recoverd_start(ls);
	if (error) {
		log_error(ls, "can't start dlm_recoverd %d", error);
		goto out_delist;
	}

	error = kobject_setup(ls);
	if (error)
		goto out_stop;

	error = kobject_register(&ls->ls_kobj);
	if (error)
		goto out_stop;

	/* let kobject handle freeing of ls if there's an error */
	do_unreg = 1;

	/* Notify userspace of the join and wait for it to complete. */
	error = do_uevent(ls, 1);
	if (error)
		goto out_stop;

	dlm_create_debug_file(ls);

	log_debug(ls, "join complete");

	*lockspace = ls;
	return 0;

 out_stop:
	dlm_recoverd_stop(ls);
 out_delist:
	spin_lock(&lslist_lock);
	list_del(&ls->ls_list);
	spin_unlock(&lslist_lock);
	kfree(ls->ls_recover_buf);
 out_dirfree:
	kfree(ls->ls_dirtbl);
 out_lkbfree:
	kfree(ls->ls_lkbtbl);
 out_rsbfree:
	kfree(ls->ls_rsbtbl);
 out_lsfree:
	if (do_unreg)
		kobject_unregister(&ls->ls_kobj);
	else
		kfree(ls);
 out:
	module_put(THIS_MODULE);
	return error;
}
576
577 int dlm_new_lockspace(char *name, int namelen, void **lockspace,
578                       uint32_t flags, int lvblen)
579 {
580         int error = 0;
581
582         mutex_lock(&ls_lock);
583         if (!ls_count)
584                 error = threads_start();
585         if (error)
586                 goto out;
587
588         error = new_lockspace(name, namelen, lockspace, flags, lvblen);
589         if (!error)
590                 ls_count++;
591  out:
592         mutex_unlock(&ls_lock);
593         return error;
594 }
595
/* Return 1 if the lockspace still has active remote locks,
 *        2 if the lockspace still has active local locks.
 */
static int lockspace_busy(struct dlm_ls *ls)
{
	int i, lkb_found = 0;
	struct dlm_lkb *lkb;

	/* NOTE: We check the lockidtbl here rather than the resource table.
	   This is because there may be LKBs queued as ASTs that have been
	   unlinked from their RSBs and are pending deletion once the AST has
	   been delivered */

	for (i = 0; i < ls->ls_lkbtbl_size; i++) {
		read_lock(&ls->ls_lkbtbl[i].lock);
		if (!list_empty(&ls->ls_lkbtbl[i].list)) {
			lkb_found = 1;
			/* A local lock (lkb_nodeid == 0) is the stronger
			   busy condition; report it immediately. */
			list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
					    lkb_idtbl_list) {
				if (!lkb->lkb_nodeid) {
					read_unlock(&ls->ls_lkbtbl[i].lock);
					return 2;
				}
			}
		}
		read_unlock(&ls->ls_lkbtbl[i].lock);
	}
	return lkb_found;
}
625
/* Tear down a lockspace: stop recovery, wait for all references to drop,
   then free every table and queued structure.  "force" relaxes the busy
   check and (at 3) skips the userspace leave uevent; see the comment
   above dlm_release_lockspace() for the value semantics. */
static int release_lockspace(struct dlm_ls *ls, int force)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *rsb;
	struct list_head *head;
	int i;
	int busy = lockspace_busy(ls);

	/* busy is 0, 1 (remote locks) or 2 (local locks); only proceed
	   when force is at least as strong as the busy level. */
	if (busy > force)
		return -EBUSY;

	if (force < 3)
		do_uevent(ls, 0);

	dlm_recoverd_stop(ls);

	/* Blocks until ls_count reaches zero, then unlinks from lslist. */
	remove_lockspace(ls);

	dlm_delete_debug_file(ls);

	/* Keep the AST thread away while lkb's are being torn down. */
	dlm_astd_suspend();

	kfree(ls->ls_recover_buf);

	/*
	 * Free direntry structs.
	 */

	dlm_dir_clear(ls);
	kfree(ls->ls_dirtbl);

	/*
	 * Free all lkb's on lkbtbl[] lists.
	 */

	for (i = 0; i < ls->ls_lkbtbl_size; i++) {
		head = &ls->ls_lkbtbl[i].list;
		while (!list_empty(head)) {
			lkb = list_entry(head->next, struct dlm_lkb,
					 lkb_idtbl_list);

			list_del(&lkb->lkb_idtbl_list);

			dlm_del_ast(lkb);

			/* MSTCPY lvbs belong to the lkb; free them here. */
			if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
				free_lvb(lkb->lkb_lvbptr);

			free_lkb(lkb);
		}
	}
	dlm_astd_resume();

	kfree(ls->ls_lkbtbl);

	/*
	 * Free all rsb's on rsbtbl[] lists
	 */

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		head = &ls->ls_rsbtbl[i].list;
		while (!list_empty(head)) {
			rsb = list_entry(head->next, struct dlm_rsb,
					 res_hashchain);

			list_del(&rsb->res_hashchain);
			free_rsb(rsb);
		}

		head = &ls->ls_rsbtbl[i].toss;
		while (!list_empty(head)) {
			rsb = list_entry(head->next, struct dlm_rsb,
					 res_hashchain);
			list_del(&rsb->res_hashchain);
			free_rsb(rsb);
		}
	}

	kfree(ls->ls_rsbtbl);

	/*
	 * Free structures on any other lists
	 */

	dlm_purge_requestqueue(ls);
	kfree(ls->ls_recover_args);
	dlm_clear_free_entries(ls);
	dlm_clear_members(ls);
	dlm_clear_members_gone(ls);
	kfree(ls->ls_node_array);
	kobject_unregister(&ls->ls_kobj);
	/* The ls structure will be freed when the kobject is done with */

	/* Last lockspace gone: stop the shared helper threads. */
	mutex_lock(&ls_lock);
	ls_count--;
	if (!ls_count)
		threads_stop();
	mutex_unlock(&ls_lock);

	module_put(THIS_MODULE);
	return 0;
}
728
729 /*
730  * Called when a system has released all its locks and is not going to use the
731  * lockspace any longer.  We free everything we're managing for this lockspace.
732  * Remaining nodes will go through the recovery process as if we'd died.  The
733  * lockspace must continue to function as usual, participating in recoveries,
734  * until this returns.
735  *
736  * Force has 4 possible values:
 * 0 - don't destroy lockspace if it has any LKBs
738  * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
739  * 2 - destroy lockspace regardless of LKBs
740  * 3 - destroy lockspace as part of a forced shutdown
741  */
742
743 int dlm_release_lockspace(void *lockspace, int force)
744 {
745         struct dlm_ls *ls;
746
747         ls = dlm_find_lockspace_local(lockspace);
748         if (!ls)
749                 return -EINVAL;
750         dlm_put_lockspace(ls);
751         return release_lockspace(ls, force);
752 }
753