1 /******************************************************************************
2 *******************************************************************************
4 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5 ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
7 ** This copyrighted material is made available to anyone wishing to use,
8 ** modify, copy, or redistribute it subject to the terms and conditions
9 ** of the GNU General Public License v.2.
11 *******************************************************************************
12 ******************************************************************************/
14 #include "dlm_internal.h"
15 #include "lockspace.h"
/*
 * Debugfs hooks: real implementations (debug.c) when CONFIG_DLM_DEBUG
 * is set, no-op static inline stubs otherwise.
 * NOTE(review): the #else and #endif lines are elided in this view of
 * the file — the two stubs below belong to the !CONFIG_DLM_DEBUG branch.
 */
26 #ifdef CONFIG_DLM_DEBUG
27 int dlm_create_debug_file(struct dlm_ls *ls);
28 void dlm_delete_debug_file(struct dlm_ls *ls);
30 static inline int dlm_create_debug_file(struct dlm_ls *ls) { return 0; }
31 static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
/* Serializes lockspace create/release (taken in dlm_new_lockspace and
   release_lockspace). */
35 static struct mutex ls_lock;
/* List of all lockspaces; every dlm_find_lockspace_* walks it. */
36 static struct list_head lslist;
/* Protects lslist and the per-lockspace ls_count membership checks. */
37 static spinlock_t lslist_lock;
/* Scanning kthread created by dlm_scand_start(), stopped by
   dlm_scand_stop(). */
38 static struct task_struct * scand_task;
/*
 * sysfs "control" attribute store: parse an integer command from the
 * user buffer.  NOTE(review): the rest of the body is elided in this
 * view — presumably the parsed value selects a lockspace stop/start
 * action; confirm against the full source.
 */
41 static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
44 int n = simple_strtol(buf, NULL, 0);
/*
 * sysfs "event_done" store: userspace reports the result of a uevent
 * (see do_uevent, which waits on ls_uevent_wait for LSFL_UEVENT_WAIT).
 * Stores the result, sets the flag, then wakes the waiter.
 */
59 static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
61 ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
62 set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
63 wake_up(&ls->ls_uevent_wait);
/* sysfs "id" show: print the lockspace global id as unsigned decimal. */
67 static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
69 return sprintf(buf, "%u\n", ls->ls_global_id);
/* sysfs "id" store: userspace assigns the cluster-wide lockspace id. */
72 static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
74 ls->ls_global_id = simple_strtoul(buf, NULL, 0);
/* sysfs "recover_status" show: current recovery status bits, in hex. */
78 static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
80 uint32_t status = dlm_recover_status(ls);
81 return sprintf(buf, "%x\n", status);
/* Members of struct dlm_attr (the struct header line is elided in this
   view): a sysfs attribute plus optional show/store callbacks that take
   the owning lockspace directly. */
85 struct attribute attr;
86 ssize_t (*show)(struct dlm_ls *, char *);
87 ssize_t (*store)(struct dlm_ls *, const char *, size_t);
/* Attribute definitions for the per-lockspace sysfs directory.
   Write-only attrs (control, event_done) have no .show; read-only
   (recover_status) has no .store; "id" is read-write. */
90 static struct dlm_attr dlm_attr_control = {
91 .attr = {.name = "control", .mode = S_IWUSR},
92 .store = dlm_control_store
95 static struct dlm_attr dlm_attr_event = {
96 .attr = {.name = "event_done", .mode = S_IWUSR},
97 .store = dlm_event_store
100 static struct dlm_attr dlm_attr_id = {
101 .attr = {.name = "id", .mode = S_IRUGO | S_IWUSR},
103 .store = dlm_id_store
106 static struct dlm_attr dlm_attr_recover_status = {
107 .attr = {.name = "recover_status", .mode = S_IRUGO},
108 .show = dlm_recover_status_show
/* NULL-terminated attribute table wired into dlm_ktype.default_attrs
   (the dlm_attr_id entry and terminating NULL are elided in this view). */
111 static struct attribute *dlm_attrs[] = {
112 &dlm_attr_control.attr,
113 &dlm_attr_event.attr,
115 &dlm_attr_recover_status.attr,
/*
 * Generic sysfs show: recover the lockspace from the embedded kobject
 * and dispatch to the attribute's show callback; 0 bytes if the
 * attribute is write-only.
 */
119 static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
122 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
123 struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
124 return a->show ? a->show(ls, buf) : 0;
/*
 * Generic sysfs store: dispatch to the attribute's store callback.
 * A read-only attribute silently accepts the write (returns len).
 */
127 static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
128 const char *buf, size_t len)
130 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
131 struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
132 return a->store ? a->store(ls, buf, len) : len;
/* sysfs plumbing: ops vector, kobject type for per-lockspace kobjects,
   and the "dlm" kset registered under kernel_subsys (old pre-2.6.25
   kobject API; dlm_kset is registered in dlm_lockspace_init). */
135 static struct sysfs_ops dlm_attr_ops = {
136 .show = dlm_attr_show,
137 .store = dlm_attr_store,
140 static struct kobj_type dlm_ktype = {
141 .default_attrs = dlm_attrs,
142 .sysfs_ops = &dlm_attr_ops,
145 static struct kset dlm_kset = {
146 .subsys = &kernel_subsys,
147 .kobj = {.name = "dlm",},
/*
 * Prepare (but do not register) the lockspace kobject: copy the
 * lockspace name into a bounded, NUL-terminated buffer, set the
 * kobject name, and attach it to the dlm kset/ktype.
 */
151 static int kobject_setup(struct dlm_ls *ls)
153 char lsname[DLM_LOCKSPACE_LEN];
156 memset(lsname, 0, DLM_LOCKSPACE_LEN);
157 snprintf(lsname, DLM_LOCKSPACE_LEN, "%s", ls->ls_name);
159 error = kobject_set_name(&ls->ls_kobj, "%s", lsname);
163 ls->ls_kobj.kset = &dlm_kset;
164 ls->ls_kobj.ktype = &dlm_ktype;
/*
 * Emit an ONLINE (in != 0) or OFFLINE uevent for the lockspace, then
 * block until userspace acknowledges via the "event_done" sysfs store
 * (dlm_event_store sets LSFL_UEVENT_WAIT and wakes us).  On normal
 * wakeup the result is whatever userspace wrote to event_done;
 * a signal interrupts the wait and error comes from
 * wait_event_interruptible instead.
 */
168 static int do_uevent(struct dlm_ls *ls, int in)
173 kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
175 kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
177 error = wait_event_interruptible(ls->ls_uevent_wait,
178 test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
182 error = ls->ls_uevent_result;
/*
 * Module-init hook: initialize the file-scope lock/list globals and
 * register the "dlm" kset in sysfs.  Returns the kset_register error,
 * which is logged on failure.
 */
188 int dlm_lockspace_init(void)
193 mutex_init(&ls_lock);
194 INIT_LIST_HEAD(&lslist);
195 spin_lock_init(&lslist_lock);
197 error = kset_register(&dlm_kset);
199 printk("dlm_lockspace_init: cannot register kset %d\n", error);
/* Module-exit hook: tear down the sysfs kset registered in
   dlm_lockspace_init(). */
203 void dlm_lockspace_exit(void)
205 kset_unregister(&dlm_kset);
/*
 * Scanning kthread main loop: periodically walks every lockspace,
 * sleeping dlm_config.scan_secs between passes.
 * NOTE(review): the per-lockspace work inside the list walk is elided
 * in this view (likely dlm_scan_rsbs or similar) — confirm; also note
 * the walk appears to run without lslist_lock held here.
 */
208 static int dlm_scand(void *data)
212 while (!kthread_should_stop()) {
213 list_for_each_entry(ls, &lslist, ls_list)
215 schedule_timeout_interruptible(dlm_config.scan_secs * HZ);
/*
 * Create and start the dlm_scand kthread; on success the task pointer
 * is saved in scand_task (assignment elided in this view) so
 * dlm_scand_stop() can stop it later.
 */
220 static int dlm_scand_start(void)
222 struct task_struct *p;
225 p = kthread_run(dlm_scand, NULL, "dlm_scand");
/* Stop the scanning kthread started by dlm_scand_start(). */
233 static void dlm_scand_stop(void)
235 kthread_stop(scand_task);
/*
 * Look up a lockspace by exact name (length + bytes) under
 * lslist_lock.  NOTE(review): the found/not-found return paths and the
 * expected ls_count reference bump are elided in this view — confirm
 * the caller receives a referenced lockspace or NULL.
 */
238 static struct dlm_ls *dlm_find_lockspace_name(char *name, int namelen)
242 spin_lock(&lslist_lock);
244 list_for_each_entry(ls, &lslist, ls_list) {
245 if (ls->ls_namelen == namelen &&
246 memcmp(ls->ls_name, name, namelen) == 0)
251 spin_unlock(&lslist_lock);
/*
 * Look up a lockspace by its cluster-wide id (ls_global_id, set via
 * the sysfs "id" attribute) under lslist_lock.  The reference-count
 * bump and return statements are elided in this view.
 */
255 struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
259 spin_lock(&lslist_lock);
261 list_for_each_entry(ls, &lslist, ls_list) {
262 if (ls->ls_global_id == id) {
269 spin_unlock(&lslist_lock);
/*
 * Look up a lockspace by the opaque handle returned to local users.
 * new_lockspace() sets ls_local_handle = ls, so the handle is the
 * lockspace pointer itself; this walk validates it is still on lslist.
 */
273 struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
277 spin_lock(&lslist_lock);
278 list_for_each_entry(ls, &lslist, ls_list) {
279 if (ls->ls_local_handle == lockspace) {
286 spin_unlock(&lslist_lock);
/*
 * Look up a lockspace by its misc-device minor number (used by the
 * userland /dev/dlm* interface), under lslist_lock.
 */
290 struct dlm_ls *dlm_find_lockspace_device(int minor)
294 spin_lock(&lslist_lock);
295 list_for_each_entry(ls, &lslist, ls_list) {
296 if (ls->ls_device.minor == minor) {
303 spin_unlock(&lslist_lock);
/* Drop a reference taken by one of the dlm_find_lockspace_* lookups
   (the ls_count decrement between lock/unlock is elided in this view). */
307 void dlm_put_lockspace(struct dlm_ls *ls)
309 spin_lock(&lslist_lock);
311 spin_unlock(&lslist_lock);
/*
 * Unlink the lockspace from lslist once its reference count drops to
 * zero.  NOTE(review): the retry path when ls_count != 0 (loop/sleep
 * between the two unlocks) is elided in this view — confirm it waits
 * for outstanding references before returning.
 */
314 static void remove_lockspace(struct dlm_ls *ls)
317 spin_lock(&lslist_lock);
318 if (ls->ls_count == 0) {
319 list_del(&ls->ls_list);
320 spin_unlock(&lslist_lock);
323 spin_unlock(&lslist_lock);
/*
 * Start the daemons shared by all lockspaces: the AST delivery thread,
 * the scanning thread, and the lowcomms messaging layer.  Each failure
 * is logged; the unwind paths (stopping already-started threads) are
 * elided in this view.
 */
328 static int threads_start(void)
332 /* Thread that processes lock requests for all lockspaces */
333 error = dlm_astd_start();
335 log_print("cannot start dlm_astd thread %d", error);
339 error = dlm_scand_start();
341 log_print("cannot start dlm_scand thread %d", error);
345 /* Thread for sending/receiving messages for all lockspaces */
346 error = dlm_lowcomms_start();
348 log_print("cannot start dlm lowcomms %d", error);
362 static void threads_stop(void)
/*
 * Create a lockspace: validate arguments, reuse an existing lockspace
 * of the same name if present, otherwise allocate and initialize all
 * per-lockspace state (rsb/lkb/dir hash tables, lists, locks, recovery
 * state), start dlm_recoverd, publish the lockspace on lslist and in
 * sysfs, and signal userspace with an ONLINE uevent.
 *
 * On success *lockspace receives the opaque handle (set in an elided
 * line).  The error labels between the visible cleanup statements are
 * elided in this view; the visible unwind runs in reverse order of
 * construction (goto-cleanup pattern).  lvblen must be a nonzero
 * multiple of 8.
 */
369 static int new_lockspace(char *name, int namelen, void **lockspace,
370 uint32_t flags, int lvblen)
373 int i, size, error = -ENOMEM;
375 if (namelen > DLM_LOCKSPACE_LEN)
378 if (!lvblen || (lvblen % 8))
381 if (!try_module_get(THIS_MODULE))
/* An existing lockspace with this name is returned to the caller;
   the extra module reference taken above is dropped. */
384 ls = dlm_find_lockspace_name(name, namelen);
387 module_put(THIS_MODULE);
/* Name is stored in the flexible tail of struct dlm_ls. */
391 ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
394 memcpy(ls->ls_name, name, namelen);
395 ls->ls_namelen = namelen;
396 ls->ls_exflags = flags;
397 ls->ls_lvblen = lvblen;
/* Resource (rsb) hash table: active list, tossed list, rwlock per
   bucket. */
401 size = dlm_config.rsbtbl_size;
402 ls->ls_rsbtbl_size = size;
404 ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
407 for (i = 0; i < size; i++) {
408 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
409 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
410 rwlock_init(&ls->ls_rsbtbl[i].lock);
/* Lock (lkb) id table; counter seeds lock id generation per bucket. */
413 size = dlm_config.lkbtbl_size;
414 ls->ls_lkbtbl_size = size;
416 ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
419 for (i = 0; i < size; i++) {
420 INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
421 rwlock_init(&ls->ls_lkbtbl[i].lock);
422 ls->ls_lkbtbl[i].counter = 1;
/* Resource directory hash table. */
425 size = dlm_config.dirtbl_size;
426 ls->ls_dirtbl_size = size;
428 ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
431 for (i = 0; i < size; i++) {
432 INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
433 rwlock_init(&ls->ls_dirtbl[i].lock);
434 INIT_LIST_HEAD(&ls->ls_waiters);
437 mutex_init(&ls->ls_waiters_mutex);
/* Cluster membership state, filled in during recovery. */
439 INIT_LIST_HEAD(&ls->ls_nodes);
440 INIT_LIST_HEAD(&ls->ls_nodes_gone);
441 ls->ls_num_nodes = 0;
442 ls->ls_low_nodeid = 0;
443 ls->ls_total_weight = 0;
444 ls->ls_node_array = NULL;
/* Stub rsb used for remote-copy lkbs with no real resource. */
446 memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
447 ls->ls_stub_rsb.res_ls = ls;
449 ls->ls_debug_rsb_dentry = NULL;
450 ls->ls_debug_waiters_dentry = NULL;
452 init_waitqueue_head(&ls->ls_uevent_wait);
453 ls->ls_uevent_result = 0;
/* Recovery machinery. */
455 ls->ls_recoverd_task = NULL;
456 mutex_init(&ls->ls_recoverd_active);
457 spin_lock_init(&ls->ls_recover_lock);
458 ls->ls_recover_status = 0;
459 ls->ls_recover_seq = 0;
460 ls->ls_recover_args = NULL;
461 init_rwsem(&ls->ls_in_recovery);
462 INIT_LIST_HEAD(&ls->ls_requestqueue);
463 mutex_init(&ls->ls_requestqueue_mutex);
464 mutex_init(&ls->ls_clear_proc_locks);
466 ls->ls_recover_buf = kmalloc(dlm_config.buffer_size, GFP_KERNEL);
467 if (!ls->ls_recover_buf)
470 INIT_LIST_HEAD(&ls->ls_recover_list);
471 spin_lock_init(&ls->ls_recover_list_lock);
472 ls->ls_recover_list_count = 0;
/* The handle handed back to local users is the ls pointer itself. */
473 ls->ls_local_handle = ls;
474 init_waitqueue_head(&ls->ls_wait_general);
475 INIT_LIST_HEAD(&ls->ls_root_list);
476 init_rwsem(&ls->ls_root_sem);
/* Hold the lockspace "in recovery" until the first recovery completes. */
478 down_write(&ls->ls_in_recovery);
480 error = dlm_recoverd_start(ls);
482 log_error(ls, "can't start dlm_recoverd %d", error);
/* Publish on the global list, then in debugfs/sysfs, then tell
   userspace via an ONLINE uevent and wait for its acknowledgement. */
486 spin_lock(&lslist_lock);
487 list_add(&ls->ls_list, &lslist);
488 spin_unlock(&lslist_lock);
490 dlm_create_debug_file(ls);
492 error = kobject_setup(ls);
496 error = kobject_register(&ls->ls_kobj);
500 error = do_uevent(ls, 1);
/* Error unwind (labels elided): undo in reverse order of setup. */
508 kobject_unregister(&ls->ls_kobj);
510 dlm_delete_debug_file(ls);
511 spin_lock(&lslist_lock);
512 list_del(&ls->ls_list);
513 spin_unlock(&lslist_lock);
514 dlm_recoverd_stop(ls);
516 kfree(ls->ls_recover_buf);
518 kfree(ls->ls_dirtbl);
520 kfree(ls->ls_lkbtbl);
522 kfree(ls->ls_rsbtbl);
526 module_put(THIS_MODULE);
/*
 * Public entry point: under ls_lock, start the shared DLM threads
 * (first lockspace only — the ls_count check is elided in this view)
 * and then create the lockspace.  The threads_stop-on-failure path is
 * also elided.
 */
530 int dlm_new_lockspace(char *name, int namelen, void **lockspace,
531 uint32_t flags, int lvblen)
535 mutex_lock(&ls_lock);
537 error = threads_start();
541 error = new_lockspace(name, namelen, lockspace, flags, lvblen);
545 mutex_unlock(&ls_lock);
549 /* Return 1 if the lockspace still has active remote locks,
550 * 2 if the lockspace still has active local locks.
/*
 * Decide whether the lockspace still holds locks, by scanning the lkb
 * id table buckets under each bucket's read lock.  Per the comment
 * above: returns 1 for remote locks only, 2 if any local lock
 * (lkb_nodeid == 0) exists — the early-unlock path below is the local
 * case; the lkb_found bookkeeping and final return are elided in this
 * view.
 */
552 static int lockspace_busy(struct dlm_ls *ls)
554 int i, lkb_found = 0;
557 /* NOTE: We check the lockidtbl here rather than the resource table.
558 This is because there may be LKBs queued as ASTs that have been
559 unlinked from their RSBs and are pending deletion once the AST has
562 for (i = 0; i < ls->ls_lkbtbl_size; i++) {
563 read_lock(&ls->ls_lkbtbl[i].lock);
564 if (!list_empty(&ls->ls_lkbtbl[i].list)) {
566 list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
568 if (!lkb->lkb_nodeid) {
569 read_unlock(&ls->ls_lkbtbl[i].lock);
574 read_unlock(&ls->ls_lkbtbl[i].lock);
/*
 * Tear down a lockspace: stop recovery, unlink it from lslist, then
 * free every data structure built by new_lockspace() — recovery
 * buffer, directory table, all lkbs (and their master-copy lvbs), all
 * rsbs on both active and toss lists, the tables themselves, and the
 * remaining recovery/membership state — and finally unregister the
 * sysfs kobject and drop the module reference.
 *
 * The busy-vs-force gate (comparing lockspace_busy() against the force
 * argument), the OFFLINE uevent, and the last-lockspace threads_stop
 * logic under ls_lock are elided in this view.
 */
579 static int release_lockspace(struct dlm_ls *ls, int force)
583 struct list_head *head;
585 int busy = lockspace_busy(ls);
593 dlm_recoverd_stop(ls);
595 remove_lockspace(ls);
597 dlm_delete_debug_file(ls);
601 kfree(ls->ls_recover_buf);
604 * Free direntry structs.
608 kfree(ls->ls_dirtbl);
611 * Free all lkb's on lkbtbl[] lists.
614 for (i = 0; i < ls->ls_lkbtbl_size; i++) {
615 head = &ls->ls_lkbtbl[i].list;
616 while (!list_empty(head)) {
617 lkb = list_entry(head->next, struct dlm_lkb,
620 list_del(&lkb->lkb_idtbl_list);
/* Only master-copy lkbs own their lvb buffer. */
624 if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
625 free_lvb(lkb->lkb_lvbptr);
632 kfree(ls->ls_lkbtbl);
635 * Free all rsb's on rsbtbl[] lists
638 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
639 head = &ls->ls_rsbtbl[i].list;
640 while (!list_empty(head)) {
641 rsb = list_entry(head->next, struct dlm_rsb,
644 list_del(&rsb->res_hashchain);
/* Tossed (unused, cached) rsbs are freed the same way. */
648 head = &ls->ls_rsbtbl[i].toss;
649 while (!list_empty(head)) {
650 rsb = list_entry(head->next, struct dlm_rsb,
652 list_del(&rsb->res_hashchain);
657 kfree(ls->ls_rsbtbl);
660 * Free structures on any other lists
663 kfree(ls->ls_recover_args);
664 dlm_clear_free_entries(ls);
665 dlm_clear_members(ls);
666 dlm_clear_members_gone(ls);
667 kfree(ls->ls_node_array);
668 kobject_unregister(&ls->ls_kobj);
671 mutex_lock(&ls_lock);
675 mutex_unlock(&ls_lock);
677 module_put(THIS_MODULE);
682 * Called when a system has released all its locks and is not going to use the
683 * lockspace any longer. We free everything we're managing for this lockspace.
684 * Remaining nodes will go through the recovery process as if we'd died. The
685 * lockspace must continue to function as usual, participating in recoveries,
686 * until this returns.
688 * Force has 4 possible values:
689 * 0 - don't destroy lockspace if it has any LKBs
690 * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
691 * 2 - destroy lockspace regardless of LKBs
692 * 3 - destroy lockspace as part of a forced shutdown
/*
 * Public entry point: translate the opaque handle to a lockspace,
 * drop the lookup reference (so release_lockspace can see ls_count
 * reach zero), and perform the release.  The NULL-handle error return
 * between the lookup and the put is elided in this view.
 */
695 int dlm_release_lockspace(void *lockspace, int force)
699 ls = dlm_find_lockspace_local(lockspace);
702 dlm_put_lockspace(ls);
703 return release_lockspace(ls, force);