/* pandora-kernel.git — fs/ocfs2/cluster/nodemanager.c */
1 /* -*- mode: c; c-basic-offset: 8; -*-
2  * vim: noexpandtab sw=8 ts=8 sts=0:
3  *
4  * Copyright (C) 2004, 2005 Oracle.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public
17  * License along with this program; if not, write to the
18  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19  * Boston, MA 021110-1307, USA.
20  */
21
22 #include <linux/slab.h>
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/configfs.h>
26
27 #include "tcp.h"
28 #include "nodemanager.h"
29 #include "heartbeat.h"
30 #include "masklog.h"
31 #include "sys.h"
32 #include "ver.h"
33
/* for now we operate under the assertion that there can be only one
 * cluster active at a time.  Changing this will require trickling
 * cluster references throughout where nodes are looked up */
struct o2nm_cluster *o2nm_single_cluster = NULL;

/* human-readable names for the fence methods, indexed by the
 * O2NM_FENCE_* enum; shown and parsed by the "fence_method" attribute */
char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
		"reset",	/* O2NM_FENCE_RESET */
		"panic",	/* O2NM_FENCE_PANIC */
};

/* defined near the bottom of the file, where o2nm_cluster_group is in scope */
static inline void o2nm_lock_subsystem(void);
static inline void o2nm_unlock_subsystem(void);
46
47 struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
48 {
49         struct o2nm_node *node = NULL;
50
51         if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL)
52                 goto out;
53
54         read_lock(&o2nm_single_cluster->cl_nodes_lock);
55         node = o2nm_single_cluster->cl_nodes[node_num];
56         if (node)
57                 config_item_get(&node->nd_item);
58         read_unlock(&o2nm_single_cluster->cl_nodes_lock);
59 out:
60         return node;
61 }
62 EXPORT_SYMBOL_GPL(o2nm_get_node_by_num);
63
64 int o2nm_configured_node_map(unsigned long *map, unsigned bytes)
65 {
66         struct o2nm_cluster *cluster = o2nm_single_cluster;
67
68         BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));
69
70         if (cluster == NULL)
71                 return -EINVAL;
72
73         read_lock(&cluster->cl_nodes_lock);
74         memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap));
75         read_unlock(&cluster->cl_nodes_lock);
76
77         return 0;
78 }
79 EXPORT_SYMBOL_GPL(o2nm_configured_node_map);
80
81 static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster,
82                                                   __be32 ip_needle,
83                                                   struct rb_node ***ret_p,
84                                                   struct rb_node **ret_parent)
85 {
86         struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
87         struct rb_node *parent = NULL;
88         struct o2nm_node *node, *ret = NULL;
89
90         while (*p) {
91                 int cmp;
92
93                 parent = *p;
94                 node = rb_entry(parent, struct o2nm_node, nd_ip_node);
95
96                 cmp = memcmp(&ip_needle, &node->nd_ipv4_address,
97                                 sizeof(ip_needle));
98                 if (cmp < 0)
99                         p = &(*p)->rb_left;
100                 else if (cmp > 0)
101                         p = &(*p)->rb_right;
102                 else {
103                         ret = node;
104                         break;
105                 }
106         }
107
108         if (ret_p != NULL)
109                 *ret_p = p;
110         if (ret_parent != NULL)
111                 *ret_parent = parent;
112
113         return ret;
114 }
115
116 struct o2nm_node *o2nm_get_node_by_ip(__be32 addr)
117 {
118         struct o2nm_node *node = NULL;
119         struct o2nm_cluster *cluster = o2nm_single_cluster;
120
121         if (cluster == NULL)
122                 goto out;
123
124         read_lock(&cluster->cl_nodes_lock);
125         node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
126         if (node)
127                 config_item_get(&node->nd_item);
128         read_unlock(&cluster->cl_nodes_lock);
129
130 out:
131         return node;
132 }
133 EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip);
134
/* Drop a node reference taken by o2nm_get_node_by_num/ip or o2nm_node_get. */
void o2nm_node_put(struct o2nm_node *node)
{
	config_item_put(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_put);
140
/* Take an extra configfs reference on a node; paired with o2nm_node_put(). */
void o2nm_node_get(struct o2nm_node *node)
{
	config_item_get(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_get);
146
147 u8 o2nm_this_node(void)
148 {
149         u8 node_num = O2NM_MAX_NODES;
150
151         if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local)
152                 node_num = o2nm_single_cluster->cl_local_node;
153
154         return node_num;
155 }
156 EXPORT_SYMBOL_GPL(o2nm_this_node);
157
158 /* node configfs bits */
159
160 static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item)
161 {
162         return item ?
163                 container_of(to_config_group(item), struct o2nm_cluster,
164                              cl_group)
165                 : NULL;
166 }
167
168 static struct o2nm_node *to_o2nm_node(struct config_item *item)
169 {
170         return item ? container_of(item, struct o2nm_node, nd_item) : NULL;
171 }
172
/* configfs release: final reference on the item dropped, free the node. */
static void o2nm_node_release(struct config_item *item)
{
	kfree(to_o2nm_node(item));
}
178
/* configfs "num" show handler: print the node's number. */
static ssize_t o2nm_node_num_read(struct o2nm_node *node, char *page)
{
	return sprintf(page, "%d\n", node->nd_num);
}
183
184 static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
185 {
186         /* through the first node_set .parent
187          * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
188         if (node->nd_item.ci_parent)
189                 return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
190         else
191                 return NULL;
192 }
193
/* bit indices into o2nm_node.nd_set_attributes; must match the order
 * of the entries in o2nm_node_attrs[] (o2nm_attr_index relies on it) */
enum {
	O2NM_NODE_ATTR_NUM = 0,
	O2NM_NODE_ATTR_PORT,
	O2NM_NODE_ATTR_ADDRESS,
	O2NM_NODE_ATTR_LOCAL,
};
200
201 static ssize_t o2nm_node_num_write(struct o2nm_node *node, const char *page,
202                                    size_t count)
203 {
204         struct o2nm_cluster *cluster;
205         unsigned long tmp;
206         char *p = (char *)page;
207
208         tmp = simple_strtoul(p, &p, 0);
209         if (!p || (*p && (*p != '\n')))
210                 return -EINVAL;
211
212         if (tmp >= O2NM_MAX_NODES)
213                 return -ERANGE;
214
215         /* once we're in the cl_nodes tree networking can look us up by
216          * node number and try to use our address and port attributes
217          * to connect to this node.. make sure that they've been set
218          * before writing the node attribute? */
219         if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
220             !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
221                 return -EINVAL; /* XXX */
222
223         o2nm_lock_subsystem();
224         cluster = to_o2nm_cluster_from_node(node);
225         if (!cluster) {
226                 o2nm_unlock_subsystem();
227                 return -EINVAL;
228         }
229
230         write_lock(&cluster->cl_nodes_lock);
231         if (cluster->cl_nodes[tmp])
232                 p = NULL;
233         else  {
234                 cluster->cl_nodes[tmp] = node;
235                 node->nd_num = tmp;
236                 set_bit(tmp, cluster->cl_nodes_bitmap);
237         }
238         write_unlock(&cluster->cl_nodes_lock);
239         o2nm_unlock_subsystem();
240
241         if (p == NULL)
242                 return -EEXIST;
243
244         return count;
245 }
/* configfs "ipv4_port" show handler; nd_ipv4_port is stored big-endian. */
static ssize_t o2nm_node_ipv4_port_read(struct o2nm_node *node, char *page)
{
	return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port));
}
250
251 static ssize_t o2nm_node_ipv4_port_write(struct o2nm_node *node,
252                                          const char *page, size_t count)
253 {
254         unsigned long tmp;
255         char *p = (char *)page;
256
257         tmp = simple_strtoul(p, &p, 0);
258         if (!p || (*p && (*p != '\n')))
259                 return -EINVAL;
260
261         if (tmp == 0)
262                 return -EINVAL;
263         if (tmp >= (u16)-1)
264                 return -ERANGE;
265
266         node->nd_ipv4_port = htons(tmp);
267
268         return count;
269 }
270
/* configfs "ipv4_address" show handler: %pI4 prints dotted-quad form. */
static ssize_t o2nm_node_ipv4_address_read(struct o2nm_node *node, char *page)
{
	return sprintf(page, "%pI4\n", &node->nd_ipv4_address);
}
275
276 static ssize_t o2nm_node_ipv4_address_write(struct o2nm_node *node,
277                                             const char *page,
278                                             size_t count)
279 {
280         struct o2nm_cluster *cluster;
281         int ret, i;
282         struct rb_node **p, *parent;
283         unsigned int octets[4];
284         __be32 ipv4_addr = 0;
285
286         ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
287                      &octets[1], &octets[0]);
288         if (ret != 4)
289                 return -EINVAL;
290
291         for (i = 0; i < ARRAY_SIZE(octets); i++) {
292                 if (octets[i] > 255)
293                         return -ERANGE;
294                 be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
295         }
296
297         o2nm_lock_subsystem();
298         cluster = to_o2nm_cluster_from_node(node);
299         if (!cluster) {
300                 o2nm_unlock_subsystem();
301                 return -EINVAL;
302         }
303
304         ret = 0;
305         write_lock(&cluster->cl_nodes_lock);
306         if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
307                 ret = -EEXIST;
308         else {
309                 rb_link_node(&node->nd_ip_node, parent, p);
310                 rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
311         }
312         write_unlock(&cluster->cl_nodes_lock);
313         o2nm_unlock_subsystem();
314
315         if (ret)
316                 return ret;
317
318         memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));
319
320         return count;
321 }
322
/* configfs "local" show handler: 1 if this node is the local machine. */
static ssize_t o2nm_node_local_read(struct o2nm_node *node, char *page)
{
	return sprintf(page, "%d\n", node->nd_local);
}
327
/* configfs "local" store handler.  Marking a node local means "this
 * machine is that node"; setting it starts the o2net listener, and
 * clearing it on the current local node stops it.  Only one node per
 * cluster may be local at a time.
 *
 * Returns count on success, -EINVAL for bad input or when num/address/
 * port haven't been set yet, -EBUSY when a different node is already
 * local, or the error from o2net_start_listening(). */
static ssize_t o2nm_node_local_write(struct o2nm_node *node, const char *page,
				     size_t count)
{
	struct o2nm_cluster *cluster;
	unsigned long tmp;
	char *p = (char *)page;
	ssize_t ret;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	tmp = !!tmp; /* boolean of whether this node wants to be local */

	/* setting local turns on networking rx for now so we require having
	 * set everything else first */
	if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
		return -EINVAL; /* XXX */

	o2nm_lock_subsystem();
	cluster = to_o2nm_cluster_from_node(node);
	if (!cluster) {
		ret = -EINVAL;
		goto out;
	}

	/* the only failure case is trying to set a new local node
	 * when a different one is already set */
	if (tmp && tmp == cluster->cl_has_local &&
	    cluster->cl_local_node != node->nd_num) {
		ret = -EBUSY;
		goto out;
	}

	/* bring up the rx thread if we're setting the new local node. */
	if (tmp && !cluster->cl_has_local) {
		ret = o2net_start_listening(node);
		if (ret)
			goto out;
	}

	/* clearing local on the current local node shuts networking down.
	 * NOTE(review): cl_has_local is not cleared here; it is cleared in
	 * o2nm_node_group_drop_item() — confirm that is intentional. */
	if (!tmp && cluster->cl_has_local &&
	    cluster->cl_local_node == node->nd_num) {
		o2net_stop_listening(node);
		cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
	}

	node->nd_local = tmp;
	if (node->nd_local) {
		cluster->cl_has_local = tmp;
		cluster->cl_local_node = node->nd_num;
	}

	ret = count;

out:
	o2nm_unlock_subsystem();
	return ret;
}
389
/* a configfs attribute paired with typed show/store handlers for a node */
struct o2nm_node_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(struct o2nm_node *, char *);
	ssize_t (*store)(struct o2nm_node *, const char *, size_t);
};
395
static struct o2nm_node_attribute o2nm_node_attr_num = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "num",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_node_num_read,
	.store	= o2nm_node_num_write,
};

static struct o2nm_node_attribute o2nm_node_attr_ipv4_port = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "ipv4_port",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_node_ipv4_port_read,
	.store	= o2nm_node_ipv4_port_write,
};

static struct o2nm_node_attribute o2nm_node_attr_ipv4_address = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "ipv4_address",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_node_ipv4_address_read,
	.store	= o2nm_node_ipv4_address_write,
};

static struct o2nm_node_attribute o2nm_node_attr_local = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "local",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_node_local_read,
	.store	= o2nm_node_local_write,
};

/* entry order must match the O2NM_NODE_ATTR_* enum — o2nm_attr_index()
 * maps an attribute pointer back to its enum value by position */
static struct configfs_attribute *o2nm_node_attrs[] = {
	[O2NM_NODE_ATTR_NUM] = &o2nm_node_attr_num.attr,
	[O2NM_NODE_ATTR_PORT] = &o2nm_node_attr_ipv4_port.attr,
	[O2NM_NODE_ATTR_ADDRESS] = &o2nm_node_attr_ipv4_address.attr,
	[O2NM_NODE_ATTR_LOCAL] = &o2nm_node_attr_local.attr,
	NULL,
};
435
436 static int o2nm_attr_index(struct configfs_attribute *attr)
437 {
438         int i;
439         for (i = 0; i < ARRAY_SIZE(o2nm_node_attrs); i++) {
440                 if (attr == o2nm_node_attrs[i])
441                         return i;
442         }
443         BUG();
444         return 0;
445 }
446
447 static ssize_t o2nm_node_show(struct config_item *item,
448                               struct configfs_attribute *attr,
449                               char *page)
450 {
451         struct o2nm_node *node = to_o2nm_node(item);
452         struct o2nm_node_attribute *o2nm_node_attr =
453                 container_of(attr, struct o2nm_node_attribute, attr);
454         ssize_t ret = 0;
455
456         if (o2nm_node_attr->show)
457                 ret = o2nm_node_attr->show(node, page);
458         return ret;
459 }
460
461 static ssize_t o2nm_node_store(struct config_item *item,
462                                struct configfs_attribute *attr,
463                                const char *page, size_t count)
464 {
465         struct o2nm_node *node = to_o2nm_node(item);
466         struct o2nm_node_attribute *o2nm_node_attr =
467                 container_of(attr, struct o2nm_node_attribute, attr);
468         ssize_t ret;
469         int attr_index = o2nm_attr_index(attr);
470
471         if (o2nm_node_attr->store == NULL) {
472                 ret = -EINVAL;
473                 goto out;
474         }
475
476         if (test_bit(attr_index, &node->nd_set_attributes))
477                 return -EBUSY;
478
479         ret = o2nm_node_attr->store(node, page, count);
480         if (ret < count)
481                 goto out;
482
483         set_bit(attr_index, &node->nd_set_attributes);
484 out:
485         return ret;
486 }
487
/* item ops and type for a node directory under mycluster/node/ */
static struct configfs_item_operations o2nm_node_item_ops = {
	.release		= o2nm_node_release,
	.show_attribute		= o2nm_node_show,
	.store_attribute	= o2nm_node_store,
};

static struct config_item_type o2nm_node_type = {
	.ct_item_ops	= &o2nm_node_item_ops,
	.ct_attrs	= o2nm_node_attrs,
	.ct_owner	= THIS_MODULE,
};
499
500 /* node set */
501
/* the "node" config_group that holds the per-node items */
struct o2nm_node_group {
	struct config_group ns_group;
	/* some stuff? */
};

#if 0
static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group)
{
	return group ?
		container_of(group, struct o2nm_node_group, ns_group)
		: NULL;
}
#endif
515
/* a configfs attribute paired with typed show/store handlers for the cluster */
struct o2nm_cluster_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(struct o2nm_cluster *, char *);
	ssize_t (*store)(struct o2nm_cluster *, const char *, size_t);
};
521
522 static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count,
523                                        unsigned int *val)
524 {
525         unsigned long tmp;
526         char *p = (char *)page;
527
528         tmp = simple_strtoul(p, &p, 0);
529         if (!p || (*p && (*p != '\n')))
530                 return -EINVAL;
531
532         if (tmp == 0)
533                 return -EINVAL;
534         if (tmp >= (u32)-1)
535                 return -ERANGE;
536
537         *val = tmp;
538
539         return count;
540 }
541
/* configfs "idle_timeout_ms" show handler. */
static ssize_t o2nm_cluster_attr_idle_timeout_ms_read(
	struct o2nm_cluster *cluster, char *page)
{
	return sprintf(page, "%u\n", cluster->cl_idle_timeout_ms);
}
547
548 static ssize_t o2nm_cluster_attr_idle_timeout_ms_write(
549         struct o2nm_cluster *cluster, const char *page, size_t count)
550 {
551         ssize_t ret;
552         unsigned int val;
553
554         ret =  o2nm_cluster_attr_write(page, count, &val);
555
556         if (ret > 0) {
557                 if (cluster->cl_idle_timeout_ms != val
558                         && o2net_num_connected_peers()) {
559                         mlog(ML_NOTICE,
560                              "o2net: cannot change idle timeout after "
561                              "the first peer has agreed to it."
562                              "  %d connected peers\n",
563                              o2net_num_connected_peers());
564                         ret = -EINVAL;
565                 } else if (val <= cluster->cl_keepalive_delay_ms) {
566                         mlog(ML_NOTICE, "o2net: idle timeout must be larger "
567                              "than keepalive delay\n");
568                         ret = -EINVAL;
569                 } else {
570                         cluster->cl_idle_timeout_ms = val;
571                 }
572         }
573
574         return ret;
575 }
576
/* configfs "keepalive_delay_ms" show handler. */
static ssize_t o2nm_cluster_attr_keepalive_delay_ms_read(
	struct o2nm_cluster *cluster, char *page)
{
	return sprintf(page, "%u\n", cluster->cl_keepalive_delay_ms);
}
582
583 static ssize_t o2nm_cluster_attr_keepalive_delay_ms_write(
584         struct o2nm_cluster *cluster, const char *page, size_t count)
585 {
586         ssize_t ret;
587         unsigned int val;
588
589         ret =  o2nm_cluster_attr_write(page, count, &val);
590
591         if (ret > 0) {
592                 if (cluster->cl_keepalive_delay_ms != val
593                     && o2net_num_connected_peers()) {
594                         mlog(ML_NOTICE,
595                              "o2net: cannot change keepalive delay after"
596                              " the first peer has agreed to it."
597                              "  %d connected peers\n",
598                              o2net_num_connected_peers());
599                         ret = -EINVAL;
600                 } else if (val >= cluster->cl_idle_timeout_ms) {
601                         mlog(ML_NOTICE, "o2net: keepalive delay must be "
602                              "smaller than idle timeout\n");
603                         ret = -EINVAL;
604                 } else {
605                         cluster->cl_keepalive_delay_ms = val;
606                 }
607         }
608
609         return ret;
610 }
611
/* configfs "reconnect_delay_ms" show handler. */
static ssize_t o2nm_cluster_attr_reconnect_delay_ms_read(
	struct o2nm_cluster *cluster, char *page)
{
	return sprintf(page, "%u\n", cluster->cl_reconnect_delay_ms);
}
617
/* configfs "reconnect_delay_ms" store handler: no cross-field
 * constraints, so the shared parser writes the field directly. */
static ssize_t o2nm_cluster_attr_reconnect_delay_ms_write(
	struct o2nm_cluster *cluster, const char *page, size_t count)
{
	return o2nm_cluster_attr_write(page, count,
				       &cluster->cl_reconnect_delay_ms);
}
624
625 static ssize_t o2nm_cluster_attr_fence_method_read(
626         struct o2nm_cluster *cluster, char *page)
627 {
628         ssize_t ret = 0;
629
630         if (cluster)
631                 ret = sprintf(page, "%s\n",
632                               o2nm_fence_method_desc[cluster->cl_fence_method]);
633         return ret;
634 }
635
636 static ssize_t o2nm_cluster_attr_fence_method_write(
637         struct o2nm_cluster *cluster, const char *page, size_t count)
638 {
639         unsigned int i;
640
641         if (page[count - 1] != '\n')
642                 goto bail;
643
644         for (i = 0; i < O2NM_FENCE_METHODS; ++i) {
645                 if (count != strlen(o2nm_fence_method_desc[i]) + 1)
646                         continue;
647                 if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1))
648                         continue;
649                 if (cluster->cl_fence_method != i) {
650                         printk(KERN_INFO "ocfs2: Changing fence method to %s\n",
651                                o2nm_fence_method_desc[i]);
652                         cluster->cl_fence_method = i;
653                 }
654                 return count;
655         }
656
657 bail:
658         return -EINVAL;
659 }
660
static struct o2nm_cluster_attribute o2nm_cluster_attr_idle_timeout_ms = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "idle_timeout_ms",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_cluster_attr_idle_timeout_ms_read,
	.store	= o2nm_cluster_attr_idle_timeout_ms_write,
};

static struct o2nm_cluster_attribute o2nm_cluster_attr_keepalive_delay_ms = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "keepalive_delay_ms",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_cluster_attr_keepalive_delay_ms_read,
	.store	= o2nm_cluster_attr_keepalive_delay_ms_write,
};

static struct o2nm_cluster_attribute o2nm_cluster_attr_reconnect_delay_ms = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "reconnect_delay_ms",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_cluster_attr_reconnect_delay_ms_read,
	.store	= o2nm_cluster_attr_reconnect_delay_ms_write,
};

static struct o2nm_cluster_attribute o2nm_cluster_attr_fence_method = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "fence_method",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_cluster_attr_fence_method_read,
	.store	= o2nm_cluster_attr_fence_method_write,
};

/* attributes exposed in the cluster directory itself */
static struct configfs_attribute *o2nm_cluster_attrs[] = {
	&o2nm_cluster_attr_idle_timeout_ms.attr,
	&o2nm_cluster_attr_keepalive_delay_ms.attr,
	&o2nm_cluster_attr_reconnect_delay_ms.attr,
	&o2nm_cluster_attr_fence_method.attr,
	NULL,
};
700 static ssize_t o2nm_cluster_show(struct config_item *item,
701                                  struct configfs_attribute *attr,
702                                  char *page)
703 {
704         struct o2nm_cluster *cluster = to_o2nm_cluster(item);
705         struct o2nm_cluster_attribute *o2nm_cluster_attr =
706                 container_of(attr, struct o2nm_cluster_attribute, attr);
707         ssize_t ret = 0;
708
709         if (o2nm_cluster_attr->show)
710                 ret = o2nm_cluster_attr->show(cluster, page);
711         return ret;
712 }
713
714 static ssize_t o2nm_cluster_store(struct config_item *item,
715                                   struct configfs_attribute *attr,
716                                   const char *page, size_t count)
717 {
718         struct o2nm_cluster *cluster = to_o2nm_cluster(item);
719         struct o2nm_cluster_attribute *o2nm_cluster_attr =
720                 container_of(attr, struct o2nm_cluster_attribute, attr);
721         ssize_t ret;
722
723         if (o2nm_cluster_attr->store == NULL) {
724                 ret = -EINVAL;
725                 goto out;
726         }
727
728         ret = o2nm_cluster_attr->store(cluster, page, count);
729         if (ret < count)
730                 goto out;
731 out:
732         return ret;
733 }
734
735 static struct config_item *o2nm_node_group_make_item(struct config_group *group,
736                                                      const char *name)
737 {
738         struct o2nm_node *node = NULL;
739
740         if (strlen(name) > O2NM_MAX_NAME_LEN)
741                 return ERR_PTR(-ENAMETOOLONG);
742
743         node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL);
744         if (node == NULL)
745                 return ERR_PTR(-ENOMEM);
746
747         strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
748         config_item_init_type_name(&node->nd_item, name, &o2nm_node_type);
749         spin_lock_init(&node->nd_lock);
750
751         mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name);
752
753         return &node->nd_item;
754 }
755
/* configfs rmdir of a node: tear down networking for the node, unhook
 * it from the cluster's lookup structures, then drop the item
 * reference (the node itself is freed in o2nm_node_release). */
static void o2nm_node_group_drop_item(struct config_group *group,
				      struct config_item *item)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);

	o2net_disconnect_node(node);

	/* removing the local node shuts the listener down too */
	if (cluster->cl_has_local &&
	    (cluster->cl_local_node == node->nd_num)) {
		cluster->cl_has_local = 0;
		cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
		o2net_stop_listening(node);
	}

	/* XXX call into net to stop this node from trading messages */

	write_lock(&cluster->cl_nodes_lock);

	/* XXX sloppy */
	if (node->nd_ipv4_address)
		rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);

	/* nd_num might be 0 if the node number hasn't been set.. */
	if (cluster->cl_nodes[node->nd_num] == node) {
		cluster->cl_nodes[node->nd_num] = NULL;
		clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
	}
	write_unlock(&cluster->cl_nodes_lock);

	mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n",
	     config_item_name(&node->nd_item));

	config_item_put(item);
}
791
/* group ops and type for the "node" directory: mkdir/rmdir of nodes */
static struct configfs_group_operations o2nm_node_group_group_ops = {
	.make_item	= o2nm_node_group_make_item,
	.drop_item	= o2nm_node_group_drop_item,
};

static struct config_item_type o2nm_node_group_type = {
	.ct_group_ops	= &o2nm_node_group_group_ops,
	.ct_owner	= THIS_MODULE,
};
801
802 /* cluster */
803
/* configfs release: free the cluster and the default_groups array that
 * was kcalloc'd for it in o2nm_cluster_group_make_group. */
static void o2nm_cluster_release(struct config_item *item)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);

	kfree(cluster->cl_group.default_groups);
	kfree(cluster);
}
811
/* item ops and type for a cluster directory */
static struct configfs_item_operations o2nm_cluster_item_ops = {
	.release	= o2nm_cluster_release,
	.show_attribute		= o2nm_cluster_show,
	.store_attribute	= o2nm_cluster_store,
};

static struct config_item_type o2nm_cluster_type = {
	.ct_item_ops	= &o2nm_cluster_item_ops,
	.ct_attrs	= o2nm_cluster_attrs,
	.ct_owner	= THIS_MODULE,
};
823
824 /* cluster set */
825
/* the configfs subsystem wrapper, registered under the name "cluster" */
struct o2nm_cluster_group {
	struct configfs_subsystem cs_subsys;
	/* some stuff? */
};

#if 0
static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group)
{
	return group ?
		container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys)
	       : NULL;
}
#endif
839
/* configfs mkdir at the subsystem root: create a cluster.  Allocates
 * the cluster, its "node" group, the heartbeat group, and the
 * NULL-terminated default_groups array tying them together.  Only one
 * cluster may exist at a time (-ENOSPC otherwise). */
static struct config_group *o2nm_cluster_group_make_group(struct config_group *group,
							  const char *name)
{
	struct o2nm_cluster *cluster = NULL;
	struct o2nm_node_group *ns = NULL;
	struct config_group *o2hb_group = NULL, *ret = NULL;
	void *defs = NULL;

	/* this runs under the parent dir's i_mutex; there can be only
	 * one caller in here at a time */
	if (o2nm_single_cluster)
		return ERR_PTR(-ENOSPC);

	/* allocate everything up front; any NULL below means -ENOMEM */
	cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL);
	ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL);
	defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
	o2hb_group = o2hb_alloc_hb_set();
	if (cluster == NULL || ns == NULL || o2hb_group == NULL || defs == NULL)
		goto out;

	config_group_init_type_name(&cluster->cl_group, name,
				    &o2nm_cluster_type);
	config_group_init_type_name(&ns->ns_group, "node",
				    &o2nm_node_group_type);

	/* two default sub-directories plus the NULL terminator */
	cluster->cl_group.default_groups = defs;
	cluster->cl_group.default_groups[0] = &ns->ns_group;
	cluster->cl_group.default_groups[1] = o2hb_group;
	cluster->cl_group.default_groups[2] = NULL;
	rwlock_init(&cluster->cl_nodes_lock);
	cluster->cl_node_ip_tree = RB_ROOT;
	cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT;
	cluster->cl_idle_timeout_ms    = O2NET_IDLE_TIMEOUT_MS_DEFAULT;
	cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT;
	cluster->cl_fence_method       = O2NM_FENCE_RESET;

	ret = &cluster->cl_group;
	o2nm_single_cluster = cluster;

out:
	/* on failure free whatever subset was allocated (all the
	 * free routines tolerate NULL) */
	if (ret == NULL) {
		kfree(cluster);
		kfree(ns);
		o2hb_free_hb_set(o2hb_group);
		kfree(defs);
		ret = ERR_PTR(-ENOMEM);
	}

	return ret;
}
890
891 static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item)
892 {
893         struct o2nm_cluster *cluster = to_o2nm_cluster(item);
894         int i;
895         struct config_item *killme;
896
897         BUG_ON(o2nm_single_cluster != cluster);
898         o2nm_single_cluster = NULL;
899
900         for (i = 0; cluster->cl_group.default_groups[i]; i++) {
901                 killme = &cluster->cl_group.default_groups[i]->cg_item;
902                 cluster->cl_group.default_groups[i] = NULL;
903                 config_item_put(killme);
904         }
905
906         config_item_put(item);
907 }
908
/* configfs operations on the top-level "cluster" directory: creating a
 * child directory instantiates a cluster, removing it tears one down. */
static struct configfs_group_operations o2nm_cluster_group_group_ops = {
	.make_group	= o2nm_cluster_group_make_group,
	.drop_item	= o2nm_cluster_group_drop_item,
};
913
/* item type for the "cluster" subsystem group itself */
static struct config_item_type o2nm_cluster_group_type = {
	.ct_group_ops	= &o2nm_cluster_group_group_ops,
	.ct_owner	= THIS_MODULE,
};
918
/* the configfs subsystem rooted at /config/cluster; registered in
 * init_o2nm() and unregistered in exit_o2nm() */
static struct o2nm_cluster_group o2nm_cluster_group = {
	.cs_subsys = {
		.su_group = {
			.cg_item = {
				.ci_namebuf = "cluster",
				.ci_type = &o2nm_cluster_group_type,
			},
		},
	},
};
929
/* take the configfs subsystem mutex; pairs with o2nm_unlock_subsystem() */
static inline void o2nm_lock_subsystem(void)
{
	mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
}
934
/* release the configfs subsystem mutex taken by o2nm_lock_subsystem() */
static inline void o2nm_unlock_subsystem(void)
{
	mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
}
939
/* pin a configfs item so user space cannot rmdir it while the kernel
 * depends on it; returns 0 or a negative errno from configfs */
int o2nm_depend_item(struct config_item *item)
{
	return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
}
944
/* drop a pin previously taken by o2nm_depend_item() */
void o2nm_undepend_item(struct config_item *item)
{
	configfs_undepend_item(&o2nm_cluster_group.cs_subsys, item);
}
949
950 int o2nm_depend_this_node(void)
951 {
952         int ret = 0;
953         struct o2nm_node *local_node;
954
955         local_node = o2nm_get_node_by_num(o2nm_this_node());
956         if (!local_node) {
957                 ret = -EINVAL;
958                 goto out;
959         }
960
961         ret = o2nm_depend_item(&local_node->nd_item);
962         o2nm_node_put(local_node);
963
964 out:
965         return ret;
966 }
967
968 void o2nm_undepend_this_node(void)
969 {
970         struct o2nm_node *local_node;
971
972         local_node = o2nm_get_node_by_num(o2nm_this_node());
973         BUG_ON(!local_node);
974
975         o2nm_undepend_item(&local_node->nd_item);
976         o2nm_node_put(local_node);
977 }
978
979
/* module unload: tear down in the reverse order of init_o2nm() —
 * heartbeat callbacks, the configfs subsystem, the o2cb sysfs files,
 * then the networking and heartbeat subsystems themselves */
static void __exit exit_o2nm(void)
{
	/* XXX sync with hb callbacks and shut down hb? */
	o2net_unregister_hb_callbacks();
	configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
	o2cb_sys_shutdown();

	o2net_exit();
	o2hb_exit();
}
990
991 static int __init init_o2nm(void)
992 {
993         int ret = -1;
994
995         cluster_print_version();
996
997         ret = o2hb_init();
998         if (ret)
999                 goto out;
1000
1001         ret = o2net_init();
1002         if (ret)
1003                 goto out_o2hb;
1004
1005         ret = o2net_register_hb_callbacks();
1006         if (ret)
1007                 goto out_o2net;
1008
1009         config_group_init(&o2nm_cluster_group.cs_subsys.su_group);
1010         mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex);
1011         ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys);
1012         if (ret) {
1013                 printk(KERN_ERR "nodemanager: Registration returned %d\n", ret);
1014                 goto out_callbacks;
1015         }
1016
1017         ret = o2cb_sys_init();
1018         if (!ret)
1019                 goto out;
1020
1021         configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
1022 out_callbacks:
1023         o2net_unregister_hb_callbacks();
1024 out_o2net:
1025         o2net_exit();
1026 out_o2hb:
1027         o2hb_exit();
1028 out:
1029         return ret;
1030 }
1031
1032 MODULE_AUTHOR("Oracle");
1033 MODULE_LICENSE("GPL");
1034
1035 module_init(init_o2nm)
1036 module_exit(exit_o2nm)