1 /* -*- mode: c; c-basic-offset: 8; -*-
2  * vim: noexpandtab sw=8 ts=8 sts=0:
3  *
4  * dlmmaster.c
5  *
6  * standalone DLM module
7  *
8  * Copyright (C) 2004 Oracle.  All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public
12  * License as published by the Free Software Foundation; either
13  * version 2 of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public
21  * License along with this program; if not, write to the
22  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23  * Boston, MA 02111-1307, USA.
24  *
25  */
26
27
28 #include <linux/module.h>
29 #include <linux/fs.h>
30 #include <linux/types.h>
31 #include <linux/slab.h>
32 #include <linux/highmem.h>
33 #include <linux/utsname.h>
34 #include <linux/init.h>
35 #include <linux/sysctl.h>
36 #include <linux/random.h>
37 #include <linux/blkdev.h>
38 #include <linux/socket.h>
39 #include <linux/inet.h>
40 #include <linux/spinlock.h>
41 #include <linux/delay.h>
42
43
44 #include "cluster/heartbeat.h"
45 #include "cluster/nodemanager.h"
46 #include "cluster/tcp.h"
47
48 #include "dlmapi.h"
49 #include "dlmcommon.h"
50 #include "dlmdebug.h"
51 #include "dlmdomain.h"
52
53 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
54 #include "cluster/masklog.h"
55
56 enum dlm_mle_type {
57         DLM_MLE_BLOCK,
58         DLM_MLE_MASTER,
59         DLM_MLE_MIGRATION
60 };
61
62 struct dlm_lock_name
63 {
64         u8 len;
65         u8 name[DLM_LOCKID_NAME_MAX];
66 };
67
68 struct dlm_master_list_entry
69 {
70         struct list_head list;
71         struct list_head hb_events;
72         struct dlm_ctxt *dlm;
73         spinlock_t spinlock;
74         wait_queue_head_t wq;
75         atomic_t woken;
76         struct kref mle_refs;
77         int inuse;
78         unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
79         unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
80         unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
81         unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
82         u8 master;
83         u8 new_master;
84         enum dlm_mle_type type;
85         struct o2hb_callback_func mle_hb_up;
86         struct o2hb_callback_func mle_hb_down;
87         union {
88                 struct dlm_lock_resource *res;
89                 struct dlm_lock_name name;
90         } u;
91 };
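/*
 * The union above is keyed by type: DLM_MLE_MASTER entries point at the
 * lock resource itself via u.res, while DLM_MLE_BLOCK and DLM_MLE_MIGRATION
 * entries only carry the lock name in u.name (see dlm_init_mle() below).
 */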
92
93 static void dlm_mle_node_down(struct dlm_ctxt *dlm,
94                               struct dlm_master_list_entry *mle,
95                               struct o2nm_node *node,
96                               int idx);
97 static void dlm_mle_node_up(struct dlm_ctxt *dlm,
98                             struct dlm_master_list_entry *mle,
99                             struct o2nm_node *node,
100                             int idx);
101
102 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
103 static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname,
104                                 unsigned int namelen, void *nodemap,
105                                 u32 flags);
106
107 static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
108                                 struct dlm_master_list_entry *mle,
109                                 const char *name,
110                                 unsigned int namelen)
111 {
112         struct dlm_lock_resource *res;
113
114         if (dlm != mle->dlm)
115                 return 0;
116
117         if (mle->type == DLM_MLE_BLOCK ||
118             mle->type == DLM_MLE_MIGRATION) {
119                 if (namelen != mle->u.name.len ||
120                     memcmp(name, mle->u.name.name, namelen)!=0)
121                         return 0;
122         } else {
123                 res = mle->u.res;
124                 if (namelen != res->lockname.len ||
125                     memcmp(res->lockname.name, name, namelen) != 0)
126                         return 0;
127         }
128         return 1;
129 }
130
131 #if 0
132 /* Code here is included but compiled out, as it aids debugging */
133
134 #define dlm_print_nodemap(m)  _dlm_print_nodemap(m,#m)
135 void _dlm_print_nodemap(unsigned long *map, const char *mapname)
136 {
137         int i;
138         printk("%s=[ ", mapname);
139         for (i=0; i<O2NM_MAX_NODES; i++)
140                 if (test_bit(i, map))
141                         printk("%d ", i);
142         printk("]");
143 }
144
145 void dlm_print_one_mle(struct dlm_master_list_entry *mle)
146 {
147         int refs;
148         char *type;
149         char attached;
150         u8 master;
151         unsigned int namelen;
152         const char *name;
153         struct kref *k;
154         unsigned long *maybe = mle->maybe_map,
155                       *vote = mle->vote_map,
156                       *resp = mle->response_map,
157                       *node = mle->node_map;
158
159         k = &mle->mle_refs;
160         if (mle->type == DLM_MLE_BLOCK)
161                 type = "BLK";
162         else if (mle->type == DLM_MLE_MASTER)
163                 type = "MAS";
164         else
165                 type = "MIG";
166         refs = atomic_read(&k->refcount);
167         master = mle->master;
168         attached = (list_empty(&mle->hb_events) ? 'N' : 'Y');
169
170         if (mle->type != DLM_MLE_MASTER) {
171                 namelen = mle->u.name.len;
172                 name = mle->u.name.name;
173         } else {
174                 namelen = mle->u.res->lockname.len;
175                 name = mle->u.res->lockname.name;
176         }
177
178         mlog(ML_NOTICE, "%.*s: %3s refs=%3d mas=%3u new=%3u evt=%c inuse=%d ",
179                   namelen, name, type, refs, master, mle->new_master, attached,
180                   mle->inuse);
181         dlm_print_nodemap(maybe);
182         printk(", ");
183         dlm_print_nodemap(vote);
184         printk(", ");
185         dlm_print_nodemap(resp);
186         printk(", ");
187         dlm_print_nodemap(node);
188         printk(", ");
189         printk("\n");
190 }
191
192 static void dlm_dump_mles(struct dlm_ctxt *dlm)
193 {
194         struct dlm_master_list_entry *mle;
195         struct list_head *iter;
196         
197         mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name);
198         spin_lock(&dlm->master_lock);
199         list_for_each(iter, &dlm->master_list) {
200                 mle = list_entry(iter, struct dlm_master_list_entry, list);
201                 dlm_print_one_mle(mle);
202         }
203         spin_unlock(&dlm->master_lock);
204 }
205
206 int dlm_dump_all_mles(const char __user *data, unsigned int len)
207 {
208         struct list_head *iter;
209         struct dlm_ctxt *dlm;
210
211         spin_lock(&dlm_domain_lock);
212         list_for_each(iter, &dlm_domains) {
213                 dlm = list_entry (iter, struct dlm_ctxt, list);
214                 mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name);
215                 dlm_dump_mles(dlm);
216         }
217         spin_unlock(&dlm_domain_lock);
218         return len;
219 }
220 EXPORT_SYMBOL_GPL(dlm_dump_all_mles);
221
222 #endif  /*  0  */
223
224
225 static kmem_cache_t *dlm_mle_cache = NULL;
226
227
228 static void dlm_mle_release(struct kref *kref);
229 static void dlm_init_mle(struct dlm_master_list_entry *mle,
230                         enum dlm_mle_type type,
231                         struct dlm_ctxt *dlm,
232                         struct dlm_lock_resource *res,
233                         const char *name,
234                         unsigned int namelen);
235 static void dlm_put_mle(struct dlm_master_list_entry *mle);
236 static void __dlm_put_mle(struct dlm_master_list_entry *mle);
237 static int dlm_find_mle(struct dlm_ctxt *dlm,
238                         struct dlm_master_list_entry **mle,
239                         char *name, unsigned int namelen);
240
241 static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to);
242
243
244 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
245                                      struct dlm_lock_resource *res,
246                                      struct dlm_master_list_entry *mle,
247                                      int *blocked);
248 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
249                                     struct dlm_lock_resource *res,
250                                     struct dlm_master_list_entry *mle,
251                                     int blocked);
252 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
253                                  struct dlm_lock_resource *res,
254                                  struct dlm_master_list_entry *mle,
255                                  struct dlm_master_list_entry **oldmle,
256                                  const char *name, unsigned int namelen,
257                                  u8 new_master, u8 master);
258
259 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
260                                     struct dlm_lock_resource *res);
261 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
262                                       struct dlm_lock_resource *res);
263 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
264                                        struct dlm_lock_resource *res,
265                                        u8 target);
266 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
267                                        struct dlm_lock_resource *res);
268
269
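/* Returns 1 if the errno indicates that the remote node is dead or the
 * socket to it is gone.  Callers use this to tell node death (handled by
 * recovery) apart from local programming errors, which BUG() instead
 * (see dlm_do_master_request() below). */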
270 int dlm_is_host_down(int errno)
271 {
272         switch (errno) {
273                 case -EBADF:
274                 case -ECONNREFUSED:
275                 case -ENOTCONN:
276                 case -ECONNRESET:
277                 case -EPIPE:
278                 case -EHOSTDOWN:
279                 case -EHOSTUNREACH:
280                 case -ETIMEDOUT:
281                 case -ECONNABORTED:
282                 case -ENETDOWN:
283                 case -ENETUNREACH:
284                 case -ENETRESET:
285                 case -ESHUTDOWN:
286                 case -ENOPROTOOPT:
287                 case -EINVAL:   /* if returned from our tcp code,
288                                    this means there is no socket */
289                         return 1;
290         }
291         return 0;
292 }
293
294
295 /*
296  * MASTER LIST FUNCTIONS
297  */
298
299
300 /*
301  * regarding master list entries and heartbeat callbacks:
302  *
303  * in order to avoid sleeping and allocation that occurs in
304  * heartbeat, master list entries are simply attached to the
305  * dlm's established heartbeat callbacks.  the mle is attached
306  * when it is created, and since the dlm->spinlock is held at
307  * that time, any heartbeat event will be properly discovered
308  * by the mle.  the mle needs to be detached from the
309  * dlm->mle_hb_events list as soon as heartbeat events are no
310  * longer useful to the mle, and before the mle is freed.
311  *
312  * as a general rule, heartbeat events are no longer needed by
313  * the mle once an "answer" regarding the lock master has been
314  * received.
315  */
316 static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
317                                               struct dlm_master_list_entry *mle)
318 {
319         assert_spin_locked(&dlm->spinlock);
320
321         list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
322 }
323
324
325 static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
326                                               struct dlm_master_list_entry *mle)
327 {
328         if (!list_empty(&mle->hb_events))
329                 list_del_init(&mle->hb_events);
330 }
331
332
333 static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
334                                             struct dlm_master_list_entry *mle)
335 {
336         spin_lock(&dlm->spinlock);
337         __dlm_mle_detach_hb_events(dlm, mle);
338         spin_unlock(&dlm->spinlock);
339 }
340
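/* mle->inuse tracks callers that keep using the mle after dropping
 * dlm->master_lock.  Each inuse hold also takes a kref, so a kref_put()
 * elsewhere (for example in the assert master handler) cannot drop the
 * last reference while such a caller still holds the entry (see the
 * extra ref taken in dlm_get_lock_resource()). */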
341 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
342 {
343         struct dlm_ctxt *dlm;
344         dlm = mle->dlm;
345
346         assert_spin_locked(&dlm->spinlock);
347         assert_spin_locked(&dlm->master_lock);
348         mle->inuse++;
349         kref_get(&mle->mle_refs);
350 }
351
352 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
353 {
354         struct dlm_ctxt *dlm;
355         dlm = mle->dlm;
356
357         spin_lock(&dlm->spinlock);
358         spin_lock(&dlm->master_lock);
359         mle->inuse--;
360         __dlm_put_mle(mle);
361         spin_unlock(&dlm->master_lock);
362         spin_unlock(&dlm->spinlock);
363
364 }
365
366 /* remove from list and free */
367 static void __dlm_put_mle(struct dlm_master_list_entry *mle)
368 {
369         struct dlm_ctxt *dlm;
370         dlm = mle->dlm;
371
372         assert_spin_locked(&dlm->spinlock);
373         assert_spin_locked(&dlm->master_lock);
374         if (!atomic_read(&mle->mle_refs.refcount)) {
375                 /* this may or may not crash, but who cares.
376                  * it's a BUG. */
377                 mlog(ML_ERROR, "bad mle: %p\n", mle);
378                 dlm_print_one_mle(mle);
379                 BUG();
380         } else
381                 kref_put(&mle->mle_refs, dlm_mle_release);
382 }
383
384
385 /* must not have any spinlocks coming in */
386 static void dlm_put_mle(struct dlm_master_list_entry *mle)
387 {
388         struct dlm_ctxt *dlm;
389         dlm = mle->dlm;
390
391         spin_lock(&dlm->spinlock);
392         spin_lock(&dlm->master_lock);
393         __dlm_put_mle(mle);
394         spin_unlock(&dlm->master_lock);
395         spin_unlock(&dlm->spinlock);
396 }
397
398 static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
399 {
400         kref_get(&mle->mle_refs);
401 }
402
403 static void dlm_init_mle(struct dlm_master_list_entry *mle,
404                         enum dlm_mle_type type,
405                         struct dlm_ctxt *dlm,
406                         struct dlm_lock_resource *res,
407                         const char *name,
408                         unsigned int namelen)
409 {
410         assert_spin_locked(&dlm->spinlock);
411
412         mle->dlm = dlm;
413         mle->type = type;
414         INIT_LIST_HEAD(&mle->list);
415         INIT_LIST_HEAD(&mle->hb_events);
416         memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
417         spin_lock_init(&mle->spinlock);
418         init_waitqueue_head(&mle->wq);
419         atomic_set(&mle->woken, 0);
420         kref_init(&mle->mle_refs);
421         memset(mle->response_map, 0, sizeof(mle->response_map));
422         mle->master = O2NM_MAX_NODES;
423         mle->new_master = O2NM_MAX_NODES;
424         mle->inuse = 0;
425
426         if (mle->type == DLM_MLE_MASTER) {
427                 BUG_ON(!res);
428                 mle->u.res = res;
429         } else if (mle->type == DLM_MLE_BLOCK) {
430                 BUG_ON(!name);
431                 memcpy(mle->u.name.name, name, namelen);
432                 mle->u.name.len = namelen;
433         } else /* DLM_MLE_MIGRATION */ {
434                 BUG_ON(!name);
435                 memcpy(mle->u.name.name, name, namelen);
436                 mle->u.name.len = namelen;
437         }
438
439         /* copy off the node_map and register hb callbacks on our copy */
440         memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
441         memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
442         clear_bit(dlm->node_num, mle->vote_map);
443         clear_bit(dlm->node_num, mle->node_map);
444
445         /* attach the mle to the domain node up/down events */
446         __dlm_mle_attach_hb_events(dlm, mle);
447 }
448
449
450 /* returns 1 if found, 0 if not */
451 static int dlm_find_mle(struct dlm_ctxt *dlm,
452                         struct dlm_master_list_entry **mle,
453                         char *name, unsigned int namelen)
454 {
455         struct dlm_master_list_entry *tmpmle;
456         struct list_head *iter;
457
458         assert_spin_locked(&dlm->master_lock);
459
460         list_for_each(iter, &dlm->master_list) {
461                 tmpmle = list_entry(iter, struct dlm_master_list_entry, list);
462                 if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
463                         continue;
464                 dlm_get_mle(tmpmle);
465                 *mle = tmpmle;
466                 return 1;
467         }
468         return 0;
469 }
470
471 void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
472 {
473         struct dlm_master_list_entry *mle;
474         struct list_head *iter;
475
476         assert_spin_locked(&dlm->spinlock);
477         
478         list_for_each(iter, &dlm->mle_hb_events) {
479                 mle = list_entry(iter, struct dlm_master_list_entry, 
480                                  hb_events);
481                 if (node_up)
482                         dlm_mle_node_up(dlm, mle, NULL, idx);
483                 else
484                         dlm_mle_node_down(dlm, mle, NULL, idx);
485         }
486 }
487
488 static void dlm_mle_node_down(struct dlm_ctxt *dlm,
489                               struct dlm_master_list_entry *mle,
490                               struct o2nm_node *node, int idx)
491 {
492         spin_lock(&mle->spinlock);
493
494         if (!test_bit(idx, mle->node_map))
495                 mlog(0, "node %u already removed from nodemap!\n", idx);
496         else
497                 clear_bit(idx, mle->node_map);
498
499         spin_unlock(&mle->spinlock);
500 }
501
502 static void dlm_mle_node_up(struct dlm_ctxt *dlm,
503                             struct dlm_master_list_entry *mle,
504                             struct o2nm_node *node, int idx)
505 {
506         spin_lock(&mle->spinlock);
507
508         if (test_bit(idx, mle->node_map))
509                 mlog(0, "node %u already in node map!\n", idx);
510         else
511                 set_bit(idx, mle->node_map);
512
513         spin_unlock(&mle->spinlock);
514 }
515
516
517 int dlm_init_mle_cache(void)
518 {
519         dlm_mle_cache = kmem_cache_create("dlm_mle_cache",
520                                           sizeof(struct dlm_master_list_entry),
521                                           0, SLAB_HWCACHE_ALIGN,
522                                           NULL, NULL);
523         if (dlm_mle_cache == NULL)
524                 return -ENOMEM;
525         return 0;
526 }
527
528 void dlm_destroy_mle_cache(void)
529 {
530         if (dlm_mle_cache)
531                 kmem_cache_destroy(dlm_mle_cache);
532 }
533
534 static void dlm_mle_release(struct kref *kref)
535 {
536         struct dlm_master_list_entry *mle;
537         struct dlm_ctxt *dlm;
538
539         mlog_entry_void();
540
541         mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
542         dlm = mle->dlm;
543
544         if (mle->type != DLM_MLE_MASTER) {
545                 mlog(0, "calling mle_release for %.*s, type %d\n",
546                      mle->u.name.len, mle->u.name.name, mle->type);
547         } else {
548                 mlog(0, "calling mle_release for %.*s, type %d\n",
549                      mle->u.res->lockname.len,
550                      mle->u.res->lockname.name, mle->type);
551         }
552         assert_spin_locked(&dlm->spinlock);
553         assert_spin_locked(&dlm->master_lock);
554
555         /* remove from list if not already */
556         if (!list_empty(&mle->list))
557                 list_del_init(&mle->list);
558
559         /* detach the mle from the domain node up/down events */
560         __dlm_mle_detach_hb_events(dlm, mle);
561
562         /* NOTE: kfree under spinlock here.
563          * if this is bad, we can move this to a freelist. */
564         kmem_cache_free(dlm_mle_cache, mle);
565 }
566
567
568 /*
569  * LOCK RESOURCE FUNCTIONS
570  */
571
572 static void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
573                                   struct dlm_lock_resource *res,
574                                   u8 owner)
575 {
576         assert_spin_locked(&res->spinlock);
577
578         mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner);
579
580         if (owner == dlm->node_num)
581                 atomic_inc(&dlm->local_resources);
582         else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN)
583                 atomic_inc(&dlm->unknown_resources);
584         else
585                 atomic_inc(&dlm->remote_resources);
586
587         res->owner = owner;
588 }
589
590 void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
591                               struct dlm_lock_resource *res, u8 owner)
592 {
593         assert_spin_locked(&res->spinlock);
594
595         if (owner == res->owner)
596                 return;
597
598         if (res->owner == dlm->node_num)
599                 atomic_dec(&dlm->local_resources);
600         else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN)
601                 atomic_dec(&dlm->unknown_resources);
602         else
603                 atomic_dec(&dlm->remote_resources);
604
605         dlm_set_lockres_owner(dlm, res, owner);
606 }
607
608
609 static void dlm_lockres_release(struct kref *kref)
610 {
611         struct dlm_lock_resource *res;
612
613         res = container_of(kref, struct dlm_lock_resource, refs);
614
615         /* This should not happen -- all lockres' have a name
616          * associated with them at init time. */
617         BUG_ON(!res->lockname.name);
618
619         mlog(0, "destroying lockres %.*s\n", res->lockname.len,
620              res->lockname.name);
621
622         if (!hlist_unhashed(&res->hash_node) ||
623             !list_empty(&res->granted) ||
624             !list_empty(&res->converting) ||
625             !list_empty(&res->blocked) ||
626             !list_empty(&res->dirty) ||
627             !list_empty(&res->recovering) ||
628             !list_empty(&res->purge)) {
629                 mlog(ML_ERROR,
630                      "Going to BUG for resource %.*s."
631                      "  We're on a list! [%c%c%c%c%c%c%c]\n",
632                      res->lockname.len, res->lockname.name,
633                      !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
634                      !list_empty(&res->granted) ? 'G' : ' ',
635                      !list_empty(&res->converting) ? 'C' : ' ',
636                      !list_empty(&res->blocked) ? 'B' : ' ',
637                      !list_empty(&res->dirty) ? 'D' : ' ',
638                      !list_empty(&res->recovering) ? 'R' : ' ',
639                      !list_empty(&res->purge) ? 'P' : ' ');
640
641                 dlm_print_one_lock_resource(res);
642         }
643
644         /* By the time we're ready to blow this guy away, we shouldn't
645          * be on any lists. */
646         BUG_ON(!hlist_unhashed(&res->hash_node));
647         BUG_ON(!list_empty(&res->granted));
648         BUG_ON(!list_empty(&res->converting));
649         BUG_ON(!list_empty(&res->blocked));
650         BUG_ON(!list_empty(&res->dirty));
651         BUG_ON(!list_empty(&res->recovering));
652         BUG_ON(!list_empty(&res->purge));
653
654         kfree(res->lockname.name);
655
656         kfree(res);
657 }
658
659 void dlm_lockres_put(struct dlm_lock_resource *res)
660 {
661         kref_put(&res->refs, dlm_lockres_release);
662 }
663
664 static void dlm_init_lockres(struct dlm_ctxt *dlm,
665                              struct dlm_lock_resource *res,
666                              const char *name, unsigned int namelen)
667 {
668         char *qname;
669
670         /* If we memset here, we lose our reference to the kmalloc'd
671          * res->lockname.name, so be sure to init every field
672          * correctly! */
673
674         qname = (char *) res->lockname.name;
675         memcpy(qname, name, namelen);
676
677         res->lockname.len = namelen;
678         res->lockname.hash = dlm_lockid_hash(name, namelen);
679
680         init_waitqueue_head(&res->wq);
681         spin_lock_init(&res->spinlock);
682         INIT_HLIST_NODE(&res->hash_node);
683         INIT_LIST_HEAD(&res->granted);
684         INIT_LIST_HEAD(&res->converting);
685         INIT_LIST_HEAD(&res->blocked);
686         INIT_LIST_HEAD(&res->dirty);
687         INIT_LIST_HEAD(&res->recovering);
688         INIT_LIST_HEAD(&res->purge);
689         atomic_set(&res->asts_reserved, 0);
690         res->migration_pending = 0;
691
692         kref_init(&res->refs);
693
694         /* just for consistency */
695         spin_lock(&res->spinlock);
696         dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
697         spin_unlock(&res->spinlock);
698
699         res->state = DLM_LOCK_RES_IN_PROGRESS;
700
701         res->last_used = 0;
702
703         memset(res->lvb, 0, DLM_LVB_LEN);
704 }
705
706 struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
707                                    const char *name,
708                                    unsigned int namelen)
709 {
710         struct dlm_lock_resource *res;
711
712         res = kmalloc(sizeof(struct dlm_lock_resource), GFP_NOFS);
713         if (!res)
714                 return NULL;
715
716         res->lockname.name = kmalloc(namelen, GFP_NOFS);
717         if (!res->lockname.name) {
718                 kfree(res);
719                 return NULL;
720         }
721
722         dlm_init_lockres(dlm, res, name, namelen);
723         return res;
724 }
725
726 /*
727  * lookup a lock resource by name.
728  * may already exist in the hashtable.
729  * lockid is null terminated
730  *
731  * if not, allocate enough for the lockres and for
732  * the temporary structure used in doing the mastering.
733  *
734  * also, do a lookup in the dlm->master_list to see
735  * if another node has begun mastering the same lock.
736  * if so, there should be a block entry in there
737  * for this name, and we should *not* attempt to master
738  * the lock here.   need to wait around for that node
739  * to assert_master (or die).
740  *
741  */
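/*
 * A rough sketch of how a caller uses this; the real caller lives in
 * dlmlock.c and differs in detail:
 *
 *	res = dlm_get_lock_resource(dlm, name, flags);
 *	if (!res)
 *		return DLM_IVLOCKID;
 *	... queue the lock on res, possibly sending it to res->owner ...
 */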
742 struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
743                                           const char *lockid,
744                                           int flags)
745 {
746         struct dlm_lock_resource *tmpres=NULL, *res=NULL;
747         struct dlm_master_list_entry *mle = NULL;
748         struct dlm_master_list_entry *alloc_mle = NULL;
749         int blocked = 0;
750         int ret, nodenum;
751         struct dlm_node_iter iter;
752         unsigned int namelen, hash;
753         int tries = 0;
754         int bit, wait_on_recovery = 0;
755
756         BUG_ON(!lockid);
757
758         namelen = strlen(lockid);
759         hash = dlm_lockid_hash(lockid, namelen);
760
761         mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
762
763 lookup:
764         spin_lock(&dlm->spinlock);
765         tmpres = __dlm_lookup_lockres(dlm, lockid, namelen, hash);
766         if (tmpres) {
767                 spin_unlock(&dlm->spinlock);
768                 mlog(0, "found in hash!\n");
769                 if (res)
770                         dlm_lockres_put(res);
771                 res = tmpres;
772                 goto leave;
773         }
774
775         if (!res) {
776                 spin_unlock(&dlm->spinlock);
777                 mlog(0, "allocating a new resource\n");
778                 /* nothing found and we need to allocate one. */
779                 alloc_mle = (struct dlm_master_list_entry *)
780                         kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
781                 if (!alloc_mle)
782                         goto leave;
783                 res = dlm_new_lockres(dlm, lockid, namelen);
784                 if (!res)
785                         goto leave;
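                /* retake dlm->spinlock and re-check the hash: another
                 * thread may have inserted this lockres while we slept in
                 * the allocations above, in which case we use that one and
                 * free ours below. */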
786                 goto lookup;
787         }
788
789         mlog(0, "no lockres found, allocated our own: %p\n", res);
790
791         if (flags & LKM_LOCAL) {
792                 /* caller knows it's safe to assume it's not mastered elsewhere
793                  * DONE!  return right away */
794                 spin_lock(&res->spinlock);
795                 dlm_change_lockres_owner(dlm, res, dlm->node_num);
796                 __dlm_insert_lockres(dlm, res);
797                 spin_unlock(&res->spinlock);
798                 spin_unlock(&dlm->spinlock);
799                 /* lockres still marked IN_PROGRESS */
800                 goto wake_waiters;
801         }
802
803         /* check master list to see if another node has started mastering it */
804         spin_lock(&dlm->master_lock);
805
806         /* if we found a block, wait for lock to be mastered by another node */
807         blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
808         if (blocked) {
809                 if (mle->type == DLM_MLE_MASTER) {
810                         mlog(ML_ERROR, "master entry for nonexistent lock!\n");
811                         BUG();
812                 } else if (mle->type == DLM_MLE_MIGRATION) {
813                         /* migration is in progress! */
814                         /* the good news is that we now know the
815                          * "current" master (mle->master). */
816
817                         spin_unlock(&dlm->master_lock);
818                         assert_spin_locked(&dlm->spinlock);
819
820                         /* set the lockres owner and hash it */
821                         spin_lock(&res->spinlock);
822                         dlm_set_lockres_owner(dlm, res, mle->master);
823                         __dlm_insert_lockres(dlm, res);
824                         spin_unlock(&res->spinlock);
825                         spin_unlock(&dlm->spinlock);
826
827                         /* master is known, detach */
828                         dlm_mle_detach_hb_events(dlm, mle);
829                         dlm_put_mle(mle);
830                         mle = NULL;
831                         goto wake_waiters;
832                 }
833         } else {
834                 /* go ahead and try to master lock on this node */
835                 mle = alloc_mle;
836                 /* make sure this does not get freed below */
837                 alloc_mle = NULL;
838                 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
839                 set_bit(dlm->node_num, mle->maybe_map);
840                 list_add(&mle->list, &dlm->master_list);
841
842                 /* still holding the dlm spinlock, check the recovery map
843                  * to see if there are any nodes that still need to be 
844                  * considered.  these will not appear in the mle nodemap
845                  * but they might own this lockres.  wait on them. */
846                 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
847                 if (bit < O2NM_MAX_NODES) {
848                         mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
849                              "recover before lock mastery can begin\n",
850                              dlm->name, namelen, (char *)lockid, bit);
851                         wait_on_recovery = 1;
852                 }
853         }
854
855         /* at this point there is either a DLM_MLE_BLOCK or a
856          * DLM_MLE_MASTER on the master list, so it's safe to add the
857          * lockres to the hashtable.  anyone who finds the lock will
858          * still have to wait on the IN_PROGRESS. */
859
860         /* finally add the lockres to its hash bucket */
861         __dlm_insert_lockres(dlm, res);
862         /* get an extra ref on the mle in case this is a BLOCK
863          * if so, the creator of the BLOCK may try to put the last
864          * ref at this time in the assert master handler, so we
865          * need an extra one to keep from a bad ptr deref. */
866         dlm_get_mle_inuse(mle);
867         spin_unlock(&dlm->master_lock);
868         spin_unlock(&dlm->spinlock);
869
870 redo_request:
871         while (wait_on_recovery) {
872                 /* any cluster changes that occurred after dropping the
873                  * dlm spinlock would be detectable by a change on the mle,
874                  * so we only need to clear out the recovery map once. */
875                 if (dlm_is_recovery_lock(lockid, namelen)) {
876                         mlog(ML_NOTICE, "%s: recovery map is not empty, but "
877                              "must master $RECOVERY lock now\n", dlm->name);
878                         if (!dlm_pre_master_reco_lockres(dlm, res))
879                                 wait_on_recovery = 0;
880                         else {
881                                 mlog(0, "%s: waiting 500ms for heartbeat state "
882                                     "change\n", dlm->name);
883                                 msleep(500);
884                         }
885                         continue;
886                 } 
887
888                 dlm_kick_recovery_thread(dlm);
889                 msleep(1000);
890                 dlm_wait_for_recovery(dlm);
891
892                 spin_lock(&dlm->spinlock);
893                 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
894                 if (bit < O2NM_MAX_NODES) {
895                         mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
896                              "recover before lock mastery can begin\n",
897                              dlm->name, namelen, (char *)lockid, bit);
898                         wait_on_recovery = 1;
899                 } else
900                         wait_on_recovery = 0;
901                 spin_unlock(&dlm->spinlock);
902
903                 if (wait_on_recovery)
904                         dlm_wait_for_node_recovery(dlm, bit, 10000);
905         }
906
907         /* must wait for lock to be mastered elsewhere */
908         if (blocked)
909                 goto wait;
910
911         ret = -EINVAL;
912         dlm_node_iter_init(mle->vote_map, &iter);
913         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
914                 ret = dlm_do_master_request(mle, nodenum);
915                 if (ret < 0)
916                         mlog_errno(ret);
917                 if (mle->master != O2NM_MAX_NODES) {
918                         /* found a master ! */
919                         if (mle->master <= nodenum)
920                                 break;
921                         /* if our master request has not reached the master
922                          * yet, keep going until it does.  this is how the
923                          * master will know that asserts are needed back to
924                          * the lower nodes. */
925                         mlog(0, "%s:%.*s: requests only up to %u but master "
926                              "is %u, keep going\n", dlm->name, namelen,
927                              lockid, nodenum, mle->master);
928                 }
929         }
930
931 wait:
932         /* keep going until the response map includes all nodes */
933         ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
934         if (ret < 0) {
935                 wait_on_recovery = 1;
936                 mlog(0, "%s:%.*s: node map changed, redo the "
937                      "master request now, blocked=%d\n",
938                      dlm->name, res->lockname.len,
939                      res->lockname.name, blocked);
940                 if (++tries > 20) {
941                         mlog(ML_ERROR, "%s:%.*s: spinning on "
942                              "dlm_wait_for_lock_mastery, blocked=%d\n", 
943                              dlm->name, res->lockname.len, 
944                              res->lockname.name, blocked);
945                         dlm_print_one_lock_resource(res);
946                         /* dlm_print_one_mle(mle); */
947                         tries = 0;
948                 }
949                 goto redo_request;
950         }
951
952         mlog(0, "lockres mastered by %u\n", res->owner);
953         /* make sure we never continue without this */
954         BUG_ON(res->owner == O2NM_MAX_NODES);
955
956         /* master is known, detach if not already detached */
957         dlm_mle_detach_hb_events(dlm, mle);
958         dlm_put_mle(mle);
959         /* put the extra ref */
960         dlm_put_mle_inuse(mle);
961
962 wake_waiters:
963         spin_lock(&res->spinlock);
964         res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
965         spin_unlock(&res->spinlock);
966         wake_up(&res->wq);
967
968 leave:
969         /* need to free the unused mle */
970         if (alloc_mle)
971                 kmem_cache_free(dlm_mle_cache, alloc_mle);
972
973         return res;
974 }
975
976
977 #define DLM_MASTERY_TIMEOUT_MS   5000
978
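/* Loop until mastery of the lockres is resolved: either another node
 * asserts master (mle->master gets set or res->owner changes), or every
 * node in the vote map has responded and this node holds the lowest node
 * number in maybe_map, in which case it asserts master itself.  Returns a
 * negative value if the node map changed underneath us, in which case the
 * caller redoes its master requests. */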
979 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
980                                      struct dlm_lock_resource *res,
981                                      struct dlm_master_list_entry *mle,
982                                      int *blocked)
983 {
984         u8 m;
985         int ret, bit;
986         int map_changed, voting_done;
987         int assert, sleep;
988
989 recheck:
990         ret = 0;
991         assert = 0;
992
993         /* check if another node has already become the owner */
994         spin_lock(&res->spinlock);
995         if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
996                 mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
997                      res->lockname.len, res->lockname.name, res->owner);
998                 spin_unlock(&res->spinlock);
999                 /* this will cause the master to re-assert across
1000                  * the whole cluster, freeing up mles */
1001                 if (res->owner != dlm->node_num) {
1002                         ret = dlm_do_master_request(mle, res->owner);
1003                         if (ret < 0) {
1004                                 /* give recovery a chance to run */
1005                                 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
1006                                 msleep(500);
1007                                 goto recheck;
1008                         }
1009                 }
1010                 ret = 0;
1011                 goto leave;
1012         }
1013         spin_unlock(&res->spinlock);
1014
1015         spin_lock(&mle->spinlock);
1016         m = mle->master;
1017         map_changed = (memcmp(mle->vote_map, mle->node_map,
1018                               sizeof(mle->vote_map)) != 0);
1019         voting_done = (memcmp(mle->vote_map, mle->response_map,
1020                              sizeof(mle->vote_map)) == 0);
1021
1022         /* restart if we hit any errors */
1023         if (map_changed) {
1024                 int b;
1025                 mlog(0, "%s: %.*s: node map changed, restarting\n",
1026                      dlm->name, res->lockname.len, res->lockname.name);
1027                 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1028                 b = (mle->type == DLM_MLE_BLOCK);
1029                 if ((*blocked && !b) || (!*blocked && b)) {
1030                         mlog(0, "%s:%.*s: status change: old=%d new=%d\n", 
1031                              dlm->name, res->lockname.len, res->lockname.name,
1032                              *blocked, b);
1033                         *blocked = b;
1034                 }
1035                 spin_unlock(&mle->spinlock);
1036                 if (ret < 0) {
1037                         mlog_errno(ret);
1038                         goto leave;
1039                 }
1040                 mlog(0, "%s:%.*s: restart lock mastery succeeded, "
1041                      "rechecking now\n", dlm->name, res->lockname.len,
1042                      res->lockname.name);
1043                 goto recheck;
1044         } else {
1045                 if (!voting_done) {
1046                         mlog(0, "map not changed and voting not done "
1047                              "for %s:%.*s\n", dlm->name, res->lockname.len,
1048                              res->lockname.name);
1049                 }
1050         }
1051
1052         if (m != O2NM_MAX_NODES) {
1053                 /* another node has done an assert!
1054                  * all done! */
1055                 sleep = 0;
1056         } else {
1057                 sleep = 1;
1058                 /* have all nodes responded? */
1059                 if (voting_done && !*blocked) {
1060                         bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1061                         if (dlm->node_num <= bit) {
1062                                 /* my node number is lowest.
1063                                  * now tell other nodes that I am
1064                                  * mastering this. */
1065                                 mle->master = dlm->node_num;
1066                                 assert = 1;
1067                                 sleep = 0;
1068                         }
1069                         /* if voting is done, but we have not received
1070                          * an assert master yet, we must sleep */
1071                 }
1072         }
1073
1074         spin_unlock(&mle->spinlock);
1075
1076         /* sleep if we haven't finished voting yet */
1077         if (sleep) {
1078                 unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
1079
1080                 /*
1081                 if (atomic_read(&mle->mle_refs.refcount) < 2)
1082                         mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
1083                         atomic_read(&mle->mle_refs.refcount),
1084                         res->lockname.len, res->lockname.name);
1085                 */
1086                 atomic_set(&mle->woken, 0);
1087                 (void)wait_event_timeout(mle->wq,
1088                                          (atomic_read(&mle->woken) == 1),
1089                                          timeo);
1090                 if (res->owner == O2NM_MAX_NODES) {
1091                         mlog(0, "waiting again\n");
1092                         goto recheck;
1093                 }
1094                 mlog(0, "done waiting, master is %u\n", res->owner);
1095                 ret = 0;
1096                 goto leave;
1097         }
1098
1099         ret = 0;   /* done */
1100         if (assert) {
1101                 m = dlm->node_num;
1102                 mlog(0, "about to master %.*s here, this=%u\n",
1103                      res->lockname.len, res->lockname.name, m);
1104                 ret = dlm_do_assert_master(dlm, res->lockname.name,
1105                                            res->lockname.len, mle->vote_map, 0);
1106                 if (ret) {
1107                         /* This is a failure in the network path,
1108                          * not in the response to the assert_master
1109                          * (any nonzero response is a BUG on this node).
1110                          * Most likely a socket just got disconnected
1111                          * due to node death. */
1112                         mlog_errno(ret);
1113                 }
1114                 /* no longer need to restart lock mastery.
1115                  * all living nodes have been contacted. */
1116                 ret = 0;
1117         }
1118
1119         /* set the lockres owner */
1120         spin_lock(&res->spinlock);
1121         dlm_change_lockres_owner(dlm, res, m);
1122         spin_unlock(&res->spinlock);
1123
1124 leave:
1125         return ret;
1126 }
1127
1128 struct dlm_bitmap_diff_iter
1129 {
1130         int curnode;
1131         unsigned long *orig_bm;
1132         unsigned long *cur_bm;
1133         unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
1134 };
1135
1136 enum dlm_node_state_change
1137 {
1138         NODE_DOWN = -1,
1139         NODE_NO_CHANGE = 0,
1140         NODE_UP
1141 };
1142
1143 static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
1144                                       unsigned long *orig_bm,
1145                                       unsigned long *cur_bm)
1146 {
1147         unsigned long p1, p2;
1148         int i;
1149
1150         iter->curnode = -1;
1151         iter->orig_bm = orig_bm;
1152         iter->cur_bm = cur_bm;
1153
1154         for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
1155                 p1 = *(iter->orig_bm + i);
1156                 p2 = *(iter->cur_bm + i);
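                /* symmetric difference (XOR): a bit is set only where a
                 * node is in one map but not the other, i.e. its up/down
                 * state changed between orig_bm and cur_bm */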
1157                 iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
1158         }
1159 }
1160
1161 static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
1162                                      enum dlm_node_state_change *state)
1163 {
1164         int bit;
1165
1166         if (iter->curnode >= O2NM_MAX_NODES)
1167                 return -ENOENT;
1168
1169         bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
1170                             iter->curnode+1);
1171         if (bit >= O2NM_MAX_NODES) {
1172                 iter->curnode = O2NM_MAX_NODES;
1173                 return -ENOENT;
1174         }
1175
1176         /* if it was there in the original then this node died */
1177         if (test_bit(bit, iter->orig_bm))
1178                 *state = NODE_DOWN;
1179         else
1180                 *state = NODE_UP;
1181
1182         iter->curnode = bit;
1183         return bit;
1184 }
1185
1186
1187 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1188                                     struct dlm_lock_resource *res,
1189                                     struct dlm_master_list_entry *mle,
1190                                     int blocked)
1191 {
1192         struct dlm_bitmap_diff_iter bdi;
1193         enum dlm_node_state_change sc;
1194         int node;
1195         int ret = 0;
1196
1197         mlog(0, "something happened such that the "
1198              "master process may need to be restarted!\n");
1199
1200         assert_spin_locked(&mle->spinlock);
1201
1202         dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
1203         node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1204         while (node >= 0) {
1205                 if (sc == NODE_UP) {
1206                         /* a node came up.  clear any old vote from
1207                          * the response map and set it in the vote map
1208                          * then restart the mastery. */
1209                         mlog(ML_NOTICE, "node %d up while restarting\n", node);
1210
1211                         /* redo the master request, but only for the new node */
1212                         mlog(0, "sending request to new node\n");
1213                         clear_bit(node, mle->response_map);
1214                         set_bit(node, mle->vote_map);
1215                 } else {
1216                         mlog(ML_ERROR, "node down! %d\n", node);
1217                         if (blocked) {
1218                                 int lowest = find_next_bit(mle->maybe_map,
1219                                                        O2NM_MAX_NODES, 0);
1220
1221                                 /* act like it was never there */
1222                                 clear_bit(node, mle->maybe_map);
1223
1224                                 if (node == lowest) {
1225                                         mlog(0, "expected master %u died"
1226                                             " while this node was blocked "
1227                                             "waiting on it!\n", node);
1228                                         lowest = find_next_bit(mle->maybe_map,
1229                                                         O2NM_MAX_NODES,
1230                                                         lowest+1);
1231                                         if (lowest < O2NM_MAX_NODES) {
1232                                                 mlog(0, "%s:%.*s: still "
1233                                                      "blocked. waiting on %u "
1234                                                      "now\n", dlm->name,
1235                                                      res->lockname.len,
1236                                                      res->lockname.name,
1237                                                      lowest);
1238                                         } else {
1239                                                 /* mle is an MLE_BLOCK, but
1240                                                  * there is now nothing left to
1241                                                  * block on.  we need to return
1242                                                  * all the way back out and try
1243                                                  * again with an MLE_MASTER.
1244                                                  * dlm_do_local_recovery_cleanup
1245                                                  * has already run, so the mle
1246                                                  * refcount is ok */
1247                                                 mlog(0, "%s:%.*s: no "
1248                                                      "longer blocking. try to "
1249                                                      "master this here\n",
1250                                                      dlm->name,
1251                                                      res->lockname.len,
1252                                                      res->lockname.name);
1253                                                 mle->type = DLM_MLE_MASTER;
1254                                                 mle->u.res = res;
1255                                         }
1256                                 }
1257                         }
1258
1259                         /* now blank out everything, as if we had never
1260                          * contacted anyone */
1261                         memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
1262                         memset(mle->response_map, 0, sizeof(mle->response_map));
1263                         /* reset the vote_map to the current node_map */
1264                         memcpy(mle->vote_map, mle->node_map,
1265                                sizeof(mle->node_map));
1266                         /* put myself into the maybe map */
1267                         if (mle->type != DLM_MLE_BLOCK)
1268                                 set_bit(dlm->node_num, mle->maybe_map);
1269                 }
1270                 ret = -EAGAIN;
1271                 node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1272         }
1273         return ret;
1274 }
1275
1276
1277 /*
1278  * DLM_MASTER_REQUEST_MSG
1279  *
1280  * returns: 0 on success,
1281  *          -errno on a network error
1282  *
1283  * on error, the caller should assume the target node is "dead"
1284  *
1285  */
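/* Responses: DLM_MASTER_RESP_YES means the target node is the master,
 * _NO means it is not, _MAYBE means it is not known to be the master but
 * might end up mastering it (the caller sets its bit in maybe_map), and
 * _ERROR means the request should be resent after a short delay. */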
1286
1287 static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to)
1288 {
1289         struct dlm_ctxt *dlm = mle->dlm;
1290         struct dlm_master_request request;
1291         int ret, response=0, resend;
1292
1293         memset(&request, 0, sizeof(request));
1294         request.node_idx = dlm->node_num;
1295
1296         BUG_ON(mle->type == DLM_MLE_MIGRATION);
1297
1298         if (mle->type != DLM_MLE_MASTER) {
1299                 request.namelen = mle->u.name.len;
1300                 memcpy(request.name, mle->u.name.name, request.namelen);
1301         } else {
1302                 request.namelen = mle->u.res->lockname.len;
1303                 memcpy(request.name, mle->u.res->lockname.name,
1304                         request.namelen);
1305         }
1306
1307 again:
1308         ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
1309                                  sizeof(request), to, &response);
1310         if (ret < 0)  {
1311                 if (ret == -ESRCH) {
1312                         /* should never happen */
1313                         mlog(ML_ERROR, "TCP stack not ready!\n");
1314                         BUG();
1315                 } else if (ret == -EINVAL) {
1316                         mlog(ML_ERROR, "bad args passed to o2net!\n");
1317                         BUG();
1318                 } else if (ret == -ENOMEM) {
1319                         mlog(ML_ERROR, "out of memory while trying to send "
1320                              "network message!  retrying\n");
1321                         /* this is totally crude */
1322                         msleep(50);
1323                         goto again;
1324                 } else if (!dlm_is_host_down(ret)) {
1325                         /* not a network error. bad. */
1326                         mlog_errno(ret);
1327                         mlog(ML_ERROR, "unhandled error!\n");
1328                         BUG();
1329                 }
1330                 /* all other errors should be network errors,
1331                  * and likely indicate node death */
1332                 mlog(ML_ERROR, "link to %d went down!\n", to);
1333                 goto out;
1334         }
1335
1336         ret = 0;
1337         resend = 0;
1338         spin_lock(&mle->spinlock);
1339         switch (response) {
1340                 case DLM_MASTER_RESP_YES:
1341                         set_bit(to, mle->response_map);
1342                         mlog(0, "node %u is the master, response=YES\n", to);
1343                         mle->master = to;
1344                         break;
1345                 case DLM_MASTER_RESP_NO:
1346                         mlog(0, "node %u not master, response=NO\n", to);
1347                         set_bit(to, mle->response_map);
1348                         break;
1349                 case DLM_MASTER_RESP_MAYBE:
1350                         mlog(0, "node %u not master, response=MAYBE\n", to);
1351                         set_bit(to, mle->response_map);
1352                         set_bit(to, mle->maybe_map);
1353                         break;
1354                 case DLM_MASTER_RESP_ERROR:
1355                         mlog(0, "node %u hit an error, resending\n", to);
1356                         resend = 1;
1357                         response = 0;
1358                         break;
1359                 default:
1360                         mlog(ML_ERROR, "bad response! %u\n", response);
1361                         BUG();
1362         }
1363         spin_unlock(&mle->spinlock);
1364         if (resend) {
1365                 /* this is also totally crude */
1366                 msleep(50);
1367                 goto again;
1368         }
1369
1370 out:
1371         return ret;
1372 }
1373
1374 /*
1375  * locks that can be taken here:
1376  * dlm->spinlock
1377  * res->spinlock
1378  * mle->spinlock
1379  * dlm->master_list
1380  *
1381  * if possible, TRIM THIS DOWN!!!
1382  */
1383 int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data)
1384 {
1385         u8 response = DLM_MASTER_RESP_MAYBE;
1386         struct dlm_ctxt *dlm = data;
1387         struct dlm_lock_resource *res = NULL;
1388         struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
1389         struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
1390         char *name;
1391         unsigned int namelen, hash;
1392         int found, ret;
1393         int set_maybe;
1394         int dispatch_assert = 0;
1395
1396         if (!dlm_grab(dlm))
1397                 return DLM_MASTER_RESP_NO;
1398
1399         if (!dlm_domain_fully_joined(dlm)) {
1400                 response = DLM_MASTER_RESP_NO;
1401                 goto send_response;
1402         }
1403
1404         name = request->name;
1405         namelen = request->namelen;
1406         hash = dlm_lockid_hash(name, namelen);
1407
1408         if (namelen > DLM_LOCKID_NAME_MAX) {
1409                 response = DLM_IVBUFLEN;
1410                 goto send_response;
1411         }
1412
1413 way_up_top:
1414         spin_lock(&dlm->spinlock);
1415         res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1416         if (res) {
1417                 spin_unlock(&dlm->spinlock);
1418
1419                 /* take care of the easy cases up front */
1420                 spin_lock(&res->spinlock);
1421                 if (res->state & DLM_LOCK_RES_RECOVERING) {
1422                         spin_unlock(&res->spinlock);
1423                         mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
1424                              "being recovered\n");
1425                         response = DLM_MASTER_RESP_ERROR;
1426                         if (mle)
1427                                 kmem_cache_free(dlm_mle_cache, mle);
1428                         goto send_response;
1429                 }
1430
1431                 if (res->owner == dlm->node_num) {
1432                         spin_unlock(&res->spinlock);
1433                         // mlog(0, "this node is the master\n");
1434                         response = DLM_MASTER_RESP_YES;
1435                         if (mle)
1436                                 kmem_cache_free(dlm_mle_cache, mle);
1437
1438                         /* this node is the owner.
1439                          * there is some extra work that needs to
1440                          * happen now.  the requesting node has
1441                          * caused all nodes up to this one to
1442                          * create mles.  this node now needs to
1443                          * go back and clean those up. */
1444                         dispatch_assert = 1;
1445                         goto send_response;
1446                 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1447                         spin_unlock(&res->spinlock);
1448                         // mlog(0, "node %u is the master\n", res->owner);
1449                         response = DLM_MASTER_RESP_NO;
1450                         if (mle)
1451                                 kmem_cache_free(dlm_mle_cache, mle);
1452                         goto send_response;
1453                 }
1454
1455                 /* ok, there is no owner.  either this node is
1456                  * being blocked, or it is actively trying to
1457                  * master this lock. */
1458                 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1459                         mlog(ML_ERROR, "lock with no owner should be "
1460                              "in-progress!\n");
1461                         BUG();
1462                 }
1463
1464                 // mlog(0, "lockres is in progress...\n");
1465                 spin_lock(&dlm->master_lock);
1466                 found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1467                 if (!found) {
1468                         mlog(ML_ERROR, "no mle found for this lock!\n");
1469                         BUG();
1470                 }
1471                 set_maybe = 1;
1472                 spin_lock(&tmpmle->spinlock);
1473                 if (tmpmle->type == DLM_MLE_BLOCK) {
1474                         // mlog(0, "this node is waiting for "
1475                         // "lockres to be mastered\n");
1476                         response = DLM_MASTER_RESP_NO;
1477                 } else if (tmpmle->type == DLM_MLE_MIGRATION) {
1478                         mlog(0, "node %u is master, but trying to migrate to "
1479                              "node %u.\n", tmpmle->master, tmpmle->new_master);
1480                         if (tmpmle->master == dlm->node_num) {
1481                                 response = DLM_MASTER_RESP_YES;
1482                                 mlog(ML_ERROR, "no owner on lockres, but this "
1483                                      "node is trying to migrate it to %u?!\n",
1484                                      tmpmle->new_master);
1485                                 BUG();
1486                         } else {
1487                                 /* the real master can respond on its own */
1488                                 response = DLM_MASTER_RESP_NO;
1489                         }
1490                 } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1491                         set_maybe = 0;
1492                         if (tmpmle->master == dlm->node_num) {
1493                                 response = DLM_MASTER_RESP_YES;
1494                                 /* this node will be the owner.
1495                                  * go back and clean the mles on any
1496                                  * other nodes */
1497                                 dispatch_assert = 1;
1498                         } else
1499                                 response = DLM_MASTER_RESP_NO;
1500                 } else {
1501                         // mlog(0, "this node is attempting to "
1502                         // "master lockres\n");
1503                         response = DLM_MASTER_RESP_MAYBE;
1504                 }
1505                 if (set_maybe)
1506                         set_bit(request->node_idx, tmpmle->maybe_map);
1507                 spin_unlock(&tmpmle->spinlock);
1508
1509                 spin_unlock(&dlm->master_lock);
1510                 spin_unlock(&res->spinlock);
1511
1512                 /* keep the mle attached to heartbeat events */
1513                 dlm_put_mle(tmpmle);
1514                 if (mle)
1515                         kmem_cache_free(dlm_mle_cache, mle);
1516                 goto send_response;
1517         }
1518
1519         /*
1520          * lockres doesn't exist on this node
1521          * if there is an MLE_BLOCK, return NO
1522          * if there is an MLE_MASTER, return MAYBE
1523          * otherwise, add an MLE_BLOCK, return NO
1524          */
1525         spin_lock(&dlm->master_lock);
1526         found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1527         if (!found) {
1528                 /* this lockid has never been seen on this node yet */
1529                 // mlog(0, "no mle found\n");
1530                 if (!mle) {
1531                         spin_unlock(&dlm->master_lock);
1532                         spin_unlock(&dlm->spinlock);
1533
1534                         mle = (struct dlm_master_list_entry *)
1535                                 kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
1536                         if (!mle) {
1537                                 response = DLM_MASTER_RESP_ERROR;
1538                                 mlog_errno(-ENOMEM);
1539                                 goto send_response;
1540                         }
1541                         goto way_up_top;
1542                 }
1543
1544                 // mlog(0, "this is second time thru, already allocated, "
1545                 // "add the block.\n");
1546                 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
1547                 set_bit(request->node_idx, mle->maybe_map);
1548                 list_add(&mle->list, &dlm->master_list);
1549                 response = DLM_MASTER_RESP_NO;
1550         } else {
1551                 // mlog(0, "mle was found\n");
1552                 set_maybe = 1;
1553                 spin_lock(&tmpmle->spinlock);
1554                 if (tmpmle->master == dlm->node_num) {
1555                         mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
1556                         BUG();
1557                 }
1558                 if (tmpmle->type == DLM_MLE_BLOCK)
1559                         response = DLM_MASTER_RESP_NO;
1560                 else if (tmpmle->type == DLM_MLE_MIGRATION) {
1561                         mlog(0, "migration mle was found (%u->%u)\n",
1562                              tmpmle->master, tmpmle->new_master);
1563                         /* real master can respond on its own */
1564                         response = DLM_MASTER_RESP_NO;
1565                 } else
1566                         response = DLM_MASTER_RESP_MAYBE;
1567                 if (set_maybe)
1568                         set_bit(request->node_idx, tmpmle->maybe_map);
1569                 spin_unlock(&tmpmle->spinlock);
1570         }
1571         spin_unlock(&dlm->master_lock);
1572         spin_unlock(&dlm->spinlock);
1573
1574         if (found) {
1575                 /* keep the mle attached to heartbeat events */
1576                 dlm_put_mle(tmpmle);
1577         }
1578 send_response:
1579
1580         if (dispatch_assert) {
1581                 if (response != DLM_MASTER_RESP_YES)
1582                         mlog(ML_ERROR, "invalid response %d\n", response);
1583                 if (!res) {
1584                         mlog(ML_ERROR, "bad lockres while trying to assert!\n");
1585                         BUG();
1586                 }
1587                 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1588                              dlm->node_num, res->lockname.len, res->lockname.name);
1589                 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, 
1590                                                  DLM_ASSERT_MASTER_MLE_CLEANUP);
1591                 if (ret < 0) {
1592                         mlog(ML_ERROR, "failed to dispatch assert master work\n");
1593                         response = DLM_MASTER_RESP_ERROR;
1594                 }
1595         }
1596
1597         dlm_put(dlm);
1598         return response;
1599 }
1600
1601 /*
1602  * DLM_ASSERT_MASTER_MSG
1603  */
1604
1605
1606 /*
1607  * NOTE: this can be used for debugging: one could periodically run
1608  * through all the locks owned by this node and re-assert them
1609  * across the cluster...
1610  */
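/*
 * A minimal sketch of such a debug pass (hypothetical, not implemented
 * here) could copy the live node map and re-send the assert for every
 * lockres with res->owner == dlm->node_num:
 *
 *	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
 *
 *	spin_lock(&dlm->spinlock);
 *	memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
 *	spin_unlock(&dlm->spinlock);
 *	clear_bit(dlm->node_num, nodemap);
 *	dlm_do_assert_master(dlm, res->lockname.name,
 *			     res->lockname.len, nodemap, 0);
 *
 * which is the same pattern dlm_assert_master_worker() uses below.
 */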
1611 static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname,
1612                                 unsigned int namelen, void *nodemap,
1613                                 u32 flags)
1614 {
1615         struct dlm_assert_master assert;
1616         int to, tmpret;
1617         struct dlm_node_iter iter;
1618         int ret = 0;
1619         int reassert;
1620
1621         BUG_ON(namelen > O2NM_MAX_NAME_LEN);
1622 again:
1623         reassert = 0;
1624
1625         /* note that if this nodemap is empty, it returns 0 */
1626         dlm_node_iter_init(nodemap, &iter);
1627         while ((to = dlm_node_iter_next(&iter)) >= 0) {
1628                 int r = 0;
1629                 struct dlm_master_list_entry *mle = NULL;
1630
1631                 mlog(0, "sending assert master to %d (%.*s)\n", to,
1632                      namelen, lockname);
1633                 memset(&assert, 0, sizeof(assert));
1634                 assert.node_idx = dlm->node_num;
1635                 assert.namelen = namelen;
1636                 memcpy(assert.name, lockname, namelen);
1637                 assert.flags = cpu_to_be32(flags);
1638
1639                 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
1640                                             &assert, sizeof(assert), to, &r);
1641                 if (tmpret < 0) {
1642                         mlog(0, "assert_master returned %d!\n", tmpret);
1643                         if (!dlm_is_host_down(tmpret)) {
1644                                 mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
1645                                 BUG();
1646                         }
1647                         /* a node died.  finish out the rest of the nodes. */
1648                         mlog(0, "link to %d went down!\n", to);
1649                         /* any nonzero status return will do */
1650                         ret = tmpret;
1651                 } else if (r < 0) {
1652                         /* ok, something is horribly messed up.  kill thyself. */
1653                         mlog(ML_ERROR,"during assert master of %.*s to %u, "
1654                              "got %d.\n", namelen, lockname, to, r);
1655                         spin_lock(&dlm->spinlock);
1656                         spin_lock(&dlm->master_lock);
1657                         if (dlm_find_mle(dlm, &mle, (char *)lockname,
1658                                          namelen)) {
1659                                 dlm_print_one_mle(mle);
1660                                 __dlm_put_mle(mle);
1661                         }
1662                         spin_unlock(&dlm->master_lock);
1663                         spin_unlock(&dlm->spinlock);
1664                         BUG();
1665                 } else if (r == EAGAIN) {
1666                         mlog(0, "%.*s: node %u created mles on other "
1667                              "nodes and requests a re-assert\n",
1668                              namelen, lockname, to);
1669                         reassert = 1;
1670                 }
1671         }
1672
1673         if (reassert)
1674                 goto again;
1675
1676         return ret;
1677 }
1678
1679 /*
1680  * locks that can be taken here:
1681  * dlm->spinlock
1682  * res->spinlock
1683  * mle->spinlock
1684  * dlm->master_list
1685  *
1686  * if possible, TRIM THIS DOWN!!!
1687  */
1688 int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
1689 {
1690         struct dlm_ctxt *dlm = data;
1691         struct dlm_master_list_entry *mle = NULL;
1692         struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1693         struct dlm_lock_resource *res = NULL;
1694         char *name;
1695         unsigned int namelen, hash;
1696         u32 flags;
1697         int master_request = 0;
1698         int ret = 0;
1699
1700         if (!dlm_grab(dlm))
1701                 return 0;
1702
1703         name = assert->name;
1704         namelen = assert->namelen;
1705         hash = dlm_lockid_hash(name, namelen);
1706         flags = be32_to_cpu(assert->flags);
1707
1708         if (namelen > DLM_LOCKID_NAME_MAX) {
1709                 mlog(ML_ERROR, "Invalid name length!");
1710                 goto done;
1711         }
1712
1713         spin_lock(&dlm->spinlock);
1714
1715         if (flags)
1716                 mlog(0, "assert_master with flags: %u\n", flags);
1717
1718         /* find the MLE */
1719         spin_lock(&dlm->master_lock);
1720         if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1721                 /* not an error, could be master just re-asserting */
1722                 mlog(0, "just got an assert_master from %u, but no "
1723                      "MLE for it! (%.*s)\n", assert->node_idx,
1724                      namelen, name);
1725         } else {
1726                 int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
1727                 if (bit >= O2NM_MAX_NODES) {
1728                         /* not necessarily an error, though less likely.
1729                          * could be master just re-asserting. */
1730                         mlog(0, "no bits set in the maybe_map, but %u "
1731                              "is asserting! (%.*s)\n", assert->node_idx,
1732                              namelen, name);
1733                 } else if (bit != assert->node_idx) {
1734                         if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1735                                 mlog(0, "master %u was found, %u should "
1736                                      "back off\n", assert->node_idx, bit);
1737                         } else {
1738                                 /* with the fix for bug 569, a higher node
1739                                  * number winning the mastery will respond
1740                                  * YES to mastery requests, but this node
1741                                  * had no way of knowing.  let it pass. */
1742                                 mlog(0, "%u is the lowest node, "
1743                                      "%u is asserting. (%.*s)  %u must "
1744                                      "have begun after %u won.\n", bit,
1745                                      assert->node_idx, namelen, name, bit,
1746                                      assert->node_idx);
1747                         }
1748                 }
1749                 if (mle->type == DLM_MLE_MIGRATION) {
1750                         if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1751                                 mlog(0, "%s:%.*s: got cleanup assert"
1752                                      " from %u for migration\n",
1753                                      dlm->name, namelen, name,
1754                                      assert->node_idx);
1755                         } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
1756                                 mlog(0, "%s:%.*s: got unrelated assert"
1757                                      " from %u for migration, ignoring\n",
1758                                      dlm->name, namelen, name,
1759                                      assert->node_idx);
1760                                 __dlm_put_mle(mle);
1761                                 spin_unlock(&dlm->master_lock);
1762                                 spin_unlock(&dlm->spinlock);
1763                                 goto done;
1764                         }       
1765                 }
1766         }
1767         spin_unlock(&dlm->master_lock);
1768
1769         /* ok everything checks out with the MLE
1770          * now check to see if there is a lockres */
1771         res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1772         if (res) {
1773                 spin_lock(&res->spinlock);
1774                 if (res->state & DLM_LOCK_RES_RECOVERING)  {
1775                         mlog(ML_ERROR, "%u asserting but %.*s is "
1776                              "RECOVERING!\n", assert->node_idx, namelen, name);
1777                         goto kill;
1778                 }
1779                 if (!mle) {
1780                         if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1781                             res->owner != assert->node_idx) {
1782                                 mlog(ML_ERROR, "assert_master from "
1783                                           "%u, but current owner is "
1784                                           "%u! (%.*s)\n",
1785                                        assert->node_idx, res->owner,
1786                                        namelen, name);
1787                                 goto kill;
1788                         }
1789                 } else if (mle->type != DLM_MLE_MIGRATION) {
1790                         if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1791                                 /* owner is just re-asserting */
1792                                 if (res->owner == assert->node_idx) {
1793                                         mlog(0, "owner %u re-asserting on "
1794                                              "lock %.*s\n", assert->node_idx,
1795                                              namelen, name);
1796                                         goto ok;
1797                                 }
1798                                 mlog(ML_ERROR, "got assert_master from "
1799                                      "node %u, but %u is the owner! "
1800                                      "(%.*s)\n", assert->node_idx,
1801                                      res->owner, namelen, name);
1802                                 goto kill;
1803                         }
1804                         if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1805                                 mlog(ML_ERROR, "got assert from %u, but lock "
1806                                      "with no owner should be "
1807                                      "in-progress! (%.*s)\n",
1808                                      assert->node_idx,
1809                                      namelen, name);
1810                                 goto kill;
1811                         }
1812                 } else /* mle->type == DLM_MLE_MIGRATION */ {
1813                         /* should only be getting an assert from new master */
1814                         if (assert->node_idx != mle->new_master) {
1815                                 mlog(ML_ERROR, "got assert from %u, but "
1816                                      "new master is %u, and old master "
1817                                      "was %u (%.*s)\n",
1818                                      assert->node_idx, mle->new_master,
1819                                      mle->master, namelen, name);
1820                                 goto kill;
1821                         }
1822
1823                 }
1824 ok:
1825                 spin_unlock(&res->spinlock);
1826         }
1827         spin_unlock(&dlm->spinlock);
1828
1829         // mlog(0, "woo!  got an assert_master from node %u!\n",
1830         //           assert->node_idx);
1831         if (mle) {
1832                 int extra_ref = 0;
1833                 int nn = -1;
1834                 int rr, err = 0;
1835                 
1836                 spin_lock(&mle->spinlock);
1837                 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1838                         extra_ref = 1;
1839                 else {
1840                         /* MASTER mle: if any bits set in the response map
1841                          * then the calling node needs to re-assert to clear
1842                          * up nodes that this node contacted */
1843                         while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, 
1844                                                     nn+1)) < O2NM_MAX_NODES) {
1845                                 if (nn != dlm->node_num && nn != assert->node_idx)
1846                                         master_request = 1;
1847                         }
1848                 }
1849                 mle->master = assert->node_idx;
1850                 atomic_set(&mle->woken, 1);
1851                 wake_up(&mle->wq);
1852                 spin_unlock(&mle->spinlock);
1853
1854                 if (res) {
1855                         spin_lock(&res->spinlock);
1856                         if (mle->type == DLM_MLE_MIGRATION) {
1857                                 mlog(0, "finishing off migration of lockres %.*s, "
1858                                         "from %u to %u\n",
1859                                         res->lockname.len, res->lockname.name,
1860                                         dlm->node_num, mle->new_master);
1861                                 res->state &= ~DLM_LOCK_RES_MIGRATING;
1862                                 dlm_change_lockres_owner(dlm, res, mle->new_master);
1863                                 BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1864                         } else {
1865                                 dlm_change_lockres_owner(dlm, res, mle->master);
1866                         }
1867                         spin_unlock(&res->spinlock);
1868                 }
1869
1870                 /* master is known, detach if not already detached.
1871                  * ensures that only one assert_master call will happen
1872                  * on this mle. */
1873                 spin_lock(&dlm->spinlock);
1874                 spin_lock(&dlm->master_lock);
1875
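                /* sanity check: given whether the mle is marked inuse and
                 * whether this assert carries the extra message ref, the
                 * refcount must at least cover the puts done below (plus
                 * the inuse ref, when held); if it does not, the mle
                 * refcounting is broken, so dump it */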
1876                 rr = atomic_read(&mle->mle_refs.refcount);
1877                 if (mle->inuse > 0) {
1878                         if (extra_ref && rr < 3)
1879                                 err = 1;
1880                         else if (!extra_ref && rr < 2)
1881                                 err = 1;
1882                 } else {
1883                         if (extra_ref && rr < 2)
1884                                 err = 1;
1885                         else if (!extra_ref && rr < 1)
1886                                 err = 1;
1887                 }
1888                 if (err) {
1889                         mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
1890                              "that will mess up this node, refs=%d, extra=%d, "
1891                              "inuse=%d\n", dlm->name, namelen, name,
1892                              assert->node_idx, rr, extra_ref, mle->inuse);
1893                         dlm_print_one_mle(mle);
1894                 }
1895                 list_del_init(&mle->list);
1896                 __dlm_mle_detach_hb_events(dlm, mle);
1897                 __dlm_put_mle(mle);
1898                 if (extra_ref) {
1899                         /* the assert master message now balances the extra
1900                          * ref given by the master / migration request message.
1901                          * if this is the last put, it will be removed
1902                          * from the list. */
1903                         __dlm_put_mle(mle);
1904                 }
1905                 spin_unlock(&dlm->master_lock);
1906                 spin_unlock(&dlm->spinlock);
1907         } else if (res) {
1908                 if (res->owner != assert->node_idx) {
1909                         mlog(0, "assert_master from %u, but current "
1910                              "owner is %u (%.*s), no mle\n", assert->node_idx,
1911                              res->owner, namelen, name);
1912                 }
1913         }
1914
1915 done:
1916         ret = 0;
1917         if (res)
1918                 dlm_lockres_put(res);
1919         dlm_put(dlm);
1920         if (master_request) {
1921                 mlog(0, "need to tell master to reassert\n");
1922                 ret = EAGAIN;  // positive. negative would shoot down the node.
1923         }
1924         return ret;
1925
1926 kill:
1927         /* kill the caller! */
1928         mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
1929              "and killing the other node now!  This node is OK and can continue.\n");
1930         __dlm_print_one_lock_resource(res);
1931         spin_unlock(&res->spinlock);
1932         spin_unlock(&dlm->spinlock);
1933         dlm_lockres_put(res);
1934         dlm_put(dlm);
1935         return -EINVAL;
1936 }
1937
1938 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
1939                                struct dlm_lock_resource *res,
1940                                int ignore_higher, u8 request_from, u32 flags)
1941 {
1942         struct dlm_work_item *item;
1943         item = kcalloc(1, sizeof(*item), GFP_NOFS);
1944         if (!item)
1945                 return -ENOMEM;
1946
1947
1948         /* queue up work for dlm_assert_master_worker */
1949         dlm_grab(dlm);  /* get an extra ref for the work item */
1950         dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
1951         item->u.am.lockres = res; /* already have a ref */
1952         /* can optionally ignore node numbers higher than this node */
1953         item->u.am.ignore_higher = ignore_higher;
1954         item->u.am.request_from = request_from;
1955         item->u.am.flags = flags;
1956
1957         if (ignore_higher) 
1958                 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, 
1959                      res->lockname.name);
1960                 
1961         spin_lock(&dlm->work_lock);
1962         list_add_tail(&item->list, &dlm->work_list);
1963         spin_unlock(&dlm->work_lock);
1964
1965         queue_work(dlm->dlm_worker, &dlm->dispatched_work);
1966         return 0;
1967 }
1968
1969 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
1970 {
1971         struct dlm_ctxt *dlm = data;
1972         int ret = 0;
1973         struct dlm_lock_resource *res;
1974         unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
1975         int ignore_higher;
1976         int bit;
1977         u8 request_from;
1978         u32 flags;
1979
1980         dlm = item->dlm;
1981         res = item->u.am.lockres;
1982         ignore_higher = item->u.am.ignore_higher;
1983         request_from = item->u.am.request_from;
1984         flags = item->u.am.flags;
1985
1986         spin_lock(&dlm->spinlock);
1987         memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
1988         spin_unlock(&dlm->spinlock);
1989
1990         clear_bit(dlm->node_num, nodemap);
1991         if (ignore_higher) {
1992                 /* if this is just to clear up mles for nodes below
1993                  * this node, do not send the message to the original
1994                  * caller or any node number higher than this */
1995                 clear_bit(request_from, nodemap);
1996                 bit = dlm->node_num;
1997                 while (1) {
1998                         bit = find_next_bit(nodemap, O2NM_MAX_NODES,
1999                                             bit+1);
2000                         if (bit >= O2NM_MAX_NODES)
2001                                 break;
2002                         clear_bit(bit, nodemap);
2003                 }
2004         }
2005
2006         /*
2007          * If we're migrating this lock to someone else, we are no
2008          * longer allowed to assert our own mastery.  OTOH, we need to
2009          * prevent migration from starting while we're still asserting
2010          * our dominance.  The reserved ast delays migration.
2011          */
2012         spin_lock(&res->spinlock);
2013         if (res->state & DLM_LOCK_RES_MIGRATING) {
2014                 mlog(0, "Someone asked us to assert mastery, but we're "
2015                      "in the middle of migration.  Skipping assert, "
2016                      "the new master will handle that.\n");
2017                 spin_unlock(&res->spinlock);
2018                 goto put;
2019         } else
2020                 __dlm_lockres_reserve_ast(res);
2021         spin_unlock(&res->spinlock);
2022
2023         /* this call now finishes out the nodemap
2024          * even if one or more nodes die */
2025         mlog(0, "worker about to master %.*s here, this=%u\n",
2026                      res->lockname.len, res->lockname.name, dlm->node_num);
2027         ret = dlm_do_assert_master(dlm, res->lockname.name,
2028                                    res->lockname.len,
2029                                    nodemap, flags);
2030         if (ret < 0) {
2031                 /* no need to restart, we are done */
2032                 if (!dlm_is_host_down(ret))
2033                         mlog_errno(ret);
2034         }
2035
2036         /* Ok, we've asserted ourselves.  Let's let migration start. */
2037         dlm_lockres_release_ast(dlm, res);
2038
2039 put:
2040         dlm_lockres_put(res);
2041
2042         mlog(0, "finished with dlm_assert_master_worker\n");
2043 }
2044
2045 /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
2046  * We cannot wait for node recovery to complete to begin mastering this
2047  * lockres because this lockres is used to kick off recovery! ;-)
2048  * So, do a pre-check on all living nodes to see if any of those nodes
2049  * think that $RECOVERY is currently mastered by a dead node.  If so,
2050  * we wait a short time to allow that node to get notified by its own
2051  * heartbeat stack, then check again.  All $RECOVERY lock resources
2052  * mastered by dead nodes are purged when the heartbeat callback is
2053  * fired, so we know it is safe to continue once the queried node
2054  * reports either a live master or no master at all.  */
2055 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2056                                        struct dlm_lock_resource *res)
2057 {
2058         struct dlm_node_iter iter;
2059         int nodenum;
2060         int ret = 0;
2061         u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2062
2063         spin_lock(&dlm->spinlock);
2064         dlm_node_iter_init(dlm->domain_map, &iter);
2065         spin_unlock(&dlm->spinlock);
2066
2067         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2068                 /* do not send to self */
2069                 if (nodenum == dlm->node_num)
2070                         continue;
2071                 ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2072                 if (ret < 0) {
2073                         mlog_errno(ret);
2074                         if (!dlm_is_host_down(ret))
2075                                 BUG();
2076                         /* host is down, so answer for that node would be
2077                          * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
2078                         ret = 0;
2079                 }
2080
2081                 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2082                         /* check to see if this master is in the recovery map */
2083                         spin_lock(&dlm->spinlock);
2084                         if (test_bit(master, dlm->recovery_map)) {
2085                                 mlog(ML_NOTICE, "%s: node %u has not seen "
2086                                      "node %u go down yet, and thinks the "
2087                                      "dead node is mastering the recovery "
2088                                      "lock.  must wait.\n", dlm->name,
2089                                      nodenum, master);
2090                                 ret = -EAGAIN;
2091                         }
2092                         spin_unlock(&dlm->spinlock);
2093                         mlog(0, "%s: reco lock master is %u\n", dlm->name, 
2094                              master);
2095                         break;
2096                 }
2097         }
2098         return ret;
2099 }
2100
2101
2102 /*
2103  * DLM_MIGRATE_LOCKRES
2104  */
2105
2106
2107 int dlm_migrate_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
2108                         u8 target)
2109 {
2110         struct dlm_master_list_entry *mle = NULL;
2111         struct dlm_master_list_entry *oldmle = NULL;
2112         struct dlm_migratable_lockres *mres = NULL;
2113         int ret = -EINVAL;
2114         const char *name;
2115         unsigned int namelen;
2116         int mle_added = 0;
2117         struct list_head *queue, *iter;
2118         int i;
2119         struct dlm_lock *lock;
2120         int empty = 1;
2121
2122         if (!dlm_grab(dlm))
2123                 return -EINVAL;
2124
2125         name = res->lockname.name;
2126         namelen = res->lockname.len;
2127
2128         mlog(0, "migrating %.*s to %u\n", namelen, name, target);
2129
2130         /*
2131          * ensure this lockres is a proper candidate for migration
2132          */
2133         spin_lock(&res->spinlock);
2134         if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2135                 mlog(0, "cannot migrate lockres with unknown owner!\n");
2136                 spin_unlock(&res->spinlock);
2137                 goto leave;
2138         }
2139         if (res->owner != dlm->node_num) {
2140                 mlog(0, "cannot migrate lockres this node doesn't own!\n");
2141                 spin_unlock(&res->spinlock);
2142                 goto leave;
2143         }
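        /* a lockres may only be migrated away if this node holds no
         * locks on any of its three queues (granted, converting,
         * blocked) */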
2144         mlog(0, "checking queues...\n");
2145         queue = &res->granted;
2146         for (i=0; i<3; i++) {
2147                 list_for_each(iter, queue) {
2148                         lock = list_entry (iter, struct dlm_lock, list);
2149                         empty = 0;
2150                         if (lock->ml.node == dlm->node_num) {
2151                                 mlog(0, "found a lock owned by this node "
2152                                      "still on the %s queue!  will not "
2153                                      "migrate this lockres\n",
2154                                      i==0 ? "granted" :
2155                                      (i==1 ? "converting" : "blocked"));
2156                                 spin_unlock(&res->spinlock);
2157                                 ret = -ENOTEMPTY;
2158                                 goto leave;
2159                         }
2160                 }
2161                 queue++;
2162         }
2163         mlog(0, "all locks on this lockres are nonlocal.  continuing\n");
2164         spin_unlock(&res->spinlock);
2165
2166         /* no work to do */
2167         if (empty) {
2168                 mlog(0, "no locks were found on this lockres! done!\n");
2169                 ret = 0;
2170                 goto leave;
2171         }
2172
2173         /*
2174          * preallocate up front
2175          * if this fails, abort
2176          */
2177
2178         ret = -ENOMEM;
2179         mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
2180         if (!mres) {
2181                 mlog_errno(ret);
2182                 goto leave;
2183         }
2184
2185         mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
2186                                                                 GFP_NOFS);
2187         if (!mle) {
2188                 mlog_errno(ret);
2189                 goto leave;
2190         }
2191         ret = 0;
2192
2193         /*
2194          * find a node to migrate the lockres to
2195          */
2196
2197         mlog(0, "picking a migration node\n");
2198         spin_lock(&dlm->spinlock);
2199         /* pick a new node */
2200         if (!test_bit(target, dlm->domain_map) ||
2201             target >= O2NM_MAX_NODES) {
2202                 target = dlm_pick_migration_target(dlm, res);
2203         }
2204         mlog(0, "node %u chosen for migration\n", target);
2205
2206         if (target >= O2NM_MAX_NODES ||
2207             !test_bit(target, dlm->domain_map)) {
2208                 /* target chosen is not alive */
2209                 ret = -EINVAL;
2210         }
2211
2212         if (ret) {
2213                 spin_unlock(&dlm->spinlock);
2214                 goto fail;
2215         }
2216
2217         mlog(0, "continuing with target = %u\n", target);
2218
2219         /*
2220          * clear any existing master requests and
2221          * add the migration mle to the list
2222          */
2223         spin_lock(&dlm->master_lock);
2224         ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2225                                     namelen, target, dlm->node_num);
2226         spin_unlock(&dlm->master_lock);
2227         spin_unlock(&dlm->spinlock);
2228
2229         if (ret == -EEXIST) {
2230                 mlog(0, "another process is already migrating it\n");
2231                 goto fail;
2232         }
2233         mle_added = 1;
2234
2235         /*
2236          * set the MIGRATING flag and flush asts
2237          * if we fail after this we need to re-dirty the lockres
2238          */
2239         if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2240                 mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2241                      "the target went down.\n", res->lockname.len,
2242                      res->lockname.name, target);
2243                 spin_lock(&res->spinlock);
2244                 res->state &= ~DLM_LOCK_RES_MIGRATING;
2245                 spin_unlock(&res->spinlock);
2246                 ret = -EINVAL;
2247         }
2248
2249 fail:
2250         if (oldmle) {
2251                 /* master is known, detach if not already detached */
2252                 dlm_mle_detach_hb_events(dlm, oldmle);
2253                 dlm_put_mle(oldmle);
2254         }
2255
2256         if (ret < 0) {
2257                 if (mle_added) {
2258                         dlm_mle_detach_hb_events(dlm, mle);
2259                         dlm_put_mle(mle);
2260                 } else if (mle) {
2261                         kmem_cache_free(dlm_mle_cache, mle);
2262                 }
2263                 goto leave;
2264         }
2265
2266         /*
2267          * at this point, we have a migration target, an mle
2268          * in the master list, and the MIGRATING flag set on
2269          * the lockres
2270          */
2271
2272
2273         /* get an extra reference on the mle.
2274          * otherwise the assert_master from the new
2275          * master will destroy this.
2276          * also, make sure that all callers of dlm_get_mle
2277          * take both dlm->spinlock and dlm->master_lock */
2278         spin_lock(&dlm->spinlock);
2279         spin_lock(&dlm->master_lock);
2280         dlm_get_mle_inuse(mle);
2281         spin_unlock(&dlm->master_lock);
2282         spin_unlock(&dlm->spinlock);
2283
2284         /* notify new node and send all lock state */
2285         /* call send_one_lockres with migration flag.
2286          * this serves as notice to the target node that a
2287          * migration is starting. */
2288         ret = dlm_send_one_lockres(dlm, res, mres, target,
2289                                    DLM_MRES_MIGRATION);
2290
2291         if (ret < 0) {
2292                 mlog(0, "migration to node %u failed with %d\n",
2293                      target, ret);
2294                 /* migration failed, detach and clean up mle */
2295                 dlm_mle_detach_hb_events(dlm, mle);
2296                 dlm_put_mle(mle);
2297                 dlm_put_mle_inuse(mle);
2298                 spin_lock(&res->spinlock);
2299                 res->state &= ~DLM_LOCK_RES_MIGRATING;
2300                 spin_unlock(&res->spinlock);
2301                 goto leave;
2302         }
2303
2304         /* at this point, the target sends a message to all nodes,
2305          * (using dlm_do_migrate_request).  this node is skipped since
2306          * we had to put an mle in the list to begin the process.  this
2307          * node now waits for target to do an assert master.  this node
2308          * will be the last one notified, ensuring that the migration
2309          * is complete everywhere.  if the target dies while this is
2310          * going on, some nodes could potentially see the target as the
2311          * master, so it is important that my recovery finds the migration
2312          * mle and sets the master to UNKNOWN. */
2313
2314
2315         /* wait for new node to assert master */
2316         while (1) {
2317                 ret = wait_event_interruptible_timeout(mle->wq,
2318                                         (atomic_read(&mle->woken) == 1),
2319                                         msecs_to_jiffies(5000));
2320
2321                 if (ret >= 0) {
2322                         if (atomic_read(&mle->woken) == 1 ||
2323                             res->owner == target)
2324                                 break;
2325
2326                         mlog(0, "timed out during migration\n");
2327                         /* avoid hang during shutdown when migrating lockres 
2328                          * to a node which also goes down */
2329                         if (dlm_is_node_dead(dlm, target)) {
2330                                 mlog(0, "%s:%.*s: expected migration "
2331                                      "target %u is no longer up, restarting\n",
2332                                      dlm->name, res->lockname.len,
2333                                      res->lockname.name, target);
2334                                 ret = -ERESTARTSYS;
2335                         }
2336                 }
2337                 if (ret == -ERESTARTSYS) {
2338                         /* migration failed, detach and clean up mle */
2339                         dlm_mle_detach_hb_events(dlm, mle);
2340                         dlm_put_mle(mle);
2341                         dlm_put_mle_inuse(mle);
2342                         spin_lock(&res->spinlock);
2343                         res->state &= ~DLM_LOCK_RES_MIGRATING;
2344                         spin_unlock(&res->spinlock);
2345                         goto leave;
2346                 }
2347                 /* TODO: if node died: stop, clean up, return error */
2348         }
2349
2350         /* all done, set the owner, clear the flag */
2351         spin_lock(&res->spinlock);
2352         dlm_set_lockres_owner(dlm, res, target);
2353         res->state &= ~DLM_LOCK_RES_MIGRATING;
2354         dlm_remove_nonlocal_locks(dlm, res);
2355         spin_unlock(&res->spinlock);
2356         wake_up(&res->wq);
2357
2358         /* master is known, detach if not already detached */
2359         dlm_mle_detach_hb_events(dlm, mle);
2360         dlm_put_mle_inuse(mle);
2361         ret = 0;
2362
2363         dlm_lockres_calc_usage(dlm, res);
2364
2365 leave:
2366         /* re-dirty the lockres if we failed */
2367         if (ret < 0)
2368                 dlm_kick_thread(dlm, res);
2369
2370         /* TODO: cleanup */
2371         if (mres)
2372                 free_page((unsigned long)mres);
2373
2374         dlm_put(dlm);
2375
2376         mlog(0, "returning %d\n", ret);
2377         return ret;
2378 }
2379 EXPORT_SYMBOL_GPL(dlm_migrate_lockres);
2380
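/* returns nonzero once this lock has no queued basts and none pending,
 * i.e. all basts for it have been flushed */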
2381 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2382 {
2383         int ret;
2384         spin_lock(&dlm->ast_lock);
2385         spin_lock(&lock->spinlock);
2386         ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2387         spin_unlock(&lock->spinlock);
2388         spin_unlock(&dlm->ast_lock);
2389         return ret;
2390 }
2391
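/* migration can proceed once the MIGRATING flag has been set on the
 * lockres; a dead migration target also wakes the waiter so the caller
 * can recheck the domain map and bail out */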
2392 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2393                                      struct dlm_lock_resource *res,
2394                                      u8 mig_target)
2395 {
2396         int can_proceed;
2397         spin_lock(&res->spinlock);
2398         can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2399         spin_unlock(&res->spinlock);
2400
2401         /* target has died, so make the caller break out of the 
2402          * wait_event, but caller must recheck the domain_map */
2403         spin_lock(&dlm->spinlock);
2404         if (!test_bit(mig_target, dlm->domain_map))
2405                 can_proceed = 1;
2406         spin_unlock(&dlm->spinlock);
2407         return can_proceed;
2408 }
2409
2410 int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2411 {
2412         int ret;
2413         spin_lock(&res->spinlock);
2414         ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2415         spin_unlock(&res->spinlock);
2416         return ret;
2417 }
2418
2419
2420 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2421                                        struct dlm_lock_resource *res,
2422                                        u8 target)
2423 {
2424         int ret = 0;
2425
2426         mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2427                res->lockname.len, res->lockname.name, dlm->node_num,
2428                target);
2429         /* need to set MIGRATING flag on lockres.  this is done by
2430          * ensuring that all asts have been flushed for this lockres. */
2431         spin_lock(&res->spinlock);
2432         BUG_ON(res->migration_pending);
2433         res->migration_pending = 1;
2434         /* strategy is to reserve an extra ast then release
2435          * it below, letting the release do all of the work */
2436         __dlm_lockres_reserve_ast(res);
2437         spin_unlock(&res->spinlock);
2438
2439         /* now flush all the pending asts.. hang out for a bit */
2440         dlm_kick_thread(dlm, res);
2441         wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2442         dlm_lockres_release_ast(dlm, res);
2443
2444         mlog(0, "about to wait on migration_wq, dirty=%s\n",
2445                res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2446         /* if the extra ref we just put was the final one, this
2447          * will pass thru immediately.  otherwise, we need to wait
2448          * for the last ast to finish. */
2449 again:
2450         ret = wait_event_interruptible_timeout(dlm->migration_wq,
2451                    dlm_migration_can_proceed(dlm, res, target),
2452                    msecs_to_jiffies(1000));
2453         if (ret < 0) {
2454                 mlog(0, "woken again: migrating? %s, dead? %s\n",
2455                        res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2456                        test_bit(target, dlm->domain_map) ? "no":"yes");
2457         } else {
2458                 mlog(0, "all is well: migrating? %s, dead? %s\n",
2459                        res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2460                        test_bit(target, dlm->domain_map) ? "no":"yes");
2461         }
2462         if (!dlm_migration_can_proceed(dlm, res, target)) {
2463                 mlog(0, "trying again...\n");
2464                 goto again;
2465         }
2466
2467         /* did the target go down or die? */
2468         spin_lock(&dlm->spinlock);
2469         if (!test_bit(target, dlm->domain_map)) {
2470                 mlog(ML_ERROR, "aha. migration target %u just went down\n",
2471                      target);
2472                 ret = -EHOSTDOWN;
2473         }
2474         spin_unlock(&dlm->spinlock);
2475
2476         /*
2477          * at this point:
2478          *
2479          *   o the DLM_LOCK_RES_MIGRATING flag is set
2480          *   o there are no pending asts on this lockres
2481          *   o all processes trying to reserve an ast on this
2482          *     lockres must wait for the MIGRATING flag to clear
2483          */
2484         return ret;
2485 }
2486
2487 /* last step in the migration process.
2488  * original master calls this to free all of the dlm_lock
2489  * structures that used to be for other nodes. */
2490 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2491                                       struct dlm_lock_resource *res)
2492 {
2493         struct list_head *iter, *iter2;
2494         struct list_head *queue = &res->granted;
2495         int i;
2496         struct dlm_lock *lock;
2497
2498         assert_spin_locked(&res->spinlock);
2499
2500         BUG_ON(res->owner == dlm->node_num);
2501
2502         for (i=0; i<3; i++) {
2503                 list_for_each_safe(iter, iter2, queue) {
2504                         lock = list_entry (iter, struct dlm_lock, list);
2505                         if (lock->ml.node != dlm->node_num) {
2506                                 mlog(0, "putting lock for node %u\n",
2507                                      lock->ml.node);
2508                                 /* be extra careful */
2509                                 BUG_ON(!list_empty(&lock->ast_list));
2510                                 BUG_ON(!list_empty(&lock->bast_list));
2511                                 BUG_ON(lock->ast_pending);
2512                                 BUG_ON(lock->bast_pending);
2513                                 list_del_init(&lock->list);
2514                                 dlm_lock_put(lock);
2515                         }
2516                 }
2517                 queue++;
2518         }
2519 }
2520
2521 /* for now this is not too intelligent.  we will
2522  * need stats to make this do the right thing.
2523  * this just finds the first lock on one of the
2524  * queues and uses that node as the target. */
2525 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
2526                                     struct dlm_lock_resource *res)
2527 {
2528         int i;
2529         struct list_head *queue = &res->granted;
2530         struct list_head *iter;
2531         struct dlm_lock *lock;
2532         int nodenum;
2533
2534         assert_spin_locked(&dlm->spinlock);
2535
2536         spin_lock(&res->spinlock);
2537         for (i=0; i<3; i++) {
2538                 list_for_each(iter, queue) {
2539                         /* up to the caller to make sure this node
2540                          * is alive */
2541                         lock = list_entry (iter, struct dlm_lock, list);
2542                         if (lock->ml.node != dlm->node_num) {
2543                                 spin_unlock(&res->spinlock);
2544                                 return lock->ml.node;
2545                         }
2546                 }
2547                 queue++;
2548         }
2549         spin_unlock(&res->spinlock);
2550         mlog(0, "have not found a suitable target yet! checking domain map\n");
2551
2552         /* ok now we're getting desperate.  pick anyone alive. */
2553         nodenum = -1;
2554         while (1) {
2555                 nodenum = find_next_bit(dlm->domain_map,
2556                                         O2NM_MAX_NODES, nodenum+1);
2557                 mlog(0, "found %d in domain map\n", nodenum);
2558                 if (nodenum >= O2NM_MAX_NODES)
2559                         break;
2560                 if (nodenum != dlm->node_num) {
2561                         mlog(0, "picking %d\n", nodenum);
2562                         return nodenum;
2563                 }
2564         }
2565
2566         mlog(0, "giving up.  no master to migrate to\n");
2567         return DLM_LOCK_RES_OWNER_UNKNOWN;
2568 }
2569
2570
2571
2572 /* this is called by the new master once all lockres
2573  * data has been received */
2574 static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2575                                   struct dlm_lock_resource *res,
2576                                   u8 master, u8 new_master,
2577                                   struct dlm_node_iter *iter)
2578 {
2579         struct dlm_migrate_request migrate;
2580         int ret, status = 0;
2581         int nodenum;
2582
2583         memset(&migrate, 0, sizeof(migrate));
2584         migrate.namelen = res->lockname.len;
2585         memcpy(migrate.name, res->lockname.name, migrate.namelen);
2586         migrate.new_master = new_master;
2587         migrate.master = master;
2588
2589         ret = 0;
2590
2591         /* send message to all nodes, except the master and myself */
2592         while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
2593                 if (nodenum == master ||
2594                     nodenum == new_master)
2595                         continue;
2596
2597                 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
2598                                          &migrate, sizeof(migrate), nodenum,
2599                                          &status);
2600                 if (ret < 0)
2601                         mlog_errno(ret);
2602                 else if (status < 0) {
2603                         mlog(0, "migrate request (node %u) returned %d!\n",
2604                              nodenum, status);
2605                         ret = status;
2606                 }
2607         }
2608
2609         if (ret < 0)
2610                 mlog_errno(ret);
2611
2612         mlog(0, "returning ret=%d\n", ret);
2613         return ret;
2614 }
2615
2616
2617 /* if there is an existing mle for this lockres, we now know who the master is.
2618  * (the one who sent us *this* message) we can clear it up right away.
2619  * since the process that put the mle on the list still has a reference to it,
2620  * we can unhash it now, set the master and wake the process.  as a result,
2621  * we will have no mle in the list to start with.  now we can add an mle for
2622  * the migration and this should be the only one found for those scanning the
2623  * list.  */
2624 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data)
2625 {
2626         struct dlm_ctxt *dlm = data;
2627         struct dlm_lock_resource *res = NULL;
2628         struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
2629         struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
2630         const char *name;
2631         unsigned int namelen, hash;
2632         int ret = 0;
2633
2634         if (!dlm_grab(dlm))
2635                 return -EINVAL;
2636
2637         name = migrate->name;
2638         namelen = migrate->namelen;
2639         hash = dlm_lockid_hash(name, namelen);
2640
2641         /* preallocate.. if this fails, abort */
2642         mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
2643                                                          GFP_NOFS);
2644
2645         if (!mle) {
2646                 ret = -ENOMEM;
2647                 goto leave;
2648         }
2649
2650         /* check for pre-existing lock */
2651         spin_lock(&dlm->spinlock);
2652         res = __dlm_lookup_lockres(dlm, name, namelen, hash);
2653         spin_lock(&dlm->master_lock);
2654
2655         if (res) {
2656                 spin_lock(&res->spinlock);
2657                 if (res->state & DLM_LOCK_RES_RECOVERING) {
2658                         /* if all is working ok, this can only mean that we got
2659                          * a migrate request from a node that we now see as
2660                          * dead.  what can we do here?  drop it to the floor? */
2661                         spin_unlock(&res->spinlock);
2662                         mlog(ML_ERROR, "Got a migrate request, but the "
2663                              "lockres is marked as recovering!");
2664                         kmem_cache_free(dlm_mle_cache, mle);
2665                         ret = -EINVAL; /* need a better solution */
2666                         goto unlock;
2667                 }
2668                 res->state |= DLM_LOCK_RES_MIGRATING;
2669                 spin_unlock(&res->spinlock);
2670         }
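        /* if we found the lockres it is now marked MIGRATING; the flag is
         * not cleared until the new master's assert_master arrives, so
         * anything that waits on the MIGRATING state will hold off until
         * the migration has settled. */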
2671
2672         /* this either succeeds, returns -EEXIST on a race, or BUG()s */
2673         ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
2674                                     name, namelen,
2675                                     migrate->new_master,
2676                                     migrate->master);
2677
2678 unlock:
2679         spin_unlock(&dlm->master_lock);
2680         spin_unlock(&dlm->spinlock);
2681
2682         if (oldmle) {
2683                 /* master is known, detach if not already detached */
2684                 dlm_mle_detach_hb_events(dlm, oldmle);
2685                 dlm_put_mle(oldmle);
2686         }
2687
2688         if (res)
2689                 dlm_lockres_put(res);
2690 leave:
2691         dlm_put(dlm);
2692         return ret;
2693 }
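
/*
 * Putting the two sides together: dlm_do_migrate_request() above fans
 * DLM_MIGRATE_REQUEST_MSG out from the node starting the migration, and
 * every other node in the domain runs the handler just shown, so each of
 * them ends up with a single migration mle (added via
 * dlm_add_migration_mle() below) recording both the current and the new
 * master.  that mle is resolved when the new master later sends its
 * assert_master.
 */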
2694
2695 /* must be holding dlm->spinlock and dlm->master_lock
2696  * when adding a migration mle, we can clear any other mles
2697  * in the master list because we know with certainty that
2698  * the master is "master".  so we remove any old mle from
2699  * the list after setting its master field, and then add
2700  * the new migration mle.  this way we keep to the rule
2701  * of having only one mle for a given lock name at all times. */
2702 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
2703                                  struct dlm_lock_resource *res,
2704                                  struct dlm_master_list_entry *mle,
2705                                  struct dlm_master_list_entry **oldmle,
2706                                  const char *name, unsigned int namelen,
2707                                  u8 new_master, u8 master)
2708 {
2709         int found;
2710         int ret = 0;
2711
2712         *oldmle = NULL;
2713
2714         mlog_entry_void();
2715
2716         assert_spin_locked(&dlm->spinlock);
2717         assert_spin_locked(&dlm->master_lock);
2718
2719         /* caller is responsible for any ref taken here on oldmle */
2720         found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
2721         if (found) {
2722                 struct dlm_master_list_entry *tmp = *oldmle;
2723                 spin_lock(&tmp->spinlock);
2724                 if (tmp->type == DLM_MLE_MIGRATION) {
2725                         if (master == dlm->node_num) {
2726                                 /* ah another process raced me to it */
2727                                 mlog(0, "tried to migrate %.*s, but some "
2728                                      "process beat me to it\n",
2729                                      namelen, name);
2730                                 ret = -EEXIST;
2731                         } else {
2732                                 /* bad.  2 NODES are trying to migrate! */
2733                                 mlog(ML_ERROR, "migration error  mle: "
2734                                      "master=%u new_master=%u // request: "
2735                                      "master=%u new_master=%u // "
2736                                      "lockres=%.*s\n",
2737                                      tmp->master, tmp->new_master,
2738                                      master, new_master,
2739                                      namelen, name);
2740                                 BUG();
2741                         }
2742                 } else {
2743                         /* this is essentially what assert_master does */
2744                         tmp->master = master;
2745                         atomic_set(&tmp->woken, 1);
2746                         wake_up(&tmp->wq);
2747                         /* remove the old mle from the list so that
2748                          * only one mle will be found */
2749                         list_del_init(&tmp->list);
2750                         __dlm_mle_detach_hb_events(dlm, tmp);
2751                 }
2752                 spin_unlock(&tmp->spinlock);
2753         }
2754
2755         /* now add a migration mle to the tail of the list */
2756         dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
2757         mle->new_master = new_master;
2758         mle->master = master;
2759         /* do this for consistency with other mle types */
2760         set_bit(new_master, mle->maybe_map);
2761         list_add(&mle->list, &dlm->master_list);
2762
2763         return ret;
2764 }
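
/*
 * Illustrative summary of the "found" cases above: a plain mastery mle
 * (BLOCK or MASTER) simply learns who the master is, is unlinked, and its
 * waiter is woken; a MIGRATION mle naming this node as master means some
 * local process already beat us to the migration, so we return -EEXIST;
 * a MIGRATION mle naming anyone else would mean two nodes are migrating
 * the same lockres at once, which must never happen, hence the BUG().
 */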
2765
2766
2767 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
2768 {
2769         struct list_head *iter, *iter2;
2770         struct dlm_master_list_entry *mle;
2771         struct dlm_lock_resource *res;
2772         unsigned int hash;
2773
2774         mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
2775 top:
2776         assert_spin_locked(&dlm->spinlock);
2777
2778         /* clean the master list */
2779         spin_lock(&dlm->master_lock);
2780         list_for_each_safe(iter, iter2, &dlm->master_list) {
2781                 mle = list_entry(iter, struct dlm_master_list_entry, list);
2782
2783                 BUG_ON(mle->type != DLM_MLE_BLOCK &&
2784                        mle->type != DLM_MLE_MASTER &&
2785                        mle->type != DLM_MLE_MIGRATION);
2786
2787                 /* MASTER mles are initiated locally.  the waiting
2788                  * process will notice the node map change
2789                  * shortly.  let that happen as normal. */
2790                 if (mle->type == DLM_MLE_MASTER)
2791                         continue;
2792
2793
2794                 /* BLOCK mles are initiated by other nodes.
2795                  * need to clean up if the dead node would have
2796                  * been the master. */
2797                 if (mle->type == DLM_MLE_BLOCK) {
2798                         int bit;
2799
2800                         spin_lock(&mle->spinlock);
2801                         bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
2802                         if (bit != dead_node) {
2803                                 mlog(0, "mle found, but dead node %u would "
2804                                      "not have been master\n", dead_node);
2805                                 spin_unlock(&mle->spinlock);
2806                         } else {
2807                                 /* must drop the refcount by one since the
2808                                  * assert_master will never arrive.  this
2809                                  * may result in the mle being unlinked and
2810                                  * freed, but there may still be a process
2811                                  * waiting in the dlmlock path which is fine. */
2812                                 mlog(0, "node %u was expected master\n",
2813                                      dead_node);
2814                                 atomic_set(&mle->woken, 1);
2815                                 spin_unlock(&mle->spinlock);
2816                                 wake_up(&mle->wq);
2817                                 /* do not need events any longer, so detach 
2818                                  * from heartbeat */
2819                                 __dlm_mle_detach_hb_events(dlm, mle);
2820                                 __dlm_put_mle(mle);
2821                         }
2822                         continue;
2823                 }
2824
2825                 /* everything else is a MIGRATION mle */
2826
2827                 /* the rule for MIGRATION mles is that the master
2828                  * becomes UNKNOWN if *either* the original or
2829                  * the new master dies.  all UNKNOWN lockreses
2830                  * are sent to whichever node becomes the recovery
2831                  * master.  the new master is responsible for
2832                  * determining if there is still a master for
2833                  * this lockres, or if he needs to take over
2834                  * mastery.  either way, this node should expect
2835                  * another message to resolve this. */
2836                 if (mle->master != dead_node &&
2837                     mle->new_master != dead_node)
2838                         continue;
2839
2840                 /* if we have reached this point, this mle needs to
2841                  * be removed from the list and freed. */
2842
2843                 /* remove from the list early.  NOTE: unlinking
2844                  * list_head while in list_for_each_safe */
2845                 __dlm_mle_detach_hb_events(dlm, mle);
2846                 spin_lock(&mle->spinlock);
2847                 list_del_init(&mle->list);
2848                 atomic_set(&mle->woken, 1);
2849                 spin_unlock(&mle->spinlock);
2850                 wake_up(&mle->wq);
2851
2852                 mlog(0, "%s: node %u died during migration from "
2853                      "%u to %u!\n", dlm->name, dead_node,
2854                      mle->master, mle->new_master);
2855                 /* if there is a lockres associated with this
2856                  * mle, find it and set its owner to UNKNOWN */
2857                 hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len);
2858                 res = __dlm_lookup_lockres(dlm, mle->u.name.name,
2859                                            mle->u.name.len, hash);
2860                 if (res) {
2861                         /* unfortunately if we hit this rare case, our
2862                          * lock ordering is messed up.  we need to drop
2863                          * the master lock so that we can take the
2864                          * lockres lock, meaning that we will have to
2865                          * restart from the head of the list. */
2866                         spin_unlock(&dlm->master_lock);
2867
2868                         /* move lockres onto recovery list */
2869                         spin_lock(&res->spinlock);
2870                         dlm_set_lockres_owner(dlm, res,
2871                                         DLM_LOCK_RES_OWNER_UNKNOWN);
2872                         dlm_move_lockres_to_recovery_list(dlm, res);
2873                         spin_unlock(&res->spinlock);
2874                         dlm_lockres_put(res);
2875
2876                         /* about to get rid of mle, detach from heartbeat */
2877                         __dlm_mle_detach_hb_events(dlm, mle);
2878
2879                         /* dump the mle */
2880                         spin_lock(&dlm->master_lock);
2881                         __dlm_put_mle(mle);
2882                         spin_unlock(&dlm->master_lock);
2883
2884                         /* restart */
2885                         goto top;
2886                 }
2887
2888                 /* this may be the last reference */
2889                 __dlm_put_mle(mle);
2890         }
2891         spin_unlock(&dlm->master_lock);
2892 }
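
/*
 * dlm_clean_master_list() is expected to run from the node-down/recovery
 * cleanup path (note that the caller must already hold dlm->spinlock, as
 * asserted above), so that by the time recovery starts reassigning
 * lockreses there are no stale mles naming the dead node as master or as
 * migration target.
 */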
2893
2894
2895 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
2896                          u8 old_master)
2897 {
2898         struct dlm_node_iter iter;
2899         int ret = 0;
2900
2901         spin_lock(&dlm->spinlock);
2902         dlm_node_iter_init(dlm->domain_map, &iter);
2903         clear_bit(old_master, iter.node_map);
2904         clear_bit(dlm->node_num, iter.node_map);
2905         spin_unlock(&dlm->spinlock);
2906
2907         mlog(0, "now time to do a migrate request to other nodes\n");
2908         ret = dlm_do_migrate_request(dlm, res, old_master,
2909                                      dlm->node_num, &iter);
2910         if (ret < 0) {
2911                 mlog_errno(ret);
2912                 goto leave;
2913         }
2914
2915         mlog(0, "doing assert master of %.*s to all except the original node\n",
2916              res->lockname.len, res->lockname.name);
2917         /* this call now finishes out the nodemap
2918          * even if one or more nodes die */
2919         ret = dlm_do_assert_master(dlm, res->lockname.name,
2920                                    res->lockname.len, iter.node_map,
2921                                    DLM_ASSERT_MASTER_FINISH_MIGRATION);
2922         if (ret < 0) {
2923                 /* no longer need to retry.  all living nodes contacted. */
2924                 mlog_errno(ret);
2925                 ret = 0;
2926         }
2927
2928         memset(iter.node_map, 0, sizeof(iter.node_map));
2929         set_bit(old_master, iter.node_map);
2930         mlog(0, "doing assert master of %.*s back to %u\n",
2931              res->lockname.len, res->lockname.name, old_master);
2932         ret = dlm_do_assert_master(dlm, res->lockname.name,
2933                                    res->lockname.len, iter.node_map,
2934                                    DLM_ASSERT_MASTER_FINISH_MIGRATION);
2935         if (ret < 0) {
2936                 mlog(0, "assert master to original master failed "
2937                      "with %d.\n", ret);
2938                 /* the only nonzero status here would be because of
2939                  * a dead original node.  we're done. */
2940                 ret = 0;
2941         }
2942
2943         /* all done, set the owner, clear the flag */
2944         spin_lock(&res->spinlock);
2945         dlm_set_lockres_owner(dlm, res, dlm->node_num);
2946         res->state &= ~DLM_LOCK_RES_MIGRATING;
2947         spin_unlock(&res->spinlock);
2948         /* re-dirty it on the new master */
2949         dlm_kick_thread(dlm, res);
2950         wake_up(&res->wq);
2951 leave:
2952         return ret;
2953 }
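
/*
 * dlm_finish_migration() runs on the node taking over mastery (note that
 * the new_master passed to dlm_do_migrate_request() above is
 * dlm->node_num), presumably once the lockres state has been shipped to
 * it: broadcast the migrate request, assert mastery to everyone but the
 * old master, then assert to the old master, and finally take ownership
 * locally and re-dirty the lockres so the dlm thread picks it up.
 */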
2954
2955 /*
2956  * LOCKRES AST REFCOUNT
2957  * this is integral to migration
2958  */
2959
2960 /* for future intent to call an ast, reserve one ahead of time.
2961  * this should be called only after waiting on the lockres
2962  * with dlm_wait_on_lockres, and while still holding the
2963  * spinlock after the call. */
2964 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
2965 {
2966         assert_spin_locked(&res->spinlock);
2967         if (res->state & DLM_LOCK_RES_MIGRATING) {
2968                 __dlm_print_one_lock_resource(res);
2969         }
2970         BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
2971
2972         atomic_inc(&res->asts_reserved);
2973 }
2974
2975 /*
2976  * used to drop the reserved ast, either because it went unused,
2977  * or because the ast/bast was actually called.
2978  *
2979  * also, if there is a pending migration on this lockres,
2980  * and this was the last pending ast on the lockres,
2981  * atomically set the MIGRATING flag before we drop the lock.
2982  * this is how we ensure that migration can proceed with no
2983  * asts in progress.  note that it is ok if the state of the
2984  * queues is such that a lock should be granted in the future
2985  * or that a bast should be fired, because the new master will
2986  * shuffle the lists on this lockres as soon as it is migrated.
2987  */
2988 void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
2989                              struct dlm_lock_resource *res)
2990 {
2991         if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
2992                 return;
2993
2994         if (!res->migration_pending) {
2995                 spin_unlock(&res->spinlock);
2996                 return;
2997         }
2998
2999         BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3000         res->migration_pending = 0;
3001         res->state |= DLM_LOCK_RES_MIGRATING;
3002         spin_unlock(&res->spinlock);
3003         wake_up(&res->wq);
3004         wake_up(&dlm->migration_wq);
3005 }
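
/*
 * A minimal sketch of how the two calls above pair up for a caller that
 * intends to deliver an ast (the surrounding steps are only illustrative;
 * the reserve/release calls are the actual interface):
 *
 *	spin_lock(&res->spinlock);
 *	... wait for the lockres to quiesce, per the comment above ...
 *	__dlm_lockres_reserve_ast(res);
 *	spin_unlock(&res->spinlock);
 *
 *	... queue or deliver the ast/bast ...
 *
 *	dlm_lockres_release_ast(dlm, res);
 *
 * migration drains these reservations: the final release while
 * migration_pending is set atomically flips the lockres to MIGRATING and
 * wakes dlm->migration_wq, which is exactly what the function above does.
 */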