1 /* -*- mode: c; c-basic-offset: 8; -*-
2  * vim: noexpandtab sw=8 ts=8 sts=0:
3  *
4  * dlmmaster.c
5  *
6  * standalone DLM module
7  *
8  * Copyright (C) 2004 Oracle.  All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public
12  * License as published by the Free Software Foundation; either
13  * version 2 of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public
21  * License along with this program; if not, write to the
22  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23  * Boston, MA 02111-1307, USA.
24  *
25  */
26
27
28 #include <linux/module.h>
29 #include <linux/fs.h>
30 #include <linux/types.h>
31 #include <linux/slab.h>
32 #include <linux/highmem.h>
33 #include <linux/init.h>
34 #include <linux/sysctl.h>
35 #include <linux/random.h>
36 #include <linux/blkdev.h>
37 #include <linux/socket.h>
38 #include <linux/inet.h>
39 #include <linux/spinlock.h>
40 #include <linux/delay.h>
41
42
43 #include "cluster/heartbeat.h"
44 #include "cluster/nodemanager.h"
45 #include "cluster/tcp.h"
46
47 #include "dlmapi.h"
48 #include "dlmcommon.h"
49 #include "dlmdomain.h"
50 #include "dlmdebug.h"
51
52 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
53 #include "cluster/masklog.h"
54
55 static void dlm_mle_node_down(struct dlm_ctxt *dlm,
56                               struct dlm_master_list_entry *mle,
57                               struct o2nm_node *node,
58                               int idx);
59 static void dlm_mle_node_up(struct dlm_ctxt *dlm,
60                             struct dlm_master_list_entry *mle,
61                             struct o2nm_node *node,
62                             int idx);
63
64 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
65 static int dlm_do_assert_master(struct dlm_ctxt *dlm,
66                                 struct dlm_lock_resource *res,
67                                 void *nodemap, u32 flags);
68 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
69
70 static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
71                                 struct dlm_master_list_entry *mle,
72                                 const char *name,
73                                 unsigned int namelen)
74 {
75         if (dlm != mle->dlm)
76                 return 0;
77
78         if (namelen != mle->mnamelen ||
79             memcmp(name, mle->mname, namelen) != 0)
80                 return 0;
81
82         return 1;
83 }
84
85 static struct kmem_cache *dlm_lockres_cache = NULL;
86 static struct kmem_cache *dlm_lockname_cache = NULL;
87 static struct kmem_cache *dlm_mle_cache = NULL;
88
89 static void dlm_mle_release(struct kref *kref);
90 static void dlm_init_mle(struct dlm_master_list_entry *mle,
91                         enum dlm_mle_type type,
92                         struct dlm_ctxt *dlm,
93                         struct dlm_lock_resource *res,
94                         const char *name,
95                         unsigned int namelen);
96 static void dlm_put_mle(struct dlm_master_list_entry *mle);
97 static void __dlm_put_mle(struct dlm_master_list_entry *mle);
98 static int dlm_find_mle(struct dlm_ctxt *dlm,
99                         struct dlm_master_list_entry **mle,
100                         char *name, unsigned int namelen);
101
102 static int dlm_do_master_request(struct dlm_lock_resource *res,
103                                  struct dlm_master_list_entry *mle, int to);
104
105
106 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
107                                      struct dlm_lock_resource *res,
108                                      struct dlm_master_list_entry *mle,
109                                      int *blocked);
110 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
111                                     struct dlm_lock_resource *res,
112                                     struct dlm_master_list_entry *mle,
113                                     int blocked);
114 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
115                                  struct dlm_lock_resource *res,
116                                  struct dlm_master_list_entry *mle,
117                                  struct dlm_master_list_entry **oldmle,
118                                  const char *name, unsigned int namelen,
119                                  u8 new_master, u8 master);
120
121 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
122                                     struct dlm_lock_resource *res);
123 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
124                                       struct dlm_lock_resource *res);
125 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
126                                        struct dlm_lock_resource *res,
127                                        u8 target);
128 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
129                                        struct dlm_lock_resource *res);
130
131
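/*
 * classifies errno values returned by the o2net messaging layer: any of
 * the socket/network errors below means the remote node is effectively
 * unreachable, so callers treat a failed send like a node death rather
 * than a hard error.
 */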
132 int dlm_is_host_down(int errno)
133 {
134         switch (errno) {
135                 case -EBADF:
136                 case -ECONNREFUSED:
137                 case -ENOTCONN:
138                 case -ECONNRESET:
139                 case -EPIPE:
140                 case -EHOSTDOWN:
141                 case -EHOSTUNREACH:
142                 case -ETIMEDOUT:
143                 case -ECONNABORTED:
144                 case -ENETDOWN:
145                 case -ENETUNREACH:
146                 case -ENETRESET:
147                 case -ESHUTDOWN:
148                 case -ENOPROTOOPT:
149                 case -EINVAL:   /* if returned from our tcp code,
150                                    this means there is no socket */
151                         return 1;
152         }
153         return 0;
154 }
155
156
157 /*
158  * MASTER LIST FUNCTIONS
159  */
160
161
162 /*
163  * regarding master list entries and heartbeat callbacks:
164  *
165  * in order to avoid sleeping and allocation that occurs in
166  * heartbeat, master list entries are simply attached to the
167  * dlm's established heartbeat callbacks.  the mle is attached
168  * when it is created, and since the dlm->spinlock is held at
169  * that time, any heartbeat event will be properly discovered
170  * by the mle.  the mle needs to be detached from the
171  * dlm->mle_hb_events list as soon as heartbeat events are no
172  * longer useful to the mle, and before the mle is freed.
173  *
174  * as a general rule, heartbeat events are no longer needed by
175  * the mle once an "answer" regarding the lock master has been
176  * received.
177  */
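/*
 * in this file the attach happens in dlm_init_mle(), and the detach
 * happens once the master is known (see dlm_get_lock_resource) or, at
 * the latest, in dlm_mle_release() just before the mle is freed.
 */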
178 static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
179                                               struct dlm_master_list_entry *mle)
180 {
181         assert_spin_locked(&dlm->spinlock);
182
183         list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
184 }
185
186
187 static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
188                                               struct dlm_master_list_entry *mle)
189 {
190         if (!list_empty(&mle->hb_events))
191                 list_del_init(&mle->hb_events);
192 }
193
194
195 static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
196                                             struct dlm_master_list_entry *mle)
197 {
198         spin_lock(&dlm->spinlock);
199         __dlm_mle_detach_hb_events(dlm, mle);
200         spin_unlock(&dlm->spinlock);
201 }
202
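/* "inuse" is an extra pin, taken alongside a kref, for an mle that is in
 * the middle of a longer operation such as lock mastery; both the count
 * and the reference are manipulated with dlm->spinlock and
 * dlm->master_lock held. */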
203 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
204 {
205         struct dlm_ctxt *dlm;
206         dlm = mle->dlm;
207
208         assert_spin_locked(&dlm->spinlock);
209         assert_spin_locked(&dlm->master_lock);
210         mle->inuse++;
211         kref_get(&mle->mle_refs);
212 }
213
214 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
215 {
216         struct dlm_ctxt *dlm;
217         dlm = mle->dlm;
218
219         spin_lock(&dlm->spinlock);
220         spin_lock(&dlm->master_lock);
221         mle->inuse--;
222         __dlm_put_mle(mle);
223         spin_unlock(&dlm->master_lock);
224         spin_unlock(&dlm->spinlock);
225
226 }
227
228 /* remove from list and free */
229 static void __dlm_put_mle(struct dlm_master_list_entry *mle)
230 {
231         struct dlm_ctxt *dlm;
232         dlm = mle->dlm;
233
234         assert_spin_locked(&dlm->spinlock);
235         assert_spin_locked(&dlm->master_lock);
236         if (!atomic_read(&mle->mle_refs.refcount)) {
237                 /* this may or may not crash, but who cares.
238                  * it's a BUG. */
239                 mlog(ML_ERROR, "bad mle: %p\n", mle);
240                 dlm_print_one_mle(mle);
241                 BUG();
242         } else
243                 kref_put(&mle->mle_refs, dlm_mle_release);
244 }
245
246
247 /* must not have any spinlocks coming in */
248 static void dlm_put_mle(struct dlm_master_list_entry *mle)
249 {
250         struct dlm_ctxt *dlm;
251         dlm = mle->dlm;
252
253         spin_lock(&dlm->spinlock);
254         spin_lock(&dlm->master_lock);
255         __dlm_put_mle(mle);
256         spin_unlock(&dlm->master_lock);
257         spin_unlock(&dlm->spinlock);
258 }
259
260 static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
261 {
262         kref_get(&mle->mle_refs);
263 }
264
265 static void dlm_init_mle(struct dlm_master_list_entry *mle,
266                         enum dlm_mle_type type,
267                         struct dlm_ctxt *dlm,
268                         struct dlm_lock_resource *res,
269                         const char *name,
270                         unsigned int namelen)
271 {
272         assert_spin_locked(&dlm->spinlock);
273
274         mle->dlm = dlm;
275         mle->type = type;
276         INIT_HLIST_NODE(&mle->master_hash_node);
277         INIT_LIST_HEAD(&mle->hb_events);
278         memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
279         spin_lock_init(&mle->spinlock);
280         init_waitqueue_head(&mle->wq);
281         atomic_set(&mle->woken, 0);
282         kref_init(&mle->mle_refs);
283         memset(mle->response_map, 0, sizeof(mle->response_map));
284         mle->master = O2NM_MAX_NODES;
285         mle->new_master = O2NM_MAX_NODES;
286         mle->inuse = 0;
287
288         BUG_ON(mle->type != DLM_MLE_BLOCK &&
289                mle->type != DLM_MLE_MASTER &&
290                mle->type != DLM_MLE_MIGRATION);
291
292         if (mle->type == DLM_MLE_MASTER) {
293                 BUG_ON(!res);
294                 mle->mleres = res;
295                 memcpy(mle->mname, res->lockname.name, res->lockname.len);
296                 mle->mnamelen = res->lockname.len;
297                 mle->mnamehash = res->lockname.hash;
298         } else {
299                 BUG_ON(!name);
300                 mle->mleres = NULL;
301                 memcpy(mle->mname, name, namelen);
302                 mle->mnamelen = namelen;
303                 mle->mnamehash = dlm_lockid_hash(name, namelen);
304         }
305
306         atomic_inc(&dlm->mle_tot_count[mle->type]);
307         atomic_inc(&dlm->mle_cur_count[mle->type]);
308
309         /* copy off the node_map and register hb callbacks on our copy */
310         memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
311         memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
312         clear_bit(dlm->node_num, mle->vote_map);
313         clear_bit(dlm->node_num, mle->node_map);
314
315         /* attach the mle to the domain node up/down events */
316         __dlm_mle_attach_hb_events(dlm, mle);
317 }
318
319 void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
320 {
321         assert_spin_locked(&dlm->spinlock);
322         assert_spin_locked(&dlm->master_lock);
323
324         if (!hlist_unhashed(&mle->master_hash_node))
325                 hlist_del_init(&mle->master_hash_node);
326 }
327
328 void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
329 {
330         struct hlist_head *bucket;
331
332         assert_spin_locked(&dlm->master_lock);
333
334         bucket = dlm_master_hash(dlm, mle->mnamehash);
335         hlist_add_head(&mle->master_hash_node, bucket);
336 }
337
338 /* returns 1 if found, 0 if not */
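/* on success the mle is returned with an extra reference (dlm_get_mle)
 * that the caller is responsible for dropping */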
339 static int dlm_find_mle(struct dlm_ctxt *dlm,
340                         struct dlm_master_list_entry **mle,
341                         char *name, unsigned int namelen)
342 {
343         struct dlm_master_list_entry *tmpmle;
344         struct hlist_head *bucket;
345         struct hlist_node *list;
346         unsigned int hash;
347
348         assert_spin_locked(&dlm->master_lock);
349
350         hash = dlm_lockid_hash(name, namelen);
351         bucket = dlm_master_hash(dlm, hash);
352         hlist_for_each(list, bucket) {
353                 tmpmle = hlist_entry(list, struct dlm_master_list_entry,
354                                      master_hash_node);
355                 if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
356                         continue;
357                 dlm_get_mle(tmpmle);
358                 *mle = tmpmle;
359                 return 1;
360         }
361         return 0;
362 }
363
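/*
 * called with dlm->spinlock held when heartbeat reports a node up or
 * down; fans the event out to every mle currently attached to
 * dlm->mle_hb_events.
 */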
364 void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
365 {
366         struct dlm_master_list_entry *mle;
367
368         assert_spin_locked(&dlm->spinlock);
369
370         list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
371                 if (node_up)
372                         dlm_mle_node_up(dlm, mle, NULL, idx);
373                 else
374                         dlm_mle_node_down(dlm, mle, NULL, idx);
375         }
376 }
377
378 static void dlm_mle_node_down(struct dlm_ctxt *dlm,
379                               struct dlm_master_list_entry *mle,
380                               struct o2nm_node *node, int idx)
381 {
382         spin_lock(&mle->spinlock);
383
384         if (!test_bit(idx, mle->node_map))
385                 mlog(0, "node %u already removed from nodemap!\n", idx);
386         else
387                 clear_bit(idx, mle->node_map);
388
389         spin_unlock(&mle->spinlock);
390 }
391
392 static void dlm_mle_node_up(struct dlm_ctxt *dlm,
393                             struct dlm_master_list_entry *mle,
394                             struct o2nm_node *node, int idx)
395 {
396         spin_lock(&mle->spinlock);
397
398         if (test_bit(idx, mle->node_map))
399                 mlog(0, "node %u already in node map!\n", idx);
400         else
401                 set_bit(idx, mle->node_map);
402
403         spin_unlock(&mle->spinlock);
404 }
405
406
407 int dlm_init_mle_cache(void)
408 {
409         dlm_mle_cache = kmem_cache_create("o2dlm_mle",
410                                           sizeof(struct dlm_master_list_entry),
411                                           0, SLAB_HWCACHE_ALIGN,
412                                           NULL);
413         if (dlm_mle_cache == NULL)
414                 return -ENOMEM;
415         return 0;
416 }
417
418 void dlm_destroy_mle_cache(void)
419 {
420         if (dlm_mle_cache)
421                 kmem_cache_destroy(dlm_mle_cache);
422 }
423
424 static void dlm_mle_release(struct kref *kref)
425 {
426         struct dlm_master_list_entry *mle;
427         struct dlm_ctxt *dlm;
428
429         mlog_entry_void();
430
431         mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
432         dlm = mle->dlm;
433
434         assert_spin_locked(&dlm->spinlock);
435         assert_spin_locked(&dlm->master_lock);
436
437         mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
438              mle->type);
439
440         /* remove from list if not already */
441         __dlm_unlink_mle(dlm, mle);
442
443         /* detach the mle from the domain node up/down events */
444         __dlm_mle_detach_hb_events(dlm, mle);
445
446         atomic_dec(&dlm->mle_cur_count[mle->type]);
447
448         /* NOTE: kmem_cache_free under spinlock here.
449          * if this is bad, we can move this to a freelist. */
450         kmem_cache_free(dlm_mle_cache, mle);
451 }
452
453
454 /*
455  * LOCK RESOURCE FUNCTIONS
456  */
457
458 int dlm_init_master_caches(void)
459 {
460         dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
461                                               sizeof(struct dlm_lock_resource),
462                                               0, SLAB_HWCACHE_ALIGN, NULL);
463         if (!dlm_lockres_cache)
464                 goto bail;
465
466         dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
467                                                DLM_LOCKID_NAME_MAX, 0,
468                                                SLAB_HWCACHE_ALIGN, NULL);
469         if (!dlm_lockname_cache)
470                 goto bail;
471
472         return 0;
473 bail:
474         dlm_destroy_master_caches();
475         return -ENOMEM;
476 }
477
478 void dlm_destroy_master_caches(void)
479 {
480         if (dlm_lockname_cache)
481                 kmem_cache_destroy(dlm_lockname_cache);
482
483         if (dlm_lockres_cache)
484                 kmem_cache_destroy(dlm_lockres_cache);
485 }
486
487 static void dlm_lockres_release(struct kref *kref)
488 {
489         struct dlm_lock_resource *res;
490         struct dlm_ctxt *dlm;
491
492         res = container_of(kref, struct dlm_lock_resource, refs);
493         dlm = res->dlm;
494
495         /* This should not happen -- all lockres structures have a name
496          * associated with them at init time. */
497         BUG_ON(!res->lockname.name);
498
499         mlog(0, "destroying lockres %.*s\n", res->lockname.len,
500              res->lockname.name);
501
502         spin_lock(&dlm->track_lock);
503         if (!list_empty(&res->tracking))
504                 list_del_init(&res->tracking);
505         else {
506                 mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
507                      res->lockname.len, res->lockname.name);
508                 dlm_print_one_lock_resource(res);
509         }
510         spin_unlock(&dlm->track_lock);
511
512         atomic_dec(&dlm->res_cur_count);
513
514         dlm_put(dlm);
515
516         if (!hlist_unhashed(&res->hash_node) ||
517             !list_empty(&res->granted) ||
518             !list_empty(&res->converting) ||
519             !list_empty(&res->blocked) ||
520             !list_empty(&res->dirty) ||
521             !list_empty(&res->recovering) ||
522             !list_empty(&res->purge)) {
523                 mlog(ML_ERROR,
524                      "Going to BUG for resource %.*s."
525                      "  We're on a list! [%c%c%c%c%c%c%c]\n",
526                      res->lockname.len, res->lockname.name,
527                      !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
528                      !list_empty(&res->granted) ? 'G' : ' ',
529                      !list_empty(&res->converting) ? 'C' : ' ',
530                      !list_empty(&res->blocked) ? 'B' : ' ',
531                      !list_empty(&res->dirty) ? 'D' : ' ',
532                      !list_empty(&res->recovering) ? 'R' : ' ',
533                      !list_empty(&res->purge) ? 'P' : ' ');
534
535                 dlm_print_one_lock_resource(res);
536         }
537
538         /* By the time we're ready to blow this guy away, we shouldn't
539          * be on any lists. */
540         BUG_ON(!hlist_unhashed(&res->hash_node));
541         BUG_ON(!list_empty(&res->granted));
542         BUG_ON(!list_empty(&res->converting));
543         BUG_ON(!list_empty(&res->blocked));
544         BUG_ON(!list_empty(&res->dirty));
545         BUG_ON(!list_empty(&res->recovering));
546         BUG_ON(!list_empty(&res->purge));
547
548         kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
549
550         kmem_cache_free(dlm_lockres_cache, res);
551 }
552
553 void dlm_lockres_put(struct dlm_lock_resource *res)
554 {
555         kref_put(&res->refs, dlm_lockres_release);
556 }
557
558 static void dlm_init_lockres(struct dlm_ctxt *dlm,
559                              struct dlm_lock_resource *res,
560                              const char *name, unsigned int namelen)
561 {
562         char *qname;
563
564         /* If we memset here, we lose our reference to the kmalloc'd
565          * res->lockname.name, so be sure to init every field
566          * correctly! */
567
568         qname = (char *) res->lockname.name;
569         memcpy(qname, name, namelen);
570
571         res->lockname.len = namelen;
572         res->lockname.hash = dlm_lockid_hash(name, namelen);
573
574         init_waitqueue_head(&res->wq);
575         spin_lock_init(&res->spinlock);
576         INIT_HLIST_NODE(&res->hash_node);
577         INIT_LIST_HEAD(&res->granted);
578         INIT_LIST_HEAD(&res->converting);
579         INIT_LIST_HEAD(&res->blocked);
580         INIT_LIST_HEAD(&res->dirty);
581         INIT_LIST_HEAD(&res->recovering);
582         INIT_LIST_HEAD(&res->purge);
583         INIT_LIST_HEAD(&res->tracking);
584         atomic_set(&res->asts_reserved, 0);
585         res->migration_pending = 0;
586         res->inflight_locks = 0;
587
588         /* put in dlm_lockres_release */
589         dlm_grab(dlm);
590         res->dlm = dlm;
591
592         kref_init(&res->refs);
593
594         atomic_inc(&dlm->res_tot_count);
595         atomic_inc(&dlm->res_cur_count);
596
597         /* just for consistency */
598         spin_lock(&res->spinlock);
599         dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
600         spin_unlock(&res->spinlock);
601
602         res->state = DLM_LOCK_RES_IN_PROGRESS;
603
604         res->last_used = 0;
605
606         spin_lock(&dlm->spinlock);
607         list_add_tail(&res->tracking, &dlm->tracking_list);
608         spin_unlock(&dlm->spinlock);
609
610         memset(res->lvb, 0, DLM_LVB_LEN);
611         memset(res->refmap, 0, sizeof(res->refmap));
612 }
613
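/*
 * the lockres structure comes from dlm_lockres_cache and the name buffer
 * from dlm_lockname_cache, both allocated GFP_NOFS so the allocation
 * cannot recurse back into the filesystem.
 */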
614 struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
615                                    const char *name,
616                                    unsigned int namelen)
617 {
618         struct dlm_lock_resource *res = NULL;
619
620         res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
621         if (!res)
622                 goto error;
623
624         res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
625         if (!res->lockname.name)
626                 goto error;
627
628         dlm_init_lockres(dlm, res, name, namelen);
629         return res;
630
631 error:
632         if (res && res->lockname.name)
633                 kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
634
635         if (res)
636                 kmem_cache_free(dlm_lockres_cache, res);
637         return NULL;
638 }
639
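/*
 * inflight reference tracking: while a lock or unlock is in flight
 * against this resource on this node, the local node's bit stays set in
 * res->refmap and inflight_locks is non-zero, which keeps the lockres
 * from being purged underneath the operation.  the reference taken here
 * is dropped in __dlm_lockres_drop_inflight_ref.
 */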
640 void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
641                                    struct dlm_lock_resource *res,
642                                    int new_lockres,
643                                    const char *file,
644                                    int line)
645 {
646         if (!new_lockres)
647                 assert_spin_locked(&res->spinlock);
648
649         if (!test_bit(dlm->node_num, res->refmap)) {
650                 BUG_ON(res->inflight_locks != 0);
651                 dlm_lockres_set_refmap_bit(dlm->node_num, res);
652         }
653         res->inflight_locks++;
654         mlog(0, "%s:%.*s: inflight++: now %u\n",
655              dlm->name, res->lockname.len, res->lockname.name,
656              res->inflight_locks);
657 }
658
659 void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
660                                    struct dlm_lock_resource *res,
661                                    const char *file,
662                                    int line)
663 {
664         assert_spin_locked(&res->spinlock);
665
666         BUG_ON(res->inflight_locks == 0);
667         res->inflight_locks--;
668         mlog(0, "%s:%.*s: inflight--: now %u\n",
669              dlm->name, res->lockname.len, res->lockname.name,
670              res->inflight_locks);
671         if (res->inflight_locks == 0)
672                 dlm_lockres_clear_refmap_bit(dlm->node_num, res);
673         wake_up(&res->wq);
674 }
675
676 /*
677  * look up a lock resource by name (the lockid is NUL terminated).
678  * the resource may already exist in the hashtable.
679  *
681  * if not, allocate enough for the lockres and for
682  * the temporary structure used in doing the mastering.
683  *
684  * also, do a lookup in the dlm->master_list to see
685  * if another node has begun mastering the same lock.
686  * if so, there should be a block entry in there
687  * for this name, and we should *not* attempt to master
688  * the lock here.   need to wait around for that node
689  * to assert_master (or die).
690  *
691  */
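/*
 * rough flow: 1) look the name up in the lockres hash and, if found,
 * wait out any in-progress mastery or DROPPING_REF state and return it.
 * 2) otherwise allocate a lockres and an mle and re-check the hash.
 * 3) if another node already has an mle for this name, either wait for
 * that mastery/migration to finish or retry; otherwise insert our own
 * MASTER mle.  4) send master requests to every node in the vote map and
 * wait in dlm_wait_for_lock_mastery() until an owner is established.
 */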
692 struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
693                                           const char *lockid,
694                                           int namelen,
695                                           int flags)
696 {
697         struct dlm_lock_resource *tmpres=NULL, *res=NULL;
698         struct dlm_master_list_entry *mle = NULL;
699         struct dlm_master_list_entry *alloc_mle = NULL;
700         int blocked = 0;
701         int ret, nodenum;
702         struct dlm_node_iter iter;
703         unsigned int hash;
704         int tries = 0;
705         int bit, wait_on_recovery = 0;
706         int drop_inflight_if_nonlocal = 0;
707
708         BUG_ON(!lockid);
709
710         hash = dlm_lockid_hash(lockid, namelen);
711
712         mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
713
714 lookup:
715         spin_lock(&dlm->spinlock);
716         tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
717         if (tmpres) {
718                 int dropping_ref = 0;
719
720                 spin_unlock(&dlm->spinlock);
721
722                 spin_lock(&tmpres->spinlock);
723                 /* We wait for the other thread that is mastering the resource */
724                 if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
725                         __dlm_wait_on_lockres(tmpres);
726                         BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
727                 }
728
729                 if (tmpres->owner == dlm->node_num) {
730                         BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
731                         dlm_lockres_grab_inflight_ref(dlm, tmpres);
732                 } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
733                         dropping_ref = 1;
734                 spin_unlock(&tmpres->spinlock);
735
736                 /* wait until done messaging the master, drop our ref to allow
737                  * the lockres to be purged, start over. */
738                 if (dropping_ref) {
739                         spin_lock(&tmpres->spinlock);
740                         __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
741                         spin_unlock(&tmpres->spinlock);
742                         dlm_lockres_put(tmpres);
743                         tmpres = NULL;
744                         goto lookup;
745                 }
746
747                 mlog(0, "found in hash!\n");
748                 if (res)
749                         dlm_lockres_put(res);
750                 res = tmpres;
751                 goto leave;
752         }
753
754         if (!res) {
755                 spin_unlock(&dlm->spinlock);
756                 mlog(0, "allocating a new resource\n");
757                 /* nothing found and we need to allocate one. */
758                 alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
759                 if (!alloc_mle)
760                         goto leave;
761                 res = dlm_new_lockres(dlm, lockid, namelen);
762                 if (!res)
763                         goto leave;
764                 goto lookup;
765         }
766
767         mlog(0, "no lockres found, allocated our own: %p\n", res);
768
769         if (flags & LKM_LOCAL) {
770                 /* caller knows it's safe to assume it's not mastered elsewhere
771                  * DONE!  return right away */
772                 spin_lock(&res->spinlock);
773                 dlm_change_lockres_owner(dlm, res, dlm->node_num);
774                 __dlm_insert_lockres(dlm, res);
775                 dlm_lockres_grab_inflight_ref(dlm, res);
776                 spin_unlock(&res->spinlock);
777                 spin_unlock(&dlm->spinlock);
778                 /* lockres still marked IN_PROGRESS */
779                 goto wake_waiters;
780         }
781
782         /* check master list to see if another node has started mastering it */
783         spin_lock(&dlm->master_lock);
784
785         /* if we found a block, wait for lock to be mastered by another node */
786         blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
787         if (blocked) {
788                 int mig;
789                 if (mle->type == DLM_MLE_MASTER) {
790                         mlog(ML_ERROR, "master entry for nonexistent lock!\n");
791                         BUG();
792                 }
793                 mig = (mle->type == DLM_MLE_MIGRATION);
794                 /* if there is a migration in progress, let the migration
795                  * finish before continuing.  we can wait for the absence
796                  * of the MIGRATION mle: either the migrate finished or
797                  * one of the nodes died and the mle was cleaned up.
798                  * if there is a BLOCK here, but it already has a master
799                  * set, we are too late.  the master does not have a ref
800                  * for us in the refmap.  detach the mle and drop it.
801                  * either way, go back to the top and start over. */
802                 if (mig || mle->master != O2NM_MAX_NODES) {
803                         BUG_ON(mig && mle->master == dlm->node_num);
804                         /* we arrived too late.  the master does not
805                          * have a ref for us. retry. */
806                         mlog(0, "%s:%.*s: late on %s\n",
807                              dlm->name, namelen, lockid,
808                              mig ?  "MIGRATION" : "BLOCK");
809                         spin_unlock(&dlm->master_lock);
810                         spin_unlock(&dlm->spinlock);
811
812                         /* master is known, detach */
813                         if (!mig)
814                                 dlm_mle_detach_hb_events(dlm, mle);
815                         dlm_put_mle(mle);
816                         mle = NULL;
817                         /* this is lame, but we can't wait on either
818                          * the mle or lockres waitqueue here */
819                         if (mig)
820                                 msleep(100);
821                         goto lookup;
822                 }
823         } else {
824                 /* go ahead and try to master lock on this node */
825                 mle = alloc_mle;
826                 /* make sure this does not get freed below */
827                 alloc_mle = NULL;
828                 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
829                 set_bit(dlm->node_num, mle->maybe_map);
830                 __dlm_insert_mle(dlm, mle);
831
832                 /* still holding the dlm spinlock, check the recovery map
833                  * to see if there are any nodes that still need to be
834                  * considered.  these will not appear in the mle nodemap
835                  * but they might own this lockres.  wait on them. */
836                 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
837                 if (bit < O2NM_MAX_NODES) {
838                         mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
839                              "recover before lock mastery can begin\n",
840                              dlm->name, namelen, (char *)lockid, bit);
841                         wait_on_recovery = 1;
842                 }
843         }
844
845         /* at this point there is either a DLM_MLE_BLOCK or a
846          * DLM_MLE_MASTER on the master list, so it's safe to add the
847          * lockres to the hashtable.  anyone who finds the lock will
848          * still have to wait on the IN_PROGRESS. */
849
850         /* finally add the lockres to its hash bucket */
851         __dlm_insert_lockres(dlm, res);
852         /* since this lockres is new it does not require the spinlock */
853         dlm_lockres_grab_inflight_ref_new(dlm, res);
854
855         /* if this node does not become the master make sure to drop
856          * this inflight reference below */
857         drop_inflight_if_nonlocal = 1;
858
859         /* get an extra ref on the mle in case this is a BLOCK
860          * if so, the creator of the BLOCK may try to put the last
861          * ref at this time in the assert master handler, so we
862          * need an extra one to keep from a bad ptr deref. */
863         dlm_get_mle_inuse(mle);
864         spin_unlock(&dlm->master_lock);
865         spin_unlock(&dlm->spinlock);
866
867 redo_request:
868         while (wait_on_recovery) {
869                 /* any cluster changes that occurred after dropping the
870                  * dlm spinlock would be detectable by a change on the mle,
871                  * so we only need to clear out the recovery map once. */
872                 if (dlm_is_recovery_lock(lockid, namelen)) {
873                         mlog(ML_NOTICE, "%s: recovery map is not empty, but "
874                              "must master $RECOVERY lock now\n", dlm->name);
875                         if (!dlm_pre_master_reco_lockres(dlm, res))
876                                 wait_on_recovery = 0;
877                         else {
878                                 mlog(0, "%s: waiting 500ms for heartbeat state "
879                                     "change\n", dlm->name);
880                                 msleep(500);
881                         }
882                         continue;
883                 }
884
885                 dlm_kick_recovery_thread(dlm);
886                 msleep(1000);
887                 dlm_wait_for_recovery(dlm);
888
889                 spin_lock(&dlm->spinlock);
890                 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
891                 if (bit < O2NM_MAX_NODES) {
892                         mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
893                              "recover before lock mastery can begin\n",
894                              dlm->name, namelen, (char *)lockid, bit);
895                         wait_on_recovery = 1;
896                 } else
897                         wait_on_recovery = 0;
898                 spin_unlock(&dlm->spinlock);
899
900                 if (wait_on_recovery)
901                         dlm_wait_for_node_recovery(dlm, bit, 10000);
902         }
903
904         /* must wait for lock to be mastered elsewhere */
905         if (blocked)
906                 goto wait;
907
908         ret = -EINVAL;
909         dlm_node_iter_init(mle->vote_map, &iter);
910         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
911                 ret = dlm_do_master_request(res, mle, nodenum);
912                 if (ret < 0)
913                         mlog_errno(ret);
914                 if (mle->master != O2NM_MAX_NODES) {
915                         /* found a master! */
916                         if (mle->master <= nodenum)
917                                 break;
918                         /* if our master request has not reached the master
919                          * yet, keep going until it does.  this is how the
920                          * master will know that asserts are needed back to
921                          * the lower nodes. */
922                         mlog(0, "%s:%.*s: requests only up to %u but master "
923                              "is %u, keep going\n", dlm->name, namelen,
924                              lockid, nodenum, mle->master);
925                 }
926         }
927
928 wait:
929         /* keep going until the response map includes all nodes */
930         ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
931         if (ret < 0) {
932                 wait_on_recovery = 1;
933                 mlog(0, "%s:%.*s: node map changed, redo the "
934                      "master request now, blocked=%d\n",
935                      dlm->name, res->lockname.len,
936                      res->lockname.name, blocked);
937                 if (++tries > 20) {
938                         mlog(ML_ERROR, "%s:%.*s: spinning on "
939                              "dlm_wait_for_lock_mastery, blocked=%d\n",
940                              dlm->name, res->lockname.len,
941                              res->lockname.name, blocked);
942                         dlm_print_one_lock_resource(res);
943                         dlm_print_one_mle(mle);
944                         tries = 0;
945                 }
946                 goto redo_request;
947         }
948
949         mlog(0, "lockres mastered by %u\n", res->owner);
950         /* make sure we never continue without this */
951         BUG_ON(res->owner == O2NM_MAX_NODES);
952
953         /* master is known, detach if not already detached */
954         dlm_mle_detach_hb_events(dlm, mle);
955         dlm_put_mle(mle);
956         /* put the extra ref */
957         dlm_put_mle_inuse(mle);
958
959 wake_waiters:
960         spin_lock(&res->spinlock);
961         if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
962                 dlm_lockres_drop_inflight_ref(dlm, res);
963         res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
964         spin_unlock(&res->spinlock);
965         wake_up(&res->wq);
966
967 leave:
968         /* need to free the unused mle */
969         if (alloc_mle)
970                 kmem_cache_free(dlm_mle_cache, alloc_mle);
971
972         return res;
973 }
974
975
976 #define DLM_MASTERY_TIMEOUT_MS   5000
977
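/*
 * waits, in DLM_MASTERY_TIMEOUT_MS slices, until mastery of the lockres
 * is resolved: either some node asserts master, or this node turns out
 * to have the lowest number among the remaining candidates and asserts
 * master itself.  if the node map changes while waiting, mastery is
 * restarted via dlm_restart_lock_mastery() and its -EAGAIN is returned
 * to the caller so the master requests can be redone.
 */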
978 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
979                                      struct dlm_lock_resource *res,
980                                      struct dlm_master_list_entry *mle,
981                                      int *blocked)
982 {
983         u8 m;
984         int ret, bit;
985         int map_changed, voting_done;
986         int assert, sleep;
987
988 recheck:
989         ret = 0;
990         assert = 0;
991
992         /* check if another node has already become the owner */
993         spin_lock(&res->spinlock);
994         if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
995                 mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
996                      res->lockname.len, res->lockname.name, res->owner);
997                 spin_unlock(&res->spinlock);
998                 /* this will cause the master to re-assert across
999                  * the whole cluster, freeing up mles */
1000                 if (res->owner != dlm->node_num) {
1001                         ret = dlm_do_master_request(res, mle, res->owner);
1002                         if (ret < 0) {
1003                                 /* give recovery a chance to run */
1004                                 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
1005                                 msleep(500);
1006                                 goto recheck;
1007                         }
1008                 }
1009                 ret = 0;
1010                 goto leave;
1011         }
1012         spin_unlock(&res->spinlock);
1013
1014         spin_lock(&mle->spinlock);
1015         m = mle->master;
1016         map_changed = (memcmp(mle->vote_map, mle->node_map,
1017                               sizeof(mle->vote_map)) != 0);
1018         voting_done = (memcmp(mle->vote_map, mle->response_map,
1019                              sizeof(mle->vote_map)) == 0);
1020
1021         /* restart if we hit any errors */
1022         if (map_changed) {
1023                 int b;
1024                 mlog(0, "%s: %.*s: node map changed, restarting\n",
1025                      dlm->name, res->lockname.len, res->lockname.name);
1026                 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1027                 b = (mle->type == DLM_MLE_BLOCK);
1028                 if ((*blocked && !b) || (!*blocked && b)) {
1029                         mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
1030                              dlm->name, res->lockname.len, res->lockname.name,
1031                              *blocked, b);
1032                         *blocked = b;
1033                 }
1034                 spin_unlock(&mle->spinlock);
1035                 if (ret < 0) {
1036                         mlog_errno(ret);
1037                         goto leave;
1038                 }
1039                 mlog(0, "%s:%.*s: restart lock mastery succeeded, "
1040                      "rechecking now\n", dlm->name, res->lockname.len,
1041                      res->lockname.name);
1042                 goto recheck;
1043         } else {
1044                 if (!voting_done) {
1045                         mlog(0, "map not changed and voting not done "
1046                              "for %s:%.*s\n", dlm->name, res->lockname.len,
1047                              res->lockname.name);
1048                 }
1049         }
1050
1051         if (m != O2NM_MAX_NODES) {
1052                 /* another node has done an assert!
1053                  * all done! */
1054                 sleep = 0;
1055         } else {
1056                 sleep = 1;
1057                 /* have all nodes responded? */
1058                 if (voting_done && !*blocked) {
1059                         bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1060                         if (dlm->node_num <= bit) {
1061                                 /* my node number is lowest.
1062                                  * now tell other nodes that I am
1063                                  * mastering this. */
1064                                 mle->master = dlm->node_num;
1065                                 /* ref was grabbed in get_lock_resource
1066                                  * will be dropped in dlmlock_master */
1067                                 assert = 1;
1068                                 sleep = 0;
1069                         }
1070                         /* if voting is done, but we have not received
1071                          * an assert master yet, we must sleep */
1072                 }
1073         }
1074
1075         spin_unlock(&mle->spinlock);
1076
1077         /* sleep if we haven't finished voting yet */
1078         if (sleep) {
1079                 unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
1080
1081                 /*
1082                 if (atomic_read(&mle->mle_refs.refcount) < 2)
1083                         mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
1084                         atomic_read(&mle->mle_refs.refcount),
1085                         res->lockname.len, res->lockname.name);
1086                 */
1087                 atomic_set(&mle->woken, 0);
1088                 (void)wait_event_timeout(mle->wq,
1089                                          (atomic_read(&mle->woken) == 1),
1090                                          timeo);
1091                 if (res->owner == O2NM_MAX_NODES) {
1092                         mlog(0, "%s:%.*s: waiting again\n", dlm->name,
1093                              res->lockname.len, res->lockname.name);
1094                         goto recheck;
1095                 }
1096                 mlog(0, "done waiting, master is %u\n", res->owner);
1097                 ret = 0;
1098                 goto leave;
1099         }
1100
1101         ret = 0;   /* done */
1102         if (assert) {
1103                 m = dlm->node_num;
1104                 mlog(0, "about to master %.*s here, this=%u\n",
1105                      res->lockname.len, res->lockname.name, m);
1106                 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
1107                 if (ret) {
1108                         /* This is a failure in the network path,
1109                          * not in the response to the assert_master
1110                          * (any nonzero response is a BUG on this node).
1111                          * Most likely a socket just got disconnected
1112                          * due to node death. */
1113                         mlog_errno(ret);
1114                 }
1115                 /* no longer need to restart lock mastery.
1116                  * all living nodes have been contacted. */
1117                 ret = 0;
1118         }
1119
1120         /* set the lockres owner */
1121         spin_lock(&res->spinlock);
1122         /* mastery reference obtained either during
1123          * assert_master_handler or in get_lock_resource */
1124         dlm_change_lockres_owner(dlm, res, m);
1125         spin_unlock(&res->spinlock);
1126
1127 leave:
1128         return ret;
1129 }
1130
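/*
 * helper for dlm_restart_lock_mastery(): walks the nodes whose bits
 * differ between the original vote map and the current node map,
 * reporting for each one whether it came up or went down in the
 * meantime.
 */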
1131 struct dlm_bitmap_diff_iter
1132 {
1133         int curnode;
1134         unsigned long *orig_bm;
1135         unsigned long *cur_bm;
1136         unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
1137 };
1138
1139 enum dlm_node_state_change
1140 {
1141         NODE_DOWN = -1,
1142         NODE_NO_CHANGE = 0,
1143         NODE_UP
1144 };
1145
1146 static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
1147                                       unsigned long *orig_bm,
1148                                       unsigned long *cur_bm)
1149 {
1150         unsigned long p1, p2;
1151         int i;
1152
1153         iter->curnode = -1;
1154         iter->orig_bm = orig_bm;
1155         iter->cur_bm = cur_bm;
1156
1157         for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
1158                 p1 = *(iter->orig_bm + i);
1159                 p2 = *(iter->cur_bm + i);
1160                 iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
1161         }
1162 }
1163
1164 static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
1165                                      enum dlm_node_state_change *state)
1166 {
1167         int bit;
1168
1169         if (iter->curnode >= O2NM_MAX_NODES)
1170                 return -ENOENT;
1171
1172         bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
1173                             iter->curnode+1);
1174         if (bit >= O2NM_MAX_NODES) {
1175                 iter->curnode = O2NM_MAX_NODES;
1176                 return -ENOENT;
1177         }
1178
1179         /* if it was there in the original then this node died */
1180         if (test_bit(bit, iter->orig_bm))
1181                 *state = NODE_DOWN;
1182         else
1183                 *state = NODE_UP;
1184
1185         iter->curnode = bit;
1186         return bit;
1187 }
1188
1189
1190 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1191                                     struct dlm_lock_resource *res,
1192                                     struct dlm_master_list_entry *mle,
1193                                     int blocked)
1194 {
1195         struct dlm_bitmap_diff_iter bdi;
1196         enum dlm_node_state_change sc;
1197         int node;
1198         int ret = 0;
1199
1200         mlog(0, "something happened such that the "
1201              "master process may need to be restarted!\n");
1202
1203         assert_spin_locked(&mle->spinlock);
1204
1205         dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
1206         node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1207         while (node >= 0) {
1208                 if (sc == NODE_UP) {
1209                         /* a node came up.  clear any old vote from
1210                          * the response map and set it in the vote map
1211                          * then restart the mastery. */
1212                         mlog(ML_NOTICE, "node %d up while restarting\n", node);
1213
1214                         /* redo the master request, but only for the new node */
1215                         mlog(0, "sending request to new node\n");
1216                         clear_bit(node, mle->response_map);
1217                         set_bit(node, mle->vote_map);
1218                 } else {
1219                         mlog(ML_ERROR, "node down! %d\n", node);
1220                         if (blocked) {
1221                                 int lowest = find_next_bit(mle->maybe_map,
1222                                                        O2NM_MAX_NODES, 0);
1223
1224                                 /* act like it was never there */
1225                                 clear_bit(node, mle->maybe_map);
1226
1227                                 if (node == lowest) {
1228                                         mlog(0, "expected master %u died"
1229                                             " while this node was blocked "
1230                                             "waiting on it!\n", node);
1231                                         lowest = find_next_bit(mle->maybe_map,
1232                                                         O2NM_MAX_NODES,
1233                                                         lowest+1);
1234                                         if (lowest < O2NM_MAX_NODES) {
1235                                                 mlog(0, "%s:%.*s:still "
1236                                                      "blocked. waiting on %u "
1237                                                      "now\n", dlm->name,
1238                                                      res->lockname.len,
1239                                                      res->lockname.name,
1240                                                      lowest);
1241                                         } else {
1242                                                 /* mle is an MLE_BLOCK, but
1243                                                  * there is now nothing left to
1244                                                  * block on.  we need to return
1245                                                  * all the way back out and try
1246                                                  * again with an MLE_MASTER.
1247                                                  * dlm_do_local_recovery_cleanup
1248                                                  * has already run, so the mle
1249                                                  * refcount is ok */
1250                                                 mlog(0, "%s:%.*s: no "
1251                                                      "longer blocking. try to "
1252                                                      "master this here\n",
1253                                                      dlm->name,
1254                                                      res->lockname.len,
1255                                                      res->lockname.name);
1256                                                 mle->type = DLM_MLE_MASTER;
1257                                                 mle->mleres = res;
1258                                         }
1259                                 }
1260                         }
1261
1262                         /* now blank out everything, as if we had never
1263                          * contacted anyone */
1264                         memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
1265                         memset(mle->response_map, 0, sizeof(mle->response_map));
1266                         /* reset the vote_map to the current node_map */
1267                         memcpy(mle->vote_map, mle->node_map,
1268                                sizeof(mle->node_map));
1269                         /* put myself into the maybe map */
1270                         if (mle->type != DLM_MLE_BLOCK)
1271                                 set_bit(dlm->node_num, mle->maybe_map);
1272                 }
1273                 ret = -EAGAIN;
1274                 node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1275         }
1276         return ret;
1277 }
1278
1279
1280 /*
1281  * DLM_MASTER_REQUEST_MSG
1282  *
1283  * returns: 0 on success,
1284  *          -errno on a network error
1285  *
1286  * on error, the caller should assume the target node is "dead"
1287  *
1288  */
1289
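/*
 * the response is recorded under mle->spinlock: YES makes the target the
 * master, NO and MAYBE just mark the node in response_map (plus
 * maybe_map for MAYBE), and ERROR causes the request to be resent after
 * a short sleep.
 */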
1290 static int dlm_do_master_request(struct dlm_lock_resource *res,
1291                                  struct dlm_master_list_entry *mle, int to)
1292 {
1293         struct dlm_ctxt *dlm = mle->dlm;
1294         struct dlm_master_request request;
1295         int ret, response=0, resend;
1296
1297         memset(&request, 0, sizeof(request));
1298         request.node_idx = dlm->node_num;
1299
1300         BUG_ON(mle->type == DLM_MLE_MIGRATION);
1301
1302         request.namelen = (u8)mle->mnamelen;
1303         memcpy(request.name, mle->mname, request.namelen);
1304
1305 again:
1306         ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
1307                                  sizeof(request), to, &response);
1308         if (ret < 0)  {
1309                 if (ret == -ESRCH) {
1310                         /* should never happen */
1311                         mlog(ML_ERROR, "TCP stack not ready!\n");
1312                         BUG();
1313                 } else if (ret == -EINVAL) {
1314                         mlog(ML_ERROR, "bad args passed to o2net!\n");
1315                         BUG();
1316                 } else if (ret == -ENOMEM) {
1317                         mlog(ML_ERROR, "out of memory while trying to send "
1318                              "network message!  retrying\n");
1319                         /* this is totally crude */
1320                         msleep(50);
1321                         goto again;
1322                 } else if (!dlm_is_host_down(ret)) {
1323                         /* not a network error. bad. */
1324                         mlog_errno(ret);
1325                         mlog(ML_ERROR, "unhandled error!");
1326                         BUG();
1327                 }
1328                 /* all other errors should be network errors,
1329                  * and likely indicate node death */
1330                 mlog(ML_ERROR, "link to %d went down!\n", to);
1331                 goto out;
1332         }
1333
1334         ret = 0;
1335         resend = 0;
1336         spin_lock(&mle->spinlock);
1337         switch (response) {
1338                 case DLM_MASTER_RESP_YES:
1339                         set_bit(to, mle->response_map);
1340                         mlog(0, "node %u is the master, response=YES\n", to);
1341                         mlog(0, "%s:%.*s: master node %u now knows I have a "
1342                              "reference\n", dlm->name, res->lockname.len,
1343                              res->lockname.name, to);
1344                         mle->master = to;
1345                         break;
1346                 case DLM_MASTER_RESP_NO:
1347                         mlog(0, "node %u not master, response=NO\n", to);
1348                         set_bit(to, mle->response_map);
1349                         break;
1350                 case DLM_MASTER_RESP_MAYBE:
1351                         mlog(0, "node %u not master, response=MAYBE\n", to);
1352                         set_bit(to, mle->response_map);
1353                         set_bit(to, mle->maybe_map);
1354                         break;
1355                 case DLM_MASTER_RESP_ERROR:
1356                         mlog(0, "node %u hit an error, resending\n", to);
1357                         resend = 1;
1358                         response = 0;
1359                         break;
1360                 default:
1361                         mlog(ML_ERROR, "bad response! %u\n", response);
1362                         BUG();
1363         }
1364         spin_unlock(&mle->spinlock);
1365         if (resend) {
1366                 /* this is also totally crude */
1367                 msleep(50);
1368                 goto again;
1369         }
1370
1371 out:
1372         return ret;
1373 }
1374
1375 /*
1376  * locks that can be taken here:
1377  * dlm->spinlock
1378  * res->spinlock
1379  * mle->spinlock
1380  * dlm->master_list
1381  *
1382  * if possible, TRIM THIS DOWN!!!
1383  */
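/*
 * Rough sketch of how the locks listed above nest below (illustrative
 * only).  When the lockres is found, dlm->spinlock is dropped before
 * anything else is taken:
 *
 *	spin_lock(&res->spinlock);
 *	spin_lock(&dlm->master_lock);
 *	spin_lock(&tmpmle->spinlock);
 *
 * When it is not found, dlm->spinlock stays held around
 * dlm->master_lock and tmpmle->spinlock instead.
 */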
1384 int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
1385                                void **ret_data)
1386 {
1387         u8 response = DLM_MASTER_RESP_MAYBE;
1388         struct dlm_ctxt *dlm = data;
1389         struct dlm_lock_resource *res = NULL;
1390         struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
1391         struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
1392         char *name;
1393         unsigned int namelen, hash;
1394         int found, ret;
1395         int set_maybe;
1396         int dispatch_assert = 0;
1397
1398         if (!dlm_grab(dlm))
1399                 return DLM_MASTER_RESP_NO;
1400
1401         if (!dlm_domain_fully_joined(dlm)) {
1402                 response = DLM_MASTER_RESP_NO;
1403                 goto send_response;
1404         }
1405
1406         name = request->name;
1407         namelen = request->namelen;
1408         hash = dlm_lockid_hash(name, namelen);
1409
1410         if (namelen > DLM_LOCKID_NAME_MAX) {
1411                 response = DLM_IVBUFLEN;
1412                 goto send_response;
1413         }
1414
1415 way_up_top:
1416         spin_lock(&dlm->spinlock);
1417         res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1418         if (res) {
1419                 spin_unlock(&dlm->spinlock);
1420
1421                 /* take care of the easy cases up front */
1422                 spin_lock(&res->spinlock);
1423                 if (res->state & (DLM_LOCK_RES_RECOVERING|
1424                                   DLM_LOCK_RES_MIGRATING)) {
1425                         spin_unlock(&res->spinlock);
1426                         mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
1427                              "being recovered/migrated\n");
1428                         response = DLM_MASTER_RESP_ERROR;
1429                         if (mle)
1430                                 kmem_cache_free(dlm_mle_cache, mle);
1431                         goto send_response;
1432                 }
1433
1434                 if (res->owner == dlm->node_num) {
1435                         mlog(0, "%s:%.*s: setting bit %u in refmap\n",
1436                              dlm->name, namelen, name, request->node_idx);
1437                         dlm_lockres_set_refmap_bit(request->node_idx, res);
1438                         spin_unlock(&res->spinlock);
1439                         response = DLM_MASTER_RESP_YES;
1440                         if (mle)
1441                                 kmem_cache_free(dlm_mle_cache, mle);
1442
1443                         /* this node is the owner.
1444                          * there is some extra work that needs to
1445                          * happen now.  the requesting node has
1446                          * caused all nodes up to this one to
1447                          * create mles.  this node now needs to
1448                          * go back and clean those up. */
1449                         dispatch_assert = 1;
1450                         goto send_response;
1451                 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1452                         spin_unlock(&res->spinlock);
1453                         // mlog(0, "node %u is the master\n", res->owner);
1454                         response = DLM_MASTER_RESP_NO;
1455                         if (mle)
1456                                 kmem_cache_free(dlm_mle_cache, mle);
1457                         goto send_response;
1458                 }
1459
1460                 /* ok, there is no owner.  either this node is
1461                  * being blocked, or it is actively trying to
1462                  * master this lock. */
1463                 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1464                         mlog(ML_ERROR, "lock with no owner should be "
1465                              "in-progress!\n");
1466                         BUG();
1467                 }
1468
1469                 // mlog(0, "lockres is in progress...\n");
1470                 spin_lock(&dlm->master_lock);
1471                 found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1472                 if (!found) {
1473                         mlog(ML_ERROR, "no mle found for this lock!\n");
1474                         BUG();
1475                 }
1476                 set_maybe = 1;
1477                 spin_lock(&tmpmle->spinlock);
1478                 if (tmpmle->type == DLM_MLE_BLOCK) {
1479                         // mlog(0, "this node is waiting for "
1480                         // "lockres to be mastered\n");
1481                         response = DLM_MASTER_RESP_NO;
1482                 } else if (tmpmle->type == DLM_MLE_MIGRATION) {
1483                         mlog(0, "node %u is master, but trying to migrate to "
1484                              "node %u.\n", tmpmle->master, tmpmle->new_master);
1485                         if (tmpmle->master == dlm->node_num) {
1486                                 mlog(ML_ERROR, "no owner on lockres, but this "
1487                                      "node is trying to migrate it to %u?!\n",
1488                                      tmpmle->new_master);
1489                                 BUG();
1490                         } else {
1491                                 /* the real master can respond on its own */
1492                                 response = DLM_MASTER_RESP_NO;
1493                         }
1494                 } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1495                         set_maybe = 0;
1496                         if (tmpmle->master == dlm->node_num) {
1497                                 response = DLM_MASTER_RESP_YES;
1498                                 /* this node will be the owner.
1499                                  * go back and clean the mles on any
1500                                  * other nodes */
1501                                 dispatch_assert = 1;
1502                                 dlm_lockres_set_refmap_bit(request->node_idx, res);
1503                                 mlog(0, "%s:%.*s: setting bit %u in refmap\n",
1504                                      dlm->name, namelen, name,
1505                                      request->node_idx);
1506                         } else
1507                                 response = DLM_MASTER_RESP_NO;
1508                 } else {
1509                         // mlog(0, "this node is attempting to "
1510                         // "master lockres\n");
1511                         response = DLM_MASTER_RESP_MAYBE;
1512                 }
1513                 if (set_maybe)
1514                         set_bit(request->node_idx, tmpmle->maybe_map);
1515                 spin_unlock(&tmpmle->spinlock);
1516
1517                 spin_unlock(&dlm->master_lock);
1518                 spin_unlock(&res->spinlock);
1519
1520                 /* keep the mle attached to heartbeat events */
1521                 dlm_put_mle(tmpmle);
1522                 if (mle)
1523                         kmem_cache_free(dlm_mle_cache, mle);
1524                 goto send_response;
1525         }
1526
1527         /*
1528          * lockres doesn't exist on this node
1529          * if there is an MLE_BLOCK, return NO
1530          * if there is an MLE_MASTER, return MAYBE
1531          * otherwise, add an MLE_BLOCK, return NO
1532          */
1533         spin_lock(&dlm->master_lock);
1534         found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1535         if (!found) {
1536                 /* this lockid has never been seen on this node yet */
1537                 // mlog(0, "no mle found\n");
1538                 if (!mle) {
1539                         spin_unlock(&dlm->master_lock);
1540                         spin_unlock(&dlm->spinlock);
1541
1542                         mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
1543                         if (!mle) {
1544                                 response = DLM_MASTER_RESP_ERROR;
1545                                 mlog_errno(-ENOMEM);
1546                                 goto send_response;
1547                         }
1548                         goto way_up_top;
1549                 }
1550
1551                 // mlog(0, "this is second time thru, already allocated, "
1552                 // "add the block.\n");
1553                 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
1554                 set_bit(request->node_idx, mle->maybe_map);
1555                 __dlm_insert_mle(dlm, mle);
1556                 response = DLM_MASTER_RESP_NO;
1557         } else {
1558                 // mlog(0, "mle was found\n");
1559                 set_maybe = 1;
1560                 spin_lock(&tmpmle->spinlock);
1561                 if (tmpmle->master == dlm->node_num) {
1562                         mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
1563                         BUG();
1564                 }
1565                 if (tmpmle->type == DLM_MLE_BLOCK)
1566                         response = DLM_MASTER_RESP_NO;
1567                 else if (tmpmle->type == DLM_MLE_MIGRATION) {
1568                         mlog(0, "migration mle was found (%u->%u)\n",
1569                              tmpmle->master, tmpmle->new_master);
1570                         /* real master can respond on its own */
1571                         response = DLM_MASTER_RESP_NO;
1572                 } else
1573                         response = DLM_MASTER_RESP_MAYBE;
1574                 if (set_maybe)
1575                         set_bit(request->node_idx, tmpmle->maybe_map);
1576                 spin_unlock(&tmpmle->spinlock);
1577         }
1578         spin_unlock(&dlm->master_lock);
1579         spin_unlock(&dlm->spinlock);
1580
1581         if (found) {
1582                 /* keep the mle attached to heartbeat events */
1583                 dlm_put_mle(tmpmle);
1584         }
1585 send_response:
1586         /*
1587          * __dlm_lookup_lockres() grabbed a reference to this lockres.
1588          * The reference is released by dlm_assert_master_worker() under
1589          * the call to dlm_dispatch_assert_master().  If
1590          * dlm_assert_master_worker() isn't called, we drop it here.
1591          */
1592         if (dispatch_assert) {
1593                 if (response != DLM_MASTER_RESP_YES)
1594                         mlog(ML_ERROR, "invalid response %d\n", response);
1595                 if (!res) {
1596                         mlog(ML_ERROR, "bad lockres while trying to assert!\n");
1597                         BUG();
1598                 }
1599                 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1600                              dlm->node_num, res->lockname.len, res->lockname.name);
1601                 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
1602                                                  DLM_ASSERT_MASTER_MLE_CLEANUP);
1603                 if (ret < 0) {
1604                         mlog(ML_ERROR, "failed to dispatch assert master work\n");
1605                         response = DLM_MASTER_RESP_ERROR;
1606                         dlm_lockres_put(res);
1607                 }
1608         } else {
1609                 if (res)
1610                         dlm_lockres_put(res);
1611         }
1612
1613         dlm_put(dlm);
1614         return response;
1615 }
1616
1617 /*
1618  * DLM_ASSERT_MASTER_MSG
1619  */
1620
1621
1622 /*
1623  * NOTE: this can be used for debugging
1624  * can periodically run all locks owned by this node
1625  * and re-assert across the cluster...
1626  */
1627 static int dlm_do_assert_master(struct dlm_ctxt *dlm,
1628                                 struct dlm_lock_resource *res,
1629                                 void *nodemap, u32 flags)
1630 {
1631         struct dlm_assert_master assert;
1632         int to, tmpret;
1633         struct dlm_node_iter iter;
1634         int ret = 0;
1635         int reassert;
1636         const char *lockname = res->lockname.name;
1637         unsigned int namelen = res->lockname.len;
1638
1639         BUG_ON(namelen > O2NM_MAX_NAME_LEN);
1640
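        /* Hold SETREF_INPROG across the whole assert: an incoming deref
         * (see dlm_deref_lockres_handler() below) is then deferred to a
         * worker that waits on this flag before touching the refmap. */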
1641         spin_lock(&res->spinlock);
1642         res->state |= DLM_LOCK_RES_SETREF_INPROG;
1643         spin_unlock(&res->spinlock);
1644
1645 again:
1646         reassert = 0;
1647
1648         /* note that if this nodemap is empty, it returns 0 */
1649         dlm_node_iter_init(nodemap, &iter);
1650         while ((to = dlm_node_iter_next(&iter)) >= 0) {
1651                 int r = 0;
1652                 struct dlm_master_list_entry *mle = NULL;
1653
1654                 mlog(0, "sending assert master to %d (%.*s)\n", to,
1655                      namelen, lockname);
1656                 memset(&assert, 0, sizeof(assert));
1657                 assert.node_idx = dlm->node_num;
1658                 assert.namelen = namelen;
1659                 memcpy(assert.name, lockname, namelen);
1660                 assert.flags = cpu_to_be32(flags);
1661
1662                 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
1663                                             &assert, sizeof(assert), to, &r);
1664                 if (tmpret < 0) {
1665                         mlog(ML_ERROR, "Error %d when sending message %u (key "
1666                              "0x%x) to node %u\n", tmpret,
1667                              DLM_ASSERT_MASTER_MSG, dlm->key, to);
1668                         if (!dlm_is_host_down(tmpret)) {
1669                                 mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
1670                                 BUG();
1671                         }
1672                         /* a node died.  finish out the rest of the nodes. */
1673                         mlog(0, "link to %d went down!\n", to);
1674                         /* any nonzero status return will do */
1675                         ret = tmpret;
1676                         r = 0;
1677                 } else if (r < 0) {
1678                         /* ok, something is horribly messed up.  kill thyself. */
1679                         mlog(ML_ERROR,"during assert master of %.*s to %u, "
1680                              "got %d.\n", namelen, lockname, to, r);
1681                         spin_lock(&dlm->spinlock);
1682                         spin_lock(&dlm->master_lock);
1683                         if (dlm_find_mle(dlm, &mle, (char *)lockname,
1684                                          namelen)) {
1685                                 dlm_print_one_mle(mle);
1686                                 __dlm_put_mle(mle);
1687                         }
1688                         spin_unlock(&dlm->master_lock);
1689                         spin_unlock(&dlm->spinlock);
1690                         BUG();
1691                 }
1692
1693                 if (r & DLM_ASSERT_RESPONSE_REASSERT &&
1694                     !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
1695                                 mlog(ML_ERROR, "%.*s: very strange, "
1696                                      "master MLE but no lockres on %u\n",
1697                                      namelen, lockname, to);
1698                 }
1699
1700                 if (r & DLM_ASSERT_RESPONSE_REASSERT) {
1701                         mlog(0, "%.*s: node %u created mles on other "
1702                              "nodes and requests a re-assert\n",
1703                              namelen, lockname, to);
1704                         reassert = 1;
1705                 }
1706                 if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
1707                         mlog(0, "%.*s: node %u has a reference to this "
1708                              "lockres, set the bit in the refmap\n",
1709                              namelen, lockname, to);
1710                         spin_lock(&res->spinlock);
1711                         dlm_lockres_set_refmap_bit(to, res);
1712                         spin_unlock(&res->spinlock);
1713                 }
1714         }
1715
1716         if (reassert)
1717                 goto again;
1718
1719         spin_lock(&res->spinlock);
1720         res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
1721         spin_unlock(&res->spinlock);
1722         wake_up(&res->wq);
1723
1724         return ret;
1725 }
1726
1727 /*
1728  * locks that can be taken here:
1729  * dlm->spinlock
1730  * res->spinlock
1731  * mle->spinlock
1732  * dlm->master_list
1733  *
1734  * if possible, TRIM THIS DOWN!!!
1735  */
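/*
 * On success this handler returns a bitmask to the asserting node (see
 * dlm_do_assert_master() above): DLM_ASSERT_RESPONSE_REASSERT asks the
 * sender to run the assert again so mles created on other nodes get
 * cleaned up, and DLM_ASSERT_RESPONSE_MASTERY_REF tells it that this
 * node holds a reference to the lockres so the sender can set the
 * refmap bit.  A negative return (the "kill" path) shoots down the
 * asserting node.
 */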
1736 int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1737                               void **ret_data)
1738 {
1739         struct dlm_ctxt *dlm = data;
1740         struct dlm_master_list_entry *mle = NULL;
1741         struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1742         struct dlm_lock_resource *res = NULL;
1743         char *name;
1744         unsigned int namelen, hash;
1745         u32 flags;
1746         int master_request = 0, have_lockres_ref = 0;
1747         int ret = 0;
1748
1749         if (!dlm_grab(dlm))
1750                 return 0;
1751
1752         name = assert->name;
1753         namelen = assert->namelen;
1754         hash = dlm_lockid_hash(name, namelen);
1755         flags = be32_to_cpu(assert->flags);
1756
1757         if (namelen > DLM_LOCKID_NAME_MAX) {
1758                 mlog(ML_ERROR, "Invalid name length!");
1759                 goto done;
1760         }
1761
1762         spin_lock(&dlm->spinlock);
1763
1764         if (flags)
1765                 mlog(0, "assert_master with flags: %u\n", flags);
1766
1767         /* find the MLE */
1768         spin_lock(&dlm->master_lock);
1769         if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1770                 /* not an error, could be master just re-asserting */
1771                 mlog(0, "just got an assert_master from %u, but no "
1772                      "MLE for it! (%.*s)\n", assert->node_idx,
1773                      namelen, name);
1774         } else {
1775                 int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
1776                 if (bit >= O2NM_MAX_NODES) {
1777                         /* not necessarily an error, though less likely.
1778                          * could be master just re-asserting. */
1779                         mlog(0, "no bits set in the maybe_map, but %u "
1780                              "is asserting! (%.*s)\n", assert->node_idx,
1781                              namelen, name);
1782                 } else if (bit != assert->node_idx) {
1783                         if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1784                                 mlog(0, "master %u was found, %u should "
1785                                      "back off\n", assert->node_idx, bit);
1786                         } else {
1787                                 /* with the fix for bug 569, a higher node
1788                                  * number winning the mastery will respond
1789                                  * YES to mastery requests, but this node
1790                                  * had no way of knowing.  let it pass. */
1791                                 mlog(0, "%u is the lowest node, "
1792                                      "%u is asserting. (%.*s)  %u must "
1793                                      "have begun after %u won.\n", bit,
1794                                      assert->node_idx, namelen, name, bit,
1795                                      assert->node_idx);
1796                         }
1797                 }
1798                 if (mle->type == DLM_MLE_MIGRATION) {
1799                         if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1800                                 mlog(0, "%s:%.*s: got cleanup assert"
1801                                      " from %u for migration\n",
1802                                      dlm->name, namelen, name,
1803                                      assert->node_idx);
1804                         } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
1805                                 mlog(0, "%s:%.*s: got unrelated assert"
1806                                      " from %u for migration, ignoring\n",
1807                                      dlm->name, namelen, name,
1808                                      assert->node_idx);
1809                                 __dlm_put_mle(mle);
1810                                 spin_unlock(&dlm->master_lock);
1811                                 spin_unlock(&dlm->spinlock);
1812                                 goto done;
1813                         }
1814                 }
1815         }
1816         spin_unlock(&dlm->master_lock);
1817
1818         /* ok everything checks out with the MLE
1819          * now check to see if there is a lockres */
1820         res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1821         if (res) {
1822                 spin_lock(&res->spinlock);
1823                 if (res->state & DLM_LOCK_RES_RECOVERING)  {
1824                         mlog(ML_ERROR, "%u asserting but %.*s is "
1825                              "RECOVERING!\n", assert->node_idx, namelen, name);
1826                         goto kill;
1827                 }
1828                 if (!mle) {
1829                         if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1830                             res->owner != assert->node_idx) {
1831                                 mlog(ML_ERROR, "DIE! Mastery assert from %u, "
1832                                      "but current owner is %u! (%.*s)\n",
1833                                      assert->node_idx, res->owner, namelen,
1834                                      name);
1835                                 __dlm_print_one_lock_resource(res);
1836                                 BUG();
1837                         }
1838                 } else if (mle->type != DLM_MLE_MIGRATION) {
1839                         if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1840                                 /* owner is just re-asserting */
1841                                 if (res->owner == assert->node_idx) {
1842                                         mlog(0, "owner %u re-asserting on "
1843                                              "lock %.*s\n", assert->node_idx,
1844                                              namelen, name);
1845                                         goto ok;
1846                                 }
1847                                 mlog(ML_ERROR, "got assert_master from "
1848                                      "node %u, but %u is the owner! "
1849                                      "(%.*s)\n", assert->node_idx,
1850                                      res->owner, namelen, name);
1851                                 goto kill;
1852                         }
1853                         if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1854                                 mlog(ML_ERROR, "got assert from %u, but lock "
1855                                      "with no owner should be "
1856                                      "in-progress! (%.*s)\n",
1857                                      assert->node_idx,
1858                                      namelen, name);
1859                                 goto kill;
1860                         }
1861                 } else /* mle->type == DLM_MLE_MIGRATION */ {
1862                         /* should only be getting an assert from new master */
1863                         if (assert->node_idx != mle->new_master) {
1864                                 mlog(ML_ERROR, "got assert from %u, but "
1865                                      "new master is %u, and old master "
1866                                      "was %u (%.*s)\n",
1867                                      assert->node_idx, mle->new_master,
1868                                      mle->master, namelen, name);
1869                                 goto kill;
1870                         }
1871
1872                 }
1873 ok:
1874                 spin_unlock(&res->spinlock);
1875         }
1876
1877         // mlog(0, "woo!  got an assert_master from node %u!\n",
1878         //           assert->node_idx);
1879         if (mle) {
1880                 int extra_ref = 0;
1881                 int nn = -1;
1882                 int rr, err = 0;
1883
1884                 spin_lock(&mle->spinlock);
1885                 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1886                         extra_ref = 1;
1887                 else {
1888                         /* MASTER mle: if any bits are set in the response map
1889                          * then the calling node needs to re-assert to clear
1890                          * up nodes that this node contacted */
1891                         while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
1892                                                     nn+1)) < O2NM_MAX_NODES) {
1893                                 if (nn != dlm->node_num && nn != assert->node_idx)
1894                                         master_request = 1;
1895                         }
1896                 }
1897                 mle->master = assert->node_idx;
1898                 atomic_set(&mle->woken, 1);
1899                 wake_up(&mle->wq);
1900                 spin_unlock(&mle->spinlock);
1901
1902                 if (res) {
1903                         int wake = 0;
1904                         spin_lock(&res->spinlock);
1905                         if (mle->type == DLM_MLE_MIGRATION) {
1906                                 mlog(0, "finishing off migration of lockres %.*s, "
1907                                         "from %u to %u\n",
1908                                         res->lockname.len, res->lockname.name,
1909                                         dlm->node_num, mle->new_master);
1910                                 res->state &= ~DLM_LOCK_RES_MIGRATING;
1911                                 wake = 1;
1912                                 dlm_change_lockres_owner(dlm, res, mle->new_master);
1913                                 BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1914                         } else {
1915                                 dlm_change_lockres_owner(dlm, res, mle->master);
1916                         }
1917                         spin_unlock(&res->spinlock);
1918                         have_lockres_ref = 1;
1919                         if (wake)
1920                                 wake_up(&res->wq);
1921                 }
1922
1923                 /* master is known, detach if not already detached.
1924                  * ensures that only one assert_master call will happen
1925                  * on this mle. */
1926                 spin_lock(&dlm->master_lock);
1927
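                /* Sanity-check the mle refcount: expect at least one ref for
                 * the hash list, one more if some caller holds it in use, and
                 * one extra for BLOCK/MIGRATION mles (the extra ref given by
                 * the master / migration request message). */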
1928                 rr = atomic_read(&mle->mle_refs.refcount);
1929                 if (mle->inuse > 0) {
1930                         if (extra_ref && rr < 3)
1931                                 err = 1;
1932                         else if (!extra_ref && rr < 2)
1933                                 err = 1;
1934                 } else {
1935                         if (extra_ref && rr < 2)
1936                                 err = 1;
1937                         else if (!extra_ref && rr < 1)
1938                                 err = 1;
1939                 }
1940                 if (err) {
1941                         mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
1942                              "that will mess up this node, refs=%d, extra=%d, "
1943                              "inuse=%d\n", dlm->name, namelen, name,
1944                              assert->node_idx, rr, extra_ref, mle->inuse);
1945                         dlm_print_one_mle(mle);
1946                 }
1947                 __dlm_unlink_mle(dlm, mle);
1948                 __dlm_mle_detach_hb_events(dlm, mle);
1949                 __dlm_put_mle(mle);
1950                 if (extra_ref) {
1951                         /* the assert master message now balances the extra
1952                          * ref given by the master / migration request message.
1953                          * if this is the last put, it will be removed
1954                          * from the list. */
1955                         __dlm_put_mle(mle);
1956                 }
1957                 spin_unlock(&dlm->master_lock);
1958         } else if (res) {
1959                 if (res->owner != assert->node_idx) {
1960                         mlog(0, "assert_master from %u, but current "
1961                              "owner is %u (%.*s), no mle\n", assert->node_idx,
1962                              res->owner, namelen, name);
1963                 }
1964         }
1965         spin_unlock(&dlm->spinlock);
1966
1967 done:
1968         ret = 0;
1969         if (res) {
1970                 spin_lock(&res->spinlock);
1971                 res->state |= DLM_LOCK_RES_SETREF_INPROG;
1972                 spin_unlock(&res->spinlock);
1973                 *ret_data = (void *)res;
1974         }
1975         dlm_put(dlm);
1976         if (master_request) {
1977                 mlog(0, "need to tell master to reassert\n");
1978                 /* positive. negative would shoot down the node. */
1979                 ret |= DLM_ASSERT_RESPONSE_REASSERT;
1980                 if (!have_lockres_ref) {
1981                         mlog(ML_ERROR, "strange, got assert from %u, MASTER "
1982                              "mle present here for %s:%.*s, but no lockres!\n",
1983                              assert->node_idx, dlm->name, namelen, name);
1984                 }
1985         }
1986         if (have_lockres_ref) {
1987                 /* let the master know we have a reference to the lockres */
1988                 ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
1989                 mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
1990                      dlm->name, namelen, name, assert->node_idx);
1991         }
1992         return ret;
1993
1994 kill:
1995         /* kill the caller! */
1996         mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
1997              "and killing the other node now!  This node is OK and can continue.\n");
1998         __dlm_print_one_lock_resource(res);
1999         spin_unlock(&res->spinlock);
2000         spin_unlock(&dlm->spinlock);
2001         *ret_data = (void *)res;
2002         dlm_put(dlm);
2003         return -EINVAL;
2004 }
2005
2006 void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
2007 {
2008         struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2009
2010         if (ret_data) {
2011                 spin_lock(&res->spinlock);
2012                 res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2013                 spin_unlock(&res->spinlock);
2014                 wake_up(&res->wq);
2015                 dlm_lockres_put(res);
2016         }
2017         return;
2018 }
2019
2020 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2021                                struct dlm_lock_resource *res,
2022                                int ignore_higher, u8 request_from, u32 flags)
2023 {
2024         struct dlm_work_item *item;
2025         item = kzalloc(sizeof(*item), GFP_NOFS);
2026         if (!item)
2027                 return -ENOMEM;
2028
2029
2030         /* queue up work for dlm_assert_master_worker */
2031         dlm_grab(dlm);  /* get an extra ref for the work item */
2032         dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
2033         item->u.am.lockres = res; /* already have a ref */
2034         /* can optionally ignore node numbers higher than this node */
2035         item->u.am.ignore_higher = ignore_higher;
2036         item->u.am.request_from = request_from;
2037         item->u.am.flags = flags;
2038
2039         if (ignore_higher)
2040                 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2041                      res->lockname.name);
2042
2043         spin_lock(&dlm->work_lock);
2044         list_add_tail(&item->list, &dlm->work_list);
2045         spin_unlock(&dlm->work_lock);
2046
2047         queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2048         return 0;
2049 }
2050
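/* Runs from dlm->dlm_worker via dispatched_work for items queued by
 * dlm_dispatch_assert_master() above; the lockres reference handed over
 * in item->u.am.lockres is dropped here when the work is done, which is
 * why callers must pass in a reference they already hold. */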
2051 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2052 {
2053         struct dlm_ctxt *dlm = data;
2054         int ret = 0;
2055         struct dlm_lock_resource *res;
2056         unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
2057         int ignore_higher;
2058         int bit;
2059         u8 request_from;
2060         u32 flags;
2061
2062         dlm = item->dlm;
2063         res = item->u.am.lockres;
2064         ignore_higher = item->u.am.ignore_higher;
2065         request_from = item->u.am.request_from;
2066         flags = item->u.am.flags;
2067
2068         spin_lock(&dlm->spinlock);
2069         memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
2070         spin_unlock(&dlm->spinlock);
2071
2072         clear_bit(dlm->node_num, nodemap);
2073         if (ignore_higher) {
2074                 /* if this is just to clear up mles for nodes below
2075                  * this node, do not send the message to the original
2076                  * caller or any node number higher than this */
2077                 clear_bit(request_from, nodemap);
2078                 bit = dlm->node_num;
2079                 while (1) {
2080                         bit = find_next_bit(nodemap, O2NM_MAX_NODES,
2081                                             bit+1);
2082                         if (bit >= O2NM_MAX_NODES)
2083                                 break;
2084                         clear_bit(bit, nodemap);
2085                 }
2086         }
2087
2088         /*
2089          * If we're migrating this lock to someone else, we are no
2090          * longer allowed to assert our own mastery.  OTOH, we need to
2091          * prevent migration from starting while we're still asserting
2092          * our dominance.  The reserved ast delays migration.
2093          */
2094         spin_lock(&res->spinlock);
2095         if (res->state & DLM_LOCK_RES_MIGRATING) {
2096                 mlog(0, "Someone asked us to assert mastery, but we're "
2097                      "in the middle of migration.  Skipping assert, "
2098                      "the new master will handle that.\n");
2099                 spin_unlock(&res->spinlock);
2100                 goto put;
2101         } else
2102                 __dlm_lockres_reserve_ast(res);
2103         spin_unlock(&res->spinlock);
2104
2105         /* this call now finishes out the nodemap
2106          * even if one or more nodes die */
2107         mlog(0, "worker about to master %.*s here, this=%u\n",
2108                      res->lockname.len, res->lockname.name, dlm->node_num);
2109         ret = dlm_do_assert_master(dlm, res, nodemap, flags);
2110         if (ret < 0) {
2111                 /* no need to restart, we are done */
2112                 if (!dlm_is_host_down(ret))
2113                         mlog_errno(ret);
2114         }
2115
2116         /* Ok, we've asserted ourselves.  Let's let migration start. */
2117         dlm_lockres_release_ast(dlm, res);
2118
2119 put:
2120         dlm_lockres_put(res);
2121
2122         mlog(0, "finished with dlm_assert_master_worker\n");
2123 }
2124
2125 /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
2126  * We cannot wait for node recovery to complete to begin mastering this
2127  * lockres because this lockres is used to kick off recovery! ;-)
2128  * So, do a pre-check on all living nodes to see if any of those nodes
2129  * think that $RECOVERY is currently mastered by a dead node.  If so,
2130  * we wait a short time to allow that node to get notified by its own
2131  * heartbeat stack, then check again.  All $RECOVERY lock resources
2132  * mastered by dead nodes are purged when the heartbeat callback is
2133  * fired, so we can know for sure that it is safe to continue once
2134  * the node returns a live node or no node.  */
2135 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2136                                        struct dlm_lock_resource *res)
2137 {
2138         struct dlm_node_iter iter;
2139         int nodenum;
2140         int ret = 0;
2141         u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2142
2143         spin_lock(&dlm->spinlock);
2144         dlm_node_iter_init(dlm->domain_map, &iter);
2145         spin_unlock(&dlm->spinlock);
2146
2147         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2148                 /* do not send to self */
2149                 if (nodenum == dlm->node_num)
2150                         continue;
2151                 ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2152                 if (ret < 0) {
2153                         mlog_errno(ret);
2154                         if (!dlm_is_host_down(ret))
2155                                 BUG();
2156                         /* host is down, so answer for that node would be
2157                          * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
2158                         ret = 0;
2159                 }
2160
2161                 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2162                         /* check to see if this master is in the recovery map */
2163                         spin_lock(&dlm->spinlock);
2164                         if (test_bit(master, dlm->recovery_map)) {
2165                                 mlog(ML_NOTICE, "%s: node %u has not seen "
2166                                      "node %u go down yet, and thinks the "
2167                                      "dead node is mastering the recovery "
2168                                      "lock.  must wait.\n", dlm->name,
2169                                      nodenum, master);
2170                                 ret = -EAGAIN;
2171                         }
2172                         spin_unlock(&dlm->spinlock);
2173                         mlog(0, "%s: reco lock master is %u\n", dlm->name,
2174                              master);
2175                         break;
2176                 }
2177         }
2178         return ret;
2179 }
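/*
 * Minimal usage sketch (hypothetical caller; the 100ms retry interval is
 * an assumption, not taken from this file):
 *
 *	while (dlm_pre_master_reco_lockres(dlm, res) == -EAGAIN)
 *		msleep(100);
 */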
2180
2181 /*
2182  * DLM_DEREF_LOCKRES_MSG
2183  */
2184
2185 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2186 {
2187         struct dlm_deref_lockres deref;
2188         int ret = 0, r;
2189         const char *lockname;
2190         unsigned int namelen;
2191
2192         lockname = res->lockname.name;
2193         namelen = res->lockname.len;
2194         BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2195
2196         mlog(0, "%s:%.*s: sending deref to %d\n",
2197              dlm->name, namelen, lockname, res->owner);
2198         memset(&deref, 0, sizeof(deref));
2199         deref.node_idx = dlm->node_num;
2200         deref.namelen = namelen;
2201         memcpy(deref.name, lockname, namelen);
2202
2203         ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2204                                  &deref, sizeof(deref), res->owner, &r);
2205         if (ret < 0)
2206                 mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
2207                      "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key,
2208                      res->owner);
2209         else if (r < 0) {
2210                 /* BAD.  other node says I did not have a ref. */
2211                 mlog(ML_ERROR,"while dropping ref on %s:%.*s "
2212                     "(master=%u) got %d.\n", dlm->name, namelen,
2213                     lockname, res->owner, r);
2214                 dlm_print_one_lock_resource(res);
2215                 BUG();
2216         }
2217         return ret;
2218 }
2219
2220 int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2221                               void **ret_data)
2222 {
2223         struct dlm_ctxt *dlm = data;
2224         struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2225         struct dlm_lock_resource *res = NULL;
2226         char *name;
2227         unsigned int namelen;
2228         int ret = -EINVAL;
2229         u8 node;
2230         unsigned int hash;
2231         struct dlm_work_item *item;
2232         int cleared = 0;
2233         int dispatch = 0;
2234
2235         if (!dlm_grab(dlm))
2236                 return 0;
2237
2238         name = deref->name;
2239         namelen = deref->namelen;
2240         node = deref->node_idx;
2241
2242         if (namelen > DLM_LOCKID_NAME_MAX) {
2243                 mlog(ML_ERROR, "Invalid name length!");
2244                 goto done;
2245         }
2246         if (deref->node_idx >= O2NM_MAX_NODES) {
2247                 mlog(ML_ERROR, "Invalid node number: %u\n", node);
2248                 goto done;
2249         }
2250
2251         hash = dlm_lockid_hash(name, namelen);
2252
2253         spin_lock(&dlm->spinlock);
2254         res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2255         if (!res) {
2256                 spin_unlock(&dlm->spinlock);
2257                 mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2258                      dlm->name, namelen, name);
2259                 goto done;
2260         }
2261         spin_unlock(&dlm->spinlock);
2262
2263         spin_lock(&res->spinlock);
2264         if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2265                 dispatch = 1;
2266         else {
2267                 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2268                 if (test_bit(node, res->refmap)) {
2269                         dlm_lockres_clear_refmap_bit(node, res);
2270                         cleared = 1;
2271                 }
2272         }
2273         spin_unlock(&res->spinlock);
2274
2275         if (!dispatch) {
2276                 if (cleared)
2277                         dlm_lockres_calc_usage(dlm, res);
2278                 else {
2279                         mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2280                         "but it is already dropped!\n", dlm->name,
2281                         res->lockname.len, res->lockname.name, node);
2282                         dlm_print_one_lock_resource(res);
2283                 }
2284                 ret = 0;
2285                 goto done;
2286         }
2287
2288         item = kzalloc(sizeof(*item), GFP_NOFS);
2289         if (!item) {
2290                 ret = -ENOMEM;
2291                 mlog_errno(ret);
2292                 goto done;
2293         }
2294
2295         dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2296         item->u.dl.deref_res = res;
2297         item->u.dl.deref_node = node;
2298
2299         spin_lock(&dlm->work_lock);
2300         list_add_tail(&item->list, &dlm->work_list);
2301         spin_unlock(&dlm->work_lock);
2302
2303         queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2304         return 0;
2305
2306 done:
2307         if (res)
2308                 dlm_lockres_put(res);
2309         dlm_put(dlm);
2310
2311         return ret;
2312 }
2313
2314 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2315 {
2316         struct dlm_ctxt *dlm;
2317         struct dlm_lock_resource *res;
2318         u8 node;
2319         u8 cleared = 0;
2320
2321         dlm = item->dlm;
2322         res = item->u.dl.deref_res;
2323         node = item->u.dl.deref_node;
2324
2325         spin_lock(&res->spinlock);
2326         BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2327         if (test_bit(node, res->refmap)) {
2328                 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2329                 dlm_lockres_clear_refmap_bit(node, res);
2330                 cleared = 1;
2331         }
2332         spin_unlock(&res->spinlock);
2333
2334         if (cleared) {
2335                 mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2336                      dlm->name, res->lockname.len, res->lockname.name, node);
2337                 dlm_lockres_calc_usage(dlm, res);
2338         } else {
2339                 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2340                      "but it is already dropped!\n", dlm->name,
2341                      res->lockname.len, res->lockname.name, node);
2342                 dlm_print_one_lock_resource(res);
2343         }
2344
2345         dlm_lockres_put(res);
2346 }
2347
2348 /* Checks whether the lockres can be migrated. Returns 0 if yes, < 0
2349  * if not. If 0, numlocks is set to the number of locks in the lockres.
2350  */
2351 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2352                                       struct dlm_lock_resource *res,
2353                                       int *numlocks)
2354 {
2355         int ret;
2356         int i;
2357         int count = 0;
2358         struct list_head *queue;
2359         struct dlm_lock *lock;
2360
2361         assert_spin_locked(&res->spinlock);
2362
2363         ret = -EINVAL;
2364         if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2365                 mlog(0, "cannot migrate lockres with unknown owner!\n");
2366                 goto leave;
2367         }
2368
2369         if (res->owner != dlm->node_num) {
2370                 mlog(0, "cannot migrate lockres this node doesn't own!\n");
2371                 goto leave;
2372         }
2373
2374         ret = 0;
2375         queue = &res->granted;
2376         for (i = 0; i < 3; i++) {
2377                 list_for_each_entry(lock, queue, list) {
2378                         ++count;
2379                         if (lock->ml.node == dlm->node_num) {
2380                                 mlog(0, "found a lock owned by this node still "
2381                                      "on the %s queue!  will not migrate this "
2382                                      "lockres\n", (i == 0 ? "granted" :
2383                                                    (i == 1 ? "converting" :
2384                                                     "blocked")));
2385                                 ret = -ENOTEMPTY;
2386                                 goto leave;
2387                         }
2388                 }
2389                 queue++;
2390         }
2391
2392         *numlocks = count;
2393         mlog(0, "migrateable lockres having %d locks\n", *numlocks);
2394
2395 leave:
2396         return ret;
2397 }
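/*
 * Illustrative call pattern only (both callers below follow it): the
 * check must run with res->spinlock held, which the function asserts.
 *
 *	spin_lock(&res->spinlock);
 *	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
 *	spin_unlock(&res->spinlock);
 */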
2398
2399 /*
2400  * DLM_MIGRATE_LOCKRES
2401  */
2402
2403
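/*
 * Migration outline (a summary of the steps below): check that the
 * lockres is migrateable, preallocate mres and an mle, pick a live
 * target node, install a DLM_MLE_MIGRATION mle, set the MIGRATING flag
 * and flush asts, send the lock state with DLM_MRES_MIGRATION, then
 * wait for the target to assert master before handing ownership over.
 */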
2404 static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2405                                struct dlm_lock_resource *res,
2406                                u8 target)
2407 {
2408         struct dlm_master_list_entry *mle = NULL;
2409         struct dlm_master_list_entry *oldmle = NULL;
2410         struct dlm_migratable_lockres *mres = NULL;
2411         int ret = 0;
2412         const char *name;
2413         unsigned int namelen;
2414         int mle_added = 0;
2415         int numlocks;
2416         int wake = 0;
2417
2418         if (!dlm_grab(dlm))
2419                 return -EINVAL;
2420
2421         name = res->lockname.name;
2422         namelen = res->lockname.len;
2423
2424         mlog(0, "migrating %.*s to %u\n", namelen, name, target);
2425
2426         /*
2427          * ensure this lockres is a proper candidate for migration
2428          */
2429         spin_lock(&res->spinlock);
2430         ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
2431         if (ret < 0) {
2432                 spin_unlock(&res->spinlock);
2433                 goto leave;
2434         }
2435         spin_unlock(&res->spinlock);
2436
2437         /* no work to do */
2438         if (numlocks == 0) {
2439                 mlog(0, "no locks were found on this lockres! done!\n");
2440                 goto leave;
2441         }
2442
2443         /*
2444          * preallocate up front
2445          * if this fails, abort
2446          */
2447
2448         ret = -ENOMEM;
2449         mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
2450         if (!mres) {
2451                 mlog_errno(ret);
2452                 goto leave;
2453         }
2454
2455         mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
2456         if (!mle) {
2457                 mlog_errno(ret);
2458                 goto leave;
2459         }
2460         ret = 0;
2461
2462         /*
2463          * find a node to migrate the lockres to
2464          */
2465
2466         mlog(0, "picking a migration node\n");
2467         spin_lock(&dlm->spinlock);
2468         /* pick a new node */
2469         if (!test_bit(target, dlm->domain_map) ||
2470             target >= O2NM_MAX_NODES) {
2471                 target = dlm_pick_migration_target(dlm, res);
2472         }
2473         mlog(0, "node %u chosen for migration\n", target);
2474
2475         if (target >= O2NM_MAX_NODES ||
2476             !test_bit(target, dlm->domain_map)) {
2477                 /* target chosen is not alive */
2478                 ret = -EINVAL;
2479         }
2480
2481         if (ret) {
2482                 spin_unlock(&dlm->spinlock);
2483                 goto fail;
2484         }
2485
2486         mlog(0, "continuing with target = %u\n", target);
2487
2488         /*
2489          * clear any existing master requests and
2490          * add the migration mle to the list
2491          */
2492         spin_lock(&dlm->master_lock);
2493         ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2494                                     namelen, target, dlm->node_num);
2495         spin_unlock(&dlm->master_lock);
2496         spin_unlock(&dlm->spinlock);
2497
2498         if (ret == -EEXIST) {
2499                 mlog(0, "another process is already migrating it\n");
2500                 goto fail;
2501         }
2502         mle_added = 1;
2503
2504         /*
2505          * set the MIGRATING flag and flush asts
2506          * if we fail after this we need to re-dirty the lockres
2507          */
2508         if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2509                 mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2510                      "the target went down.\n", res->lockname.len,
2511                      res->lockname.name, target);
2512                 spin_lock(&res->spinlock);
2513                 res->state &= ~DLM_LOCK_RES_MIGRATING;
2514                 wake = 1;
2515                 spin_unlock(&res->spinlock);
2516                 ret = -EINVAL;
2517         }
2518
2519 fail:
2520         if (oldmle) {
2521                 /* master is known, detach if not already detached */
2522                 dlm_mle_detach_hb_events(dlm, oldmle);
2523                 dlm_put_mle(oldmle);
2524         }
2525
2526         if (ret < 0) {
2527                 if (mle_added) {
2528                         dlm_mle_detach_hb_events(dlm, mle);
2529                         dlm_put_mle(mle);
2530                 } else if (mle) {
2531                         kmem_cache_free(dlm_mle_cache, mle);
2532                 }
2533                 goto leave;
2534         }
2535
2536         /*
2537          * at this point, we have a migration target, an mle
2538          * in the master list, and the MIGRATING flag set on
2539          * the lockres
2540          */
2541
2542         /* now that remote nodes are spinning on the MIGRATING flag,
2543          * ensure that all assert_master work is flushed. */
2544         flush_workqueue(dlm->dlm_worker);
2545
2546         /* get an extra reference on the mle.
2547          * otherwise the assert_master from the new
2548          * master will destroy this.
2549          * also, make sure that all callers of dlm_get_mle
2550          * take both dlm->spinlock and dlm->master_lock */
2551         spin_lock(&dlm->spinlock);
2552         spin_lock(&dlm->master_lock);
2553         dlm_get_mle_inuse(mle);
2554         spin_unlock(&dlm->master_lock);
2555         spin_unlock(&dlm->spinlock);
2556
2557         /* notify new node and send all lock state */
2558         /* call send_one_lockres with migration flag.
2559          * this serves as notice to the target node that a
2560          * migration is starting. */
2561         ret = dlm_send_one_lockres(dlm, res, mres, target,
2562                                    DLM_MRES_MIGRATION);
2563
2564         if (ret < 0) {
2565                 mlog(0, "migration to node %u failed with %d\n",
2566                      target, ret);
2567                 /* migration failed, detach and clean up mle */
2568                 dlm_mle_detach_hb_events(dlm, mle);
2569                 dlm_put_mle(mle);
2570                 dlm_put_mle_inuse(mle);
2571                 spin_lock(&res->spinlock);
2572                 res->state &= ~DLM_LOCK_RES_MIGRATING;
2573                 wake = 1;
2574                 spin_unlock(&res->spinlock);
2575                 goto leave;
2576         }
2577
2578         /* at this point, the target sends a message to all nodes,
2579          * (using dlm_do_migrate_request).  this node is skipped since
2580          * we had to put an mle in the list to begin the process.  this
2581          * node now waits for target to do an assert master.  this node
2582          * will be the last one notified, ensuring that the migration
2583          * is complete everywhere.  if the target dies while this is
2584          * going on, some nodes could potentially see the target as the
2585          * master, so it is important that my recovery finds the migration
2586          * mle and sets the master to UNKNOWN. */
2587
2588
2589         /* wait for new node to assert master */
2590         while (1) {
2591                 ret = wait_event_interruptible_timeout(mle->wq,
2592                                         (atomic_read(&mle->woken) == 1),
2593                                         msecs_to_jiffies(5000));
2594
2595                 if (ret >= 0) {
2596                         if (atomic_read(&mle->woken) == 1 ||
2597                             res->owner == target)
2598                                 break;
2599
2600                         mlog(0, "%s:%.*s: timed out during migration\n",
2601                              dlm->name, res->lockname.len, res->lockname.name);
2602                         /* avoid hang during shutdown when migrating lockres
2603                          * to a node which also goes down */
2604                         if (dlm_is_node_dead(dlm, target)) {
2605                                 mlog(0, "%s:%.*s: expected migration "
2606                                      "target %u is no longer up, restarting\n",
2607                                      dlm->name, res->lockname.len,
2608                                      res->lockname.name, target);
2609                                 ret = -EINVAL;
2610                                 /* migration failed, detach and clean up mle */
2611                                 dlm_mle_detach_hb_events(dlm, mle);
2612                                 dlm_put_mle(mle);
2613                                 dlm_put_mle_inuse(mle);
2614                                 spin_lock(&res->spinlock);
2615                                 res->state &= ~DLM_LOCK_RES_MIGRATING;
2616                                 wake = 1;
2617                                 spin_unlock(&res->spinlock);
2618                                 goto leave;
2619                         }
2620                 } else
2621                         mlog(0, "%s:%.*s: caught signal during migration\n",
2622                              dlm->name, res->lockname.len, res->lockname.name);
2623         }
2624
2625         /* all done, set the owner, clear the flag */
2626         spin_lock(&res->spinlock);
2627         dlm_set_lockres_owner(dlm, res, target);
2628         res->state &= ~DLM_LOCK_RES_MIGRATING;
2629         dlm_remove_nonlocal_locks(dlm, res);
2630         spin_unlock(&res->spinlock);
2631         wake_up(&res->wq);
2632
2633         /* master is known, detach if not already detached */
2634         dlm_mle_detach_hb_events(dlm, mle);
2635         dlm_put_mle_inuse(mle);
2636         ret = 0;
2637
2638         dlm_lockres_calc_usage(dlm, res);
2639
2640 leave:
2641         /* re-dirty the lockres if we failed */
2642         if (ret < 0)
2643                 dlm_kick_thread(dlm, res);
2644
2645         /* wake up waiters if the MIGRATING flag got set
2646          * but migration failed */
2647         if (wake)
2648                 wake_up(&res->wq);
2649
2650         /* TODO: cleanup */
2651         if (mres)
2652                 free_page((unsigned long)mres);
2653
2654         dlm_put(dlm);
2655
2656         mlog(0, "returning %d\n", ret);
2657         return ret;
2658 }
2659
2660 #define DLM_MIGRATION_RETRY_MS  100
2661
2662 /* Should be called only after beginning the domain leave process.
2663  * There should not be any remaining locks on nonlocal lock resources,
2664  * and there should be no local locks left on locally mastered resources.
2665  *
2666  * Called with the dlm spinlock held, may drop it to do migration, but
2667  * will re-acquire before exit.
2668  *
2669  * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */
2670 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2671 {
2672         int ret;
2673         int lock_dropped = 0;
2674         int numlocks;
2675
2676         spin_lock(&res->spinlock);
2677         if (res->owner != dlm->node_num) {
2678                 if (!__dlm_lockres_unused(res)) {
2679                         mlog(ML_ERROR, "%s:%.*s: this node is not master, "
2680                              "trying to free this but locks remain\n",
2681                              dlm->name, res->lockname.len, res->lockname.name);
2682                 }
2683                 spin_unlock(&res->spinlock);
2684                 goto leave;
2685         }
2686
2687         /* No need to migrate a lockres having no locks */
2688         ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
2689         if (ret >= 0 && numlocks == 0) {
2690                 spin_unlock(&res->spinlock);
2691                 goto leave;
2692         }
2693         spin_unlock(&res->spinlock);
2694
2695         /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2696         spin_unlock(&dlm->spinlock);
2697         lock_dropped = 1;
2698         while (1) {
2699                 ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
2700                 if (ret >= 0)
2701                         break;
2702                 if (ret == -ENOTEMPTY) {
2703                         mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
2704                                 res->lockname.len, res->lockname.name);
2705                         BUG();
2706                 }
2707
2708                 mlog(0, "lockres %.*s: migrate failed, "
2709                      "retrying\n", res->lockname.len,
2710                      res->lockname.name);
2711                 msleep(DLM_MIGRATION_RETRY_MS);
2712         }
2713         spin_lock(&dlm->spinlock);
2714 leave:
2715         return lock_dropped;
2716 }
2717
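     /* Returns nonzero once @lock has no queued basts and none in flight,
      * checked under dlm->ast_lock and the lock's spinlock.  An illustrative
      * (assumed) use is as a wait_event() condition while bast traffic for
      * the lock drains. */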
2718 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2719 {
2720         int ret;
2721         spin_lock(&dlm->ast_lock);
2722         spin_lock(&lock->spinlock);
2723         ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2724         spin_unlock(&lock->spinlock);
2725         spin_unlock(&dlm->ast_lock);
2726         return ret;
2727 }
2728
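     /* Wait predicate for the migration handshake: true once the MIGRATING
      * flag is visible on the lockres, or once the migration target has
      * dropped out of the domain map (the caller must then recheck the
      * domain map itself). */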
2729 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2730                                      struct dlm_lock_resource *res,
2731                                      u8 mig_target)
2732 {
2733         int can_proceed;
2734         spin_lock(&res->spinlock);
2735         can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2736         spin_unlock(&res->spinlock);
2737
2738         /* target has died, so make the caller break out of the
2739          * wait_event, but caller must recheck the domain_map */
2740         spin_lock(&dlm->spinlock);
2741         if (!test_bit(mig_target, dlm->domain_map))
2742                 can_proceed = 1;
2743         spin_unlock(&dlm->spinlock);
2744         return can_proceed;
2745 }
2746
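     /* Snapshot of the DIRTY flag under the lockres spinlock; used below as
      * the wait condition while asts are flushed for migration. */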
2747 static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2748                                 struct dlm_lock_resource *res)
2749 {
2750         int ret;
2751         spin_lock(&res->spinlock);
2752         ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2753         spin_unlock(&res->spinlock);
2754         return ret;
2755 }
2756
2757
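     /* Quiesce a lockres and set DLM_LOCK_RES_MIGRATING on it: mark
      * migration_pending, flush outstanding asts via the reserve/release
      * trick, block new attempts to dirty the lockres, then wait until the
      * MIGRATING flag appears.  Returns -EHOSTDOWN if the target leaves the
      * domain while we wait. */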
2758 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2759                                        struct dlm_lock_resource *res,
2760                                        u8 target)
2761 {
2762         int ret = 0;
2763
2764         mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2765                res->lockname.len, res->lockname.name, dlm->node_num,
2766                target);
2767         /* need to set MIGRATING flag on lockres.  this is done by
2768          * ensuring that all asts have been flushed for this lockres. */
2769         spin_lock(&res->spinlock);
2770         BUG_ON(res->migration_pending);
2771         res->migration_pending = 1;
2772         /* strategy is to reserve an extra ast then release
2773          * it below, letting the release do all of the work */
2774         __dlm_lockres_reserve_ast(res);
2775         spin_unlock(&res->spinlock);
2776
2777         /* now flush all the pending asts */
2778         dlm_kick_thread(dlm, res);
2779         /* before waiting on DIRTY, block processes which may
2780          * try to dirty the lockres before MIGRATING is set */
2781         spin_lock(&res->spinlock);
2782         BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2783         res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2784         spin_unlock(&res->spinlock);
2785         /* now wait on any pending asts and the DIRTY state */
2786         wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2787         dlm_lockres_release_ast(dlm, res);
2788
2789         mlog(0, "about to wait on migration_wq, dirty=%s\n",
2790                res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2791         /* if the extra ref we just put was the final one, this
2792          * will pass thru immediately.  otherwise, we need to wait
2793          * for the last ast to finish. */
2794 again:
2795         ret = wait_event_interruptible_timeout(dlm->migration_wq,
2796                    dlm_migration_can_proceed(dlm, res, target),
2797                    msecs_to_jiffies(1000));
2798         if (ret < 0) {
2799                 mlog(0, "woken again: migrating? %s, dead? %s\n",
2800                        res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2801                        test_bit(target, dlm->domain_map) ? "no":"yes");
2802         } else {
2803                 mlog(0, "all is well: migrating? %s, dead? %s\n",
2804                        res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2805                        test_bit(target, dlm->domain_map) ? "no":"yes");
2806         }
2807         if (!dlm_migration_can_proceed(dlm, res, target)) {
2808                 mlog(0, "trying again...\n");
2809                 goto again;
2810         }
2811         /* now that we are sure the MIGRATING state is there, drop
2812          * the unneeded state which blocked threads trying to DIRTY */
2813         spin_lock(&res->spinlock);
2814         BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2815         BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2816         res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2817         spin_unlock(&res->spinlock);
2818
2819         /* did the target go down or die? */
2820         spin_lock(&dlm->spinlock);
2821         if (!test_bit(target, dlm->domain_map)) {
2822                 mlog(ML_ERROR, "aha. migration target %u just went down\n",
2823                      target);
2824                 ret = -EHOSTDOWN;
2825         }
2826         spin_unlock(&dlm->spinlock);
2827
2828         /*
2829          * at this point:
2830          *
2831          *   o the DLM_LOCK_RES_MIGRATING flag is set
2832          *   o there are no pending asts on this lockres
2833          *   o all processes trying to reserve an ast on this
2834          *     lockres must wait for the MIGRATING flag to clear
2835          */
2836         return ret;
2837 }
2838
2839 /* last step in the migration process.
2840  * original master calls this to free all of the dlm_lock
2841  * structures that used to be for other nodes. */
2842 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2843                                       struct dlm_lock_resource *res)
2844 {
2845         struct list_head *queue = &res->granted;
2846         int i, bit;
2847         struct dlm_lock *lock, *next;
2848
2849         assert_spin_locked(&res->spinlock);
2850
2851         BUG_ON(res->owner == dlm->node_num);
2852
2853         for (i=0; i<3; i++) {
2854                 list_for_each_entry_safe(lock, next, queue, list) {
2855                         if (lock->ml.node != dlm->node_num) {
2856                                 mlog(0, "putting lock for node %u\n",
2857                                      lock->ml.node);
2858                                 /* be extra careful */
2859                                 BUG_ON(!list_empty(&lock->ast_list));
2860                                 BUG_ON(!list_empty(&lock->bast_list));
2861                                 BUG_ON(lock->ast_pending);
2862                                 BUG_ON(lock->bast_pending);
2863                                 dlm_lockres_clear_refmap_bit(lock->ml.node, res);
2864                                 list_del_init(&lock->list);
2865                                 dlm_lock_put(lock);
2866                                 /* In a normal unlock, we would have added a
2867                                  * DLM_UNLOCK_FREE_LOCK action. Force it. */
2868                                 dlm_lock_put(lock);
2869                         }
2870                 }
2871                 queue++;
2872         }
2873         bit = 0;
2874         while (1) {
2875                 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2876                 if (bit >= O2NM_MAX_NODES)
2877                         break;
2878                 /* do not clear the local node reference, if there is a
2879                  * process holding this, let it drop the ref itself */
2880                 if (bit != dlm->node_num) {
2881                         mlog(0, "%s:%.*s: node %u had a ref to this "
2882                              "migrating lockres, clearing\n", dlm->name,
2883                              res->lockname.len, res->lockname.name, bit);
2884                         dlm_lockres_clear_refmap_bit(bit, res);
2885                 }
2886                 bit++;
2887         }
2888 }
2889
2890 /* for now this is not too intelligent.  we will
2891  * need stats to make this do the right thing.
2892  * this just finds the first lock on one of the
2893  * queues and uses that node as the target. */
2894 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
2895                                     struct dlm_lock_resource *res)
2896 {
2897         int i;
2898         struct list_head *queue = &res->granted;
2899         struct dlm_lock *lock;
2900         int nodenum;
2901
2902         assert_spin_locked(&dlm->spinlock);
2903
2904         spin_lock(&res->spinlock);
2905         for (i=0; i<3; i++) {
2906                 list_for_each_entry(lock, queue, list) {
2907                         /* up to the caller to make sure this node
2908                          * is alive */
2909                         if (lock->ml.node != dlm->node_num) {
2910                                 spin_unlock(&res->spinlock);
2911                                 return lock->ml.node;
2912                         }
2913                 }
2914                 queue++;
2915         }
2916         spin_unlock(&res->spinlock);
2917         mlog(0, "have not found a suitable target yet! checking domain map\n");
2918
2919         /* ok now we're getting desperate.  pick anyone alive. */
2920         nodenum = -1;
2921         while (1) {
2922                 nodenum = find_next_bit(dlm->domain_map,
2923                                         O2NM_MAX_NODES, nodenum+1);
2924                 mlog(0, "found %d in domain map\n", nodenum);
2925                 if (nodenum >= O2NM_MAX_NODES)
2926                         break;
2927                 if (nodenum != dlm->node_num) {
2928                         mlog(0, "picking %d\n", nodenum);
2929                         return nodenum;
2930                 }
2931         }
2932
2933         mlog(0, "giving up.  no master to migrate to\n");
2934         return DLM_LOCK_RES_OWNER_UNKNOWN;
2935 }
2936
2937
2938
2939 /* this is called by the new master once all lockres
2940  * data has been received */
2941 static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2942                                   struct dlm_lock_resource *res,
2943                                   u8 master, u8 new_master,
2944                                   struct dlm_node_iter *iter)
2945 {
2946         struct dlm_migrate_request migrate;
2947         int ret, skip, status = 0;
2948         int nodenum;
2949
2950         memset(&migrate, 0, sizeof(migrate));
2951         migrate.namelen = res->lockname.len;
2952         memcpy(migrate.name, res->lockname.name, migrate.namelen);
2953         migrate.new_master = new_master;
2954         migrate.master = master;
2955
2956         ret = 0;
2957
2958         /* send message to all nodes, except the master and myself */
2959         while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
2960                 if (nodenum == master ||
2961                     nodenum == new_master)
2962                         continue;
2963
2964                 /* We could race exit domain. If exited, skip. */
2965                 spin_lock(&dlm->spinlock);
2966                 skip = (!test_bit(nodenum, dlm->domain_map));
2967                 spin_unlock(&dlm->spinlock);
2968                 if (skip) {
2969                         clear_bit(nodenum, iter->node_map);
2970                         continue;
2971                 }
2972
2973                 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
2974                                          &migrate, sizeof(migrate), nodenum,
2975                                          &status);
2976                 if (ret < 0) {
2977                         mlog(ML_ERROR, "Error %d when sending message %u (key "
2978                              "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG,
2979                              dlm->key, nodenum);
2980                         if (!dlm_is_host_down(ret)) {
2981                                 mlog(ML_ERROR, "unhandled error=%d!\n", ret);
2982                                 BUG();
2983                         }
2984                         clear_bit(nodenum, iter->node_map);
2985                         ret = 0;
2986                 } else if (status < 0) {
2987                         mlog(0, "migrate request (node %u) returned %d!\n",
2988                              nodenum, status);
2989                         ret = status;
2990                 } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
2991                         /* during the migration request we short-circuited
2992                          * the mastery of the lockres.  make sure we have
2993                          * a mastery ref for nodenum */
2994                         mlog(0, "%s:%.*s: need ref for node %u\n",
2995                              dlm->name, res->lockname.len, res->lockname.name,
2996                              nodenum);
2997                         spin_lock(&res->spinlock);
2998                         dlm_lockres_set_refmap_bit(nodenum, res);
2999                         spin_unlock(&res->spinlock);
3000                 }
3001         }
3002
3003         if (ret < 0)
3004                 mlog_errno(ret);
3005
3006         mlog(0, "returning ret=%d\n", ret);
3007         return ret;
3008 }
3009
3010
3011 /* if there is an existing mle for this lockres, we now know who the master is.
3012  * (the one who sent us *this* message) we can clear it up right away.
3013  * since the process that put the mle on the list still has a reference to it,
3014  * we can unhash it now, set the master and wake the process.  as a result,
3015  * we will have no mle in the list to start with.  now we can add an mle for
3016  * the migration and this should be the only one found for those scanning the
3017  * list.  */
3018 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3019                                 void **ret_data)
3020 {
3021         struct dlm_ctxt *dlm = data;
3022         struct dlm_lock_resource *res = NULL;
3023         struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
3024         struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
3025         const char *name;
3026         unsigned int namelen, hash;
3027         int ret = 0;
3028
3029         if (!dlm_grab(dlm))
3030                 return -EINVAL;
3031
3032         name = migrate->name;
3033         namelen = migrate->namelen;
3034         hash = dlm_lockid_hash(name, namelen);
3035
3036         /* preallocate.. if this fails, abort */
3037         mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
3038
3039         if (!mle) {
3040                 ret = -ENOMEM;
3041                 goto leave;
3042         }
3043
3044         /* check for pre-existing lock */
3045         spin_lock(&dlm->spinlock);
3046         res = __dlm_lookup_lockres(dlm, name, namelen, hash);
3047         spin_lock(&dlm->master_lock);
3048
3049         if (res) {
3050                 spin_lock(&res->spinlock);
3051                 if (res->state & DLM_LOCK_RES_RECOVERING) {
3052                         /* if all is working ok, this can only mean that we got
3053                          * a migrate request from a node that we now see as
3054                          * dead.  what can we do here?  drop it to the floor? */
3055                         spin_unlock(&res->spinlock);
3056                         mlog(ML_ERROR, "Got a migrate request, but the "
3057                              "lockres is marked as recovering!\n");
3058                         kmem_cache_free(dlm_mle_cache, mle);
3059                         ret = -EINVAL; /* need a better solution */
3060                         goto unlock;
3061                 }
3062                 res->state |= DLM_LOCK_RES_MIGRATING;
3063                 spin_unlock(&res->spinlock);
3064         }
3065
3066         /* ignore status.  only nonzero status would BUG. */
3067         ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3068                                     name, namelen,
3069                                     migrate->new_master,
3070                                     migrate->master);
3071
3072 unlock:
3073         spin_unlock(&dlm->master_lock);
3074         spin_unlock(&dlm->spinlock);
3075
3076         if (oldmle) {
3077                 /* master is known, detach if not already detached */
3078                 dlm_mle_detach_hb_events(dlm, oldmle);
3079                 dlm_put_mle(oldmle);
3080         }
3081
3082         if (res)
3083                 dlm_lockres_put(res);
3084 leave:
3085         dlm_put(dlm);
3086         return ret;
3087 }
3088
3089 /* must be holding dlm->spinlock and dlm->master_lock
3090  * when adding a migration mle, we can clear any other mles
3091  * in the master list because we know with certainty that
3092  * the master is "master".  so we remove any old mle from
3093  * the list after setting its master field, and then add
3094  * the new migration mle.  this way we can hold with the rule
3095  * of having only one mle for a given lock name at all times. */
3096 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3097                                  struct dlm_lock_resource *res,
3098                                  struct dlm_master_list_entry *mle,
3099                                  struct dlm_master_list_entry **oldmle,
3100                                  const char *name, unsigned int namelen,
3101                                  u8 new_master, u8 master)
3102 {
3103         int found;
3104         int ret = 0;
3105
3106         *oldmle = NULL;
3107
3108         mlog_entry_void();
3109
3110         assert_spin_locked(&dlm->spinlock);
3111         assert_spin_locked(&dlm->master_lock);
3112
3113         /* caller is responsible for any ref taken here on oldmle */
3114         found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
3115         if (found) {
3116                 struct dlm_master_list_entry *tmp = *oldmle;
3117                 spin_lock(&tmp->spinlock);
3118                 if (tmp->type == DLM_MLE_MIGRATION) {
3119                         if (master == dlm->node_num) {
3120                                 /* ah another process raced me to it */
3121                                 mlog(0, "tried to migrate %.*s, but some "
3122                                      "process beat me to it\n",
3123                                      namelen, name);
3124                                 ret = -EEXIST;
3125                         } else {
3126                                 /* bad.  2 NODES are trying to migrate! */
3127                                 mlog(ML_ERROR, "migration error  mle: "
3128                                      "master=%u new_master=%u // request: "
3129                                      "master=%u new_master=%u // "
3130                                      "lockres=%.*s\n",
3131                                      tmp->master, tmp->new_master,
3132                                      master, new_master,
3133                                      namelen, name);
3134                                 BUG();
3135                         }
3136                 } else {
3137                         /* this is essentially what assert_master does */
3138                         tmp->master = master;
3139                         atomic_set(&tmp->woken, 1);
3140                         wake_up(&tmp->wq);
3141                         /* remove it so that only one mle will be found */
3142                         __dlm_unlink_mle(dlm, tmp);
3143                         __dlm_mle_detach_hb_events(dlm, tmp);
3144                         ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3145                         mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3146                             "telling master to get ref for cleared out mle "
3147                             "during migration\n", dlm->name, namelen, name,
3148                             master, new_master);
3149                 }
3150                 spin_unlock(&tmp->spinlock);
3151         }
3152
3153         /* now add a migration mle to the tail of the list */
3154         dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3155         mle->new_master = new_master;
3156         /* the new master will be sending an assert master for this.
3157          * at that point we will get the refmap reference */
3158         mle->master = master;
3159         /* do this for consistency with other mle types */
3160         set_bit(new_master, mle->maybe_map);
3161         __dlm_insert_mle(dlm, mle);
3162
3163         return ret;
3164 }
3165
3166 /*
3167  * Sets the owner of the lockres, associated with the mle, to UNKNOWN
3168  */
3169 static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
3170                                         struct dlm_master_list_entry *mle)
3171 {
3172         struct dlm_lock_resource *res;
3173
3174         /* Find the lockres associated with the mle and set its owner to UNK */
3175         res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
3176                                    mle->mnamehash);
3177         if (res) {
3178                 spin_unlock(&dlm->master_lock);
3179
3180                 /* move lockres onto recovery list */
3181                 spin_lock(&res->spinlock);
3182                 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
3183                 dlm_move_lockres_to_recovery_list(dlm, res);
3184                 spin_unlock(&res->spinlock);
3185                 dlm_lockres_put(res);
3186
3187                 /* about to get rid of mle, detach from heartbeat */
3188                 __dlm_mle_detach_hb_events(dlm, mle);
3189
3190                 /* dump the mle */
3191                 spin_lock(&dlm->master_lock);
3192                 __dlm_put_mle(mle);
3193                 spin_unlock(&dlm->master_lock);
3194         }
3195
3196         return res;
3197 }
3198
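     /* Tear down a MIGRATION mle whose master or new master has died:
      * detach it from heartbeat, unlink it from the master hash and wake
      * anyone sleeping on it.  Caller holds dlm->spinlock and
      * dlm->master_lock. */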
3199 static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
3200                                     struct dlm_master_list_entry *mle)
3201 {
3202         __dlm_mle_detach_hb_events(dlm, mle);
3203
3204         spin_lock(&mle->spinlock);
3205         __dlm_unlink_mle(dlm, mle);
3206         atomic_set(&mle->woken, 1);
3207         spin_unlock(&mle->spinlock);
3208
3209         wake_up(&mle->wq);
3210 }
3211
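     /* Handle a BLOCK mle when a node dies: if the node recorded in
      * maybe_map as the would-be master is the dead node, the expected
      * assert_master will never arrive, so wake any waiter and drop the
      * mle reference; otherwise leave the mle alone. */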
3212 static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
3213                                 struct dlm_master_list_entry *mle, u8 dead_node)
3214 {
3215         int bit;
3216
3217         BUG_ON(mle->type != DLM_MLE_BLOCK);
3218
3219         spin_lock(&mle->spinlock);
3220         bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
3221         if (bit != dead_node) {
3222                 mlog(0, "mle found, but dead node %u would not have been "
3223                      "master\n", dead_node);
3224                 spin_unlock(&mle->spinlock);
3225         } else {
3226                 /* Must drop the refcount by one since the assert_master will
3227                  * never arrive. This may result in the mle being unlinked and
3228                  * freed, but there may still be a process waiting in the
3229                  * dlmlock path which is fine. */
3230                 mlog(0, "node %u was expected master\n", dead_node);
3231                 atomic_set(&mle->woken, 1);
3232                 spin_unlock(&mle->spinlock);
3233                 wake_up(&mle->wq);
3234
3235                 /* Do not need events any longer, so detach from heartbeat */
3236                 __dlm_mle_detach_hb_events(dlm, mle);
3237                 __dlm_put_mle(mle);
3238         }
3239 }
3240
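     /* Walk the entire master hash after a node death and fix up any mle
      * that still references the dead node; the per-type rules are spelled
      * out in the comments below.  Expects dlm->spinlock to be held. */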
3241 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
3242 {
3243         struct dlm_master_list_entry *mle;
3244         struct dlm_lock_resource *res;
3245         struct hlist_head *bucket;
3246         struct hlist_node *list;
3247         unsigned int i;
3248
3249         mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
3250 top:
3251         assert_spin_locked(&dlm->spinlock);
3252
3253         /* clean the master list */
3254         spin_lock(&dlm->master_lock);
3255         for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3256                 bucket = dlm_master_hash(dlm, i);
3257                 hlist_for_each(list, bucket) {
3258                         mle = hlist_entry(list, struct dlm_master_list_entry,
3259                                           master_hash_node);
3260
3261                         BUG_ON(mle->type != DLM_MLE_BLOCK &&
3262                                mle->type != DLM_MLE_MASTER &&
3263                                mle->type != DLM_MLE_MIGRATION);
3264
3265                         /* MASTER mles are initiated locally. The waiting
3266                          * process will notice the node map change shortly.
3267                          * Let that happen as normal. */
3268                         if (mle->type == DLM_MLE_MASTER)
3269                                 continue;
3270
3271                         /* BLOCK mles are initiated by other nodes. Need to
3272                          * clean up if the dead node would have been the
3273                          * master. */
3274                         if (mle->type == DLM_MLE_BLOCK) {
3275                                 dlm_clean_block_mle(dlm, mle, dead_node);
3276                                 continue;
3277                         }
3278
3279                         /* Everything else is a MIGRATION mle */
3280
3281                         /* The rule for MIGRATION mles is that the master
3282                          * becomes UNKNOWN if *either* the original or the new
3283                          * master dies. All UNKNOWN lockres' are sent to
3284                          * whichever node becomes the recovery master. The new
3285                          * master is responsible for determining if there is
3286                          * still a master for this lockres, or if he needs to
3287                          * take over mastery. Either way, this node should
3288                          * expect another message to resolve this. */
3289
3290                         if (mle->master != dead_node &&
3291                             mle->new_master != dead_node)
3292                                 continue;
3293
3294                         /* If we have reached this point, this mle needs to be
3295                          * removed from the list and freed. */
3296                         dlm_clean_migration_mle(dlm, mle);
3297
3298                         mlog(0, "%s: node %u died during migration from "
3299                              "%u to %u!\n", dlm->name, dead_node, mle->master,
3300                              mle->new_master);
3301
3302                         /* If we find a lockres associated with the mle, we've
3303                          * hit this rare case that messes up our lock ordering.
3304                          * If so, we need to drop the master lock so that we can
3305                          * take the lockres lock, meaning that we will have to
3306                          * restart from the head of list. */
3307                         res = dlm_reset_mleres_owner(dlm, mle);
3308                         if (res)
3309                                 /* restart */
3310                                 goto top;
3311
3312                         /* This may be the last reference */
3313                         __dlm_put_mle(mle);
3314                 }
3315         }
3316         spin_unlock(&dlm->master_lock);
3317 }
3318
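     /* Run on the new master once all migrated lock state has arrived:
      * broadcast the migrate request to the remaining live nodes, assert
      * mastery to them, assert back to the old master last, then take
      * ownership and clear DLM_LOCK_RES_MIGRATING. */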
3319 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3320                          u8 old_master)
3321 {
3322         struct dlm_node_iter iter;
3323         int ret = 0;
3324
3325         spin_lock(&dlm->spinlock);
3326         dlm_node_iter_init(dlm->domain_map, &iter);
3327         clear_bit(old_master, iter.node_map);
3328         clear_bit(dlm->node_num, iter.node_map);
3329         spin_unlock(&dlm->spinlock);
3330
3331         /* ownership of the lockres is changing.  account for the
3332          * mastery reference here since old_master will briefly have
3333          * a reference after the migration completes */
3334         spin_lock(&res->spinlock);
3335         dlm_lockres_set_refmap_bit(old_master, res);
3336         spin_unlock(&res->spinlock);
3337
3338         mlog(0, "now time to do a migrate request to other nodes\n");
3339         ret = dlm_do_migrate_request(dlm, res, old_master,
3340                                      dlm->node_num, &iter);
3341         if (ret < 0) {
3342                 mlog_errno(ret);
3343                 goto leave;
3344         }
3345
3346         mlog(0, "doing assert master of %.*s to all except the original node\n",
3347              res->lockname.len, res->lockname.name);
3348         /* this call now finishes out the nodemap
3349          * even if one or more nodes die */
3350         ret = dlm_do_assert_master(dlm, res, iter.node_map,
3351                                    DLM_ASSERT_MASTER_FINISH_MIGRATION);
3352         if (ret < 0) {
3353                 /* no longer need to retry.  all living nodes contacted. */
3354                 mlog_errno(ret);
3355                 ret = 0;
3356         }
3357
3358         memset(iter.node_map, 0, sizeof(iter.node_map));
3359         set_bit(old_master, iter.node_map);
3360         mlog(0, "doing assert master of %.*s back to %u\n",
3361              res->lockname.len, res->lockname.name, old_master);
3362         ret = dlm_do_assert_master(dlm, res, iter.node_map,
3363                                    DLM_ASSERT_MASTER_FINISH_MIGRATION);
3364         if (ret < 0) {
3365                 mlog(0, "assert master to original master failed "
3366                      "with %d.\n", ret);
3367                 /* the only nonzero status here would be because of
3368                  * a dead original node.  we're done. */
3369                 ret = 0;
3370         }
3371
3372         /* all done, set the owner, clear the flag */
3373         spin_lock(&res->spinlock);
3374         dlm_set_lockres_owner(dlm, res, dlm->node_num);
3375         res->state &= ~DLM_LOCK_RES_MIGRATING;
3376         spin_unlock(&res->spinlock);
3377         /* re-dirty it on the new master */
3378         dlm_kick_thread(dlm, res);
3379         wake_up(&res->wq);
3380 leave:
3381         return ret;
3382 }
3383
3384 /*
3385  * LOCKRES AST REFCOUNT
3386  * this is integral to migration
3387  */
3388
3389 /* for future intent to call an ast, reserve one ahead of time.
3390  * this should be called only after waiting on the lockres
3391  * with dlm_wait_on_lockres, and while still holding the
3392  * spinlock after the call. */
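     /* Illustrative pairing only (call sites vary): a reservation taken
      * here is later dropped with dlm_lockres_release_ast(), e.g.
      *
      *      spin_lock(&res->spinlock);
      *      __dlm_lockres_reserve_ast(res);
      *      spin_unlock(&res->spinlock);
      *      ...queue and deliver the ast or bast...
      *      dlm_lockres_release_ast(dlm, res);
      */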
3393 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3394 {
3395         assert_spin_locked(&res->spinlock);
3396         if (res->state & DLM_LOCK_RES_MIGRATING) {
3397                 __dlm_print_one_lock_resource(res);
3398         }
3399         BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3400
3401         atomic_inc(&res->asts_reserved);
3402 }
3403
3404 /*
3405  * used to drop the reserved ast, either because it went unused,
3406  * or because the ast/bast was actually called.
3407  *
3408  * also, if there is a pending migration on this lockres,
3409  * and this was the last pending ast on the lockres,
3410  * atomically set the MIGRATING flag before we drop the lock.
3411  * this is how we ensure that migration can proceed with no
3412  * asts in progress.  note that it is ok if the state of the
3413  * queues is such that a lock should be granted in the future
3414  * or that a bast should be fired, because the new master will
3415  * shuffle the lists on this lockres as soon as it is migrated.
3416  */
3417 void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3418                              struct dlm_lock_resource *res)
3419 {
3420         if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3421                 return;
3422
3423         if (!res->migration_pending) {
3424                 spin_unlock(&res->spinlock);
3425                 return;
3426         }
3427
3428         BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3429         res->migration_pending = 0;
3430         res->state |= DLM_LOCK_RES_MIGRATING;
3431         spin_unlock(&res->spinlock);
3432         wake_up(&res->wq);
3433         wake_up(&dlm->migration_wq);
3434 }