fs/ocfs2/dlm/dlmmaster.c
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmmaster.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"
#include "dlmdebug.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "cluster/masklog.h"

static void dlm_mle_node_down(struct dlm_ctxt *dlm,
                              struct dlm_master_list_entry *mle,
                              struct o2nm_node *node,
                              int idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
                            struct dlm_master_list_entry *mle,
                            struct o2nm_node *node,
                            int idx);

static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
                                struct dlm_lock_resource *res,
                                void *nodemap, u32 flags);
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);

static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
                                struct dlm_master_list_entry *mle,
                                const char *name,
                                unsigned int namelen)
{
        if (dlm != mle->dlm)
                return 0;

        if (namelen != mle->mnamelen ||
            memcmp(name, mle->mname, namelen) != 0)
                return 0;

        return 1;
}

static struct kmem_cache *dlm_lockres_cache = NULL;
static struct kmem_cache *dlm_lockname_cache = NULL;
static struct kmem_cache *dlm_mle_cache = NULL;

static void dlm_mle_release(struct kref *kref);
static void dlm_init_mle(struct dlm_master_list_entry *mle,
                        enum dlm_mle_type type,
                        struct dlm_ctxt *dlm,
                        struct dlm_lock_resource *res,
                        const char *name,
                        unsigned int namelen);
static void dlm_put_mle(struct dlm_master_list_entry *mle);
static void __dlm_put_mle(struct dlm_master_list_entry *mle);
static int dlm_find_mle(struct dlm_ctxt *dlm,
                        struct dlm_master_list_entry **mle,
                        char *name, unsigned int namelen);

static int dlm_do_master_request(struct dlm_lock_resource *res,
                                 struct dlm_master_list_entry *mle, int to);


static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_master_list_entry *mle,
                                     int *blocked);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
                                    struct dlm_lock_resource *res,
                                    struct dlm_master_list_entry *mle,
                                    int blocked);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
                                 struct dlm_lock_resource *res,
                                 struct dlm_master_list_entry *mle,
                                 struct dlm_master_list_entry **oldmle,
                                 const char *name, unsigned int namelen,
                                 u8 new_master, u8 master);

static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
                                    struct dlm_lock_resource *res);
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res);
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
                                       struct dlm_lock_resource *res,
                                       u8 target);
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
                                       struct dlm_lock_resource *res);


int dlm_is_host_down(int errno)
{
        switch (errno) {
                case -EBADF:
                case -ECONNREFUSED:
                case -ENOTCONN:
                case -ECONNRESET:
                case -EPIPE:
                case -EHOSTDOWN:
                case -EHOSTUNREACH:
                case -ETIMEDOUT:
                case -ECONNABORTED:
                case -ENETDOWN:
                case -ENETUNREACH:
                case -ENETRESET:
                case -ESHUTDOWN:
                case -ENOPROTOOPT:
                case -EINVAL:   /* if returned from our tcp code,
                                   this means there is no socket */
                        return 1;
        }
        return 0;
}
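
/*
 * Illustrative sketch (editor's addition, not part of the original
 * driver): how a send path typically consumes dlm_is_host_down().
 * A negative status from o2net is either a fatal usage bug or a sign
 * that the peer died; only the latter is survivable.  The helper name
 * is hypothetical.
 */
#if 0
static int example_handle_send_status(int status, u8 to)
{
        if (status >= 0)
                return 0;               /* message was delivered */
        if (!dlm_is_host_down(status))
                BUG();                  /* programming error, not a dead link */
        /* network error: assume node 'to' is dead, let recovery catch up */
        mlog(ML_ERROR, "link to %u went down!\n", to);
        return status;
}
#endif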


/*
 * MASTER LIST FUNCTIONS
 */


/*
 * regarding master list entries and heartbeat callbacks:
 *
 * in order to avoid sleeping and allocation that occurs in
 * heartbeat, master list entries are simply attached to the
 * dlm's established heartbeat callbacks.  the mle is attached
 * when it is created, and since the dlm->spinlock is held at
 * that time, any heartbeat event will be properly discovered
 * by the mle.  the mle needs to be detached from the
 * dlm->mle_hb_events list as soon as heartbeat events are no
 * longer useful to the mle, and before the mle is freed.
 *
 * as a general rule, heartbeat events are no longer needed by
 * the mle once an "answer" regarding the lock master has been
 * received.
 */
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
                                              struct dlm_master_list_entry *mle)
{
        assert_spin_locked(&dlm->spinlock);

        list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}


static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
                                              struct dlm_master_list_entry *mle)
{
        if (!list_empty(&mle->hb_events))
                list_del_init(&mle->hb_events);
}


static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
                                            struct dlm_master_list_entry *mle)
{
        spin_lock(&dlm->spinlock);
        __dlm_mle_detach_hb_events(dlm, mle);
        spin_unlock(&dlm->spinlock);
}
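
/*
 * Sketch of the attach/detach lifecycle described above (editor's
 * addition; hypothetical caller shown only to make the rule concrete):
 * attach under dlm->spinlock at mle creation, detach as soon as the
 * master answer is known and before the mle is freed.
 */
#if 0
static void example_mle_lifecycle(struct dlm_ctxt *dlm,
                                  struct dlm_master_list_entry *mle)
{
        spin_lock(&dlm->spinlock);
        __dlm_mle_attach_hb_events(dlm, mle);   /* done in dlm_init_mle */
        spin_unlock(&dlm->spinlock);

        /* ... mastery runs; node up/down events update mle->node_map ... */

        /* once the master is known, heartbeat events are no longer useful */
        dlm_mle_detach_hb_events(dlm, mle);
        dlm_put_mle(mle);
}
#endif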

static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);
        mle->inuse++;
        kref_get(&mle->mle_refs);
}

static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        spin_lock(&dlm->spinlock);
        spin_lock(&dlm->master_lock);
        mle->inuse--;
        __dlm_put_mle(mle);
        spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);
}

/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);
        if (!atomic_read(&mle->mle_refs.refcount)) {
                /* this may or may not crash, but who cares.
                 * it's a BUG. */
                mlog(ML_ERROR, "bad mle: %p\n", mle);
                dlm_print_one_mle(mle);
                BUG();
        } else
                kref_put(&mle->mle_refs, dlm_mle_release);
}


/* must not have any spinlocks coming in */
static void dlm_put_mle(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        spin_lock(&dlm->spinlock);
        spin_lock(&dlm->master_lock);
        __dlm_put_mle(mle);
        spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);
}

static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
{
        kref_get(&mle->mle_refs);
}

static void dlm_init_mle(struct dlm_master_list_entry *mle,
                        enum dlm_mle_type type,
                        struct dlm_ctxt *dlm,
                        struct dlm_lock_resource *res,
                        const char *name,
                        unsigned int namelen)
{
        assert_spin_locked(&dlm->spinlock);

        mle->dlm = dlm;
        mle->type = type;
        INIT_HLIST_NODE(&mle->master_hash_node);
        INIT_LIST_HEAD(&mle->hb_events);
        memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
        spin_lock_init(&mle->spinlock);
        init_waitqueue_head(&mle->wq);
        atomic_set(&mle->woken, 0);
        kref_init(&mle->mle_refs);
        memset(mle->response_map, 0, sizeof(mle->response_map));
        mle->master = O2NM_MAX_NODES;
        mle->new_master = O2NM_MAX_NODES;
        mle->inuse = 0;

        BUG_ON(mle->type != DLM_MLE_BLOCK &&
               mle->type != DLM_MLE_MASTER &&
               mle->type != DLM_MLE_MIGRATION);

        if (mle->type == DLM_MLE_MASTER) {
                BUG_ON(!res);
                mle->mleres = res;
                memcpy(mle->mname, res->lockname.name, res->lockname.len);
                mle->mnamelen = res->lockname.len;
                mle->mnamehash = res->lockname.hash;
        } else {
                BUG_ON(!name);
                mle->mleres = NULL;
                memcpy(mle->mname, name, namelen);
                mle->mnamelen = namelen;
                mle->mnamehash = dlm_lockid_hash(name, namelen);
        }

        atomic_inc(&dlm->mle_tot_count[mle->type]);
        atomic_inc(&dlm->mle_cur_count[mle->type]);

        /* copy off the node_map and register hb callbacks on our copy */
        memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
        memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
        clear_bit(dlm->node_num, mle->vote_map);
        clear_bit(dlm->node_num, mle->node_map);

        /* attach the mle to the domain node up/down events */
        __dlm_mle_attach_hb_events(dlm, mle);
}
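
/*
 * Editor's note on the bitmaps initialized above (sketch only):
 * node_map tracks live nodes, vote_map the nodes we still expect an
 * answer from (ourselves excluded), response_map the nodes that have
 * answered, and maybe_map the candidate masters.  Voting is complete
 * once every node we asked has answered -- the same test
 * dlm_wait_for_lock_mastery() performs under mle->spinlock:
 */
#if 0
static int example_voting_done(struct dlm_master_list_entry *mle)
{
        return memcmp(mle->vote_map, mle->response_map,
                      sizeof(mle->vote_map)) == 0;
}
#endif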

void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);

        if (!hlist_unhashed(&mle->master_hash_node))
                hlist_del_init(&mle->master_hash_node);
}

void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
        struct hlist_head *bucket;

        assert_spin_locked(&dlm->master_lock);

        bucket = dlm_master_hash(dlm, mle->mnamehash);
        hlist_add_head(&mle->master_hash_node, bucket);
}

/* returns 1 if found, 0 if not */
static int dlm_find_mle(struct dlm_ctxt *dlm,
                        struct dlm_master_list_entry **mle,
                        char *name, unsigned int namelen)
{
        struct dlm_master_list_entry *tmpmle;
        struct hlist_head *bucket;
        struct hlist_node *list;
        unsigned int hash;

        assert_spin_locked(&dlm->master_lock);

        hash = dlm_lockid_hash(name, namelen);
        bucket = dlm_master_hash(dlm, hash);
        hlist_for_each(list, bucket) {
                tmpmle = hlist_entry(list, struct dlm_master_list_entry,
                                     master_hash_node);
                if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
                        continue;
                dlm_get_mle(tmpmle);
                *mle = tmpmle;
                return 1;
        }
        return 0;
}

void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
{
        struct dlm_master_list_entry *mle;

        assert_spin_locked(&dlm->spinlock);

        list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
                if (node_up)
                        dlm_mle_node_up(dlm, mle, NULL, idx);
                else
                        dlm_mle_node_down(dlm, mle, NULL, idx);
        }
}

static void dlm_mle_node_down(struct dlm_ctxt *dlm,
                              struct dlm_master_list_entry *mle,
                              struct o2nm_node *node, int idx)
{
        spin_lock(&mle->spinlock);

        if (!test_bit(idx, mle->node_map))
                mlog(0, "node %u already removed from nodemap!\n", idx);
        else
                clear_bit(idx, mle->node_map);

        spin_unlock(&mle->spinlock);
}

static void dlm_mle_node_up(struct dlm_ctxt *dlm,
                            struct dlm_master_list_entry *mle,
                            struct o2nm_node *node, int idx)
{
        spin_lock(&mle->spinlock);

        if (test_bit(idx, mle->node_map))
                mlog(0, "node %u already in node map!\n", idx);
        else
                set_bit(idx, mle->node_map);

        spin_unlock(&mle->spinlock);
}


int dlm_init_mle_cache(void)
{
        dlm_mle_cache = kmem_cache_create("o2dlm_mle",
                                          sizeof(struct dlm_master_list_entry),
                                          0, SLAB_HWCACHE_ALIGN,
                                          NULL);
        if (dlm_mle_cache == NULL)
                return -ENOMEM;
        return 0;
}

void dlm_destroy_mle_cache(void)
{
        if (dlm_mle_cache)
                kmem_cache_destroy(dlm_mle_cache);
}

static void dlm_mle_release(struct kref *kref)
{
        struct dlm_master_list_entry *mle;
        struct dlm_ctxt *dlm;

        mlog_entry_void();

        mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
        dlm = mle->dlm;

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);

        mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
             mle->type);

        /* remove from list if not already */
        __dlm_unlink_mle(dlm, mle);

        /* detach the mle from the domain node up/down events */
        __dlm_mle_detach_hb_events(dlm, mle);

        atomic_dec(&dlm->mle_cur_count[mle->type]);

        /* NOTE: kfree under spinlock here.
         * if this is bad, we can move this to a freelist. */
        kmem_cache_free(dlm_mle_cache, mle);
}


/*
 * LOCK RESOURCE FUNCTIONS
 */

int dlm_init_master_caches(void)
{
        dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
                                              sizeof(struct dlm_lock_resource),
                                              0, SLAB_HWCACHE_ALIGN, NULL);
        if (!dlm_lockres_cache)
                goto bail;

        dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
                                               DLM_LOCKID_NAME_MAX, 0,
                                               SLAB_HWCACHE_ALIGN, NULL);
        if (!dlm_lockname_cache)
                goto bail;

        return 0;
bail:
        dlm_destroy_master_caches();
        return -ENOMEM;
}

void dlm_destroy_master_caches(void)
{
        if (dlm_lockname_cache)
                kmem_cache_destroy(dlm_lockname_cache);

        if (dlm_lockres_cache)
                kmem_cache_destroy(dlm_lockres_cache);
}

static void dlm_lockres_release(struct kref *kref)
{
        struct dlm_lock_resource *res;
        struct dlm_ctxt *dlm;

        res = container_of(kref, struct dlm_lock_resource, refs);
        dlm = res->dlm;

        /* This should not happen -- all lockres' have a name
         * associated with them at init time. */
        BUG_ON(!res->lockname.name);

        mlog(0, "destroying lockres %.*s\n", res->lockname.len,
             res->lockname.name);

        spin_lock(&dlm->track_lock);
        if (!list_empty(&res->tracking))
                list_del_init(&res->tracking);
        else {
                mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
                     res->lockname.len, res->lockname.name);
                dlm_print_one_lock_resource(res);
        }
        spin_unlock(&dlm->track_lock);

        atomic_dec(&dlm->res_cur_count);

        dlm_put(dlm);

        if (!hlist_unhashed(&res->hash_node) ||
            !list_empty(&res->granted) ||
            !list_empty(&res->converting) ||
            !list_empty(&res->blocked) ||
            !list_empty(&res->dirty) ||
            !list_empty(&res->recovering) ||
            !list_empty(&res->purge)) {
                mlog(ML_ERROR,
                     "Going to BUG for resource %.*s."
                     "  We're on a list! [%c%c%c%c%c%c%c]\n",
                     res->lockname.len, res->lockname.name,
                     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
                     !list_empty(&res->granted) ? 'G' : ' ',
                     !list_empty(&res->converting) ? 'C' : ' ',
                     !list_empty(&res->blocked) ? 'B' : ' ',
                     !list_empty(&res->dirty) ? 'D' : ' ',
                     !list_empty(&res->recovering) ? 'R' : ' ',
                     !list_empty(&res->purge) ? 'P' : ' ');

                dlm_print_one_lock_resource(res);
        }

        /* By the time we're ready to blow this guy away, we shouldn't
         * be on any lists. */
        BUG_ON(!hlist_unhashed(&res->hash_node));
        BUG_ON(!list_empty(&res->granted));
        BUG_ON(!list_empty(&res->converting));
        BUG_ON(!list_empty(&res->blocked));
        BUG_ON(!list_empty(&res->dirty));
        BUG_ON(!list_empty(&res->recovering));
        BUG_ON(!list_empty(&res->purge));

        kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

        kmem_cache_free(dlm_lockres_cache, res);
}

void dlm_lockres_put(struct dlm_lock_resource *res)
{
        kref_put(&res->refs, dlm_lockres_release);
}

static void dlm_init_lockres(struct dlm_ctxt *dlm,
                             struct dlm_lock_resource *res,
                             const char *name, unsigned int namelen)
{
        char *qname;

        /* If we memset here, we lose our reference to the kmalloc'd
         * res->lockname.name, so be sure to init every field
         * correctly! */

        qname = (char *) res->lockname.name;
        memcpy(qname, name, namelen);

        res->lockname.len = namelen;
        res->lockname.hash = dlm_lockid_hash(name, namelen);

        init_waitqueue_head(&res->wq);
        spin_lock_init(&res->spinlock);
        INIT_HLIST_NODE(&res->hash_node);
        INIT_LIST_HEAD(&res->granted);
        INIT_LIST_HEAD(&res->converting);
        INIT_LIST_HEAD(&res->blocked);
        INIT_LIST_HEAD(&res->dirty);
        INIT_LIST_HEAD(&res->recovering);
        INIT_LIST_HEAD(&res->purge);
        INIT_LIST_HEAD(&res->tracking);
        atomic_set(&res->asts_reserved, 0);
        res->migration_pending = 0;
        res->inflight_locks = 0;

        /* put in dlm_lockres_release */
        dlm_grab(dlm);
        res->dlm = dlm;

        kref_init(&res->refs);

        atomic_inc(&dlm->res_tot_count);
        atomic_inc(&dlm->res_cur_count);

        /* just for consistency */
        spin_lock(&res->spinlock);
        dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
        spin_unlock(&res->spinlock);

        res->state = DLM_LOCK_RES_IN_PROGRESS;

        res->last_used = 0;

        spin_lock(&dlm->spinlock);
        list_add_tail(&res->tracking, &dlm->tracking_list);
        spin_unlock(&dlm->spinlock);

        memset(res->lvb, 0, DLM_LVB_LEN);
        memset(res->refmap, 0, sizeof(res->refmap));
}

struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
                                   const char *name,
                                   unsigned int namelen)
{
        struct dlm_lock_resource *res = NULL;

        res = (struct dlm_lock_resource *)
                                kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
        if (!res)
                goto error;

        res->lockname.name = (char *)
                                kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
        if (!res->lockname.name)
                goto error;

        dlm_init_lockres(dlm, res, name, namelen);
        return res;

error:
        if (res && res->lockname.name)
                kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

        if (res)
                kmem_cache_free(dlm_lockres_cache, res);
        return NULL;
}

void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
                                   struct dlm_lock_resource *res,
                                   int new_lockres,
                                   const char *file,
                                   int line)
{
        if (!new_lockres)
                assert_spin_locked(&res->spinlock);

        if (!test_bit(dlm->node_num, res->refmap)) {
                BUG_ON(res->inflight_locks != 0);
                dlm_lockres_set_refmap_bit(dlm->node_num, res);
        }
        res->inflight_locks++;
        mlog(0, "%s:%.*s: inflight++: now %u\n",
             dlm->name, res->lockname.len, res->lockname.name,
             res->inflight_locks);
}

void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
                                   struct dlm_lock_resource *res,
                                   const char *file,
                                   int line)
{
        assert_spin_locked(&res->spinlock);

        BUG_ON(res->inflight_locks == 0);
        res->inflight_locks--;
        mlog(0, "%s:%.*s: inflight--: now %u\n",
             dlm->name, res->lockname.len, res->lockname.name,
             res->inflight_locks);
        if (res->inflight_locks == 0)
                dlm_lockres_clear_refmap_bit(dlm->node_num, res);
        wake_up(&res->wq);
}

/*
 * lookup a lock resource by name.
 * may already exist in the hashtable.
 * lockid is null terminated
 *
 * if not, allocate enough for the lockres and for
 * the temporary structure used in doing the mastering.
 *
 * also, do a lookup in the dlm->master_list to see
 * if another node has begun mastering the same lock.
 * if so, there should be a block entry in there
 * for this name, and we should *not* attempt to master
 * the lock here.   need to wait around for that node
 * to assert_master (or die).
 *
 */
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
                                          const char *lockid,
                                          int namelen,
                                          int flags)
{
        struct dlm_lock_resource *tmpres=NULL, *res=NULL;
        struct dlm_master_list_entry *mle = NULL;
        struct dlm_master_list_entry *alloc_mle = NULL;
        int blocked = 0;
        int ret, nodenum;
        struct dlm_node_iter iter;
        unsigned int hash;
        int tries = 0;
        int bit, wait_on_recovery = 0;
        int drop_inflight_if_nonlocal = 0;

        BUG_ON(!lockid);

        hash = dlm_lockid_hash(lockid, namelen);

        mlog(0, "get lockres %s (len %d)\n", lockid, namelen);

lookup:
        spin_lock(&dlm->spinlock);
        tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
        if (tmpres) {
                int dropping_ref = 0;

                spin_unlock(&dlm->spinlock);

                spin_lock(&tmpres->spinlock);
                /* We wait for the other thread that is mastering the resource */
                if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
                        __dlm_wait_on_lockres(tmpres);
                        BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
                }

                if (tmpres->owner == dlm->node_num) {
                        BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
                        dlm_lockres_grab_inflight_ref(dlm, tmpres);
                } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
                        dropping_ref = 1;
                spin_unlock(&tmpres->spinlock);

                /* wait until done messaging the master, drop our ref to allow
                 * the lockres to be purged, start over. */
                if (dropping_ref) {
                        spin_lock(&tmpres->spinlock);
                        __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
                        spin_unlock(&tmpres->spinlock);
                        dlm_lockres_put(tmpres);
                        tmpres = NULL;
                        goto lookup;
                }

                mlog(0, "found in hash!\n");
                if (res)
                        dlm_lockres_put(res);
                res = tmpres;
                goto leave;
        }

        if (!res) {
                spin_unlock(&dlm->spinlock);
                mlog(0, "allocating a new resource\n");
                /* nothing found and we need to allocate one. */
                alloc_mle = (struct dlm_master_list_entry *)
                        kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
                if (!alloc_mle)
                        goto leave;
                res = dlm_new_lockres(dlm, lockid, namelen);
                if (!res)
                        goto leave;
                goto lookup;
        }

        mlog(0, "no lockres found, allocated our own: %p\n", res);

        if (flags & LKM_LOCAL) {
                /* caller knows it's safe to assume it's not mastered elsewhere
                 * DONE!  return right away */
                spin_lock(&res->spinlock);
                dlm_change_lockres_owner(dlm, res, dlm->node_num);
                __dlm_insert_lockres(dlm, res);
                dlm_lockres_grab_inflight_ref(dlm, res);
                spin_unlock(&res->spinlock);
                spin_unlock(&dlm->spinlock);
                /* lockres still marked IN_PROGRESS */
                goto wake_waiters;
        }

        /* check master list to see if another node has started mastering it */
        spin_lock(&dlm->master_lock);

        /* if we found a block, wait for lock to be mastered by another node */
        blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
        if (blocked) {
                int mig;
                if (mle->type == DLM_MLE_MASTER) {
                        mlog(ML_ERROR, "master entry for nonexistent lock!\n");
                        BUG();
                }
                mig = (mle->type == DLM_MLE_MIGRATION);
                /* if there is a migration in progress, let the migration
                 * finish before continuing.  we can wait for the absence
                 * of the MIGRATION mle: either the migrate finished or
                 * one of the nodes died and the mle was cleaned up.
                 * if there is a BLOCK here, but it already has a master
                 * set, we are too late.  the master does not have a ref
                 * for us in the refmap.  detach the mle and drop it.
                 * either way, go back to the top and start over. */
                if (mig || mle->master != O2NM_MAX_NODES) {
                        BUG_ON(mig && mle->master == dlm->node_num);
                        /* we arrived too late.  the master does not
                         * have a ref for us. retry. */
                        mlog(0, "%s:%.*s: late on %s\n",
                             dlm->name, namelen, lockid,
                             mig ?  "MIGRATION" : "BLOCK");
                        spin_unlock(&dlm->master_lock);
                        spin_unlock(&dlm->spinlock);

                        /* master is known, detach */
                        if (!mig)
                                dlm_mle_detach_hb_events(dlm, mle);
                        dlm_put_mle(mle);
                        mle = NULL;
                        /* this is lame, but we can't wait on either
                         * the mle or lockres waitqueue here */
                        if (mig)
                                msleep(100);
                        goto lookup;
                }
        } else {
                /* go ahead and try to master lock on this node */
                mle = alloc_mle;
                /* make sure this does not get freed below */
                alloc_mle = NULL;
                dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
                set_bit(dlm->node_num, mle->maybe_map);
                __dlm_insert_mle(dlm, mle);

                /* still holding the dlm spinlock, check the recovery map
                 * to see if there are any nodes that still need to be
                 * considered.  these will not appear in the mle nodemap
                 * but they might own this lockres.  wait on them. */
                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit < O2NM_MAX_NODES) {
                        mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
                             "recover before lock mastery can begin\n",
                             dlm->name, namelen, (char *)lockid, bit);
                        wait_on_recovery = 1;
                }
        }

        /* at this point there is either a DLM_MLE_BLOCK or a
         * DLM_MLE_MASTER on the master list, so it's safe to add the
         * lockres to the hashtable.  anyone who finds the lock will
         * still have to wait on the IN_PROGRESS. */

        /* finally add the lockres to its hash bucket */
        __dlm_insert_lockres(dlm, res);
        /* since this lockres is new it does not require the spinlock */
        dlm_lockres_grab_inflight_ref_new(dlm, res);

        /* if this node does not become the master make sure to drop
         * this inflight reference below */
        drop_inflight_if_nonlocal = 1;

        /* get an extra ref on the mle in case this is a BLOCK
         * if so, the creator of the BLOCK may try to put the last
         * ref at this time in the assert master handler, so we
         * need an extra one to keep from a bad ptr deref. */
        dlm_get_mle_inuse(mle);
        spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);

redo_request:
        while (wait_on_recovery) {
                /* any cluster changes that occurred after dropping the
                 * dlm spinlock would be detectable by a change on the mle,
                 * so we only need to clear out the recovery map once. */
                if (dlm_is_recovery_lock(lockid, namelen)) {
                        mlog(ML_NOTICE, "%s: recovery map is not empty, but "
                             "must master $RECOVERY lock now\n", dlm->name);
                        if (!dlm_pre_master_reco_lockres(dlm, res))
                                wait_on_recovery = 0;
                        else {
                                mlog(0, "%s: waiting 500ms for heartbeat state "
                                    "change\n", dlm->name);
                                msleep(500);
                        }
                        continue;
                }

                dlm_kick_recovery_thread(dlm);
                msleep(1000);
                dlm_wait_for_recovery(dlm);

                spin_lock(&dlm->spinlock);
                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit < O2NM_MAX_NODES) {
                        mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
                             "recover before lock mastery can begin\n",
                             dlm->name, namelen, (char *)lockid, bit);
                        wait_on_recovery = 1;
                } else
                        wait_on_recovery = 0;
                spin_unlock(&dlm->spinlock);

                if (wait_on_recovery)
                        dlm_wait_for_node_recovery(dlm, bit, 10000);
        }

        /* must wait for lock to be mastered elsewhere */
        if (blocked)
                goto wait;

        ret = -EINVAL;
        dlm_node_iter_init(mle->vote_map, &iter);
        while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
                ret = dlm_do_master_request(res, mle, nodenum);
                if (ret < 0)
                        mlog_errno(ret);
                if (mle->master != O2NM_MAX_NODES) {
                        /* found a master ! */
                        if (mle->master <= nodenum)
                                break;
                        /* if our master request has not reached the master
                         * yet, keep going until it does.  this is how the
                         * master will know that asserts are needed back to
                         * the lower nodes. */
                        mlog(0, "%s:%.*s: requests only up to %u but master "
                             "is %u, keep going\n", dlm->name, namelen,
                             lockid, nodenum, mle->master);
                }
        }

wait:
        /* keep going until the response map includes all nodes */
        ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
        if (ret < 0) {
                wait_on_recovery = 1;
                mlog(0, "%s:%.*s: node map changed, redo the "
                     "master request now, blocked=%d\n",
                     dlm->name, res->lockname.len,
                     res->lockname.name, blocked);
                if (++tries > 20) {
                        mlog(ML_ERROR, "%s:%.*s: spinning on "
                             "dlm_wait_for_lock_mastery, blocked=%d\n",
                             dlm->name, res->lockname.len,
                             res->lockname.name, blocked);
                        dlm_print_one_lock_resource(res);
                        dlm_print_one_mle(mle);
                        tries = 0;
                }
                goto redo_request;
        }

        mlog(0, "lockres mastered by %u\n", res->owner);
        /* make sure we never continue without this */
        BUG_ON(res->owner == O2NM_MAX_NODES);

        /* master is known, detach if not already detached */
        dlm_mle_detach_hb_events(dlm, mle);
        dlm_put_mle(mle);
        /* put the extra ref */
        dlm_put_mle_inuse(mle);

wake_waiters:
        spin_lock(&res->spinlock);
        if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
                dlm_lockres_drop_inflight_ref(dlm, res);
        res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
        spin_unlock(&res->spinlock);
        wake_up(&res->wq);

leave:
        /* need to free the unused mle */
        if (alloc_mle)
                kmem_cache_free(dlm_mle_cache, alloc_mle);

        return res;
}
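
/*
 * Editor's sketch of a typical caller (hypothetical, illustration
 * only): dlm_get_lock_resource() either finds the lockres in the hash
 * or masters it, and returns with a reference held; NULL means an
 * allocation failed.
 */
#if 0
static struct dlm_lock_resource *example_lookup(struct dlm_ctxt *dlm,
                                                const char *lockid, int len)
{
        struct dlm_lock_resource *res;

        res = dlm_get_lock_resource(dlm, lockid, len, 0 /* not LKM_LOCAL */);
        if (!res)
                return NULL;    /* allocation failure */
        /* res->owner is now known; drop with dlm_lockres_put(res) when done */
        return res;
}
#endif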


#define DLM_MASTERY_TIMEOUT_MS   5000

static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_master_list_entry *mle,
                                     int *blocked)
{
        u8 m;
        int ret, bit;
        int map_changed, voting_done;
        int assert, sleep;

recheck:
        ret = 0;
        assert = 0;

        /* check if another node has already become the owner */
        spin_lock(&res->spinlock);
        if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
                mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
                     res->lockname.len, res->lockname.name, res->owner);
                spin_unlock(&res->spinlock);
                /* this will cause the master to re-assert across
                 * the whole cluster, freeing up mles */
                if (res->owner != dlm->node_num) {
                        ret = dlm_do_master_request(res, mle, res->owner);
                        if (ret < 0) {
                                /* give recovery a chance to run */
                                mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
                                msleep(500);
                                goto recheck;
                        }
                }
                ret = 0;
                goto leave;
        }
        spin_unlock(&res->spinlock);

        spin_lock(&mle->spinlock);
        m = mle->master;
        map_changed = (memcmp(mle->vote_map, mle->node_map,
                              sizeof(mle->vote_map)) != 0);
        voting_done = (memcmp(mle->vote_map, mle->response_map,
                             sizeof(mle->vote_map)) == 0);

        /* restart if we hit any errors */
        if (map_changed) {
                int b;
                mlog(0, "%s: %.*s: node map changed, restarting\n",
                     dlm->name, res->lockname.len, res->lockname.name);
                ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
                b = (mle->type == DLM_MLE_BLOCK);
                if ((*blocked && !b) || (!*blocked && b)) {
                        mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
                             dlm->name, res->lockname.len, res->lockname.name,
                             *blocked, b);
                        *blocked = b;
                }
                spin_unlock(&mle->spinlock);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto leave;
                }
                mlog(0, "%s:%.*s: restart lock mastery succeeded, "
                     "rechecking now\n", dlm->name, res->lockname.len,
                     res->lockname.name);
                goto recheck;
        } else {
                if (!voting_done) {
                        mlog(0, "map not changed and voting not done "
                             "for %s:%.*s\n", dlm->name, res->lockname.len,
                             res->lockname.name);
                }
        }

        if (m != O2NM_MAX_NODES) {
                /* another node has done an assert!
                 * all done! */
                sleep = 0;
        } else {
                sleep = 1;
                /* have all nodes responded? */
                if (voting_done && !*blocked) {
                        bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
                        if (dlm->node_num <= bit) {
                                /* my node number is lowest.
                                 * now tell other nodes that I am
                                 * mastering this. */
                                mle->master = dlm->node_num;
                                /* ref was grabbed in get_lock_resource
                                 * will be dropped in dlmlock_master */
                                assert = 1;
                                sleep = 0;
                        }
                        /* if voting is done, but we have not received
                         * an assert master yet, we must sleep */
                }
        }

        spin_unlock(&mle->spinlock);

        /* sleep if we haven't finished voting yet */
        if (sleep) {
                unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);

                /*
                if (atomic_read(&mle->mle_refs.refcount) < 2)
                        mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
                        atomic_read(&mle->mle_refs.refcount),
                        res->lockname.len, res->lockname.name);
                */
                atomic_set(&mle->woken, 0);
                (void)wait_event_timeout(mle->wq,
                                         (atomic_read(&mle->woken) == 1),
                                         timeo);
                if (res->owner == O2NM_MAX_NODES) {
                        mlog(0, "%s:%.*s: waiting again\n", dlm->name,
                             res->lockname.len, res->lockname.name);
                        goto recheck;
                }
                mlog(0, "done waiting, master is %u\n", res->owner);
                ret = 0;
                goto leave;
        }

        ret = 0;   /* done */
        if (assert) {
                m = dlm->node_num;
                mlog(0, "about to master %.*s here, this=%u\n",
                     res->lockname.len, res->lockname.name, m);
                ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
                if (ret) {
                        /* This is a failure in the network path,
                         * not in the response to the assert_master
                         * (any nonzero response is a BUG on this node).
                         * Most likely a socket just got disconnected
                         * due to node death. */
                        mlog_errno(ret);
                }
                /* no longer need to restart lock mastery.
                 * all living nodes have been contacted. */
                ret = 0;
        }

        /* set the lockres owner */
        spin_lock(&res->spinlock);
        /* mastery reference obtained either during
         * assert_master_handler or in get_lock_resource */
        dlm_change_lockres_owner(dlm, res, m);
        spin_unlock(&res->spinlock);

leave:
        return ret;
}
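
/*
 * Editor's sketch of the election rule used above (illustration only):
 * once voting is done and nobody has asserted mastery, the live node
 * with the lowest number in maybe_map wins.
 */
#if 0
static int example_i_am_master(struct dlm_ctxt *dlm,
                               struct dlm_master_list_entry *mle)
{
        int bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);

        /* lowest set bit wins; node_num <= bit means nobody lower than us */
        return dlm->node_num <= bit;
}
#endif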

struct dlm_bitmap_diff_iter
{
        int curnode;
        unsigned long *orig_bm;
        unsigned long *cur_bm;
        unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

enum dlm_node_state_change
{
        NODE_DOWN = -1,
        NODE_NO_CHANGE = 0,
        NODE_UP
};

static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
                                      unsigned long *orig_bm,
                                      unsigned long *cur_bm)
{
        unsigned long p1, p2;
        int i;

        iter->curnode = -1;
        iter->orig_bm = orig_bm;
        iter->cur_bm = cur_bm;

        for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
                p1 = *(iter->orig_bm + i);
                p2 = *(iter->cur_bm + i);
                iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
        }
}

static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
                                     enum dlm_node_state_change *state)
{
        int bit;

        if (iter->curnode >= O2NM_MAX_NODES)
                return -ENOENT;

        bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
                            iter->curnode+1);
        if (bit >= O2NM_MAX_NODES) {
                iter->curnode = O2NM_MAX_NODES;
                return -ENOENT;
        }

        /* if it was there in the original then this node died */
        if (test_bit(bit, iter->orig_bm))
                *state = NODE_DOWN;
        else
                *state = NODE_UP;

        iter->curnode = bit;
        return bit;
}
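
/*
 * Editor's usage sketch (illustration only): the diff iterator walks
 * the XOR of two node bitmaps, reporting each changed node as NODE_UP
 * or NODE_DOWN -- exactly how dlm_restart_lock_mastery() below
 * consumes it.
 */
#if 0
static void example_walk_diff(unsigned long *old_map, unsigned long *new_map)
{
        struct dlm_bitmap_diff_iter bdi;
        enum dlm_node_state_change sc;
        int node;

        dlm_bitmap_diff_iter_init(&bdi, old_map, new_map);
        while ((node = dlm_bitmap_diff_iter_next(&bdi, &sc)) >= 0)
                mlog(0, "node %d went %s\n", node,
                     sc == NODE_UP ? "up" : "down");
}
#endif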
1191
1192
1193 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1194                                     struct dlm_lock_resource *res,
1195                                     struct dlm_master_list_entry *mle,
1196                                     int blocked)
1197 {
1198         struct dlm_bitmap_diff_iter bdi;
1199         enum dlm_node_state_change sc;
1200         int node;
1201         int ret = 0;
1202
1203         mlog(0, "something happened such that the "
1204              "master process may need to be restarted!\n");
1205
1206         assert_spin_locked(&mle->spinlock);
1207
1208         dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
1209         node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1210         while (node >= 0) {
1211                 if (sc == NODE_UP) {
1212                         /* a node came up.  clear any old vote from
1213                          * the response map and set it in the vote map
1214                          * then restart the mastery. */
1215                         mlog(ML_NOTICE, "node %d up while restarting\n", node);
1216
1217                         /* redo the master request, but only for the new node */
1218                         mlog(0, "sending request to new node\n");
1219                         clear_bit(node, mle->response_map);
1220                         set_bit(node, mle->vote_map);
1221                 } else {
1222                         mlog(ML_ERROR, "node down! %d\n", node);
1223                         if (blocked) {
1224                                 int lowest = find_next_bit(mle->maybe_map,
1225                                                        O2NM_MAX_NODES, 0);
1226
1227                                 /* act like it was never there */
1228                                 clear_bit(node, mle->maybe_map);
1229
1230                                 if (node == lowest) {
1231                                         mlog(0, "expected master %u died"
1232                                             " while this node was blocked "
1233                                             "waiting on it!\n", node);
1234                                         lowest = find_next_bit(mle->maybe_map,
1235                                                         O2NM_MAX_NODES,
1236                                                         lowest+1);
1237                                         if (lowest < O2NM_MAX_NODES) {
1238                                                 mlog(0, "%s:%.*s:still "
1239                                                      "blocked. waiting on %u "
1240                                                      "now\n", dlm->name,
1241                                                      res->lockname.len,
1242                                                      res->lockname.name,
1243                                                      lowest);
1244                                         } else {
1245                                                 /* mle is an MLE_BLOCK, but
1246                                                  * there is now nothing left to
1247                                                  * block on.  we need to return
1248                                                  * all the way back out and try
1249                                                  * again with an MLE_MASTER.
1250                                                  * dlm_do_local_recovery_cleanup
1251                                                  * has already run, so the mle
1252                                                  * refcount is ok */
1253                                                 mlog(0, "%s:%.*s: no "
1254                                                      "longer blocking. try to "
1255                                                      "master this here\n",
1256                                                      dlm->name,
1257                                                      res->lockname.len,
1258                                                      res->lockname.name);
1259                                                 mle->type = DLM_MLE_MASTER;
1260                                                 mle->mleres = res;
1261                                         }
1262                                 }
1263                         }
1264
1265                         /* now blank out everything, as if we had never
1266                          * contacted anyone */
1267                         memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
1268                         memset(mle->response_map, 0, sizeof(mle->response_map));
1269                         /* reset the vote_map to the current node_map */
1270                         memcpy(mle->vote_map, mle->node_map,
1271                                sizeof(mle->node_map));
1272                         /* put myself into the maybe map */
1273                         if (mle->type != DLM_MLE_BLOCK)
1274                                 set_bit(dlm->node_num, mle->maybe_map);
1275                 }
1276                 ret = -EAGAIN;
1277                 node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1278         }
1279         return ret;
1280 }
1281
1282
1283 /*
1284  * DLM_MASTER_REQUEST_MSG
1285  *
1286  * returns: 0 on success,
1287  *          -errno on a network error
1288  *
1289  * on error, the caller should assume the target node is "dead"
1290  *
1291  */
1292
1293 static int dlm_do_master_request(struct dlm_lock_resource *res,
1294                                  struct dlm_master_list_entry *mle, int to)
1295 {
1296         struct dlm_ctxt *dlm = mle->dlm;
1297         struct dlm_master_request request;
1298         int ret, response=0, resend;
1299
1300         memset(&request, 0, sizeof(request));
1301         request.node_idx = dlm->node_num;
1302
1303         BUG_ON(mle->type == DLM_MLE_MIGRATION);
1304
1305         request.namelen = (u8)mle->mnamelen;
1306         memcpy(request.name, mle->mname, request.namelen);
1307
1308 again:
1309         ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
1310                                  sizeof(request), to, &response);
1311         if (ret < 0)  {
1312                 if (ret == -ESRCH) {
1313                         /* should never happen */
1314                         mlog(ML_ERROR, "TCP stack not ready!\n");
1315                         BUG();
1316                 } else if (ret == -EINVAL) {
1317                         mlog(ML_ERROR, "bad args passed to o2net!\n");
1318                         BUG();
1319                 } else if (ret == -ENOMEM) {
1320                         mlog(ML_ERROR, "out of memory while trying to send "
1321                              "network message!  retrying\n");
1322                         /* this is totally crude */
1323                         msleep(50);
1324                         goto again;
1325                 } else if (!dlm_is_host_down(ret)) {
1326                         /* not a network error. bad. */
1327                         mlog_errno(ret);
1328                         mlog(ML_ERROR, "unhandled error!");
1329                         BUG();
1330                 }
1331                 /* all other errors should be network errors,
1332                  * and likely indicate node death */
1333                 mlog(ML_ERROR, "link to %d went down!\n", to);
1334                 goto out;
1335         }
1336
1337         ret = 0;
1338         resend = 0;
1339         spin_lock(&mle->spinlock);
1340         switch (response) {
1341         case DLM_MASTER_RESP_YES:
1342                 set_bit(to, mle->response_map);
1343                 mlog(0, "node %u is the master, response=YES\n", to);
1344                 mlog(0, "%s:%.*s: master node %u now knows I have a "
1345                      "reference\n", dlm->name, res->lockname.len,
1346                      res->lockname.name, to);
1347                 mle->master = to;
1348                 break;
1349         case DLM_MASTER_RESP_NO:
1350                 mlog(0, "node %u not master, response=NO\n", to);
1351                 set_bit(to, mle->response_map);
1352                 break;
1353         case DLM_MASTER_RESP_MAYBE:
1354                 mlog(0, "node %u not master, response=MAYBE\n", to);
1355                 set_bit(to, mle->response_map);
1356                 set_bit(to, mle->maybe_map);
1357                 break;
1358         case DLM_MASTER_RESP_ERROR:
1359                 mlog(0, "node %u hit an error, resending\n", to);
1360                 resend = 1;
1361                 response = 0;
1362                 break;
1363         default:
1364                 mlog(ML_ERROR, "bad response! %u\n", response);
1365                 BUG();
1366         }
1367         spin_unlock(&mle->spinlock);
1368         if (resend) {
1369                 /* this is also totally crude */
1370                 msleep(50);
1371                 goto again;
1372         }
1373
1374 out:
1375         return ret;
1376 }
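
/*
 * Illustrative sketch only (not part of this file's call graph, and the
 * choice of mle->vote_map is an assumption): a caller that wants an
 * answer from every node it has contacted could drive
 * dlm_do_master_request() with a node iterator, much like the other
 * per-node loops in this file:
 *
 *	struct dlm_node_iter iter;
 *	int nodenum, ret;
 *
 *	dlm_node_iter_init(mle->vote_map, &iter);
 *	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
 *		ret = dlm_do_master_request(res, mle, nodenum);
 *		if (ret < 0)
 *			mlog_errno(ret);	(node likely dead; keep going)
 *	}
 */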
1377
1378 /*
1379  * locks that can be taken here:
1380  * dlm->spinlock
1381  * res->spinlock
1382  * mle->spinlock
1383  * dlm->master_lock
1384  *
1385  * if possible, TRIM THIS DOWN!!!
1386  */
1387 int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
1388                                void **ret_data)
1389 {
1390         u8 response = DLM_MASTER_RESP_MAYBE;
1391         struct dlm_ctxt *dlm = data;
1392         struct dlm_lock_resource *res = NULL;
1393         struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
1394         struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
1395         char *name;
1396         unsigned int namelen, hash;
1397         int found, ret;
1398         int set_maybe;
1399         int dispatch_assert = 0;
1400
1401         if (!dlm_grab(dlm))
1402                 return DLM_MASTER_RESP_NO;
1403
1404         if (!dlm_domain_fully_joined(dlm)) {
1405                 response = DLM_MASTER_RESP_NO;
1406                 goto send_response;
1407         }
1408
1409         name = request->name;
1410         namelen = request->namelen;
1411         hash = dlm_lockid_hash(name, namelen);
1412
1413         if (namelen > DLM_LOCKID_NAME_MAX) {
1414                 response = DLM_IVBUFLEN;
1415                 goto send_response;
1416         }
1417
1418 way_up_top:
1419         spin_lock(&dlm->spinlock);
1420         res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1421         if (res) {
1422                 spin_unlock(&dlm->spinlock);
1423
1424                 /* take care of the easy cases up front */
1425                 spin_lock(&res->spinlock);
1426                 if (res->state & (DLM_LOCK_RES_RECOVERING|
1427                                   DLM_LOCK_RES_MIGRATING)) {
1428                         spin_unlock(&res->spinlock);
1429                         mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
1430                              "being recovered/migrated\n");
1431                         response = DLM_MASTER_RESP_ERROR;
1432                         if (mle)
1433                                 kmem_cache_free(dlm_mle_cache, mle);
1434                         goto send_response;
1435                 }
1436
1437                 if (res->owner == dlm->node_num) {
1438                         mlog(0, "%s:%.*s: setting bit %u in refmap\n",
1439                              dlm->name, namelen, name, request->node_idx);
1440                         dlm_lockres_set_refmap_bit(request->node_idx, res);
1441                         spin_unlock(&res->spinlock);
1442                         response = DLM_MASTER_RESP_YES;
1443                         if (mle)
1444                                 kmem_cache_free(dlm_mle_cache, mle);
1445
1446                         /* this node is the owner.
1447                          * there is some extra work that needs to
1448                          * happen now.  the requesting node has
1449                          * caused all nodes up to this one to
1450                          * create mles.  this node now needs to
1451                          * go back and clean those up. */
1452                         dispatch_assert = 1;
1453                         goto send_response;
1454                 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1455                         spin_unlock(&res->spinlock);
1456                         // mlog(0, "node %u is the master\n", res->owner);
1457                         response = DLM_MASTER_RESP_NO;
1458                         if (mle)
1459                                 kmem_cache_free(dlm_mle_cache, mle);
1460                         goto send_response;
1461                 }
1462
1463                 /* ok, there is no owner.  either this node is
1464                  * being blocked, or it is actively trying to
1465                  * master this lock. */
1466                 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1467                         mlog(ML_ERROR, "lock with no owner should be "
1468                              "in-progress!\n");
1469                         BUG();
1470                 }
1471
1472                 // mlog(0, "lockres is in progress...\n");
1473                 spin_lock(&dlm->master_lock);
1474                 found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1475                 if (!found) {
1476                         mlog(ML_ERROR, "no mle found for this lock!\n");
1477                         BUG();
1478                 }
1479                 set_maybe = 1;
1480                 spin_lock(&tmpmle->spinlock);
1481                 if (tmpmle->type == DLM_MLE_BLOCK) {
1482                         // mlog(0, "this node is waiting for "
1483                         // "lockres to be mastered\n");
1484                         response = DLM_MASTER_RESP_NO;
1485                 } else if (tmpmle->type == DLM_MLE_MIGRATION) {
1486                         mlog(0, "node %u is master, but trying to migrate to "
1487                              "node %u.\n", tmpmle->master, tmpmle->new_master);
1488                         if (tmpmle->master == dlm->node_num) {
1489                                 mlog(ML_ERROR, "no owner on lockres, but this "
1490                                      "node is trying to migrate it to %u?!\n",
1491                                      tmpmle->new_master);
1492                                 BUG();
1493                         } else {
1494                                 /* the real master can respond on its own */
1495                                 response = DLM_MASTER_RESP_NO;
1496                         }
1497                 } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1498                         set_maybe = 0;
1499                         if (tmpmle->master == dlm->node_num) {
1500                                 response = DLM_MASTER_RESP_YES;
1501                                 /* this node will be the owner.
1502                                  * go back and clean the mles on any
1503                                  * other nodes */
1504                                 dispatch_assert = 1;
1505                                 dlm_lockres_set_refmap_bit(request->node_idx, res);
1506                                 mlog(0, "%s:%.*s: setting bit %u in refmap\n",
1507                                      dlm->name, namelen, name,
1508                                      request->node_idx);
1509                         } else
1510                                 response = DLM_MASTER_RESP_NO;
1511                 } else {
1512                         // mlog(0, "this node is attempting to "
1513                         // "master lockres\n");
1514                         response = DLM_MASTER_RESP_MAYBE;
1515                 }
1516                 if (set_maybe)
1517                         set_bit(request->node_idx, tmpmle->maybe_map);
1518                 spin_unlock(&tmpmle->spinlock);
1519
1520                 spin_unlock(&dlm->master_lock);
1521                 spin_unlock(&res->spinlock);
1522
1523                 /* keep the mle attached to heartbeat events */
1524                 dlm_put_mle(tmpmle);
1525                 if (mle)
1526                         kmem_cache_free(dlm_mle_cache, mle);
1527                 goto send_response;
1528         }
1529
1530         /*
1531          * lockres doesn't exist on this node
1532          * if there is an MLE_BLOCK, return NO
1533          * if there is an MLE_MASTER, return MAYBE
1534          * otherwise, add an MLE_BLOCK, return NO
1535          */
1536         spin_lock(&dlm->master_lock);
1537         found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1538         if (!found) {
1539                 /* this lockid has never been seen on this node yet */
1540                 // mlog(0, "no mle found\n");
1541                 if (!mle) {
1542                         spin_unlock(&dlm->master_lock);
1543                         spin_unlock(&dlm->spinlock);
1544
1545                         mle = (struct dlm_master_list_entry *)
1546                                 kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
1547                         if (!mle) {
1548                                 response = DLM_MASTER_RESP_ERROR;
1549                                 mlog_errno(-ENOMEM);
1550                                 goto send_response;
1551                         }
1552                         goto way_up_top;
1553                 }
1554
1555                 // mlog(0, "this is second time thru, already allocated, "
1556                 // "add the block.\n");
1557                 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
1558                 set_bit(request->node_idx, mle->maybe_map);
1559                 __dlm_insert_mle(dlm, mle);
1560                 response = DLM_MASTER_RESP_NO;
1561         } else {
1562                 // mlog(0, "mle was found\n");
1563                 set_maybe = 1;
1564                 spin_lock(&tmpmle->spinlock);
1565                 if (tmpmle->master == dlm->node_num) {
1566                         mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
1567                         BUG();
1568                 }
1569                 if (tmpmle->type == DLM_MLE_BLOCK)
1570                         response = DLM_MASTER_RESP_NO;
1571                 else if (tmpmle->type == DLM_MLE_MIGRATION) {
1572                         mlog(0, "migration mle was found (%u->%u)\n",
1573                              tmpmle->master, tmpmle->new_master);
1574                         /* real master can respond on its own */
1575                         response = DLM_MASTER_RESP_NO;
1576                 } else
1577                         response = DLM_MASTER_RESP_MAYBE;
1578                 if (set_maybe)
1579                         set_bit(request->node_idx, tmpmle->maybe_map);
1580                 spin_unlock(&tmpmle->spinlock);
1581         }
1582         spin_unlock(&dlm->master_lock);
1583         spin_unlock(&dlm->spinlock);
1584
1585         if (found) {
1586                 /* keep the mle attached to heartbeat events */
1587                 dlm_put_mle(tmpmle);
1588         }
1589 send_response:
1590         /*
1591          * __dlm_lookup_lockres() grabbed a reference to this lockres.
1592          * The reference is released by dlm_assert_master_worker() under
1593          * the call to dlm_dispatch_assert_master().  If
1594          * dlm_assert_master_worker() isn't called, we drop it here.
1595          */
1596         if (dispatch_assert) {
1597                 if (response != DLM_MASTER_RESP_YES)
1598                         mlog(ML_ERROR, "invalid response %d\n", response);
1599                 if (!res) {
1600                         mlog(ML_ERROR, "bad lockres while trying to assert!\n");
1601                         BUG();
1602                 }
1603                 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1604                      dlm->node_num, res->lockname.len, res->lockname.name);
1605                 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
1606                                                  DLM_ASSERT_MASTER_MLE_CLEANUP);
1607                 if (ret < 0) {
1608                         mlog(ML_ERROR, "failed to dispatch assert master work\n");
1609                         response = DLM_MASTER_RESP_ERROR;
1610                         dlm_lockres_put(res);
1611                 }
1612         } else {
1613                 if (res)
1614                         dlm_lockres_put(res);
1615         }
1616
1617         dlm_put(dlm);
1618         return response;
1619 }
1620
1621 /*
1622  * DLM_ASSERT_MASTER_MSG
1623  */
1624
1625
1626 /*
1627  * NOTE: this can be used for debugging
1628  * can periodically run all locks owned by this node
1629  * and re-assert across the cluster...
1630  */
1631 static int dlm_do_assert_master(struct dlm_ctxt *dlm,
1632                                 struct dlm_lock_resource *res,
1633                                 void *nodemap, u32 flags)
1634 {
1635         struct dlm_assert_master assert;
1636         int to, tmpret;
1637         struct dlm_node_iter iter;
1638         int ret = 0;
1639         int reassert;
1640         const char *lockname = res->lockname.name;
1641         unsigned int namelen = res->lockname.len;
1642
1643         BUG_ON(namelen > O2NM_MAX_NAME_LEN);
1644
1645         spin_lock(&res->spinlock);
1646         res->state |= DLM_LOCK_RES_SETREF_INPROG;
1647         spin_unlock(&res->spinlock);
1648
1649 again:
1650         reassert = 0;
1651
1652         /* note that if this nodemap is empty, it returns 0 */
1653         dlm_node_iter_init(nodemap, &iter);
1654         while ((to = dlm_node_iter_next(&iter)) >= 0) {
1655                 int r = 0;
1656                 struct dlm_master_list_entry *mle = NULL;
1657
1658                 mlog(0, "sending assert master to %d (%.*s)\n", to,
1659                      namelen, lockname);
1660                 memset(&assert, 0, sizeof(assert));
1661                 assert.node_idx = dlm->node_num;
1662                 assert.namelen = namelen;
1663                 memcpy(assert.name, lockname, namelen);
1664                 assert.flags = cpu_to_be32(flags);
1665
1666                 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
1667                                             &assert, sizeof(assert), to, &r);
1668                 if (tmpret < 0) {
1669                         mlog(0, "assert_master returned %d!\n", tmpret);
1670                         if (!dlm_is_host_down(tmpret)) {
1671                                 mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
1672                                 BUG();
1673                         }
1674                         /* a node died.  finish out the rest of the nodes. */
1675                         mlog(0, "link to %d went down!\n", to);
1676                         /* any nonzero status return will do */
1677                         ret = tmpret;
1678                         r = 0;
1679                 } else if (r < 0) {
1680                         /* ok, something is horribly messed up.  kill thyself. */
1681                         mlog(ML_ERROR,"during assert master of %.*s to %u, "
1682                              "got %d.\n", namelen, lockname, to, r);
1683                         spin_lock(&dlm->spinlock);
1684                         spin_lock(&dlm->master_lock);
1685                         if (dlm_find_mle(dlm, &mle, (char *)lockname,
1686                                          namelen)) {
1687                                 dlm_print_one_mle(mle);
1688                                 __dlm_put_mle(mle);
1689                         }
1690                         spin_unlock(&dlm->master_lock);
1691                         spin_unlock(&dlm->spinlock);
1692                         BUG();
1693                 }
1694
1695                 if (r & DLM_ASSERT_RESPONSE_REASSERT &&
1696                     !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
1697                                 mlog(ML_ERROR, "%.*s: very strange, "
1698                                      "master MLE but no lockres on %u\n",
1699                                      namelen, lockname, to);
1700                 }
1701
1702                 if (r & DLM_ASSERT_RESPONSE_REASSERT) {
1703                         mlog(0, "%.*s: node %u create mles on other "
1704                              "nodes and requests a re-assert\n",
1705                              namelen, lockname, to);
1706                         reassert = 1;
1707                 }
1708                 if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
1709                         mlog(0, "%.*s: node %u has a reference to this "
1710                              "lockres, set the bit in the refmap\n",
1711                              namelen, lockname, to);
1712                         spin_lock(&res->spinlock);
1713                         dlm_lockres_set_refmap_bit(to, res);
1714                         spin_unlock(&res->spinlock);
1715                 }
1716         }
1717
1718         if (reassert)
1719                 goto again;
1720
1721         spin_lock(&res->spinlock);
1722         res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
1723         spin_unlock(&res->spinlock);
1724         wake_up(&res->wq);
1725
1726         return ret;
1727 }
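
/*
 * Response-bit sketch (this restates what the loop above already
 * expects): the remote handler's status is a small bitmask rather than
 * an errno, so the receiver composes its reply roughly as:
 *
 *	int ret = 0;
 *	if (master_request)
 *		ret |= DLM_ASSERT_RESPONSE_REASSERT;
 *	if (have_lockres_ref)
 *		ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
 *	return ret;
 */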
1728
1729 /*
1730  * locks that can be taken here:
1731  * dlm->spinlock
1732  * res->spinlock
1733  * mle->spinlock
1734  * dlm->master_lock
1735  *
1736  * if possible, TRIM THIS DOWN!!!
1737  */
1738 int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1739                               void **ret_data)
1740 {
1741         struct dlm_ctxt *dlm = data;
1742         struct dlm_master_list_entry *mle = NULL;
1743         struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1744         struct dlm_lock_resource *res = NULL;
1745         char *name;
1746         unsigned int namelen, hash;
1747         u32 flags;
1748         int master_request = 0, have_lockres_ref = 0;
1749         int ret = 0;
1750
1751         if (!dlm_grab(dlm))
1752                 return 0;
1753
1754         name = assert->name;
1755         namelen = assert->namelen;
1756         hash = dlm_lockid_hash(name, namelen);
1757         flags = be32_to_cpu(assert->flags);
1758
1759         if (namelen > DLM_LOCKID_NAME_MAX) {
1760                 mlog(ML_ERROR, "Invalid name length!");
1761                 goto done;
1762         }
1763
1764         spin_lock(&dlm->spinlock);
1765
1766         if (flags)
1767                 mlog(0, "assert_master with flags: %u\n", flags);
1768
1769         /* find the MLE */
1770         spin_lock(&dlm->master_lock);
1771         if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1772                 /* not an error, could be master just re-asserting */
1773                 mlog(0, "just got an assert_master from %u, but no "
1774                      "MLE for it! (%.*s)\n", assert->node_idx,
1775                      namelen, name);
1776         } else {
1777                 int bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1778                 if (bit >= O2NM_MAX_NODES) {
1779                         /* not necessarily an error, though less likely.
1780                          * could be master just re-asserting. */
1781                         mlog(0, "no bits set in the maybe_map, but %u "
1782                              "is asserting! (%.*s)\n", assert->node_idx,
1783                              namelen, name);
1784                 } else if (bit != assert->node_idx) {
1785                         if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1786                                 mlog(0, "master %u was found, %u should "
1787                                      "back off\n", assert->node_idx, bit);
1788                         } else {
1789                                 /* with the fix for bug 569, a higher node
1790                                  * number winning the mastery will respond
1791                                  * YES to mastery requests, but this node
1792                                  * had no way of knowing.  let it pass. */
1793                                 mlog(0, "%u is the lowest node, "
1794                                      "%u is asserting. (%.*s)  %u must "
1795                                      "have begun after %u won.\n", bit,
1796                                      assert->node_idx, namelen, name, bit,
1797                                      assert->node_idx);
1798                         }
1799                 }
1800                 if (mle->type == DLM_MLE_MIGRATION) {
1801                         if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1802                                 mlog(0, "%s:%.*s: got cleanup assert"
1803                                      " from %u for migration\n",
1804                                      dlm->name, namelen, name,
1805                                      assert->node_idx);
1806                         } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
1807                                 mlog(0, "%s:%.*s: got unrelated assert"
1808                                      " from %u for migration, ignoring\n",
1809                                      dlm->name, namelen, name,
1810                                      assert->node_idx);
1811                                 __dlm_put_mle(mle);
1812                                 spin_unlock(&dlm->master_lock);
1813                                 spin_unlock(&dlm->spinlock);
1814                                 goto done;
1815                         }
1816                 }
1817         }
1818         spin_unlock(&dlm->master_lock);
1819
1820         /* ok everything checks out with the MLE
1821          * now check to see if there is a lockres */
1822         res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1823         if (res) {
1824                 spin_lock(&res->spinlock);
1825                 if (res->state & DLM_LOCK_RES_RECOVERING)  {
1826                         mlog(ML_ERROR, "%u asserting but %.*s is "
1827                              "RECOVERING!\n", assert->node_idx, namelen, name);
1828                         goto kill;
1829                 }
1830                 if (!mle) {
1831                         if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1832                             res->owner != assert->node_idx) {
1833                                 mlog(ML_ERROR, "DIE! Mastery assert from %u, "
1834                                      "but current owner is %u! (%.*s)\n",
1835                                      assert->node_idx, res->owner, namelen,
1836                                      name);
1837                                 __dlm_print_one_lock_resource(res);
1838                                 BUG();
1839                         }
1840                 } else if (mle->type != DLM_MLE_MIGRATION) {
1841                         if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1842                                 /* owner is just re-asserting */
1843                                 if (res->owner == assert->node_idx) {
1844                                         mlog(0, "owner %u re-asserting on "
1845                                              "lock %.*s\n", assert->node_idx,
1846                                              namelen, name);
1847                                         goto ok;
1848                                 }
1849                                 mlog(ML_ERROR, "got assert_master from "
1850                                      "node %u, but %u is the owner! "
1851                                      "(%.*s)\n", assert->node_idx,
1852                                      res->owner, namelen, name);
1853                                 goto kill;
1854                         }
1855                         if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1856                                 mlog(ML_ERROR, "got assert from %u, but lock "
1857                                      "with no owner should be "
1858                                      "in-progress! (%.*s)\n",
1859                                      assert->node_idx,
1860                                      namelen, name);
1861                                 goto kill;
1862                         }
1863                 } else /* mle->type == DLM_MLE_MIGRATION */ {
1864                         /* should only be getting an assert from new master */
1865                         if (assert->node_idx != mle->new_master) {
1866                                 mlog(ML_ERROR, "got assert from %u, but "
1867                                      "new master is %u, and old master "
1868                                      "was %u (%.*s)\n",
1869                                      assert->node_idx, mle->new_master,
1870                                      mle->master, namelen, name);
1871                                 goto kill;
1872                         }
1873
1874                 }
1875 ok:
1876                 spin_unlock(&res->spinlock);
1877         }
1878
1879         // mlog(0, "woo!  got an assert_master from node %u!\n",
1880         //           assert->node_idx);
1881         if (mle) {
1882                 int extra_ref = 0;
1883                 int nn = -1;
1884                 int rr, err = 0;
1885
1886                 spin_lock(&mle->spinlock);
1887                 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1888                         extra_ref = 1;
1889                 else {
1890                         /* MASTER mle: if any bits set in the response map
1891                          * then the calling node needs to re-assert to clear
1892                          * up nodes that this node contacted */
1893                         while ((nn = find_next_bit(mle->response_map, O2NM_MAX_NODES,
1894                                                    nn+1)) < O2NM_MAX_NODES) {
1895                                 if (nn != dlm->node_num && nn != assert->node_idx)
1896                                         master_request = 1;
1897                         }
1898                 }
1899                 mle->master = assert->node_idx;
1900                 atomic_set(&mle->woken, 1);
1901                 wake_up(&mle->wq);
1902                 spin_unlock(&mle->spinlock);
1903
1904                 if (res) {
1905                         int wake = 0;
1906                         spin_lock(&res->spinlock);
1907                         if (mle->type == DLM_MLE_MIGRATION) {
1908                                 mlog(0, "finishing off migration of lockres %.*s, "
1909                                         "from %u to %u\n",
1910                                         res->lockname.len, res->lockname.name,
1911                                         dlm->node_num, mle->new_master);
1912                                 res->state &= ~DLM_LOCK_RES_MIGRATING;
1913                                 wake = 1;
1914                                 dlm_change_lockres_owner(dlm, res, mle->new_master);
1915                                 BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1916                         } else {
1917                                 dlm_change_lockres_owner(dlm, res, mle->master);
1918                         }
1919                         spin_unlock(&res->spinlock);
1920                         have_lockres_ref = 1;
1921                         if (wake)
1922                                 wake_up(&res->wq);
1923                 }
1924
1925                 /* master is known, detach if not already detached.
1926                  * ensures that only one assert_master call will happen
1927                  * on this mle. */
1928                 spin_lock(&dlm->master_lock);
1929
1930                 rr = atomic_read(&mle->mle_refs.refcount);
1931                 if (mle->inuse > 0) {
1932                         if (extra_ref && rr < 3)
1933                                 err = 1;
1934                         else if (!extra_ref && rr < 2)
1935                                 err = 1;
1936                 } else {
1937                         if (extra_ref && rr < 2)
1938                                 err = 1;
1939                         else if (!extra_ref && rr < 1)
1940                                 err = 1;
1941                 }
1942                 if (err) {
1943                         mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
1944                              "that will mess up this node, refs=%d, extra=%d, "
1945                              "inuse=%d\n", dlm->name, namelen, name,
1946                              assert->node_idx, rr, extra_ref, mle->inuse);
1947                         dlm_print_one_mle(mle);
1948                 }
1949                 __dlm_unlink_mle(dlm, mle);
1950                 __dlm_mle_detach_hb_events(dlm, mle);
1951                 __dlm_put_mle(mle);
1952                 if (extra_ref) {
1953                         /* the assert master message now balances the extra
1954                          * ref given by the master / migration request message.
1955                          * if this is the last put, it will be removed
1956                          * from the list. */
1957                         __dlm_put_mle(mle);
1958                 }
1959                 spin_unlock(&dlm->master_lock);
1960         } else if (res) {
1961                 if (res->owner != assert->node_idx) {
1962                         mlog(0, "assert_master from %u, but current "
1963                              "owner is %u (%.*s), no mle\n", assert->node_idx,
1964                              res->owner, namelen, name);
1965                 }
1966         }
1967         spin_unlock(&dlm->spinlock);
1968
1969 done:
1970         ret = 0;
1971         if (res) {
1972                 spin_lock(&res->spinlock);
1973                 res->state |= DLM_LOCK_RES_SETREF_INPROG;
1974                 spin_unlock(&res->spinlock);
1975                 *ret_data = (void *)res;
1976         }
1977         dlm_put(dlm);
1978         if (master_request) {
1979                 mlog(0, "need to tell master to reassert\n");
1980                 /* positive. negative would shoot down the node. */
1981                 ret |= DLM_ASSERT_RESPONSE_REASSERT;
1982                 if (!have_lockres_ref) {
1983                         mlog(ML_ERROR, "strange, got assert from %u, MASTER "
1984                              "mle present here for %s:%.*s, but no lockres!\n",
1985                              assert->node_idx, dlm->name, namelen, name);
1986                 }
1987         }
1988         if (have_lockres_ref) {
1989                 /* let the master know we have a reference to the lockres */
1990                 ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
1991                 mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
1992                      dlm->name, namelen, name, assert->node_idx);
1993         }
1994         return ret;
1995
1996 kill:
1997         /* kill the caller! */
1998         mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
1999              "and killing the other node now!  This node is OK and can continue.\n");
2000         __dlm_print_one_lock_resource(res);
2001         spin_unlock(&res->spinlock);
2002         spin_unlock(&dlm->spinlock);
2003         *ret_data = (void *)res;
2004         dlm_put(dlm);
2005         return -EINVAL;
2006 }
2007
2008 void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
2009 {
2010         struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2011
2012         if (ret_data) {
2013                 spin_lock(&res->spinlock);
2014                 res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2015                 spin_unlock(&res->spinlock);
2016                 wake_up(&res->wq);
2017                 dlm_lockres_put(res);
2018         }
2019         return;
2020 }
2021
2022 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2023                                struct dlm_lock_resource *res,
2024                                int ignore_higher, u8 request_from, u32 flags)
2025 {
2026         struct dlm_work_item *item;
2027         item = kzalloc(sizeof(*item), GFP_NOFS);
2028         if (!item)
2029                 return -ENOMEM;
2030
2031
2032         /* queue up work for dlm_assert_master_worker */
2033         dlm_grab(dlm);  /* get an extra ref for the work item */
2034         dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
2035         item->u.am.lockres = res; /* already have a ref */
2036         /* can optionally ignore node numbers higher than this node */
2037         item->u.am.ignore_higher = ignore_higher;
2038         item->u.am.request_from = request_from;
2039         item->u.am.flags = flags;
2040
2041         if (ignore_higher)
2042                 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2043                      res->lockname.name);
2044
2045         spin_lock(&dlm->work_lock);
2046         list_add_tail(&item->list, &dlm->work_list);
2047         spin_unlock(&dlm->work_lock);
2048
2049         queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2050         return 0;
2051 }
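
/*
 * Calling-contract sketch (an assumption drawn from the reference
 * comments in dlm_master_request_handler): the lockres reference is
 * handed off to the work item, so on failure the caller must drop the
 * ref itself because the worker will never run:
 *
 *	ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
 *					 DLM_ASSERT_MASTER_MLE_CLEANUP);
 *	if (ret < 0)
 *		dlm_lockres_put(res);
 */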
2052
2053 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2054 {
2055         struct dlm_ctxt *dlm = data;
2056         int ret = 0;
2057         struct dlm_lock_resource *res;
2058         unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
2059         int ignore_higher;
2060         int bit;
2061         u8 request_from;
2062         u32 flags;
2063
2064         dlm = item->dlm;
2065         res = item->u.am.lockres;
2066         ignore_higher = item->u.am.ignore_higher;
2067         request_from = item->u.am.request_from;
2068         flags = item->u.am.flags;
2069
2070         spin_lock(&dlm->spinlock);
2071         memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
2072         spin_unlock(&dlm->spinlock);
2073
2074         clear_bit(dlm->node_num, nodemap);
2075         if (ignore_higher) {
2076                 /* if this is just to clear up mles for nodes below
2077                  * this node, do not send the message to the original
2078                  * caller or any node number higher than this */
2079                 clear_bit(request_from, nodemap);
2080                 bit = dlm->node_num;
2081                 while (1) {
2082                         bit = find_next_bit(nodemap, O2NM_MAX_NODES,
2083                                             bit+1);
2084                         if (bit >= O2NM_MAX_NODES)
2085                                 break;
2086                         clear_bit(bit, nodemap);
2087                 }
2088         }
2089
2090         /*
2091          * If we're migrating this lock to someone else, we are no
2092          * longer allowed to assert our own mastery.  OTOH, we need to
2093          * prevent migration from starting while we're still asserting
2094          * our dominance.  The reserved ast delays migration.
2095          */
2096         spin_lock(&res->spinlock);
2097         if (res->state & DLM_LOCK_RES_MIGRATING) {
2098                 mlog(0, "Someone asked us to assert mastery, but we're "
2099                      "in the middle of migration.  Skipping assert, "
2100                      "the new master will handle that.\n");
2101                 spin_unlock(&res->spinlock);
2102                 goto put;
2103         } else
2104                 __dlm_lockres_reserve_ast(res);
2105         spin_unlock(&res->spinlock);
2106
2107         /* this call now finishes out the nodemap
2108          * even if one or more nodes die */
2109         mlog(0, "worker about to master %.*s here, this=%u\n",
2110              res->lockname.len, res->lockname.name, dlm->node_num);
2111         ret = dlm_do_assert_master(dlm, res, nodemap, flags);
2112         if (ret < 0) {
2113                 /* no need to restart, we are done */
2114                 if (!dlm_is_host_down(ret))
2115                         mlog_errno(ret);
2116         }
2117
2118         /* Ok, we've asserted ourselves.  Let's let migration start. */
2119         dlm_lockres_release_ast(dlm, res);
2120
2121 put:
2122         dlm_lockres_put(res);
2123
2124         mlog(0, "finished with dlm_assert_master_worker\n");
2125 }
2126
2127 /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
2128  * We cannot wait for node recovery to complete to begin mastering this
2129  * lockres because this lockres is used to kick off recovery! ;-)
2130  * So, do a pre-check on all living nodes to see if any of those nodes
2131  * think that $RECOVERY is currently mastered by a dead node.  If so,
2132  * we wait a short time to allow that node to get notified by its own
2133  * heartbeat stack, then check again.  All $RECOVERY lock resources
2134  * mastered by dead nodes are purged when the heartbeat callback is
2135  * fired, so we can know for sure that it is safe to continue once
2136  * the node returns a live node or no node.  */
2137 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2138                                        struct dlm_lock_resource *res)
2139 {
2140         struct dlm_node_iter iter;
2141         int nodenum;
2142         int ret = 0;
2143         u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2144
2145         spin_lock(&dlm->spinlock);
2146         dlm_node_iter_init(dlm->domain_map, &iter);
2147         spin_unlock(&dlm->spinlock);
2148
2149         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2150                 /* do not send to self */
2151                 if (nodenum == dlm->node_num)
2152                         continue;
2153                 ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2154                 if (ret < 0) {
2155                         mlog_errno(ret);
2156                         if (!dlm_is_host_down(ret))
2157                                 BUG();
2158                         /* host is down, so answer for that node would be
2159                          * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
2160                         ret = 0;
2161                 }
2162
2163                 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2164                         /* check to see if this master is in the recovery map */
2165                         spin_lock(&dlm->spinlock);
2166                         if (test_bit(master, dlm->recovery_map)) {
2167                                 mlog(ML_NOTICE, "%s: node %u has not seen "
2168                                      "node %u go down yet, and thinks the "
2169                                      "dead node is mastering the recovery "
2170                                      "lock.  must wait.\n", dlm->name,
2171                                      nodenum, master);
2172                                 ret = -EAGAIN;
2173                         }
2174                         spin_unlock(&dlm->spinlock);
2175                         mlog(0, "%s: reco lock master is %u\n", dlm->name,
2176                              master);
2177                         break;
2178                 }
2179         }
2180         return ret;
2181 }
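
/*
 * Usage sketch (an assumption about the caller, which lives in the lock
 * mastery path rather than here): -EAGAIN above only means some node has
 * not yet noticed the death of the old $RECOVERY master, so a caller
 * would typically sleep briefly and re-run the pre-check until it
 * passes:
 *
 *	while (dlm_pre_master_reco_lockres(dlm, res) == -EAGAIN) {
 *		mlog(0, "waiting on dead node cleanup before mastering "
 *		     "the recovery lock\n");
 *		msleep(100);
 *	}
 */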
2182
2183 /*
2184  * DLM_DEREF_LOCKRES_MSG
2185  */
2186
2187 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2188 {
2189         struct dlm_deref_lockres deref;
2190         int ret = 0, r;
2191         const char *lockname;
2192         unsigned int namelen;
2193
2194         lockname = res->lockname.name;
2195         namelen = res->lockname.len;
2196         BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2197
2198         mlog(0, "%s:%.*s: sending deref to %d\n",
2199              dlm->name, namelen, lockname, res->owner);
2200         memset(&deref, 0, sizeof(deref));
2201         deref.node_idx = dlm->node_num;
2202         deref.namelen = namelen;
2203         memcpy(deref.name, lockname, namelen);
2204
2205         ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2206                                  &deref, sizeof(deref), res->owner, &r);
2207         if (ret < 0)
2208                 mlog_errno(ret);
2209         else if (r < 0) {
2210                 /* BAD.  other node says I did not have a ref. */
2211                 mlog(ML_ERROR,"while dropping ref on %s:%.*s "
2212                     "(master=%u) got %d.\n", dlm->name, namelen,
2213                     lockname, res->owner, r);
2214                 dlm_print_one_lock_resource(res);
2215                 BUG();
2216         }
2217         return ret;
2218 }
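
/*
 * Sketch of the intended calling pattern (an assumption; the actual
 * purge logic lives in the dlm thread code): a non-master node asks the
 * master to clear its refmap bit only once the lockres is completely
 * unused locally:
 *
 *	if (res->owner != dlm->node_num && __dlm_lockres_unused(res))
 *		dlm_drop_lockres_ref(dlm, res);
 *
 * The BUG() above fires when the master disagrees, because a lost
 * refmap bit would let the master free a lockres this node still uses.
 */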
2219
2220 int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2221                               void **ret_data)
2222 {
2223         struct dlm_ctxt *dlm = data;
2224         struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2225         struct dlm_lock_resource *res = NULL;
2226         char *name;
2227         unsigned int namelen;
2228         int ret = -EINVAL;
2229         u8 node;
2230         unsigned int hash;
2231         struct dlm_work_item *item;
2232         int cleared = 0;
2233         int dispatch = 0;
2234
2235         if (!dlm_grab(dlm))
2236                 return 0;
2237
2238         name = deref->name;
2239         namelen = deref->namelen;
2240         node = deref->node_idx;
2241
2242         if (namelen > DLM_LOCKID_NAME_MAX) {
2243                 mlog(ML_ERROR, "Invalid name length!");
2244                 goto done;
2245         }
2246         if (deref->node_idx >= O2NM_MAX_NODES) {
2247                 mlog(ML_ERROR, "Invalid node number: %u\n", node);
2248                 goto done;
2249         }
2250
2251         hash = dlm_lockid_hash(name, namelen);
2252
2253         spin_lock(&dlm->spinlock);
2254         res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2255         if (!res) {
2256                 spin_unlock(&dlm->spinlock);
2257                 mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2258                      dlm->name, namelen, name);
2259                 goto done;
2260         }
2261         spin_unlock(&dlm->spinlock);
2262
2263         spin_lock(&res->spinlock);
2264         if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2265                 dispatch = 1;
2266         else {
2267                 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2268                 if (test_bit(node, res->refmap)) {
2269                         dlm_lockres_clear_refmap_bit(node, res);
2270                         cleared = 1;
2271                 }
2272         }
2273         spin_unlock(&res->spinlock);
2274
2275         if (!dispatch) {
2276                 if (cleared)
2277                         dlm_lockres_calc_usage(dlm, res);
2278                 else {
2279                         mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2280                         "but it is already dropped!\n", dlm->name,
2281                         res->lockname.len, res->lockname.name, node);
2282                         dlm_print_one_lock_resource(res);
2283                 }
2284                 ret = 0;
2285                 goto done;
2286         }
2287
2288         item = kzalloc(sizeof(*item), GFP_NOFS);
2289         if (!item) {
2290                 ret = -ENOMEM;
2291                 mlog_errno(ret);
2292                 goto done;
2293         }
2294
2295         dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2296         item->u.dl.deref_res = res;
2297         item->u.dl.deref_node = node;
2298
2299         spin_lock(&dlm->work_lock);
2300         list_add_tail(&item->list, &dlm->work_list);
2301         spin_unlock(&dlm->work_lock);
2302
2303         queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2304         return 0;
2305
2306 done:
2307         if (res)
2308                 dlm_lockres_put(res);
2309         dlm_put(dlm);
2310
2311         return ret;
2312 }
2313
2314 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2315 {
2316         struct dlm_ctxt *dlm;
2317         struct dlm_lock_resource *res;
2318         u8 node;
2319         u8 cleared = 0;
2320
2321         dlm = item->dlm;
2322         res = item->u.dl.deref_res;
2323         node = item->u.dl.deref_node;
2324
2325         spin_lock(&res->spinlock);
2326         BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2327         if (test_bit(node, res->refmap)) {
2328                 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2329                 dlm_lockres_clear_refmap_bit(node, res);
2330                 cleared = 1;
2331         }
2332         spin_unlock(&res->spinlock);
2333
2334         if (cleared) {
2335                 mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2336                      dlm->name, res->lockname.len, res->lockname.name, node);
2337                 dlm_lockres_calc_usage(dlm, res);
2338         } else {
2339                 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2340                      "but it is already dropped!\n", dlm->name,
2341                      res->lockname.len, res->lockname.name, node);
2342                 dlm_print_one_lock_resource(res);
2343         }
2344
2345         dlm_lockres_put(res);
2346 }
2347
2348 /* Checks whether the lockres can be migrated. Returns 0 if yes, < 0
2349  * if not. If 0, numlocks is set to the number of locks in the lockres.
2350  */
2351 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2352                                       struct dlm_lock_resource *res,
2353                                       int *numlocks)
2354 {
2355         int ret;
2356         int i;
2357         int count = 0;
2358         struct list_head *queue;
2359         struct dlm_lock *lock;
2360
2361         assert_spin_locked(&res->spinlock);
2362
2363         ret = -EINVAL;
2364         if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2365                 mlog(0, "cannot migrate lockres with unknown owner!\n");
2366                 goto leave;
2367         }
2368
2369         if (res->owner != dlm->node_num) {
2370                 mlog(0, "cannot migrate lockres this node doesn't own!\n");
2371                 goto leave;
2372         }
2373
2374         ret = 0;
2375         queue = &res->granted;
2376         for (i = 0; i < 3; i++) {
2377                 list_for_each_entry(lock, queue, list) {
2378                         ++count;
2379                         if (lock->ml.node == dlm->node_num) {
2380                                 mlog(0, "found a lock owned by this node still "
2381                                      "on the %s queue!  will not migrate this "
2382                                      "lockres\n", (i == 0 ? "granted" :
2383                                                    (i == 1 ? "converting" :
2384                                                     "blocked")));
2385                                 ret = -ENOTEMPTY;
2386                                 goto leave;
2387                         }
2388                 }
2389                 queue++;
2390         }
2391
2392         *numlocks = count;
2393         mlog(0, "migrateable lockres having %d locks\n", *numlocks);
2394
2395 leave:
2396         return ret;
2397 }
2398
2399 /*
2400  * DLM_MIGRATE_LOCKRES
2401  */
2402
2403
2404 static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2405                                struct dlm_lock_resource *res,
2406                                u8 target)
2407 {
2408         struct dlm_master_list_entry *mle = NULL;
2409         struct dlm_master_list_entry *oldmle = NULL;
2410         struct dlm_migratable_lockres *mres = NULL;
2411         int ret = 0;
2412         const char *name;
2413         unsigned int namelen;
2414         int mle_added = 0;
2415         int numlocks;
2416         int wake = 0;
2417
2418         if (!dlm_grab(dlm))
2419                 return -EINVAL;
2420
2421         name = res->lockname.name;
2422         namelen = res->lockname.len;
2423
2424         mlog(0, "migrating %.*s to %u\n", namelen, name, target);
2425
2426         /*
2427          * ensure this lockres is a proper candidate for migration
2428          */
2429         spin_lock(&res->spinlock);
2430         ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
2431         if (ret < 0) {
2432                 spin_unlock(&res->spinlock);
2433                 goto leave;
2434         }
2435         spin_unlock(&res->spinlock);
2436
2437         /* no work to do */
2438         if (numlocks == 0) {
2439                 mlog(0, "no locks were found on this lockres! done!\n");
2440                 goto leave;
2441         }
2442
2443         /*
2444          * preallocate up front
2445          * if this fails, abort
2446          */
2447
2448         ret = -ENOMEM;
2449         mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
2450         if (!mres) {
2451                 mlog_errno(ret);
2452                 goto leave;
2453         }
2454
2455         mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
2456                                                                 GFP_NOFS);
2457         if (!mle) {
2458                 mlog_errno(ret);
2459                 goto leave;
2460         }
2461         ret = 0;
2462
2463         /*
2464          * find a node to migrate the lockres to
2465          */
2466
2467         mlog(0, "picking a migration node\n");
2468         spin_lock(&dlm->spinlock);
2469         /* pick a new node */
2470         if (target >= O2NM_MAX_NODES ||
2471             !test_bit(target, dlm->domain_map)) {
2472                 target = dlm_pick_migration_target(dlm, res);
2473         }
2474         mlog(0, "node %u chosen for migration\n", target);
2475
2476         if (target >= O2NM_MAX_NODES ||
2477             !test_bit(target, dlm->domain_map)) {
2478                 /* target chosen is not alive */
2479                 ret = -EINVAL;
2480         }
2481
2482         if (ret) {
2483                 spin_unlock(&dlm->spinlock);
2484                 goto fail;
2485         }
2486
2487         mlog(0, "continuing with target = %u\n", target);
2488
2489         /*
2490          * clear any existing master requests and
2491          * add the migration mle to the list
2492          */
2493         spin_lock(&dlm->master_lock);
2494         ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2495                                     namelen, target, dlm->node_num);
2496         spin_unlock(&dlm->master_lock);
2497         spin_unlock(&dlm->spinlock);
2498
2499         if (ret == -EEXIST) {
2500                 mlog(0, "another process is already migrating it\n");
2501                 goto fail;
2502         }
2503         mle_added = 1;
2504
2505         /*
2506          * set the MIGRATING flag and flush asts
2507          * if we fail after this we need to re-dirty the lockres
2508          */
2509         if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2510                 mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2511                      "the target went down.\n", res->lockname.len,
2512                      res->lockname.name, target);
2513                 spin_lock(&res->spinlock);
2514                 res->state &= ~DLM_LOCK_RES_MIGRATING;
2515                 wake = 1;
2516                 spin_unlock(&res->spinlock);
2517                 ret = -EINVAL;
2518         }
2519
2520 fail:
2521         if (oldmle) {
2522                 /* master is known, detach if not already detached */
2523                 dlm_mle_detach_hb_events(dlm, oldmle);
2524                 dlm_put_mle(oldmle);
2525         }
2526
2527         if (ret < 0) {
2528                 if (mle_added) {
2529                         dlm_mle_detach_hb_events(dlm, mle);
2530                         dlm_put_mle(mle);
2531                 } else if (mle) {
2532                         kmem_cache_free(dlm_mle_cache, mle);
2533                 }
2534                 goto leave;
2535         }
2536
2537         /*
2538          * at this point, we have a migration target, an mle
2539          * in the master list, and the MIGRATING flag set on
2540          * the lockres
2541          */
2542
2543         /* now that remote nodes are spinning on the MIGRATING flag,
2544          * ensure that all assert_master work is flushed. */
2545         flush_workqueue(dlm->dlm_worker);
2546
2547         /* get an extra reference on the mle.
2548          * otherwise the assert_master from the new
2549          * master will destroy this.
2550          * also, make sure that all callers of dlm_get_mle
2551          * take both dlm->spinlock and dlm->master_lock */
2552         spin_lock(&dlm->spinlock);
2553         spin_lock(&dlm->master_lock);
2554         dlm_get_mle_inuse(mle);
2555         spin_unlock(&dlm->master_lock);
2556         spin_unlock(&dlm->spinlock);
2557
2558         /* notify new node and send all lock state */
2559         /* call send_one_lockres with migration flag.
2560          * this serves as notice to the target node that a
2561          * migration is starting. */
2562         ret = dlm_send_one_lockres(dlm, res, mres, target,
2563                                    DLM_MRES_MIGRATION);
2564
2565         if (ret < 0) {
2566                 mlog(0, "migration to node %u failed with %d\n",
2567                      target, ret);
2568                 /* migration failed, detach and clean up mle */
2569                 dlm_mle_detach_hb_events(dlm, mle);
2570                 dlm_put_mle(mle);
2571                 dlm_put_mle_inuse(mle);
2572                 spin_lock(&res->spinlock);
2573                 res->state &= ~DLM_LOCK_RES_MIGRATING;
2574                 wake = 1;
2575                 spin_unlock(&res->spinlock);
2576                 goto leave;
2577         }
2578
2579         /* at this point, the target sends a message to all nodes,
2580          * (using dlm_do_migrate_request).  this node is skipped since
2581          * we had to put an mle in the list to begin the process.  this
2582          * node now waits for target to do an assert master.  this node
2583          * will be the last one notified, ensuring that the migration
2584          * is complete everywhere.  if the target dies while this is
2585          * going on, some nodes could potentially see the target as the
2586          * master, so it is important that my recovery finds the migration
2587          * mle and sets the master to UNKNOWN. */
2588
2589
2590         /* wait for new node to assert master */
2591         while (1) {
2592                 ret = wait_event_interruptible_timeout(mle->wq,
2593                                         (atomic_read(&mle->woken) == 1),
2594                                         msecs_to_jiffies(5000));
2595
2596                 if (ret >= 0) {
2597                         if (atomic_read(&mle->woken) == 1 ||
2598                             res->owner == target)
2599                                 break;
2600
2601                         mlog(0, "%s:%.*s: timed out during migration\n",
2602                              dlm->name, res->lockname.len, res->lockname.name);
2603                         /* avoid hang during shutdown when migrating lockres
2604                          * to a node which also goes down */
2605                         if (dlm_is_node_dead(dlm, target)) {
2606                                 mlog(0, "%s:%.*s: expected migration "
2607                                      "target %u is no longer up, restarting\n",
2608                                      dlm->name, res->lockname.len,
2609                                      res->lockname.name, target);
2610                                 ret = -EINVAL;
2611                                 /* migration failed, detach and clean up mle */
2612                                 dlm_mle_detach_hb_events(dlm, mle);
2613                                 dlm_put_mle(mle);
2614                                 dlm_put_mle_inuse(mle);
2615                                 spin_lock(&res->spinlock);
2616                                 res->state &= ~DLM_LOCK_RES_MIGRATING;
2617                                 wake = 1;
2618                                 spin_unlock(&res->spinlock);
2619                                 goto leave;
2620                         }
2621                 } else
2622                         mlog(0, "%s:%.*s: caught signal during migration\n",
2623                              dlm->name, res->lockname.len, res->lockname.name);
2624         }
2625
2626         /* all done, set the owner, clear the flag */
2627         spin_lock(&res->spinlock);
2628         dlm_set_lockres_owner(dlm, res, target);
2629         res->state &= ~DLM_LOCK_RES_MIGRATING;
2630         dlm_remove_nonlocal_locks(dlm, res);
2631         spin_unlock(&res->spinlock);
2632         wake_up(&res->wq);
2633
2634         /* master is known, detach if not already detached */
2635         dlm_mle_detach_hb_events(dlm, mle);
2636         dlm_put_mle_inuse(mle);
2637         ret = 0;
2638
2639         dlm_lockres_calc_usage(dlm, res);
2640
2641 leave:
2642         /* re-dirty the lockres if we failed */
2643         if (ret < 0)
2644                 dlm_kick_thread(dlm, res);
2645
2646         /* wake up waiters if the MIGRATING flag got set
2647          * but migration failed */
2648         if (wake)
2649                 wake_up(&res->wq);
2650
2651         /* TODO: cleanup */
2652         if (mres)
2653                 free_page((unsigned long)mres);
2654
2655         dlm_put(dlm);
2656
2657         mlog(0, "returning %d\n", ret);
2658         return ret;
2659 }
2660
2661 #define DLM_MIGRATION_RETRY_MS  100
2662
2663 /* Should be called only after beginning the domain leave process.
2664  * There should not be any remaining locks on nonlocal lock resources,
2665  * and there should be no local locks left on locally mastered resources.
2666  *
2667  * Called with the dlm spinlock held; may drop it to do migration, but
2668  * will re-acquire before exit.
2669  *
2670  * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */
2671 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2672 {
2673         int ret;
2674         int lock_dropped = 0;
2675         int numlocks;
2676
2677         spin_lock(&res->spinlock);
2678         if (res->owner != dlm->node_num) {
2679                 if (!__dlm_lockres_unused(res)) {
2680                         mlog(ML_ERROR, "%s:%.*s: this node is not master, "
2681                              "trying to free this but locks remain\n",
2682                              dlm->name, res->lockname.len, res->lockname.name);
2683                 }
2684                 spin_unlock(&res->spinlock);
2685                 goto leave;
2686         }
2687
2688         /* No need to migrate a lockres having no locks */
2689         ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
2690         if (ret >= 0 && numlocks == 0) {
2691                 spin_unlock(&res->spinlock);
2692                 goto leave;
2693         }
2694         spin_unlock(&res->spinlock);
2695
2696         /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2697         spin_unlock(&dlm->spinlock);
2698         lock_dropped = 1;
2699         while (1) {
2700                 ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
2701                 if (ret >= 0)
2702                         break;
2703                 if (ret == -ENOTEMPTY) {
2704                         mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
2705                                 res->lockname.len, res->lockname.name);
2706                         BUG();
2707                 }
2708
2709                 mlog(0, "lockres %.*s: migrate failed, "
2710                      "retrying\n", res->lockname.len,
2711                      res->lockname.name);
2712                 msleep(DLM_MIGRATION_RETRY_MS);
2713         }
2714         spin_lock(&dlm->spinlock);
2715 leave:
2716         return lock_dropped;
2717 }
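
/*
 * Illustrative only: a sketch of how a shutdown path might drive
 * dlm_empty_lockres().  The real caller lives elsewhere (dlmdomain.c);
 * this hypothetical walk elides the lockres refcounting for brevity.
 * The key point is the return value: if the dlm spinlock was dropped,
 * the hash bucket may have changed under us and the scan must restart.
 */
#if 0	/* example sketch, not compiled */
static void example_empty_all_lockres(struct dlm_ctxt *dlm)
{
	struct dlm_lock_resource *res;
	struct hlist_head *bucket;
	struct hlist_node *iter;
	int i, dropped;

	spin_lock(&dlm->spinlock);
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
redo_bucket:
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, iter, bucket, hash_node) {
			dropped = dlm_empty_lockres(dlm, res);
			if (dropped)
				/* dlm->spinlock was dropped and retaken;
				 * the bucket may have changed, restart it */
				goto redo_bucket;
		}
	}
	spin_unlock(&dlm->spinlock);
}
#endif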
2718
2719 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2720 {
2721         int ret;
2722         spin_lock(&dlm->ast_lock);
2723         spin_lock(&lock->spinlock);
2724         ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2725         spin_unlock(&lock->spinlock);
2726         spin_unlock(&dlm->ast_lock);
2727         return ret;
2728 }
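
/*
 * A minimal usage sketch for the helper above (an assumption, modeled
 * on how a caller would typically wait for all basts on a lock to
 * drain before proceeding):
 *
 *	wait_event(dlm->ast_wq, dlm_lock_basts_flushed(dlm, lock));
 *
 * Both dlm->ast_lock and the per-lock spinlock are taken so that the
 * list emptiness check and the bast_pending flag are read consistently.
 */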
2729
2730 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2731                                      struct dlm_lock_resource *res,
2732                                      u8 mig_target)
2733 {
2734         int can_proceed;
2735         spin_lock(&res->spinlock);
2736         can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2737         spin_unlock(&res->spinlock);
2738
2739         /* target has died, so make the caller break out of the
2740          * wait_event, but caller must recheck the domain_map */
2741         spin_lock(&dlm->spinlock);
2742         if (!test_bit(mig_target, dlm->domain_map))
2743                 can_proceed = 1;
2744         spin_unlock(&dlm->spinlock);
2745         return can_proceed;
2746 }
2747
2748 static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2749                                 struct dlm_lock_resource *res)
2750 {
2751         int ret;
2752         spin_lock(&res->spinlock);
2753         ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2754         spin_unlock(&res->spinlock);
2755         return ret;
2756 }
2757
2758
2759 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2760                                        struct dlm_lock_resource *res,
2761                                        u8 target)
2762 {
2763         int ret = 0;
2764
2765         mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2766                res->lockname.len, res->lockname.name, dlm->node_num,
2767                target);
2768         /* need to set MIGRATING flag on lockres.  this is done by
2769          * ensuring that all asts have been flushed for this lockres. */
2770         spin_lock(&res->spinlock);
2771         BUG_ON(res->migration_pending);
2772         res->migration_pending = 1;
2773         /* strategy is to reserve an extra ast then release
2774          * it below, letting the release do all of the work */
2775         __dlm_lockres_reserve_ast(res);
2776         spin_unlock(&res->spinlock);
2777
2778         /* now flush all the pending asts */
2779         dlm_kick_thread(dlm, res);
2780         /* before waiting on DIRTY, block processes which may
2781          * try to dirty the lockres before MIGRATING is set */
2782         spin_lock(&res->spinlock);
2783         BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2784         res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2785         spin_unlock(&res->spinlock);
2786         /* now wait on any pending asts and the DIRTY state */
2787         wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2788         dlm_lockres_release_ast(dlm, res);
2789
2790         mlog(0, "about to wait on migration_wq, dirty=%s\n",
2791                res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2792         /* if the extra ref we just put was the final one, this
2793          * will pass through immediately.  otherwise, we need to wait
2794          * for the last ast to finish. */
2795 again:
2796         ret = wait_event_interruptible_timeout(dlm->migration_wq,
2797                    dlm_migration_can_proceed(dlm, res, target),
2798                    msecs_to_jiffies(1000));
2799         if (ret < 0) {
2800                 mlog(0, "woken again: migrating? %s, dead? %s\n",
2801                        res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2802                        test_bit(target, dlm->domain_map) ? "no":"yes");
2803         } else {
2804                 mlog(0, "all is well: migrating? %s, dead? %s\n",
2805                        res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2806                        test_bit(target, dlm->domain_map) ? "no":"yes");
2807         }
2808         if (!dlm_migration_can_proceed(dlm, res, target)) {
2809                 mlog(0, "trying again...\n");
2810                 goto again;
2811         }
2812         /* now that we are sure the MIGRATING state is there, drop
2813          * the unneeded state which blocked threads trying to DIRTY */
2814         spin_lock(&res->spinlock);
2815         BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2816         BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2817         res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2818         spin_unlock(&res->spinlock);
2819
2820         /* did the target die in the meantime? */
2821         spin_lock(&dlm->spinlock);
2822         if (!test_bit(target, dlm->domain_map)) {
2823                 mlog(ML_ERROR, "aha. migration target %u just went down\n",
2824                      target);
2825                 ret = -EHOSTDOWN;
2826         }
2827         spin_unlock(&dlm->spinlock);
2828
2829         /*
2830          * at this point:
2831          *
2832          *   o the DLM_LOCK_RES_MIGRATING flag is set
2833          *   o there are no pending asts on this lockres
2834          *   o all processes trying to reserve an ast on this
2835          *     lockres must wait for the MIGRATING flag to clear
2836          */
2837         return ret;
2838 }
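
/*
 * For illustration, any path that dirties a lockres must honor the
 * BLOCK_DIRTY gate set above.  A simplified, hypothetical sketch of
 * that check (the real logic lives in the dlm thread code, and the
 * queueing details are elided here):
 */
#if 0	/* example sketch, not compiled */
static void example_dirty_lockres(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);

	/* while a migration is being set up or is under way, refuse
	 * to queue the lockres on the dirty list; the migration code
	 * (or the new master) will requeue it as needed */
	if (res->state & (DLM_LOCK_RES_MIGRATING |
			  DLM_LOCK_RES_BLOCK_DIRTY))
		return;

	if (!(res->state & DLM_LOCK_RES_DIRTY)) {
		res->state |= DLM_LOCK_RES_DIRTY;
		/* hypothetical: add res to dlm->dirty_list here and
		 * wake the dlm thread */
	}
}
#endif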
2839
2840 /* last step in the migration process.
2841  * original master calls this to free all of the dlm_lock
2842  * structures that used to be for other nodes. */
2843 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2844                                       struct dlm_lock_resource *res)
2845 {
2846         struct list_head *queue = &res->granted;
2847         int i, bit;
2848         struct dlm_lock *lock, *next;
2849
2850         assert_spin_locked(&res->spinlock);
2851
2852         BUG_ON(res->owner == dlm->node_num);
2853
2854         for (i = 0; i < 3; i++) {	/* granted, converting, blocked */
2855                 list_for_each_entry_safe(lock, next, queue, list) {
2856                         if (lock->ml.node != dlm->node_num) {
2857                                 mlog(0, "putting lock for node %u\n",
2858                                      lock->ml.node);
2859                                 /* be extra careful */
2860                                 BUG_ON(!list_empty(&lock->ast_list));
2861                                 BUG_ON(!list_empty(&lock->bast_list));
2862                                 BUG_ON(lock->ast_pending);
2863                                 BUG_ON(lock->bast_pending);
2864                                 dlm_lockres_clear_refmap_bit(lock->ml.node, res);
2865                                 list_del_init(&lock->list);
2866                                 dlm_lock_put(lock);
2867                                 /* In a normal unlock, we would have added a
2868                                  * DLM_UNLOCK_FREE_LOCK action. Force it. */
2869                                 dlm_lock_put(lock);
2870                         }
2871                 }
2872                 queue++;
2873         }
2874         bit = 0;
2875         while (1) {
2876                 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2877                 if (bit >= O2NM_MAX_NODES)
2878                         break;
2879                 /* do not clear the local node reference; if there is a
2880                  * process holding this, let it drop the ref itself */
2881                 if (bit != dlm->node_num) {
2882                         mlog(0, "%s:%.*s: node %u had a ref to this "
2883                              "migrating lockres, clearing\n", dlm->name,
2884                              res->lockname.len, res->lockname.name, bit);
2885                         dlm_lockres_clear_refmap_bit(bit, res);
2886                 }
2887                 bit++;
2888         }
2889 }
2890
2891 /* for now this is not too intelligent.  we will need stats
2892  * to make this do the right thing.  this just finds the
2893  * first lock on one of the queues and uses that node as
2894  * the target.  (a smarter picker is sketched after this function) */
2895 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
2896                                     struct dlm_lock_resource *res)
2897 {
2898         int i;
2899         struct list_head *queue = &res->granted;
2900         struct dlm_lock *lock;
2901         int nodenum;
2902
2903         assert_spin_locked(&dlm->spinlock);
2904
2905         spin_lock(&res->spinlock);
2906         for (i = 0; i < 3; i++) {	/* granted, converting, blocked */
2907                 list_for_each_entry(lock, queue, list) {
2908                         /* up to the caller to make sure this node
2909                          * is alive */
2910                         if (lock->ml.node != dlm->node_num) {
2911                                 spin_unlock(&res->spinlock);
2912                                 return lock->ml.node;
2913                         }
2914                 }
2915                 queue++;
2916         }
2917         spin_unlock(&res->spinlock);
2918         mlog(0, "have not found a suitable target yet! checking domain map\n");
2919
2920         /* ok now we're getting desperate.  pick anyone alive. */
2921         nodenum = -1;
2922         while (1) {
2923                 nodenum = find_next_bit(dlm->domain_map,
2924                                         O2NM_MAX_NODES, nodenum+1);
2925                 mlog(0, "found %d in domain map\n", nodenum);
2926                 if (nodenum >= O2NM_MAX_NODES)
2927                         break;
2928                 if (nodenum != dlm->node_num) {
2929                         mlog(0, "picking %d\n", nodenum);
2930                         return nodenum;
2931                 }
2932         }
2933
2934         mlog(0, "giving up.  no master to migrate to\n");
2935         return DLM_LOCK_RES_OWNER_UNKNOWN;
2936 }
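
/*
 * As the comment above notes, a smarter picker would be stats-driven.
 * Purely as a hypothetical sketch (nothing below exists in the dlm),
 * one heuristic is to migrate to the node holding the most locks on
 * the resource, so the busiest user becomes the master:
 */
#if 0	/* example sketch, not compiled */
static u8 example_pick_busiest_node(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res)
{
	struct list_head *queue = &res->granted;
	struct dlm_lock *lock;
	/* stack-heavy; a real version would keep counters elsewhere */
	unsigned int nr_locks[O2NM_MAX_NODES] = { 0 };
	int i, best = DLM_LOCK_RES_OWNER_UNKNOWN;

	assert_spin_locked(&res->spinlock);

	for (i = 0; i < 3; i++, queue++)	/* granted, converting, blocked */
		list_for_each_entry(lock, queue, list)
			if (lock->ml.node != dlm->node_num)
				nr_locks[lock->ml.node]++;

	for (i = 0; i < O2NM_MAX_NODES; i++)
		if (nr_locks[i] &&
		    (best == DLM_LOCK_RES_OWNER_UNKNOWN ||
		     nr_locks[i] > nr_locks[best]))
			best = i;

	return best;
}
#endif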
2937
2938
2939
2940 /* this is called by the new master once all lockres
2941  * data has been received */
2942 static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2943                                   struct dlm_lock_resource *res,
2944                                   u8 master, u8 new_master,
2945                                   struct dlm_node_iter *iter)
2946 {
2947         struct dlm_migrate_request migrate;
2948         int ret, skip, status = 0;
2949         int nodenum;
2950
2951         memset(&migrate, 0, sizeof(migrate));
2952         migrate.namelen = res->lockname.len;
2953         memcpy(migrate.name, res->lockname.name, migrate.namelen);
2954         migrate.new_master = new_master;
2955         migrate.master = master;
2956
2957         ret = 0;
2958
2959         /* send message to all nodes, except the master and myself */
2960         while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
2961                 if (nodenum == master ||
2962                     nodenum == new_master)
2963                         continue;
2964
2965                 /* We may race a node exiting the domain; if it has, skip it. */
2966                 spin_lock(&dlm->spinlock);
2967                 skip = (!test_bit(nodenum, dlm->domain_map));
2968                 spin_unlock(&dlm->spinlock);
2969                 if (skip) {
2970                         clear_bit(nodenum, iter->node_map);
2971                         continue;
2972                 }
2973
2974                 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
2975                                          &migrate, sizeof(migrate), nodenum,
2976                                          &status);
2977                 if (ret < 0) {
2978                         mlog(0, "migrate_request returned %d!\n", ret);
2979                         if (!dlm_is_host_down(ret)) {
2980                                 mlog(ML_ERROR, "unhandled error=%d!\n", ret);
2981                                 BUG();
2982                         }
2983                         clear_bit(nodenum, iter->node_map);
2984                         ret = 0;
2985                 } else if (status < 0) {
2986                         mlog(0, "migrate request (node %u) returned %d!\n",
2987                              nodenum, status);
2988                         ret = status;
2989                 } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
2990                         /* during the migration request we short-circuited
2991                          * the mastery of the lockres.  make sure we have
2992                          * a mastery ref for nodenum */
2993                         mlog(0, "%s:%.*s: need ref for node %u\n",
2994                              dlm->name, res->lockname.len, res->lockname.name,
2995                              nodenum);
2996                         spin_lock(&res->spinlock);
2997                         dlm_lockres_set_refmap_bit(nodenum, res);
2998                         spin_unlock(&res->spinlock);
2999                 }
3000         }
3001
3002         if (ret < 0)
3003                 mlog_errno(ret);
3004
3005         mlog(0, "returning ret=%d\n", ret);
3006         return ret;
3007 }
3008
3009
3010 /* if there is an existing mle for this lockres, we now know who the master is
3011  * (the one who sent us *this* message), so we can clear it up right away.
3012  * since the process that put the mle on the list still has a reference to it,
3013  * we can unhash it now, set the master and wake the process.  as a result,
3014  * we will have no mle in the list to start with.  now we can add an mle for
3015  * the migration, and it should be the only one found by anyone scanning the
3016  * list.  */
3017 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3018                                 void **ret_data)
3019 {
3020         struct dlm_ctxt *dlm = data;
3021         struct dlm_lock_resource *res = NULL;
3022         struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
3023         struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
3024         const char *name;
3025         unsigned int namelen, hash;
3026         int ret = 0;
3027
3028         if (!dlm_grab(dlm))
3029                 return -EINVAL;
3030
3031         name = migrate->name;
3032         namelen = migrate->namelen;
3033         hash = dlm_lockid_hash(name, namelen);
3034
3035         /* preallocate.. if this fails, abort */
3036         mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
3037                                                          GFP_NOFS);
3038
3039         if (!mle) {
3040                 ret = -ENOMEM;
3041                 goto leave;
3042         }
3043
3044         /* check for pre-existing lock */
3045         spin_lock(&dlm->spinlock);
3046         res = __dlm_lookup_lockres(dlm, name, namelen, hash);
3047         spin_lock(&dlm->master_lock);
3048
3049         if (res) {
3050                 spin_lock(&res->spinlock);
3051                 if (res->state & DLM_LOCK_RES_RECOVERING) {
3052                         /* if all is working ok, this can only mean that we got
3053                          * a migrate request from a node that we now see as
3054                          * dead.  what can we do here?  drop it to the floor? */
3055                         spin_unlock(&res->spinlock);
3056                         mlog(ML_ERROR, "Got a migrate request, but the "
3057                              "lockres is marked as recovering!");
3058                         kmem_cache_free(dlm_mle_cache, mle);
3059                         ret = -EINVAL; /* need a better solution */
3060                         goto unlock;
3061                 }
3062                 res->state |= DLM_LOCK_RES_MIGRATING;
3063                 spin_unlock(&res->spinlock);
3064         }
3065
3066         /* ignore status.  only nonzero status would BUG. */
3067         ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3068                                     name, namelen,
3069                                     migrate->new_master,
3070                                     migrate->master);
3071
3072 unlock:
3073         spin_unlock(&dlm->master_lock);
3074         spin_unlock(&dlm->spinlock);
3075
3076         if (oldmle) {
3077                 /* master is known, detach if not already detached */
3078                 dlm_mle_detach_hb_events(dlm, oldmle);
3079                 dlm_put_mle(oldmle);
3080         }
3081
3082         if (res)
3083                 dlm_lockres_put(res);
3084 leave:
3085         dlm_put(dlm);
3086         return ret;
3087 }
3088
3089 /* must be called while holding dlm->spinlock and dlm->master_lock.
3090  * when adding a migration mle, we can clear any other mles
3091  * in the master list because we know with certainty that
3092  * the master is "master".  so we remove any old mle from
3093  * the list after setting its master field, and then add
3094  * the new migration mle.  this way we hold to the rule
3095  * of having only one mle for a given lock name at all times. */
3096 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3097                                  struct dlm_lock_resource *res,
3098                                  struct dlm_master_list_entry *mle,
3099                                  struct dlm_master_list_entry **oldmle,
3100                                  const char *name, unsigned int namelen,
3101                                  u8 new_master, u8 master)
3102 {
3103         int found;
3104         int ret = 0;
3105
3106         *oldmle = NULL;
3107
3108         mlog_entry_void();
3109
3110         assert_spin_locked(&dlm->spinlock);
3111         assert_spin_locked(&dlm->master_lock);
3112
3113         /* caller is responsible for any ref taken here on oldmle */
3114         found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
3115         if (found) {
3116                 struct dlm_master_list_entry *tmp = *oldmle;
3117                 spin_lock(&tmp->spinlock);
3118                 if (tmp->type == DLM_MLE_MIGRATION) {
3119                         if (master == dlm->node_num) {
3120                                 /* ah another process raced me to it */
3121                                 mlog(0, "tried to migrate %.*s, but some "
3122                                      "process beat me to it\n",
3123                                      namelen, name);
3124                                 ret = -EEXIST;
3125                         } else {
3126                                 /* bad.  2 NODES are trying to migrate! */
3127                                 mlog(ML_ERROR, "migration error  mle: "
3128                                      "master=%u new_master=%u // request: "
3129                                      "master=%u new_master=%u // "
3130                                      "lockres=%.*s\n",
3131                                      tmp->master, tmp->new_master,
3132                                      master, new_master,
3133                                      namelen, name);
3134                                 BUG();
3135                         }
3136                 } else {
3137                         /* this is essentially what assert_master does */
3138                         tmp->master = master;
3139                         atomic_set(&tmp->woken, 1);
3140                         wake_up(&tmp->wq);
3141                         /* remove it so that only one mle will be found */
3142                         __dlm_unlink_mle(dlm, tmp);
3143                         __dlm_mle_detach_hb_events(dlm, tmp);
3144                         ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3145                         mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3146                             "telling master to get ref for cleared out mle "
3147                             "during migration\n", dlm->name, namelen, name,
3148                             master, new_master);
3149                 }
3150                 spin_unlock(&tmp->spinlock);
3151         }
3152
3153         /* now add a migration mle to the tail of the list */
3154         dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3155         mle->new_master = new_master;
3156         /* the new master will be sending an assert master for this.
3157          * at that point we will get the refmap reference */
3158         mle->master = master;
3159         /* do this for consistency with other mle types */
3160         set_bit(new_master, mle->maybe_map);
3161         __dlm_insert_mle(dlm, mle);
3162
3163         return ret;
3164 }
3165
3166 /*
3167  * Sets the owner of the lockres associated with the mle to UNKNOWN
3168  */
3169 static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
3170                                         struct dlm_master_list_entry *mle)
3171 {
3172         struct dlm_lock_resource *res;
3173
3174         /* Find the lockres associated with the mle and set its owner to UNK */
3175         res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
3176                                    mle->mnamehash);
3177         if (res) {
3178                 spin_unlock(&dlm->master_lock);
3179
3180                 /* move lockres onto recovery list */
3181                 spin_lock(&res->spinlock);
3182                 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
3183                 dlm_move_lockres_to_recovery_list(dlm, res);
3184                 spin_unlock(&res->spinlock);
3185                 dlm_lockres_put(res);
3186
3187                 /* about to get rid of mle, detach from heartbeat */
3188                 __dlm_mle_detach_hb_events(dlm, mle);
3189
3190                 /* dump the mle */
3191                 spin_lock(&dlm->master_lock);
3192                 __dlm_put_mle(mle);
3193                 spin_unlock(&dlm->master_lock);
3194         }
3195
3196         return res;
3197 }
3198
3199 static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
3200                                     struct dlm_master_list_entry *mle)
3201 {
3202         __dlm_mle_detach_hb_events(dlm, mle);
3203
3204         spin_lock(&mle->spinlock);
3205         __dlm_unlink_mle(dlm, mle);
3206         atomic_set(&mle->woken, 1);
3207         spin_unlock(&mle->spinlock);
3208
3209         wake_up(&mle->wq);
3210 }
3211
3212 static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
3213                                 struct dlm_master_list_entry *mle, u8 dead_node)
3214 {
3215         int bit;
3216
3217         BUG_ON(mle->type != DLM_MLE_BLOCK);
3218
3219         spin_lock(&mle->spinlock);
3220         bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
3221         if (bit != dead_node) {
3222                 mlog(0, "mle found, but dead node %u would not have been "
3223                      "master\n", dead_node);
3224                 spin_unlock(&mle->spinlock);
3225         } else {
3226                 /* Must drop the refcount by one since the assert_master will
3227                  * never arrive. This may result in the mle being unlinked and
3228                  * freed, but there may still be a process waiting in the
3229                  * dlmlock path, which is fine. */
3230                 mlog(0, "node %u was expected master\n", dead_node);
3231                 atomic_set(&mle->woken, 1);
3232                 spin_unlock(&mle->spinlock);
3233                 wake_up(&mle->wq);
3234
3235                 /* Do not need events any longer, so detach from heartbeat */
3236                 __dlm_mle_detach_hb_events(dlm, mle);
3237                 __dlm_put_mle(mle);
3238         }
3239 }
3240
3241 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
3242 {
3243         struct dlm_master_list_entry *mle;
3244         struct dlm_lock_resource *res;
3245         struct hlist_head *bucket;
3246         struct hlist_node *list;
3247         unsigned int i;
3248
3249         mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
3250 top:
3251         assert_spin_locked(&dlm->spinlock);
3252
3253         /* clean the master list */
3254         spin_lock(&dlm->master_lock);
3255         for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3256                 bucket = dlm_master_hash(dlm, i);
3257                 hlist_for_each(list, bucket) {
3258                         mle = hlist_entry(list, struct dlm_master_list_entry,
3259                                           master_hash_node);
3260
3261                         BUG_ON(mle->type != DLM_MLE_BLOCK &&
3262                                mle->type != DLM_MLE_MASTER &&
3263                                mle->type != DLM_MLE_MIGRATION);
3264
3265                         /* MASTER mles are initiated locally. The waiting
3266                          * process will notice the node map change shortly.
3267                          * Let that happen as normal. */
3268                         if (mle->type == DLM_MLE_MASTER)
3269                                 continue;
3270
3271                         /* BLOCK mles are initiated by other nodes. Need to
3272                          * clean up if the dead node would have been the
3273                          * master. */
3274                         if (mle->type == DLM_MLE_BLOCK) {
3275                                 dlm_clean_block_mle(dlm, mle, dead_node);
3276                                 continue;
3277                         }
3278
3279                         /* Everything else is a MIGRATION mle */
3280
3281                         /* The rule for MIGRATION mles is that the master
3282                          * becomes UNKNOWN if *either* the original or the new
3283                          * master dies. All UNKNOWN lockres' are sent to
3284                          * whichever node becomes the recovery master. The new
3285                          * master is responsible for determining if there is
3286                          * still a master for this lockres, or if it needs to
3287                          * take over mastery. Either way, this node should
3288                          * expect another message to resolve this. */
3289
3290                         if (mle->master != dead_node &&
3291                             mle->new_master != dead_node)
3292                                 continue;
3293
3294                         /* If we have reached this point, this mle needs to be
3295                          * removed from the list and freed. */
3296                         dlm_clean_migration_mle(dlm, mle);
3297
3298                         mlog(0, "%s: node %u died during migration from "
3299                              "%u to %u!\n", dlm->name, dead_node, mle->master,
3300                              mle->new_master);
3301
3302                         /* If we find a lockres associated with the mle, we've
3303                          * hit this rare case that messes up our lock ordering.
3304                          * If so, we need to drop the master lock so that we can
3305                          * take the lockres lock, meaning that we will have to
3306                          * restart from the head of list. */
3307                         res = dlm_reset_mleres_owner(dlm, mle);
3308                         if (res)
3309                                 /* restart */
3310                                 goto top;
3311
3312                         /* This may be the last reference */
3313                         __dlm_put_mle(mle);
3314                 }
3315         }
3316         spin_unlock(&dlm->master_lock);
3317 }
3318
3319 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3320                          u8 old_master)
3321 {
3322         struct dlm_node_iter iter;
3323         int ret = 0;
3324
3325         spin_lock(&dlm->spinlock);
3326         dlm_node_iter_init(dlm->domain_map, &iter);
3327         clear_bit(old_master, iter.node_map);
3328         clear_bit(dlm->node_num, iter.node_map);
3329         spin_unlock(&dlm->spinlock);
3330
3331         /* ownership of the lockres is changing.  account for the
3332          * mastery reference here since old_master will briefly have
3333          * a reference after the migration completes */
3334         spin_lock(&res->spinlock);
3335         dlm_lockres_set_refmap_bit(old_master, res);
3336         spin_unlock(&res->spinlock);
3337
3338         mlog(0, "now time to do a migrate request to other nodes\n");
3339         ret = dlm_do_migrate_request(dlm, res, old_master,
3340                                      dlm->node_num, &iter);
3341         if (ret < 0) {
3342                 mlog_errno(ret);
3343                 goto leave;
3344         }
3345
3346         mlog(0, "doing assert master of %.*s to all except the original node\n",
3347              res->lockname.len, res->lockname.name);
3348         /* this call now finishes out the nodemap
3349          * even if one or more nodes die */
3350         ret = dlm_do_assert_master(dlm, res, iter.node_map,
3351                                    DLM_ASSERT_MASTER_FINISH_MIGRATION);
3352         if (ret < 0) {
3353                 /* no longer need to retry.  all living nodes contacted. */
3354                 mlog_errno(ret);
3355                 ret = 0;
3356         }
3357
3358         memset(iter.node_map, 0, sizeof(iter.node_map));
3359         set_bit(old_master, iter.node_map);
3360         mlog(0, "doing assert master of %.*s back to %u\n",
3361              res->lockname.len, res->lockname.name, old_master);
3362         ret = dlm_do_assert_master(dlm, res, iter.node_map,
3363                                    DLM_ASSERT_MASTER_FINISH_MIGRATION);
3364         if (ret < 0) {
3365                 mlog(0, "assert master to original master failed "
3366                      "with %d.\n", ret);
3367                 /* the only nonzero status here would be because of
3368                  * a dead original node.  we're done. */
3369                 ret = 0;
3370         }
3371
3372         /* all done, set the owner, clear the flag */
3373         spin_lock(&res->spinlock);
3374         dlm_set_lockres_owner(dlm, res, dlm->node_num);
3375         res->state &= ~DLM_LOCK_RES_MIGRATING;
3376         spin_unlock(&res->spinlock);
3377         /* re-dirty it on the new master */
3378         dlm_kick_thread(dlm, res);
3379         wake_up(&res->wq);
3380 leave:
3381         return ret;
3382 }
3383
3384 /*
3385  * LOCKRES AST REFCOUNT
3386  * this is integral to migration
3387  */
3388
3389 /* for future intent to call an ast, reserve one ahead of time.
3390  * this should be called only after waiting on the lockres
3391  * with dlm_wait_on_lockres, and while still holding the
3392  * spinlock after the call. */
3393 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3394 {
3395         assert_spin_locked(&res->spinlock);
3396         if (res->state & DLM_LOCK_RES_MIGRATING) {
3397                 __dlm_print_one_lock_resource(res);
3398         }
3399         BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3400
3401         atomic_inc(&res->asts_reserved);
3402 }
3403
3404 /*
3405  * used to drop the reserved ast, either because it went unused,
3406  * or because the ast/bast was actually called.
3407  *
3408  * also, if there is a pending migration on this lockres,
3409  * and this was the last pending ast on the lockres,
3410  * atomically set the MIGRATING flag before we drop the lock.
3411  * this is how we ensure that migration can proceed with no
3412  * asts in progress.  note that it is ok if the state of the
3413  * queues is such that a lock should be granted in the future
3414  * or that a bast should be fired, because the new master will
3415  * shuffle the lists on this lockres as soon as it is migrated.
3416  */
3417 void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3418                              struct dlm_lock_resource *res)
3419 {
3420         if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3421                 return;
3422
3423         if (!res->migration_pending) {
3424                 spin_unlock(&res->spinlock);
3425                 return;
3426         }
3427
3428         BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3429         res->migration_pending = 0;
3430         res->state |= DLM_LOCK_RES_MIGRATING;
3431         spin_unlock(&res->spinlock);
3432         wake_up(&res->wq);
3433         wake_up(&dlm->migration_wq);
3434 }
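
/*
 * A minimal usage sketch of the reservation protocol described above
 * (hypothetical caller; the real ast-queuing paths live in the dlm
 * thread and the convert/unlock code):
 */
#if 0	/* example sketch, not compiled */
static void example_queue_one_ast(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	/* BUGs if MIGRATING is already set; the caller must have
	 * waited on the lockres first */
	__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* ... queue and deliver the ast or bast here ... */

	/* dropping the last reserved ast is what lets a pending
	 * migration atomically set the MIGRATING flag */
	dlm_lockres_release_ast(dlm, res);
}
#endif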