drbd: log UUIDs whenever they change
[pandora-kernel.git] / drivers / block / drbd / drbd_main.c
1 /*
2    drbd.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11    from Logicworks, Inc. for making SDP replication support possible.
12
13    drbd is free software; you can redistribute it and/or modify
14    it under the terms of the GNU General Public License as published by
15    the Free Software Foundation; either version 2, or (at your option)
16    any later version.
17
18    drbd is distributed in the hope that it will be useful,
19    but WITHOUT ANY WARRANTY; without even the implied warranty of
20    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21    GNU General Public License for more details.
22
23    You should have received a copy of the GNU General Public License
24    along with drbd; see the file COPYING.  If not, write to
25    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26
27  */
28
29 #include <linux/module.h>
30 #include <linux/drbd.h>
31 #include <asm/uaccess.h>
32 #include <asm/types.h>
33 #include <net/sock.h>
34 #include <linux/ctype.h>
35 #include <linux/mutex.h>
36 #include <linux/fs.h>
37 #include <linux/file.h>
38 #include <linux/proc_fs.h>
39 #include <linux/init.h>
40 #include <linux/mm.h>
41 #include <linux/memcontrol.h>
42 #include <linux/mm_inline.h>
43 #include <linux/slab.h>
44 #include <linux/random.h>
45 #include <linux/reboot.h>
46 #include <linux/notifier.h>
47 #include <linux/kthread.h>
48
49 #define __KERNEL_SYSCALLS__
50 #include <linux/unistd.h>
51 #include <linux/vmalloc.h>
52
53 #include <linux/drbd_limits.h>
54 #include "drbd_int.h"
55 #include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
56
57 #include "drbd_vli.h"
58
59 struct after_state_chg_work {
60         struct drbd_work w;
61         union drbd_state os;
62         union drbd_state ns;
63         enum chg_state_flags flags;
64         struct completion *done;
65 };
66
67 static DEFINE_MUTEX(drbd_main_mutex);
68 int drbdd_init(struct drbd_thread *);
69 int drbd_worker(struct drbd_thread *);
70 int drbd_asender(struct drbd_thread *);
71
72 int drbd_init(void);
73 static int drbd_open(struct block_device *bdev, fmode_t mode);
74 static int drbd_release(struct gendisk *gd, fmode_t mode);
75 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
76 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
77                            union drbd_state ns, enum chg_state_flags flags);
78 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
79 static void md_sync_timer_fn(unsigned long data);
80 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
81 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
82
83 MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
84               "Lars Ellenberg <lars@linbit.com>");
85 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
86 MODULE_VERSION(REL_VERSION);
87 MODULE_LICENSE("GPL");
88 MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
89                  __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
90 MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
91
92 #include <linux/moduleparam.h>
93 /* allow_open_on_secondary */
94 MODULE_PARM_DESC(allow_oos, "DONT USE!");
95 /* thanks to these macros, if compiled into the kernel (not-module),
96  * this becomes the boot parameter drbd.minor_count */
97 module_param(minor_count, uint, 0444);
98 module_param(disable_sendpage, bool, 0644);
99 module_param(allow_oos, bool, 0);
100 module_param(cn_idx, uint, 0444);
101 module_param(proc_details, int, 0644);
102
103 #ifdef CONFIG_DRBD_FAULT_INJECTION
104 int enable_faults;
105 int fault_rate;
106 static int fault_count;
107 int fault_devs;
108 /* bitmap of enabled faults */
109 module_param(enable_faults, int, 0664);
110 /* fault rate % value - applies to all enabled faults */
111 module_param(fault_rate, int, 0664);
112 /* count of faults inserted */
113 module_param(fault_count, int, 0664);
114 /* bitmap of devices to insert faults on */
115 module_param(fault_devs, int, 0644);
116 #endif
117
118 /* module parameter, defined */
119 unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
120 int disable_sendpage;
121 int allow_oos;
122 unsigned int cn_idx = CN_IDX_DRBD;
123 int proc_details;       /* Detail level in proc drbd*/
124
125 /* Module parameter for setting the user mode helper program
126  * to run. Default is /sbin/drbdadm */
127 char usermode_helper[80] = "/sbin/drbdadm";
128
129 module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
130
131 /* in 2.6.x, our device mapping and config info contains our virtual gendisks
132  * as member "struct gendisk *vdisk;"
133  */
134 struct drbd_conf **minor_table;
135
136 struct kmem_cache *drbd_request_cache;
137 struct kmem_cache *drbd_ee_cache;       /* epoch entries */
138 struct kmem_cache *drbd_bm_ext_cache;   /* bitmap extents */
139 struct kmem_cache *drbd_al_ext_cache;   /* activity log extents */
140 mempool_t *drbd_request_mempool;
141 mempool_t *drbd_ee_mempool;
142
143 /* I do not use a standard mempool, because:
144    1) I want to hand out the pre-allocated objects first.
145    2) I want to be able to interrupt sleeping allocation with a signal.
146    Note: This is a single linked list, the next pointer is the private
147          member of struct page.
148  */
149 struct page *drbd_pp_pool;
150 spinlock_t   drbd_pp_lock;
151 int          drbd_pp_vacant;
152 wait_queue_head_t drbd_pp_wait;
153
154 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
155
156 static const struct block_device_operations drbd_ops = {
157         .owner =   THIS_MODULE,
158         .open =    drbd_open,
159         .release = drbd_release,
160 };
161
162 #define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
163
164 #ifdef __CHECKER__
165 /* When checking with sparse, and this is an inline function, sparse will
166    give tons of false positives. When this is a real functions sparse works.
167  */
168 int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
169 {
170         int io_allowed;
171
172         atomic_inc(&mdev->local_cnt);
173         io_allowed = (mdev->state.disk >= mins);
174         if (!io_allowed) {
175                 if (atomic_dec_and_test(&mdev->local_cnt))
176                         wake_up(&mdev->misc_wait);
177         }
178         return io_allowed;
179 }
180
181 #endif
182
183 /**
184  * DOC: The transfer log
185  *
186  * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
187  * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
188  * of the list. There is always at least one &struct drbd_tl_epoch object.
189  *
190  * Each &struct drbd_tl_epoch has a circular double linked list of requests
191  * attached.
192  */
193 static int tl_init(struct drbd_conf *mdev)
194 {
195         struct drbd_tl_epoch *b;
196
197         /* during device minor initialization, we may well use GFP_KERNEL */
198         b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
199         if (!b)
200                 return 0;
201         INIT_LIST_HEAD(&b->requests);
202         INIT_LIST_HEAD(&b->w.list);
203         b->next = NULL;
204         b->br_number = 4711;
205         b->n_writes = 0;
206         b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
207
208         mdev->oldest_tle = b;
209         mdev->newest_tle = b;
210         INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
211
212         mdev->tl_hash = NULL;
213         mdev->tl_hash_s = 0;
214
215         return 1;
216 }
217
218 static void tl_cleanup(struct drbd_conf *mdev)
219 {
220         D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
221         D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
222         kfree(mdev->oldest_tle);
223         mdev->oldest_tle = NULL;
224         kfree(mdev->unused_spare_tle);
225         mdev->unused_spare_tle = NULL;
226         kfree(mdev->tl_hash);
227         mdev->tl_hash = NULL;
228         mdev->tl_hash_s = 0;
229 }
230
231 /**
232  * _tl_add_barrier() - Adds a barrier to the transfer log
233  * @mdev:       DRBD device.
234  * @new:        Barrier to be added before the current head of the TL.
235  *
236  * The caller must hold the req_lock.
237  */
238 void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
239 {
240         struct drbd_tl_epoch *newest_before;
241
242         INIT_LIST_HEAD(&new->requests);
243         INIT_LIST_HEAD(&new->w.list);
244         new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
245         new->next = NULL;
246         new->n_writes = 0;
247
248         newest_before = mdev->newest_tle;
249         /* never send a barrier number == 0, because that is special-cased
250          * when using TCQ for our write ordering code */
251         new->br_number = (newest_before->br_number+1) ?: 1;
252         if (mdev->newest_tle != new) {
253                 mdev->newest_tle->next = new;
254                 mdev->newest_tle = new;
255         }
256 }
257
258 /**
259  * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
260  * @mdev:       DRBD device.
261  * @barrier_nr: Expected identifier of the DRBD write barrier packet.
262  * @set_size:   Expected number of requests before that barrier.
263  *
264  * In case the passed barrier_nr or set_size does not match the oldest
265  * &struct drbd_tl_epoch objects this function will cause a termination
266  * of the connection.
267  */
268 void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
269                        unsigned int set_size)
270 {
271         struct drbd_tl_epoch *b, *nob; /* next old barrier */
272         struct list_head *le, *tle;
273         struct drbd_request *r;
274
275         spin_lock_irq(&mdev->req_lock);
276
277         b = mdev->oldest_tle;
278
279         /* first some paranoia code */
280         if (b == NULL) {
281                 dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
282                         barrier_nr);
283                 goto bail;
284         }
285         if (b->br_number != barrier_nr) {
286                 dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
287                         barrier_nr, b->br_number);
288                 goto bail;
289         }
290         if (b->n_writes != set_size) {
291                 dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
292                         barrier_nr, set_size, b->n_writes);
293                 goto bail;
294         }
295
296         /* Clean up list of requests processed during current epoch */
297         list_for_each_safe(le, tle, &b->requests) {
298                 r = list_entry(le, struct drbd_request, tl_requests);
299                 _req_mod(r, barrier_acked);
300         }
301         /* There could be requests on the list waiting for completion
302            of the write to the local disk. To avoid corruptions of
303            slab's data structures we have to remove the lists head.
304
305            Also there could have been a barrier ack out of sequence, overtaking
306            the write acks - which would be a bug and violating write ordering.
307            To not deadlock in case we lose connection while such requests are
308            still pending, we need some way to find them for the
309            _req_mode(connection_lost_while_pending).
310
311            These have been list_move'd to the out_of_sequence_requests list in
312            _req_mod(, barrier_acked) above.
313            */
314         list_del_init(&b->requests);
315
316         nob = b->next;
317         if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
318                 _tl_add_barrier(mdev, b);
319                 if (nob)
320                         mdev->oldest_tle = nob;
321                 /* if nob == NULL b was the only barrier, and becomes the new
322                    barrier. Therefore mdev->oldest_tle points already to b */
323         } else {
324                 D_ASSERT(nob != NULL);
325                 mdev->oldest_tle = nob;
326                 kfree(b);
327         }
328
329         spin_unlock_irq(&mdev->req_lock);
330         dec_ap_pending(mdev);
331
332         return;
333
334 bail:
335         spin_unlock_irq(&mdev->req_lock);
336         drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
337 }
338
339
340 /* In C_AHEAD mode only out_of_sync packets are sent for requests. Detach
341  * those requests from the newsest barrier when changing to an other cstate.
342  *
343  * That headless list vanishes when the last request finished its write or
344  * send out_of_sync packet.  */
345 static void tl_forget(struct drbd_conf *mdev)
346 {
347         struct drbd_tl_epoch *b;
348
349         if (test_bit(CREATE_BARRIER, &mdev->flags))
350                 return;
351
352         b = mdev->newest_tle;
353         list_del(&b->requests);
354         _tl_add_barrier(mdev, b);
355 }
356
357 /**
358  * _tl_restart() - Walks the transfer log, and applies an action to all requests
359  * @mdev:       DRBD device.
360  * @what:       The action/event to perform with all request objects
361  *
362  * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
363  * restart_frozen_disk_io.
364  */
365 static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
366 {
367         struct drbd_tl_epoch *b, *tmp, **pn;
368         struct list_head *le, *tle, carry_reads;
369         struct drbd_request *req;
370         int rv, n_writes, n_reads;
371
372         b = mdev->oldest_tle;
373         pn = &mdev->oldest_tle;
374         while (b) {
375                 n_writes = 0;
376                 n_reads = 0;
377                 INIT_LIST_HEAD(&carry_reads);
378                 list_for_each_safe(le, tle, &b->requests) {
379                         req = list_entry(le, struct drbd_request, tl_requests);
380                         rv = _req_mod(req, what);
381
382                         n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
383                         n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
384                 }
385                 tmp = b->next;
386
387                 if (n_writes) {
388                         if (what == resend) {
389                                 b->n_writes = n_writes;
390                                 if (b->w.cb == NULL) {
391                                         b->w.cb = w_send_barrier;
392                                         inc_ap_pending(mdev);
393                                         set_bit(CREATE_BARRIER, &mdev->flags);
394                                 }
395
396                                 drbd_queue_work(&mdev->data.work, &b->w);
397                         }
398                         pn = &b->next;
399                 } else {
400                         if (n_reads)
401                                 list_add(&carry_reads, &b->requests);
402                         /* there could still be requests on that ring list,
403                          * in case local io is still pending */
404                         list_del(&b->requests);
405
406                         /* dec_ap_pending corresponding to queue_barrier.
407                          * the newest barrier may not have been queued yet,
408                          * in which case w.cb is still NULL. */
409                         if (b->w.cb != NULL)
410                                 dec_ap_pending(mdev);
411
412                         if (b == mdev->newest_tle) {
413                                 /* recycle, but reinit! */
414                                 D_ASSERT(tmp == NULL);
415                                 INIT_LIST_HEAD(&b->requests);
416                                 list_splice(&carry_reads, &b->requests);
417                                 INIT_LIST_HEAD(&b->w.list);
418                                 b->w.cb = NULL;
419                                 b->br_number = net_random();
420                                 b->n_writes = 0;
421
422                                 *pn = b;
423                                 break;
424                         }
425                         *pn = tmp;
426                         kfree(b);
427                 }
428                 b = tmp;
429                 list_splice(&carry_reads, &b->requests);
430         }
431 }
432
433
434 /**
435  * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
436  * @mdev:       DRBD device.
437  *
438  * This is called after the connection to the peer was lost. The storage covered
439  * by the requests on the transfer gets marked as our of sync. Called from the
440  * receiver thread and the worker thread.
441  */
442 void tl_clear(struct drbd_conf *mdev)
443 {
444         struct list_head *le, *tle;
445         struct drbd_request *r;
446
447         spin_lock_irq(&mdev->req_lock);
448
449         _tl_restart(mdev, connection_lost_while_pending);
450
451         /* we expect this list to be empty. */
452         D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
453
454         /* but just in case, clean it up anyways! */
455         list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
456                 r = list_entry(le, struct drbd_request, tl_requests);
457                 /* It would be nice to complete outside of spinlock.
458                  * But this is easier for now. */
459                 _req_mod(r, connection_lost_while_pending);
460         }
461
462         /* ensure bit indicating barrier is required is clear */
463         clear_bit(CREATE_BARRIER, &mdev->flags);
464
465         memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
466
467         spin_unlock_irq(&mdev->req_lock);
468 }
469
470 void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
471 {
472         spin_lock_irq(&mdev->req_lock);
473         _tl_restart(mdev, what);
474         spin_unlock_irq(&mdev->req_lock);
475 }
476
477 /**
478  * cl_wide_st_chg() - true if the state change is a cluster wide one
479  * @mdev:       DRBD device.
480  * @os:         old (current) state.
481  * @ns:         new (wanted) state.
482  */
483 static int cl_wide_st_chg(struct drbd_conf *mdev,
484                           union drbd_state os, union drbd_state ns)
485 {
486         return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
487                  ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
488                   (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
489                   (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
490                   (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
491                 (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
492                 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
493 }
494
495 enum drbd_state_rv
496 drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
497                   union drbd_state mask, union drbd_state val)
498 {
499         unsigned long flags;
500         union drbd_state os, ns;
501         enum drbd_state_rv rv;
502
503         spin_lock_irqsave(&mdev->req_lock, flags);
504         os = mdev->state;
505         ns.i = (os.i & ~mask.i) | val.i;
506         rv = _drbd_set_state(mdev, ns, f, NULL);
507         ns = mdev->state;
508         spin_unlock_irqrestore(&mdev->req_lock, flags);
509
510         return rv;
511 }
512
513 /**
514  * drbd_force_state() - Impose a change which happens outside our control on our state
515  * @mdev:       DRBD device.
516  * @mask:       mask of state bits to change.
517  * @val:        value of new state bits.
518  */
519 void drbd_force_state(struct drbd_conf *mdev,
520         union drbd_state mask, union drbd_state val)
521 {
522         drbd_change_state(mdev, CS_HARD, mask, val);
523 }
524
525 static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
526 static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
527                                                     union drbd_state,
528                                                     union drbd_state);
529 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
530                                        union drbd_state ns, const char **warn_sync_abort);
531 int drbd_send_state_req(struct drbd_conf *,
532                         union drbd_state, union drbd_state);
533
534 static enum drbd_state_rv
535 _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
536              union drbd_state val)
537 {
538         union drbd_state os, ns;
539         unsigned long flags;
540         enum drbd_state_rv rv;
541
542         if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
543                 return SS_CW_SUCCESS;
544
545         if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
546                 return SS_CW_FAILED_BY_PEER;
547
548         rv = 0;
549         spin_lock_irqsave(&mdev->req_lock, flags);
550         os = mdev->state;
551         ns.i = (os.i & ~mask.i) | val.i;
552         ns = sanitize_state(mdev, os, ns, NULL);
553
554         if (!cl_wide_st_chg(mdev, os, ns))
555                 rv = SS_CW_NO_NEED;
556         if (!rv) {
557                 rv = is_valid_state(mdev, ns);
558                 if (rv == SS_SUCCESS) {
559                         rv = is_valid_state_transition(mdev, ns, os);
560                         if (rv == SS_SUCCESS)
561                                 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
562                 }
563         }
564         spin_unlock_irqrestore(&mdev->req_lock, flags);
565
566         return rv;
567 }
568
569 /**
570  * drbd_req_state() - Perform an eventually cluster wide state change
571  * @mdev:       DRBD device.
572  * @mask:       mask of state bits to change.
573  * @val:        value of new state bits.
574  * @f:          flags
575  *
576  * Should not be called directly, use drbd_request_state() or
577  * _drbd_request_state().
578  */
579 static enum drbd_state_rv
580 drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
581                union drbd_state val, enum chg_state_flags f)
582 {
583         struct completion done;
584         unsigned long flags;
585         union drbd_state os, ns;
586         enum drbd_state_rv rv;
587
588         init_completion(&done);
589
590         if (f & CS_SERIALIZE)
591                 mutex_lock(&mdev->state_mutex);
592
593         spin_lock_irqsave(&mdev->req_lock, flags);
594         os = mdev->state;
595         ns.i = (os.i & ~mask.i) | val.i;
596         ns = sanitize_state(mdev, os, ns, NULL);
597
598         if (cl_wide_st_chg(mdev, os, ns)) {
599                 rv = is_valid_state(mdev, ns);
600                 if (rv == SS_SUCCESS)
601                         rv = is_valid_state_transition(mdev, ns, os);
602                 spin_unlock_irqrestore(&mdev->req_lock, flags);
603
604                 if (rv < SS_SUCCESS) {
605                         if (f & CS_VERBOSE)
606                                 print_st_err(mdev, os, ns, rv);
607                         goto abort;
608                 }
609
610                 drbd_state_lock(mdev);
611                 if (!drbd_send_state_req(mdev, mask, val)) {
612                         drbd_state_unlock(mdev);
613                         rv = SS_CW_FAILED_BY_PEER;
614                         if (f & CS_VERBOSE)
615                                 print_st_err(mdev, os, ns, rv);
616                         goto abort;
617                 }
618
619                 wait_event(mdev->state_wait,
620                         (rv = _req_st_cond(mdev, mask, val)));
621
622                 if (rv < SS_SUCCESS) {
623                         drbd_state_unlock(mdev);
624                         if (f & CS_VERBOSE)
625                                 print_st_err(mdev, os, ns, rv);
626                         goto abort;
627                 }
628                 spin_lock_irqsave(&mdev->req_lock, flags);
629                 os = mdev->state;
630                 ns.i = (os.i & ~mask.i) | val.i;
631                 rv = _drbd_set_state(mdev, ns, f, &done);
632                 drbd_state_unlock(mdev);
633         } else {
634                 rv = _drbd_set_state(mdev, ns, f, &done);
635         }
636
637         spin_unlock_irqrestore(&mdev->req_lock, flags);
638
639         if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
640                 D_ASSERT(current != mdev->worker.task);
641                 wait_for_completion(&done);
642         }
643
644 abort:
645         if (f & CS_SERIALIZE)
646                 mutex_unlock(&mdev->state_mutex);
647
648         return rv;
649 }
650
651 /**
652  * _drbd_request_state() - Request a state change (with flags)
653  * @mdev:       DRBD device.
654  * @mask:       mask of state bits to change.
655  * @val:        value of new state bits.
656  * @f:          flags
657  *
658  * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
659  * flag, or when logging of failed state change requests is not desired.
660  */
661 enum drbd_state_rv
662 _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
663                     union drbd_state val, enum chg_state_flags f)
664 {
665         enum drbd_state_rv rv;
666
667         wait_event(mdev->state_wait,
668                    (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
669
670         return rv;
671 }
672
673 static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
674 {
675         dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
676             name,
677             drbd_conn_str(ns.conn),
678             drbd_role_str(ns.role),
679             drbd_role_str(ns.peer),
680             drbd_disk_str(ns.disk),
681             drbd_disk_str(ns.pdsk),
682             is_susp(ns) ? 's' : 'r',
683             ns.aftr_isp ? 'a' : '-',
684             ns.peer_isp ? 'p' : '-',
685             ns.user_isp ? 'u' : '-'
686             );
687 }
688
689 void print_st_err(struct drbd_conf *mdev, union drbd_state os,
690                   union drbd_state ns, enum drbd_state_rv err)
691 {
692         if (err == SS_IN_TRANSIENT_STATE)
693                 return;
694         dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
695         print_st(mdev, " state", os);
696         print_st(mdev, "wanted", ns);
697 }
698
699
700 /**
701  * is_valid_state() - Returns an SS_ error code if ns is not valid
702  * @mdev:       DRBD device.
703  * @ns:         State to consider.
704  */
705 static enum drbd_state_rv
706 is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
707 {
708         /* See drbd_state_sw_errors in drbd_strings.c */
709
710         enum drbd_fencing_p fp;
711         enum drbd_state_rv rv = SS_SUCCESS;
712
713         fp = FP_DONT_CARE;
714         if (get_ldev(mdev)) {
715                 fp = mdev->ldev->dc.fencing;
716                 put_ldev(mdev);
717         }
718
719         if (get_net_conf(mdev)) {
720                 if (!mdev->net_conf->two_primaries &&
721                     ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
722                         rv = SS_TWO_PRIMARIES;
723                 put_net_conf(mdev);
724         }
725
726         if (rv <= 0)
727                 /* already found a reason to abort */;
728         else if (ns.role == R_SECONDARY && mdev->open_cnt)
729                 rv = SS_DEVICE_IN_USE;
730
731         else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
732                 rv = SS_NO_UP_TO_DATE_DISK;
733
734         else if (fp >= FP_RESOURCE &&
735                  ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
736                 rv = SS_PRIMARY_NOP;
737
738         else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
739                 rv = SS_NO_UP_TO_DATE_DISK;
740
741         else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
742                 rv = SS_NO_LOCAL_DISK;
743
744         else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
745                 rv = SS_NO_REMOTE_DISK;
746
747         else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
748                 rv = SS_NO_UP_TO_DATE_DISK;
749
750         else if ((ns.conn == C_CONNECTED ||
751                   ns.conn == C_WF_BITMAP_S ||
752                   ns.conn == C_SYNC_SOURCE ||
753                   ns.conn == C_PAUSED_SYNC_S) &&
754                   ns.disk == D_OUTDATED)
755                 rv = SS_CONNECTED_OUTDATES;
756
757         else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
758                  (mdev->sync_conf.verify_alg[0] == 0))
759                 rv = SS_NO_VERIFY_ALG;
760
761         else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
762                   mdev->agreed_pro_version < 88)
763                 rv = SS_NOT_SUPPORTED;
764
765         return rv;
766 }
767
768 /**
769  * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
770  * @mdev:       DRBD device.
771  * @ns:         new state.
772  * @os:         old state.
773  */
774 static enum drbd_state_rv
775 is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
776                           union drbd_state os)
777 {
778         enum drbd_state_rv rv = SS_SUCCESS;
779
780         if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
781             os.conn > C_CONNECTED)
782                 rv = SS_RESYNC_RUNNING;
783
784         if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
785                 rv = SS_ALREADY_STANDALONE;
786
787         if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
788                 rv = SS_IS_DISKLESS;
789
790         if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
791                 rv = SS_NO_NET_CONFIG;
792
793         if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
794                 rv = SS_LOWER_THAN_OUTDATED;
795
796         if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
797                 rv = SS_IN_TRANSIENT_STATE;
798
799         if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
800                 rv = SS_IN_TRANSIENT_STATE;
801
802         if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
803                 rv = SS_NEED_CONNECTION;
804
805         if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
806             ns.conn != os.conn && os.conn > C_CONNECTED)
807                 rv = SS_RESYNC_RUNNING;
808
809         if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
810             os.conn < C_CONNECTED)
811                 rv = SS_NEED_CONNECTION;
812
813         if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
814             && os.conn < C_WF_REPORT_PARAMS)
815                 rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
816
817         return rv;
818 }
819
820 /**
821  * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
822  * @mdev:       DRBD device.
823  * @os:         old state.
824  * @ns:         new state.
825  * @warn_sync_abort:
826  *
827  * When we loose connection, we have to set the state of the peers disk (pdsk)
828  * to D_UNKNOWN. This rule and many more along those lines are in this function.
829  */
830 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
831                                        union drbd_state ns, const char **warn_sync_abort)
832 {
833         enum drbd_fencing_p fp;
834         enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
835
836         fp = FP_DONT_CARE;
837         if (get_ldev(mdev)) {
838                 fp = mdev->ldev->dc.fencing;
839                 put_ldev(mdev);
840         }
841
842         /* Disallow Network errors to configure a device's network part */
843         if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
844             os.conn <= C_DISCONNECTING)
845                 ns.conn = os.conn;
846
847         /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
848          * If you try to go into some Sync* state, that shall fail (elsewhere). */
849         if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
850             ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
851                 ns.conn = os.conn;
852
853         /* we cannot fail (again) if we already detached */
854         if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
855                 ns.disk = D_DISKLESS;
856
857         /* if we are only D_ATTACHING yet,
858          * we can (and should) go directly to D_DISKLESS. */
859         if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
860                 ns.disk = D_DISKLESS;
861
862         /* After C_DISCONNECTING only C_STANDALONE may follow */
863         if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
864                 ns.conn = os.conn;
865
866         if (ns.conn < C_CONNECTED) {
867                 ns.peer_isp = 0;
868                 ns.peer = R_UNKNOWN;
869                 if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
870                         ns.pdsk = D_UNKNOWN;
871         }
872
873         /* Clear the aftr_isp when becoming unconfigured */
874         if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
875                 ns.aftr_isp = 0;
876
877         /* Abort resync if a disk fails/detaches */
878         if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
879             (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
880                 if (warn_sync_abort)
881                         *warn_sync_abort =
882                                 os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
883                                 "Online-verify" : "Resync";
884                 ns.conn = C_CONNECTED;
885         }
886
887         /* Connection breaks down before we finished "Negotiating" */
888         if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
889             get_ldev_if_state(mdev, D_NEGOTIATING)) {
890                 if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
891                         ns.disk = mdev->new_state_tmp.disk;
892                         ns.pdsk = mdev->new_state_tmp.pdsk;
893                 } else {
894                         dev_alert(DEV, "Connection lost while negotiating, no data!\n");
895                         ns.disk = D_DISKLESS;
896                         ns.pdsk = D_UNKNOWN;
897                 }
898                 put_ldev(mdev);
899         }
900
901         /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
902         if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
903                 if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
904                         ns.disk = D_UP_TO_DATE;
905                 if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
906                         ns.pdsk = D_UP_TO_DATE;
907         }
908
909         /* Implications of the connection stat on the disk states */
910         disk_min = D_DISKLESS;
911         disk_max = D_UP_TO_DATE;
912         pdsk_min = D_INCONSISTENT;
913         pdsk_max = D_UNKNOWN;
914         switch ((enum drbd_conns)ns.conn) {
915         case C_WF_BITMAP_T:
916         case C_PAUSED_SYNC_T:
917         case C_STARTING_SYNC_T:
918         case C_WF_SYNC_UUID:
919         case C_BEHIND:
920                 disk_min = D_INCONSISTENT;
921                 disk_max = D_OUTDATED;
922                 pdsk_min = D_UP_TO_DATE;
923                 pdsk_max = D_UP_TO_DATE;
924                 break;
925         case C_VERIFY_S:
926         case C_VERIFY_T:
927                 disk_min = D_UP_TO_DATE;
928                 disk_max = D_UP_TO_DATE;
929                 pdsk_min = D_UP_TO_DATE;
930                 pdsk_max = D_UP_TO_DATE;
931                 break;
932         case C_CONNECTED:
933                 disk_min = D_DISKLESS;
934                 disk_max = D_UP_TO_DATE;
935                 pdsk_min = D_DISKLESS;
936                 pdsk_max = D_UP_TO_DATE;
937                 break;
938         case C_WF_BITMAP_S:
939         case C_PAUSED_SYNC_S:
940         case C_STARTING_SYNC_S:
941         case C_AHEAD:
942                 disk_min = D_UP_TO_DATE;
943                 disk_max = D_UP_TO_DATE;
944                 pdsk_min = D_INCONSISTENT;
945                 pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
946                 break;
947         case C_SYNC_TARGET:
948                 disk_min = D_INCONSISTENT;
949                 disk_max = D_INCONSISTENT;
950                 pdsk_min = D_UP_TO_DATE;
951                 pdsk_max = D_UP_TO_DATE;
952                 break;
953         case C_SYNC_SOURCE:
954                 disk_min = D_UP_TO_DATE;
955                 disk_max = D_UP_TO_DATE;
956                 pdsk_min = D_INCONSISTENT;
957                 pdsk_max = D_INCONSISTENT;
958                 break;
959         case C_STANDALONE:
960         case C_DISCONNECTING:
961         case C_UNCONNECTED:
962         case C_TIMEOUT:
963         case C_BROKEN_PIPE:
964         case C_NETWORK_FAILURE:
965         case C_PROTOCOL_ERROR:
966         case C_TEAR_DOWN:
967         case C_WF_CONNECTION:
968         case C_WF_REPORT_PARAMS:
969         case C_MASK:
970                 break;
971         }
972         if (ns.disk > disk_max)
973                 ns.disk = disk_max;
974
975         if (ns.disk < disk_min) {
976                 dev_warn(DEV, "Implicitly set disk from %s to %s\n",
977                          drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
978                 ns.disk = disk_min;
979         }
980         if (ns.pdsk > pdsk_max)
981                 ns.pdsk = pdsk_max;
982
983         if (ns.pdsk < pdsk_min) {
984                 dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
985                          drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
986                 ns.pdsk = pdsk_min;
987         }
988
989         if (fp == FP_STONITH &&
990             (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
991             !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
992                 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
993
994         if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
995             (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
996             !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
997                 ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */
998
999         if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
1000                 if (ns.conn == C_SYNC_SOURCE)
1001                         ns.conn = C_PAUSED_SYNC_S;
1002                 if (ns.conn == C_SYNC_TARGET)
1003                         ns.conn = C_PAUSED_SYNC_T;
1004         } else {
1005                 if (ns.conn == C_PAUSED_SYNC_S)
1006                         ns.conn = C_SYNC_SOURCE;
1007                 if (ns.conn == C_PAUSED_SYNC_T)
1008                         ns.conn = C_SYNC_TARGET;
1009         }
1010
1011         return ns;
1012 }
1013
1014 /* helper for __drbd_set_state */
1015 static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
1016 {
1017         if (mdev->agreed_pro_version < 90)
1018                 mdev->ov_start_sector = 0;
1019         mdev->rs_total = drbd_bm_bits(mdev);
1020         mdev->ov_position = 0;
1021         if (cs == C_VERIFY_T) {
1022                 /* starting online verify from an arbitrary position
1023                  * does not fit well into the existing protocol.
1024                  * on C_VERIFY_T, we initialize ov_left and friends
1025                  * implicitly in receive_DataRequest once the
1026                  * first P_OV_REQUEST is received */
1027                 mdev->ov_start_sector = ~(sector_t)0;
1028         } else {
1029                 unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
1030                 if (bit >= mdev->rs_total) {
1031                         mdev->ov_start_sector =
1032                                 BM_BIT_TO_SECT(mdev->rs_total - 1);
1033                         mdev->rs_total = 1;
1034                 } else
1035                         mdev->rs_total -= bit;
1036                 mdev->ov_position = mdev->ov_start_sector;
1037         }
1038         mdev->ov_left = mdev->rs_total;
1039 }
1040
1041 static void drbd_resume_al(struct drbd_conf *mdev)
1042 {
1043         if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
1044                 dev_info(DEV, "Resumed AL updates\n");
1045 }
1046
1047 /**
1048  * __drbd_set_state() - Set a new DRBD state
1049  * @mdev:       DRBD device.
1050  * @ns:         new state.
1051  * @flags:      Flags
1052  * @done:       Optional completion, that will get completed after the after_state_ch() finished
1053  *
1054  * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
1055  */
1056 enum drbd_state_rv
1057 __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
1058                  enum chg_state_flags flags, struct completion *done)
1059 {
1060         union drbd_state os;
1061         enum drbd_state_rv rv = SS_SUCCESS;
1062         const char *warn_sync_abort = NULL;
1063         struct after_state_chg_work *ascw;
1064
1065         os = mdev->state;
1066
1067         ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
1068
1069         if (ns.i == os.i)
1070                 return SS_NOTHING_TO_DO;
1071
1072         if (!(flags & CS_HARD)) {
1073                 /*  pre-state-change checks ; only look at ns  */
1074                 /* See drbd_state_sw_errors in drbd_strings.c */
1075
1076                 rv = is_valid_state(mdev, ns);
1077                 if (rv < SS_SUCCESS) {
1078                         /* If the old state was illegal as well, then let
1079                            this happen...*/
1080
1081                         if (is_valid_state(mdev, os) == rv)
1082                                 rv = is_valid_state_transition(mdev, ns, os);
1083                 } else
1084                         rv = is_valid_state_transition(mdev, ns, os);
1085         }
1086
1087         if (rv < SS_SUCCESS) {
1088                 if (flags & CS_VERBOSE)
1089                         print_st_err(mdev, os, ns, rv);
1090                 return rv;
1091         }
1092
1093         if (warn_sync_abort)
1094                 dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
1095
1096         {
1097         char *pbp, pb[300];
1098         pbp = pb;
1099         *pbp = 0;
1100         if (ns.role != os.role)
1101                 pbp += sprintf(pbp, "role( %s -> %s ) ",
1102                                drbd_role_str(os.role),
1103                                drbd_role_str(ns.role));
1104         if (ns.peer != os.peer)
1105                 pbp += sprintf(pbp, "peer( %s -> %s ) ",
1106                                drbd_role_str(os.peer),
1107                                drbd_role_str(ns.peer));
1108         if (ns.conn != os.conn)
1109                 pbp += sprintf(pbp, "conn( %s -> %s ) ",
1110                                drbd_conn_str(os.conn),
1111                                drbd_conn_str(ns.conn));
1112         if (ns.disk != os.disk)
1113                 pbp += sprintf(pbp, "disk( %s -> %s ) ",
1114                                drbd_disk_str(os.disk),
1115                                drbd_disk_str(ns.disk));
1116         if (ns.pdsk != os.pdsk)
1117                 pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
1118                                drbd_disk_str(os.pdsk),
1119                                drbd_disk_str(ns.pdsk));
1120         if (is_susp(ns) != is_susp(os))
1121                 pbp += sprintf(pbp, "susp( %d -> %d ) ",
1122                                is_susp(os),
1123                                is_susp(ns));
1124         if (ns.aftr_isp != os.aftr_isp)
1125                 pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
1126                                os.aftr_isp,
1127                                ns.aftr_isp);
1128         if (ns.peer_isp != os.peer_isp)
1129                 pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
1130                                os.peer_isp,
1131                                ns.peer_isp);
1132         if (ns.user_isp != os.user_isp)
1133                 pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
1134                                os.user_isp,
1135                                ns.user_isp);
1136         dev_info(DEV, "%s\n", pb);
1137         }
1138
1139         /* solve the race between becoming unconfigured,
1140          * worker doing the cleanup, and
1141          * admin reconfiguring us:
1142          * on (re)configure, first set CONFIG_PENDING,
1143          * then wait for a potentially exiting worker,
1144          * start the worker, and schedule one no_op.
1145          * then proceed with configuration.
1146          */
1147         if (ns.disk == D_DISKLESS &&
1148             ns.conn == C_STANDALONE &&
1149             ns.role == R_SECONDARY &&
1150             !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
1151                 set_bit(DEVICE_DYING, &mdev->flags);
1152
1153         /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
1154          * on the ldev here, to be sure the transition -> D_DISKLESS resp.
1155          * drbd_ldev_destroy() won't happen before our corresponding
1156          * after_state_ch works run, where we put_ldev again. */
1157         if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
1158             (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
1159                 atomic_inc(&mdev->local_cnt);
1160
1161         mdev->state = ns;
1162
1163         if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
1164                 drbd_print_uuids(mdev, "attached to UUIDs");
1165
1166         wake_up(&mdev->misc_wait);
1167         wake_up(&mdev->state_wait);
1168
1169         /* aborted verify run. log the last position */
1170         if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
1171             ns.conn < C_CONNECTED) {
1172                 mdev->ov_start_sector =
1173                         BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
1174                 dev_info(DEV, "Online Verify reached sector %llu\n",
1175                         (unsigned long long)mdev->ov_start_sector);
1176         }
1177
1178         if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
1179             (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
1180                 dev_info(DEV, "Syncer continues.\n");
1181                 mdev->rs_paused += (long)jiffies
1182                                   -(long)mdev->rs_mark_time[mdev->rs_last_mark];
1183                 if (ns.conn == C_SYNC_TARGET)
1184                         mod_timer(&mdev->resync_timer, jiffies);
1185         }
1186
1187         if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
1188             (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
1189                 dev_info(DEV, "Resync suspended\n");
1190                 mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
1191         }
1192
1193         if (os.conn == C_CONNECTED &&
1194             (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
1195                 unsigned long now = jiffies;
1196                 int i;
1197
1198                 set_ov_position(mdev, ns.conn);
1199                 mdev->rs_start = now;
1200                 mdev->rs_last_events = 0;
1201                 mdev->rs_last_sect_ev = 0;
1202                 mdev->ov_last_oos_size = 0;
1203                 mdev->ov_last_oos_start = 0;
1204
1205                 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1206                         mdev->rs_mark_left[i] = mdev->ov_left;
1207                         mdev->rs_mark_time[i] = now;
1208                 }
1209
1210                 drbd_rs_controller_reset(mdev);
1211
1212                 if (ns.conn == C_VERIFY_S) {
1213                         dev_info(DEV, "Starting Online Verify from sector %llu\n",
1214                                         (unsigned long long)mdev->ov_position);
1215                         mod_timer(&mdev->resync_timer, jiffies);
1216                 }
1217         }
1218
1219         if (get_ldev(mdev)) {
1220                 u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
1221                                                  MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
1222                                                  MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
1223
1224                 if (test_bit(CRASHED_PRIMARY, &mdev->flags))
1225                         mdf |= MDF_CRASHED_PRIMARY;
1226                 if (mdev->state.role == R_PRIMARY ||
1227                     (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
1228                         mdf |= MDF_PRIMARY_IND;
1229                 if (mdev->state.conn > C_WF_REPORT_PARAMS)
1230                         mdf |= MDF_CONNECTED_IND;
1231                 if (mdev->state.disk > D_INCONSISTENT)
1232                         mdf |= MDF_CONSISTENT;
1233                 if (mdev->state.disk > D_OUTDATED)
1234                         mdf |= MDF_WAS_UP_TO_DATE;
1235                 if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
1236                         mdf |= MDF_PEER_OUT_DATED;
1237                 if (mdf != mdev->ldev->md.flags) {
1238                         mdev->ldev->md.flags = mdf;
1239                         drbd_md_mark_dirty(mdev);
1240                 }
1241                 if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
1242                         drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
1243                 put_ldev(mdev);
1244         }
1245
1246         /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
1247         if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
1248             os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
1249                 set_bit(CONSIDER_RESYNC, &mdev->flags);
1250
1251         /* Receiver should clean up itself */
1252         if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
1253                 drbd_thread_stop_nowait(&mdev->receiver);
1254
1255         /* Now the receiver finished cleaning up itself, it should die */
1256         if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
1257                 drbd_thread_stop_nowait(&mdev->receiver);
1258
1259         /* Upon network failure, we need to restart the receiver. */
1260         if (os.conn > C_TEAR_DOWN &&
1261             ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1262                 drbd_thread_restart_nowait(&mdev->receiver);
1263
1264         /* Resume AL writing if we get a connection */
1265         if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1266                 drbd_resume_al(mdev);
1267
1268         /* Start a new epoch in case we start to mirror write requests */
1269         if (!drbd_should_do_remote(os) && drbd_should_do_remote(ns))
1270                 tl_forget(mdev);
1271
1272         /* Do not add local-only requests to an epoch with mirrored requests */
1273         if (drbd_should_do_remote(os) && !drbd_should_do_remote(ns))
1274                 set_bit(CREATE_BARRIER, &mdev->flags);
1275
1276         ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
1277         if (ascw) {
1278                 ascw->os = os;
1279                 ascw->ns = ns;
1280                 ascw->flags = flags;
1281                 ascw->w.cb = w_after_state_ch;
1282                 ascw->done = done;
1283                 drbd_queue_work(&mdev->data.work, &ascw->w);
1284         } else {
1285                 dev_warn(DEV, "Could not kmalloc an ascw\n");
1286         }
1287
1288         return rv;
1289 }
1290
1291 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1292 {
1293         struct after_state_chg_work *ascw =
1294                 container_of(w, struct after_state_chg_work, w);
1295         after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
1296         if (ascw->flags & CS_WAIT_COMPLETE) {
1297                 D_ASSERT(ascw->done != NULL);
1298                 complete(ascw->done);
1299         }
1300         kfree(ascw);
1301
1302         return 1;
1303 }
1304
1305 static void abw_start_sync(struct drbd_conf *mdev, int rv)
1306 {
1307         if (rv) {
1308                 dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
1309                 _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
1310                 return;
1311         }
1312
1313         switch (mdev->state.conn) {
1314         case C_STARTING_SYNC_T:
1315                 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
1316                 break;
1317         case C_STARTING_SYNC_S:
1318                 drbd_start_resync(mdev, C_SYNC_SOURCE);
1319                 break;
1320         }
1321 }
1322
1323 int drbd_bitmap_io_from_worker(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
1324 {
1325         int rv;
1326
1327         D_ASSERT(current == mdev->worker.task);
1328
1329         /* open coded non-blocking drbd_suspend_io(mdev); */
1330         set_bit(SUSPEND_IO, &mdev->flags);
1331         if (!is_susp(mdev->state))
1332                 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
1333
1334         drbd_bm_lock(mdev, why);
1335         rv = io_fn(mdev);
1336         drbd_bm_unlock(mdev);
1337
1338         drbd_resume_io(mdev);
1339
1340         return rv;
1341 }
1342
1343 /**
1344  * after_state_ch() - Perform after state change actions that may sleep
1345  * @mdev:       DRBD device.
1346  * @os:         old state.
1347  * @ns:         new state.
1348  * @flags:      Flags
1349  */
1350 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1351                            union drbd_state ns, enum chg_state_flags flags)
1352 {
1353         enum drbd_fencing_p fp;
1354         enum drbd_req_event what = nothing;
1355         union drbd_state nsm = (union drbd_state){ .i = -1 };
1356
1357         if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1358                 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1359                 if (mdev->p_uuid)
1360                         mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1361         }
1362
1363         fp = FP_DONT_CARE;
1364         if (get_ldev(mdev)) {
1365                 fp = mdev->ldev->dc.fencing;
1366                 put_ldev(mdev);
1367         }
1368
1369         /* Inform userspace about the change... */
1370         drbd_bcast_state(mdev, ns);
1371
1372         if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1373             (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1374                 drbd_khelper(mdev, "pri-on-incon-degr");
1375
1376         /* Here we have the actions that are performed after a
1377            state change. This function might sleep */
1378
1379         nsm.i = -1;
1380         if (ns.susp_nod) {
1381                 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1382                         what = resend;
1383
1384                 if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
1385                         what = restart_frozen_disk_io;
1386
1387                 if (what != nothing)
1388                         nsm.susp_nod = 0;
1389         }
1390
1391         if (ns.susp_fen) {
1392                 /* case1: The outdate peer handler is successful: */
1393                 if (os.pdsk > D_OUTDATED  && ns.pdsk <= D_OUTDATED) {
1394                         tl_clear(mdev);
1395                         if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1396                                 drbd_uuid_new_current(mdev);
1397                                 clear_bit(NEW_CUR_UUID, &mdev->flags);
1398                         }
1399                         spin_lock_irq(&mdev->req_lock);
1400                         _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
1401                         spin_unlock_irq(&mdev->req_lock);
1402                 }
1403                 /* case2: The connection was established again: */
1404                 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1405                         clear_bit(NEW_CUR_UUID, &mdev->flags);
1406                         what = resend;
1407                         nsm.susp_fen = 0;
1408                 }
1409         }
1410
1411         if (what != nothing) {
1412                 spin_lock_irq(&mdev->req_lock);
1413                 _tl_restart(mdev, what);
1414                 nsm.i &= mdev->state.i;
1415                 _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
1416                 spin_unlock_irq(&mdev->req_lock);
1417         }
1418
1419         /* Became sync source.  With protocol >= 96, we still need to send out
1420          * the sync uuid now. Need to do that before any drbd_send_state, or
1421          * the other side may go "paused sync" before receiving the sync uuids,
1422          * which is unexpected. */
1423         if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
1424             (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
1425             mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
1426                 drbd_gen_and_send_sync_uuid(mdev);
1427                 put_ldev(mdev);
1428         }
1429
1430         /* Do not change the order of the if above and the two below... */
1431         if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
1432                 drbd_send_uuids(mdev);
1433                 drbd_send_state(mdev);
1434         }
1435         /* No point in queuing send_bitmap if we don't have a connection
1436          * anymore, so check also the _current_ state, not only the new state
1437          * at the time this work was queued. */
1438         if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
1439             mdev->state.conn == C_WF_BITMAP_S)
1440                 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
1441                                 "send_bitmap (WFBitMapS)");
1442
1443         /* Lost contact to peer's copy of the data */
1444         if ((os.pdsk >= D_INCONSISTENT &&
1445              os.pdsk != D_UNKNOWN &&
1446              os.pdsk != D_OUTDATED)
1447         &&  (ns.pdsk < D_INCONSISTENT ||
1448              ns.pdsk == D_UNKNOWN ||
1449              ns.pdsk == D_OUTDATED)) {
1450                 if (get_ldev(mdev)) {
1451                         if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
1452                             mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1453                                 if (is_susp(mdev->state)) {
1454                                         set_bit(NEW_CUR_UUID, &mdev->flags);
1455                                 } else {
1456                                         drbd_uuid_new_current(mdev);
1457                                         drbd_send_uuids(mdev);
1458                                 }
1459                         }
1460                         put_ldev(mdev);
1461                 }
1462         }
1463
1464         if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
1465                 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
1466                         drbd_uuid_new_current(mdev);
1467                         drbd_send_uuids(mdev);
1468                 }
1469
1470                 /* D_DISKLESS Peer becomes secondary */
1471                 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
1472                         drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote diskless peer");
1473                 put_ldev(mdev);
1474         }
1475
1476         /* Write out all changed bits on demote.
1477          * Though, no need to da that just yet
1478          * if there is a resync going on still */
1479         if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
1480                 mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
1481                 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote");
1482                 put_ldev(mdev);
1483         }
1484
1485         /* Last part of the attaching process ... */
1486         if (ns.conn >= C_CONNECTED &&
1487             os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
1488                 drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
1489                 drbd_send_uuids(mdev);
1490                 drbd_send_state(mdev);
1491         }
1492
1493         /* We want to pause/continue resync, tell peer. */
1494         if (ns.conn >= C_CONNECTED &&
1495              ((os.aftr_isp != ns.aftr_isp) ||
1496               (os.user_isp != ns.user_isp)))
1497                 drbd_send_state(mdev);
1498
1499         /* In case one of the isp bits got set, suspend other devices. */
1500         if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1501             (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1502                 suspend_other_sg(mdev);
1503
1504         /* Make sure the peer gets informed about eventual state
1505            changes (ISP bits) while we were in WFReportParams. */
1506         if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1507                 drbd_send_state(mdev);
1508
1509         if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
1510                 drbd_send_state(mdev);
1511
1512         /* We are in the progress to start a full sync... */
1513         if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1514             (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
1515                 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");
1516
1517         /* We are invalidating our self... */
1518         if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1519             os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1520                 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
1521
1522         /* first half of local IO error, failure to attach,
1523          * or administrative detach */
1524         if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1525                 enum drbd_io_error_p eh;
1526                 int was_io_error;
1527                 /* corresponding get_ldev was in __drbd_set_state, to serialize
1528                  * our cleanup here with the transition to D_DISKLESS,
1529                  * so it is safe to dreference ldev here. */
1530                 eh = mdev->ldev->dc.on_io_error;
1531                 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1532
1533                 /* current state still has to be D_FAILED,
1534                  * there is only one way out: to D_DISKLESS,
1535                  * and that may only happen after our put_ldev below. */
1536                 if (mdev->state.disk != D_FAILED)
1537                         dev_err(DEV,
1538                                 "ASSERT FAILED: disk is %s during detach\n",
1539                                 drbd_disk_str(mdev->state.disk));
1540
1541                 if (drbd_send_state(mdev))
1542                         dev_warn(DEV, "Notified peer that I am detaching my disk\n");
1543                 else
1544                         dev_err(DEV, "Sending state for detaching disk failed\n");
1545
1546                 drbd_rs_cancel_all(mdev);
1547
1548                 /* In case we want to get something to stable storage still,
1549                  * this may be the last chance.
1550                  * Following put_ldev may transition to D_DISKLESS. */
1551                 drbd_md_sync(mdev);
1552                 put_ldev(mdev);
1553
1554                 if (was_io_error && eh == EP_CALL_HELPER)
1555                         drbd_khelper(mdev, "local-io-error");
1556         }
1557
1558         /* second half of local IO error, failure to attach,
1559          * or administrative detach,
1560          * after local_cnt references have reached zero again */
1561         if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1562                 /* We must still be diskless,
1563                  * re-attach has to be serialized with this! */
1564                 if (mdev->state.disk != D_DISKLESS)
1565                         dev_err(DEV,
1566                                 "ASSERT FAILED: disk is %s while going diskless\n",
1567                                 drbd_disk_str(mdev->state.disk));
1568
1569                 mdev->rs_total = 0;
1570                 mdev->rs_failed = 0;
1571                 atomic_set(&mdev->rs_pending_cnt, 0);
1572
1573                 if (drbd_send_state(mdev))
1574                         dev_warn(DEV, "Notified peer that I'm now diskless.\n");
1575                 else
1576                         dev_err(DEV, "Sending state for being diskless failed\n");
1577                 /* corresponding get_ldev in __drbd_set_state
1578                  * this may finaly trigger drbd_ldev_destroy. */
1579                 put_ldev(mdev);
1580         }
1581
1582         /* Disks got bigger while they were detached */
1583         if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1584             test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1585                 if (ns.conn == C_CONNECTED)
1586                         resync_after_online_grow(mdev);
1587         }
1588
1589         /* A resync finished or aborted, wake paused devices... */
1590         if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1591             (os.peer_isp && !ns.peer_isp) ||
1592             (os.user_isp && !ns.user_isp))
1593                 resume_next_sg(mdev);
1594
1595         /* sync target done with resync.  Explicitly notify peer, even though
1596          * it should (at least for non-empty resyncs) already know itself. */
1597         if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
1598                 drbd_send_state(mdev);
1599
1600         /* This triggers bitmap writeout of potentially still unwritten pages
1601          * if the resync finished cleanly, or aborted because of peer disk
1602          * failure.  Resync aborted because of connection failure does bitmap
1603          * writeout from drbd_disconnect.
1604          * For resync aborted because of local disk failure, we cannot do
1605          * any bitmap writeout anymore.
1606          */
1607         if (os.conn > C_CONNECTED && ns.conn == C_CONNECTED &&
1608             mdev->state.conn == C_CONNECTED && get_ldev(mdev)) {
1609                 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
1610                 put_ldev(mdev);
1611         }
1612
1613         /* free tl_hash if we Got thawed and are C_STANDALONE */
1614         if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
1615                 drbd_free_tl_hash(mdev);
1616
1617         /* Upon network connection, we need to start the receiver */
1618         if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1619                 drbd_thread_start(&mdev->receiver);
1620
1621         /* Terminate worker thread if we are unconfigured - it will be
1622            restarted as needed... */
1623         if (ns.disk == D_DISKLESS &&
1624             ns.conn == C_STANDALONE &&
1625             ns.role == R_SECONDARY) {
1626                 if (os.aftr_isp != ns.aftr_isp)
1627                         resume_next_sg(mdev);
1628                 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1629                 if (test_bit(DEVICE_DYING, &mdev->flags))
1630                         drbd_thread_stop_nowait(&mdev->worker);
1631         }
1632
1633         drbd_md_sync(mdev);
1634 }
1635
1636
1637 static int drbd_thread_setup(void *arg)
1638 {
1639         struct drbd_thread *thi = (struct drbd_thread *) arg;
1640         struct drbd_conf *mdev = thi->mdev;
1641         unsigned long flags;
1642         int retval;
1643
1644 restart:
1645         retval = thi->function(thi);
1646
1647         spin_lock_irqsave(&thi->t_lock, flags);
1648
1649         /* if the receiver has been "Exiting", the last thing it did
1650          * was set the conn state to "StandAlone",
1651          * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
1652          * and receiver thread will be "started".
1653          * drbd_thread_start needs to set "Restarting" in that case.
1654          * t_state check and assignment needs to be within the same spinlock,
1655          * so either thread_start sees Exiting, and can remap to Restarting,
1656          * or thread_start see None, and can proceed as normal.
1657          */
1658
1659         if (thi->t_state == Restarting) {
1660                 dev_info(DEV, "Restarting %s\n", current->comm);
1661                 thi->t_state = Running;
1662                 spin_unlock_irqrestore(&thi->t_lock, flags);
1663                 goto restart;
1664         }
1665
1666         thi->task = NULL;
1667         thi->t_state = None;
1668         smp_mb();
1669         complete(&thi->stop);
1670         spin_unlock_irqrestore(&thi->t_lock, flags);
1671
1672         dev_info(DEV, "Terminating %s\n", current->comm);
1673
1674         /* Release mod reference taken when thread was started */
1675         module_put(THIS_MODULE);
1676         return retval;
1677 }
1678
1679 static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1680                       int (*func) (struct drbd_thread *))
1681 {
1682         spin_lock_init(&thi->t_lock);
1683         thi->task    = NULL;
1684         thi->t_state = None;
1685         thi->function = func;
1686         thi->mdev = mdev;
1687 }
1688
1689 int drbd_thread_start(struct drbd_thread *thi)
1690 {
1691         struct drbd_conf *mdev = thi->mdev;
1692         struct task_struct *nt;
1693         unsigned long flags;
1694
1695         const char *me =
1696                 thi == &mdev->receiver ? "receiver" :
1697                 thi == &mdev->asender  ? "asender"  :
1698                 thi == &mdev->worker   ? "worker"   : "NONSENSE";
1699
1700         /* is used from state engine doing drbd_thread_stop_nowait,
1701          * while holding the req lock irqsave */
1702         spin_lock_irqsave(&thi->t_lock, flags);
1703
1704         switch (thi->t_state) {
1705         case None:
1706                 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1707                                 me, current->comm, current->pid);
1708
1709                 /* Get ref on module for thread - this is released when thread exits */
1710                 if (!try_module_get(THIS_MODULE)) {
1711                         dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1712                         spin_unlock_irqrestore(&thi->t_lock, flags);
1713                         return false;
1714                 }
1715
1716                 init_completion(&thi->stop);
1717                 D_ASSERT(thi->task == NULL);
1718                 thi->reset_cpu_mask = 1;
1719                 thi->t_state = Running;
1720                 spin_unlock_irqrestore(&thi->t_lock, flags);
1721                 flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
1722
1723                 nt = kthread_create(drbd_thread_setup, (void *) thi,
1724                                     "drbd%d_%s", mdev_to_minor(mdev), me);
1725
1726                 if (IS_ERR(nt)) {
1727                         dev_err(DEV, "Couldn't start thread\n");
1728
1729                         module_put(THIS_MODULE);
1730                         return false;
1731                 }
1732                 spin_lock_irqsave(&thi->t_lock, flags);
1733                 thi->task = nt;
1734                 thi->t_state = Running;
1735                 spin_unlock_irqrestore(&thi->t_lock, flags);
1736                 wake_up_process(nt);
1737                 break;
1738         case Exiting:
1739                 thi->t_state = Restarting;
1740                 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1741                                 me, current->comm, current->pid);
1742                 /* fall through */
1743         case Running:
1744         case Restarting:
1745         default:
1746                 spin_unlock_irqrestore(&thi->t_lock, flags);
1747                 break;
1748         }
1749
1750         return true;
1751 }
1752
1753
1754 void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1755 {
1756         unsigned long flags;
1757
1758         enum drbd_thread_state ns = restart ? Restarting : Exiting;
1759
1760         /* may be called from state engine, holding the req lock irqsave */
1761         spin_lock_irqsave(&thi->t_lock, flags);
1762
1763         if (thi->t_state == None) {
1764                 spin_unlock_irqrestore(&thi->t_lock, flags);
1765                 if (restart)
1766                         drbd_thread_start(thi);
1767                 return;
1768         }
1769
1770         if (thi->t_state != ns) {
1771                 if (thi->task == NULL) {
1772                         spin_unlock_irqrestore(&thi->t_lock, flags);
1773                         return;
1774                 }
1775
1776                 thi->t_state = ns;
1777                 smp_mb();
1778                 init_completion(&thi->stop);
1779                 if (thi->task != current)
1780                         force_sig(DRBD_SIGKILL, thi->task);
1781
1782         }
1783
1784         spin_unlock_irqrestore(&thi->t_lock, flags);
1785
1786         if (wait)
1787                 wait_for_completion(&thi->stop);
1788 }
1789
1790 #ifdef CONFIG_SMP
1791 /**
1792  * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1793  * @mdev:       DRBD device.
1794  *
1795  * Forces all threads of a device onto the same CPU. This is beneficial for
1796  * DRBD's performance. May be overwritten by user's configuration.
1797  */
1798 void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1799 {
1800         int ord, cpu;
1801
1802         /* user override. */
1803         if (cpumask_weight(mdev->cpu_mask))
1804                 return;
1805
1806         ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1807         for_each_online_cpu(cpu) {
1808                 if (ord-- == 0) {
1809                         cpumask_set_cpu(cpu, mdev->cpu_mask);
1810                         return;
1811                 }
1812         }
1813         /* should not be reached */
1814         cpumask_setall(mdev->cpu_mask);
1815 }
1816
1817 /**
1818  * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1819  * @mdev:       DRBD device.
1820  *
1821  * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
1822  * prematurely.
1823  */
1824 void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1825 {
1826         struct task_struct *p = current;
1827         struct drbd_thread *thi =
1828                 p == mdev->asender.task  ? &mdev->asender  :
1829                 p == mdev->receiver.task ? &mdev->receiver :
1830                 p == mdev->worker.task   ? &mdev->worker   :
1831                 NULL;
1832         ERR_IF(thi == NULL)
1833                 return;
1834         if (!thi->reset_cpu_mask)
1835                 return;
1836         thi->reset_cpu_mask = 0;
1837         set_cpus_allowed_ptr(p, mdev->cpu_mask);
1838 }
1839 #endif
1840
1841 /* the appropriate socket mutex must be held already */
1842 int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
1843                           enum drbd_packets cmd, struct p_header80 *h,
1844                           size_t size, unsigned msg_flags)
1845 {
1846         int sent, ok;
1847
1848         ERR_IF(!h) return false;
1849         ERR_IF(!size) return false;
1850
1851         h->magic   = BE_DRBD_MAGIC;
1852         h->command = cpu_to_be16(cmd);
1853         h->length  = cpu_to_be16(size-sizeof(struct p_header80));
1854
1855         sent = drbd_send(mdev, sock, h, size, msg_flags);
1856
1857         ok = (sent == size);
1858         if (!ok)
1859                 dev_err(DEV, "short sent %s size=%d sent=%d\n",
1860                     cmdname(cmd), (int)size, sent);
1861         return ok;
1862 }
1863
1864 /* don't pass the socket. we may only look at it
1865  * when we hold the appropriate socket mutex.
1866  */
1867 int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
1868                   enum drbd_packets cmd, struct p_header80 *h, size_t size)
1869 {
1870         int ok = 0;
1871         struct socket *sock;
1872
1873         if (use_data_socket) {
1874                 mutex_lock(&mdev->data.mutex);
1875                 sock = mdev->data.socket;
1876         } else {
1877                 mutex_lock(&mdev->meta.mutex);
1878                 sock = mdev->meta.socket;
1879         }
1880
1881         /* drbd_disconnect() could have called drbd_free_sock()
1882          * while we were waiting in down()... */
1883         if (likely(sock != NULL))
1884                 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1885
1886         if (use_data_socket)
1887                 mutex_unlock(&mdev->data.mutex);
1888         else
1889                 mutex_unlock(&mdev->meta.mutex);
1890         return ok;
1891 }
1892
1893 int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1894                    size_t size)
1895 {
1896         struct p_header80 h;
1897         int ok;
1898
1899         h.magic   = BE_DRBD_MAGIC;
1900         h.command = cpu_to_be16(cmd);
1901         h.length  = cpu_to_be16(size);
1902
1903         if (!drbd_get_data_sock(mdev))
1904                 return 0;
1905
1906         ok = (sizeof(h) ==
1907                 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1908         ok = ok && (size ==
1909                 drbd_send(mdev, mdev->data.socket, data, size, 0));
1910
1911         drbd_put_data_sock(mdev);
1912
1913         return ok;
1914 }
1915
1916 int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1917 {
1918         struct p_rs_param_95 *p;
1919         struct socket *sock;
1920         int size, rv;
1921         const int apv = mdev->agreed_pro_version;
1922
1923         size = apv <= 87 ? sizeof(struct p_rs_param)
1924                 : apv == 88 ? sizeof(struct p_rs_param)
1925                         + strlen(mdev->sync_conf.verify_alg) + 1
1926                 : apv <= 94 ? sizeof(struct p_rs_param_89)
1927                 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
1928
1929         /* used from admin command context and receiver/worker context.
1930          * to avoid kmalloc, grab the socket right here,
1931          * then use the pre-allocated sbuf there */
1932         mutex_lock(&mdev->data.mutex);
1933         sock = mdev->data.socket;
1934
1935         if (likely(sock != NULL)) {
1936                 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
1937
1938                 p = &mdev->data.sbuf.rs_param_95;
1939
1940                 /* initialize verify_alg and csums_alg */
1941                 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
1942
1943                 p->rate = cpu_to_be32(sc->rate);
1944                 p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
1945                 p->c_delay_target = cpu_to_be32(sc->c_delay_target);
1946                 p->c_fill_target = cpu_to_be32(sc->c_fill_target);
1947                 p->c_max_rate = cpu_to_be32(sc->c_max_rate);
1948
1949                 if (apv >= 88)
1950                         strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
1951                 if (apv >= 89)
1952                         strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
1953
1954                 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
1955         } else
1956                 rv = 0; /* not ok */
1957
1958         mutex_unlock(&mdev->data.mutex);
1959
1960         return rv;
1961 }
1962
1963 int drbd_send_protocol(struct drbd_conf *mdev)
1964 {
1965         struct p_protocol *p;
1966         int size, cf, rv;
1967
1968         size = sizeof(struct p_protocol);
1969
1970         if (mdev->agreed_pro_version >= 87)
1971                 size += strlen(mdev->net_conf->integrity_alg) + 1;
1972
1973         /* we must not recurse into our own queue,
1974          * as that is blocked during handshake */
1975         p = kmalloc(size, GFP_NOIO);
1976         if (p == NULL)
1977                 return 0;
1978
1979         p->protocol      = cpu_to_be32(mdev->net_conf->wire_protocol);
1980         p->after_sb_0p   = cpu_to_be32(mdev->net_conf->after_sb_0p);
1981         p->after_sb_1p   = cpu_to_be32(mdev->net_conf->after_sb_1p);
1982         p->after_sb_2p   = cpu_to_be32(mdev->net_conf->after_sb_2p);
1983         p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
1984
1985         cf = 0;
1986         if (mdev->net_conf->want_lose)
1987                 cf |= CF_WANT_LOSE;
1988         if (mdev->net_conf->dry_run) {
1989                 if (mdev->agreed_pro_version >= 92)
1990                         cf |= CF_DRY_RUN;
1991                 else {
1992                         dev_err(DEV, "--dry-run is not supported by peer");
1993                         kfree(p);
1994                         return -1;
1995                 }
1996         }
1997         p->conn_flags    = cpu_to_be32(cf);
1998
1999         if (mdev->agreed_pro_version >= 87)
2000                 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
2001
2002         rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
2003                            (struct p_header80 *)p, size);
2004         kfree(p);
2005         return rv;
2006 }
2007
2008 int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
2009 {
2010         struct p_uuids p;
2011         int i;
2012
2013         if (!get_ldev_if_state(mdev, D_NEGOTIATING))
2014                 return 1;
2015
2016         for (i = UI_CURRENT; i < UI_SIZE; i++)
2017                 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
2018
2019         mdev->comm_bm_set = drbd_bm_total_weight(mdev);
2020         p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
2021         uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
2022         uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
2023         uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
2024         p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
2025
2026         put_ldev(mdev);
2027
2028         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
2029                              (struct p_header80 *)&p, sizeof(p));
2030 }
2031
2032 int drbd_send_uuids(struct drbd_conf *mdev)
2033 {
2034         return _drbd_send_uuids(mdev, 0);
2035 }
2036
2037 int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
2038 {
2039         return _drbd_send_uuids(mdev, 8);
2040 }
2041
2042 void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
2043 {
2044         if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2045                 u64 *uuid = mdev->ldev->md.uuid;
2046                 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
2047                      text,
2048                      (unsigned long long)uuid[UI_CURRENT],
2049                      (unsigned long long)uuid[UI_BITMAP],
2050                      (unsigned long long)uuid[UI_HISTORY_START],
2051                      (unsigned long long)uuid[UI_HISTORY_END]);
2052                 put_ldev(mdev);
2053         } else {
2054                 dev_info(DEV, "%s effective data uuid: %016llX\n",
2055                                 text,
2056                                 (unsigned long long)mdev->ed_uuid);
2057         }
2058 }
2059
2060 int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
2061 {
2062         struct p_rs_uuid p;
2063         u64 uuid;
2064
2065         D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
2066
2067         uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
2068         drbd_uuid_set(mdev, UI_BITMAP, uuid);
2069         drbd_print_uuids(mdev, "updated sync UUID");
2070         drbd_md_sync(mdev);
2071         p.uuid = cpu_to_be64(uuid);
2072
2073         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
2074                              (struct p_header80 *)&p, sizeof(p));
2075 }
2076
2077 int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
2078 {
2079         struct p_sizes p;
2080         sector_t d_size, u_size;
2081         int q_order_type;
2082         int ok;
2083
2084         if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2085                 D_ASSERT(mdev->ldev->backing_bdev);
2086                 d_size = drbd_get_max_capacity(mdev->ldev);
2087                 u_size = mdev->ldev->dc.disk_size;
2088                 q_order_type = drbd_queue_order_type(mdev);
2089                 put_ldev(mdev);
2090         } else {
2091                 d_size = 0;
2092                 u_size = 0;
2093                 q_order_type = QUEUE_ORDERED_NONE;
2094         }
2095
2096         p.d_size = cpu_to_be64(d_size);
2097         p.u_size = cpu_to_be64(u_size);
2098         p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
2099         p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9);
2100         p.queue_order_type = cpu_to_be16(q_order_type);
2101         p.dds_flags = cpu_to_be16(flags);
2102
2103         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
2104                            (struct p_header80 *)&p, sizeof(p));
2105         return ok;
2106 }
2107
2108 /**
2109  * drbd_send_state() - Sends the drbd state to the peer
2110  * @mdev:       DRBD device.
2111  */
2112 int drbd_send_state(struct drbd_conf *mdev)
2113 {
2114         struct socket *sock;
2115         struct p_state p;
2116         int ok = 0;
2117
2118         /* Grab state lock so we wont send state if we're in the middle
2119          * of a cluster wide state change on another thread */
2120         drbd_state_lock(mdev);
2121
2122         mutex_lock(&mdev->data.mutex);
2123
2124         p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
2125         sock = mdev->data.socket;
2126
2127         if (likely(sock != NULL)) {
2128                 ok = _drbd_send_cmd(mdev, sock, P_STATE,
2129                                     (struct p_header80 *)&p, sizeof(p), 0);
2130         }
2131
2132         mutex_unlock(&mdev->data.mutex);
2133
2134         drbd_state_unlock(mdev);
2135         return ok;
2136 }
2137
2138 int drbd_send_state_req(struct drbd_conf *mdev,
2139         union drbd_state mask, union drbd_state val)
2140 {
2141         struct p_req_state p;
2142
2143         p.mask    = cpu_to_be32(mask.i);
2144         p.val     = cpu_to_be32(val.i);
2145
2146         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
2147                              (struct p_header80 *)&p, sizeof(p));
2148 }
2149
2150 int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
2151 {
2152         struct p_req_state_reply p;
2153
2154         p.retcode    = cpu_to_be32(retcode);
2155
2156         return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
2157                              (struct p_header80 *)&p, sizeof(p));
2158 }
2159
2160 int fill_bitmap_rle_bits(struct drbd_conf *mdev,
2161         struct p_compressed_bm *p,
2162         struct bm_xfer_ctx *c)
2163 {
2164         struct bitstream bs;
2165         unsigned long plain_bits;
2166         unsigned long tmp;
2167         unsigned long rl;
2168         unsigned len;
2169         unsigned toggle;
2170         int bits;
2171
2172         /* may we use this feature? */
2173         if ((mdev->sync_conf.use_rle == 0) ||
2174                 (mdev->agreed_pro_version < 90))
2175                         return 0;
2176
2177         if (c->bit_offset >= c->bm_bits)
2178                 return 0; /* nothing to do. */
2179
2180         /* use at most thus many bytes */
2181         bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
2182         memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
2183         /* plain bits covered in this code string */
2184         plain_bits = 0;
2185
2186         /* p->encoding & 0x80 stores whether the first run length is set.
2187          * bit offset is implicit.
2188          * start with toggle == 2 to be able to tell the first iteration */
2189         toggle = 2;
2190
2191         /* see how much plain bits we can stuff into one packet
2192          * using RLE and VLI. */
2193         do {
2194                 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
2195                                     : _drbd_bm_find_next(mdev, c->bit_offset);
2196                 if (tmp == -1UL)
2197                         tmp = c->bm_bits;
2198                 rl = tmp - c->bit_offset;
2199
2200                 if (toggle == 2) { /* first iteration */
2201                         if (rl == 0) {
2202                                 /* the first checked bit was set,
2203                                  * store start value, */
2204                                 DCBP_set_start(p, 1);
2205                                 /* but skip encoding of zero run length */
2206                                 toggle = !toggle;
2207                                 continue;
2208                         }
2209                         DCBP_set_start(p, 0);
2210                 }
2211
2212                 /* paranoia: catch zero runlength.
2213                  * can only happen if bitmap is modified while we scan it. */
2214                 if (rl == 0) {
2215                         dev_err(DEV, "unexpected zero runlength while encoding bitmap "
2216                             "t:%u bo:%lu\n", toggle, c->bit_offset);
2217                         return -1;
2218                 }
2219
2220                 bits = vli_encode_bits(&bs, rl);
2221                 if (bits == -ENOBUFS) /* buffer full */
2222                         break;
2223                 if (bits <= 0) {
2224                         dev_err(DEV, "error while encoding bitmap: %d\n", bits);
2225                         return 0;
2226                 }
2227
2228                 toggle = !toggle;
2229                 plain_bits += rl;
2230                 c->bit_offset = tmp;
2231         } while (c->bit_offset < c->bm_bits);
2232
2233         len = bs.cur.b - p->code + !!bs.cur.bit;
2234
2235         if (plain_bits < (len << 3)) {
2236                 /* incompressible with this method.
2237                  * we need to rewind both word and bit position. */
2238                 c->bit_offset -= plain_bits;
2239                 bm_xfer_ctx_bit_to_word_offset(c);
2240                 c->bit_offset = c->word_offset * BITS_PER_LONG;
2241                 return 0;
2242         }
2243
2244         /* RLE + VLI was able to compress it just fine.
2245          * update c->word_offset. */
2246         bm_xfer_ctx_bit_to_word_offset(c);
2247
2248         /* store pad_bits */
2249         DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
2250
2251         return len;
2252 }
2253
2254 /**
2255  * send_bitmap_rle_or_plain
2256  *
2257  * Return 0 when done, 1 when another iteration is needed, and a negative error
2258  * code upon failure.
2259  */
2260 static int
2261 send_bitmap_rle_or_plain(struct drbd_conf *mdev,
2262                          struct p_header80 *h, struct bm_xfer_ctx *c)
2263 {
2264         struct p_compressed_bm *p = (void*)h;
2265         unsigned long num_words;
2266         int len;
2267         int ok;
2268
2269         len = fill_bitmap_rle_bits(mdev, p, c);
2270
2271         if (len < 0)
2272                 return -EIO;
2273
2274         if (len) {
2275                 DCBP_set_code(p, RLE_VLI_Bits);
2276                 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
2277                         sizeof(*p) + len, 0);
2278
2279                 c->packets[0]++;
2280                 c->bytes[0] += sizeof(*p) + len;
2281
2282                 if (c->bit_offset >= c->bm_bits)
2283                         len = 0; /* DONE */
2284         } else {
2285                 /* was not compressible.
2286                  * send a buffer full of plain text bits instead. */
2287                 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
2288                 len = num_words * sizeof(long);
2289                 if (len)
2290                         drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
2291                 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
2292                                    h, sizeof(struct p_header80) + len, 0);
2293                 c->word_offset += num_words;
2294                 c->bit_offset = c->word_offset * BITS_PER_LONG;
2295
2296                 c->packets[1]++;
2297                 c->bytes[1] += sizeof(struct p_header80) + len;
2298
2299                 if (c->bit_offset > c->bm_bits)
2300                         c->bit_offset = c->bm_bits;
2301         }
2302         if (ok) {
2303                 if (len == 0) {
2304                         INFO_bm_xfer_stats(mdev, "send", c);
2305                         return 0;
2306                 } else
2307                         return 1;
2308         }
2309         return -EIO;
2310 }
2311
2312 /* See the comment at receive_bitmap() */
2313 int _drbd_send_bitmap(struct drbd_conf *mdev)
2314 {
2315         struct bm_xfer_ctx c;
2316         struct p_header80 *p;
2317         int err;
2318
2319         ERR_IF(!mdev->bitmap) return false;
2320
2321         /* maybe we should use some per thread scratch page,
2322          * and allocate that during initial device creation? */
2323         p = (struct p_header80 *) __get_free_page(GFP_NOIO);
2324         if (!p) {
2325                 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
2326                 return false;
2327         }
2328
2329         if (get_ldev(mdev)) {
2330                 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2331                         dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2332                         drbd_bm_set_all(mdev);
2333                         if (drbd_bm_write(mdev)) {
2334                                 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
2335                                  * but otherwise process as per normal - need to tell other
2336                                  * side that a full resync is required! */
2337                                 dev_err(DEV, "Failed to write bitmap to disk!\n");
2338                         } else {
2339                                 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2340                                 drbd_md_sync(mdev);
2341                         }
2342                 }
2343                 put_ldev(mdev);
2344         }
2345
2346         c = (struct bm_xfer_ctx) {
2347                 .bm_bits = drbd_bm_bits(mdev),
2348                 .bm_words = drbd_bm_words(mdev),
2349         };
2350
2351         do {
2352                 err = send_bitmap_rle_or_plain(mdev, p, &c);
2353         } while (err > 0);
2354
2355         free_page((unsigned long) p);
2356         return err == 0;
2357 }
2358
2359 int drbd_send_bitmap(struct drbd_conf *mdev)
2360 {
2361         int err;
2362
2363         if (!drbd_get_data_sock(mdev))
2364                 return -1;
2365         err = !_drbd_send_bitmap(mdev);
2366         drbd_put_data_sock(mdev);
2367         return err;
2368 }
2369
2370 int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2371 {
2372         int ok;
2373         struct p_barrier_ack p;
2374
2375         p.barrier  = barrier_nr;
2376         p.set_size = cpu_to_be32(set_size);
2377
2378         if (mdev->state.conn < C_CONNECTED)
2379                 return false;
2380         ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
2381                         (struct p_header80 *)&p, sizeof(p));
2382         return ok;
2383 }
2384
2385 /**
2386  * _drbd_send_ack() - Sends an ack packet
2387  * @mdev:       DRBD device.
2388  * @cmd:        Packet command code.
2389  * @sector:     sector, needs to be in big endian byte order
2390  * @blksize:    size in byte, needs to be in big endian byte order
2391  * @block_id:   Id, big endian byte order
2392  */
2393 static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2394                           u64 sector,
2395                           u32 blksize,
2396                           u64 block_id)
2397 {
2398         int ok;
2399         struct p_block_ack p;
2400
2401         p.sector   = sector;
2402         p.block_id = block_id;
2403         p.blksize  = blksize;
2404         p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2405
2406         if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
2407                 return false;
2408         ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
2409                                 (struct p_header80 *)&p, sizeof(p));
2410         return ok;
2411 }
2412
2413 /* dp->sector and dp->block_id already/still in network byte order,
2414  * data_size is payload size according to dp->head,
2415  * and may need to be corrected for digest size. */
2416 int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
2417                      struct p_data *dp, int data_size)
2418 {
2419         data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
2420                 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
2421         return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2422                               dp->block_id);
2423 }
2424
2425 int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2426                      struct p_block_req *rp)
2427 {
2428         return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2429 }
2430
2431 /**
2432  * drbd_send_ack() - Sends an ack packet
2433  * @mdev:       DRBD device.
2434  * @cmd:        Packet command code.
2435  * @e:          Epoch entry.
2436  */
2437 int drbd_send_ack(struct drbd_conf *mdev,
2438         enum drbd_packets cmd, struct drbd_epoch_entry *e)
2439 {
2440         return _drbd_send_ack(mdev, cmd,
2441                               cpu_to_be64(e->sector),
2442                               cpu_to_be32(e->size),
2443                               e->block_id);
2444 }
2445
2446 /* This function misuses the block_id field to signal if the blocks
2447  * are is sync or not. */
2448 int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2449                      sector_t sector, int blksize, u64 block_id)
2450 {
2451         return _drbd_send_ack(mdev, cmd,
2452                               cpu_to_be64(sector),
2453                               cpu_to_be32(blksize),
2454                               cpu_to_be64(block_id));
2455 }
2456
2457 int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2458                        sector_t sector, int size, u64 block_id)
2459 {
2460         int ok;
2461         struct p_block_req p;
2462
2463         p.sector   = cpu_to_be64(sector);
2464         p.block_id = block_id;
2465         p.blksize  = cpu_to_be32(size);
2466
2467         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
2468                                 (struct p_header80 *)&p, sizeof(p));
2469         return ok;
2470 }
2471
2472 int drbd_send_drequest_csum(struct drbd_conf *mdev,
2473                             sector_t sector, int size,
2474                             void *digest, int digest_size,
2475                             enum drbd_packets cmd)
2476 {
2477         int ok;
2478         struct p_block_req p;
2479
2480         p.sector   = cpu_to_be64(sector);
2481         p.block_id = BE_DRBD_MAGIC + 0xbeef;
2482         p.blksize  = cpu_to_be32(size);
2483
2484         p.head.magic   = BE_DRBD_MAGIC;
2485         p.head.command = cpu_to_be16(cmd);
2486         p.head.length  = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
2487
2488         mutex_lock(&mdev->data.mutex);
2489
2490         ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2491         ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2492
2493         mutex_unlock(&mdev->data.mutex);
2494
2495         return ok;
2496 }
2497
2498 int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2499 {
2500         int ok;
2501         struct p_block_req p;
2502
2503         p.sector   = cpu_to_be64(sector);
2504         p.block_id = BE_DRBD_MAGIC + 0xbabe;
2505         p.blksize  = cpu_to_be32(size);
2506
2507         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
2508                            (struct p_header80 *)&p, sizeof(p));
2509         return ok;
2510 }
2511
2512 /* called on sndtimeo
2513  * returns false if we should retry,
2514  * true if we think connection is dead
2515  */
2516 static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2517 {
2518         int drop_it;
2519         /* long elapsed = (long)(jiffies - mdev->last_received); */
2520
2521         drop_it =   mdev->meta.socket == sock
2522                 || !mdev->asender.task
2523                 || get_t_state(&mdev->asender) != Running
2524                 || mdev->state.conn < C_CONNECTED;
2525
2526         if (drop_it)
2527                 return true;
2528
2529         drop_it = !--mdev->ko_count;
2530         if (!drop_it) {
2531                 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2532                        current->comm, current->pid, mdev->ko_count);
2533                 request_ping(mdev);
2534         }
2535
2536         return drop_it; /* && (mdev->state == R_PRIMARY) */;
2537 }
2538
2539 /* The idea of sendpage seems to be to put some kind of reference
2540  * to the page into the skb, and to hand it over to the NIC. In
2541  * this process get_page() gets called.
2542  *
2543  * As soon as the page was really sent over the network put_page()
2544  * gets called by some part of the network layer. [ NIC driver? ]
2545  *
2546  * [ get_page() / put_page() increment/decrement the count. If count
2547  *   reaches 0 the page will be freed. ]
2548  *
2549  * This works nicely with pages from FSs.
2550  * But this means that in protocol A we might signal IO completion too early!
2551  *
2552  * In order not to corrupt data during a resync we must make sure
2553  * that we do not reuse our own buffer pages (EEs) to early, therefore
2554  * we have the net_ee list.
2555  *
2556  * XFS seems to have problems, still, it submits pages with page_count == 0!
2557  * As a workaround, we disable sendpage on pages
2558  * with page_count == 0 or PageSlab.
2559  */
2560 static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
2561                    int offset, size_t size, unsigned msg_flags)
2562 {
2563         int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
2564         kunmap(page);
2565         if (sent == size)
2566                 mdev->send_cnt += size>>9;
2567         return sent == size;
2568 }
2569
2570 static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
2571                     int offset, size_t size, unsigned msg_flags)
2572 {
2573         mm_segment_t oldfs = get_fs();
2574         int sent, ok;
2575         int len = size;
2576
2577         /* e.g. XFS meta- & log-data is in slab pages, which have a
2578          * page_count of 0 and/or have PageSlab() set.
2579          * we cannot use send_page for those, as that does get_page();
2580          * put_page(); and would cause either a VM_BUG directly, or
2581          * __page_cache_release a page that would actually still be referenced
2582          * by someone, leading to some obscure delayed Oops somewhere else. */
2583         if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
2584                 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
2585
2586         msg_flags |= MSG_NOSIGNAL;
2587         drbd_update_congested(mdev);
2588         set_fs(KERNEL_DS);
2589         do {
2590                 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2591                                                         offset, len,
2592                                                         msg_flags);
2593                 if (sent == -EAGAIN) {
2594                         if (we_should_drop_the_connection(mdev,
2595                                                           mdev->data.socket))
2596                                 break;
2597                         else
2598                                 continue;
2599                 }
2600                 if (sent <= 0) {
2601                         dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2602                              __func__, (int)size, len, sent);
2603                         break;
2604                 }
2605                 len    -= sent;
2606                 offset += sent;
2607         } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2608         set_fs(oldfs);
2609         clear_bit(NET_CONGESTED, &mdev->flags);
2610
2611         ok = (len == 0);
2612         if (likely(ok))
2613                 mdev->send_cnt += size>>9;
2614         return ok;
2615 }
2616
2617 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2618 {
2619         struct bio_vec *bvec;
2620         int i;
2621         /* hint all but last page with MSG_MORE */
2622         __bio_for_each_segment(bvec, bio, i, 0) {
2623                 if (!_drbd_no_send_page(mdev, bvec->bv_page,
2624                                      bvec->bv_offset, bvec->bv_len,
2625                                      i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2626                         return 0;
2627         }
2628         return 1;
2629 }
2630
2631 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2632 {
2633         struct bio_vec *bvec;
2634         int i;
2635         /* hint all but last page with MSG_MORE */
2636         __bio_for_each_segment(bvec, bio, i, 0) {
2637                 if (!_drbd_send_page(mdev, bvec->bv_page,
2638                                      bvec->bv_offset, bvec->bv_len,
2639                                      i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2640                         return 0;
2641         }
2642         return 1;
2643 }
2644
2645 static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2646 {
2647         struct page *page = e->pages;
2648         unsigned len = e->size;
2649         /* hint all but last page with MSG_MORE */
2650         page_chain_for_each(page) {
2651                 unsigned l = min_t(unsigned, len, PAGE_SIZE);
2652                 if (!_drbd_send_page(mdev, page, 0, l,
2653                                 page_chain_next(page) ? MSG_MORE : 0))
2654                         return 0;
2655                 len -= l;
2656         }
2657         return 1;
2658 }
2659
2660 static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
2661 {
2662         if (mdev->agreed_pro_version >= 95)
2663                 return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
2664                         (bi_rw & REQ_FUA ? DP_FUA : 0) |
2665                         (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
2666                         (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
2667         else
2668                 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
2669 }
2670
2671 /* Used to send write requests
2672  * R_PRIMARY -> Peer    (P_DATA)
2673  */
2674 int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2675 {
2676         int ok = 1;
2677         struct p_data p;
2678         unsigned int dp_flags = 0;
2679         void *dgb;
2680         int dgs;
2681
2682         if (!drbd_get_data_sock(mdev))
2683                 return 0;
2684
2685         dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2686                 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2687
2688         if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
2689                 p.head.h80.magic   = BE_DRBD_MAGIC;
2690                 p.head.h80.command = cpu_to_be16(P_DATA);
2691                 p.head.h80.length  =
2692                         cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2693         } else {
2694                 p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
2695                 p.head.h95.command = cpu_to_be16(P_DATA);
2696                 p.head.h95.length  =
2697                         cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2698         }
2699
2700         p.sector   = cpu_to_be64(req->sector);
2701         p.block_id = (unsigned long)req;
2702         p.seq_num  = cpu_to_be32(req->seq_num =
2703                                  atomic_add_return(1, &mdev->packet_seq));
2704
2705         dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
2706
2707         if (mdev->state.conn >= C_SYNC_SOURCE &&
2708             mdev->state.conn <= C_PAUSED_SYNC_T)
2709                 dp_flags |= DP_MAY_SET_IN_SYNC;
2710
2711         p.dp_flags = cpu_to_be32(dp_flags);
2712         set_bit(UNPLUG_REMOTE, &mdev->flags);
2713         ok = (sizeof(p) ==
2714                 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
2715         if (ok && dgs) {
2716                 dgb = mdev->int_dig_out;
2717                 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
2718                 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
2719         }
2720         if (ok) {
2721                 /* For protocol A, we have to memcpy the payload into
2722                  * socket buffers, as we may complete right away
2723                  * as soon as we handed it over to tcp, at which point the data
2724                  * pages may become invalid.
2725                  *
2726                  * For data-integrity enabled, we copy it as well, so we can be
2727                  * sure that even if the bio pages may still be modified, it
2728                  * won't change the data on the wire, thus if the digest checks
2729                  * out ok after sending on this side, but does not fit on the
2730                  * receiving side, we sure have detected corruption elsewhere.
2731                  */
2732                 if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
2733                         ok = _drbd_send_bio(mdev, req->master_bio);
2734                 else
2735                         ok = _drbd_send_zc_bio(mdev, req->master_bio);
2736
2737                 /* double check digest, sometimes buffers have been modified in flight. */
2738                 if (dgs > 0 && dgs <= 64) {
2739                         /* 64 byte, 512 bit, is the larges digest size
2740                          * currently supported in kernel crypto. */
2741                         unsigned char digest[64];
2742                         drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
2743                         if (memcmp(mdev->int_dig_out, digest, dgs)) {
2744                                 dev_warn(DEV,
2745                                         "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
2746                                         (unsigned long long)req->sector, req->size);
2747                         }
2748                 } /* else if (dgs > 64) {
2749                      ... Be noisy about digest too large ...
2750                 } */
2751         }
2752
2753         drbd_put_data_sock(mdev);
2754
2755         return ok;
2756 }
2757
2758 /* answer packet, used to send data back for read requests:
2759  *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
2760  *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
2761  */
2762 int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2763                     struct drbd_epoch_entry *e)
2764 {
2765         int ok;
2766         struct p_data p;
2767         void *dgb;
2768         int dgs;
2769
2770         dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2771                 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2772
2773         if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
2774                 p.head.h80.magic   = BE_DRBD_MAGIC;
2775                 p.head.h80.command = cpu_to_be16(cmd);
2776                 p.head.h80.length  =
2777                         cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2778         } else {
2779                 p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
2780                 p.head.h95.command = cpu_to_be16(cmd);
2781                 p.head.h95.length  =
2782                         cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2783         }
2784
2785         p.sector   = cpu_to_be64(e->sector);
2786         p.block_id = e->block_id;
2787         /* p.seq_num  = 0;    No sequence numbers here.. */
2788
2789         /* Only called by our kernel thread.
2790          * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2791          * in response to admin command or module unload.
2792          */
2793         if (!drbd_get_data_sock(mdev))
2794                 return 0;
2795
2796         ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
2797         if (ok && dgs) {
2798                 dgb = mdev->int_dig_out;
2799                 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
2800                 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
2801         }
2802         if (ok)
2803                 ok = _drbd_send_zc_ee(mdev, e);
2804
2805         drbd_put_data_sock(mdev);
2806
2807         return ok;
2808 }
2809
2810 int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
2811 {
2812         struct p_block_desc p;
2813
2814         p.sector  = cpu_to_be64(req->sector);
2815         p.blksize = cpu_to_be32(req->size);
2816
2817         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
2818 }
2819
2820 /*
2821   drbd_send distinguishes two cases:
2822
2823   Packets sent via the data socket "sock"
2824   and packets sent via the meta data socket "msock"
2825
2826                     sock                      msock
2827   -----------------+-------------------------+------------------------------
2828   timeout           conf.timeout / 2          conf.timeout / 2
2829   timeout action    send a ping via msock     Abort communication
2830                                               and close all sockets
2831 */
2832
2833 /*
2834  * you must have down()ed the appropriate [m]sock_mutex elsewhere!
2835  */
2836 int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2837               void *buf, size_t size, unsigned msg_flags)
2838 {
2839         struct kvec iov;
2840         struct msghdr msg;
2841         int rv, sent = 0;
2842
2843         if (!sock)
2844                 return -1000;
2845
2846         /* THINK  if (signal_pending) return ... ? */
2847
2848         iov.iov_base = buf;
2849         iov.iov_len  = size;
2850
2851         msg.msg_name       = NULL;
2852         msg.msg_namelen    = 0;
2853         msg.msg_control    = NULL;
2854         msg.msg_controllen = 0;
2855         msg.msg_flags      = msg_flags | MSG_NOSIGNAL;
2856
2857         if (sock == mdev->data.socket) {
2858                 mdev->ko_count = mdev->net_conf->ko_count;
2859                 drbd_update_congested(mdev);
2860         }
2861         do {
2862                 /* STRANGE
2863                  * tcp_sendmsg does _not_ use its size parameter at all ?
2864                  *
2865                  * -EAGAIN on timeout, -EINTR on signal.
2866                  */
2867 /* THINK
2868  * do we need to block DRBD_SIG if sock == &meta.socket ??
2869  * otherwise wake_asender() might interrupt some send_*Ack !
2870  */
2871                 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2872                 if (rv == -EAGAIN) {
2873                         if (we_should_drop_the_connection(mdev, sock))
2874                                 break;
2875                         else
2876                                 continue;
2877                 }
2878                 D_ASSERT(rv != 0);
2879                 if (rv == -EINTR) {
2880                         flush_signals(current);
2881                         rv = 0;
2882                 }
2883                 if (rv < 0)
2884                         break;
2885                 sent += rv;
2886                 iov.iov_base += rv;
2887                 iov.iov_len  -= rv;
2888         } while (sent < size);
2889
2890         if (sock == mdev->data.socket)
2891                 clear_bit(NET_CONGESTED, &mdev->flags);
2892
2893         if (rv <= 0) {
2894                 if (rv != -EAGAIN) {
2895                         dev_err(DEV, "%s_sendmsg returned %d\n",
2896                             sock == mdev->meta.socket ? "msock" : "sock",
2897                             rv);
2898                         drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
2899                 } else
2900                         drbd_force_state(mdev, NS(conn, C_TIMEOUT));
2901         }
2902
2903         return sent;
2904 }
2905
2906 static int drbd_open(struct block_device *bdev, fmode_t mode)
2907 {
2908         struct drbd_conf *mdev = bdev->bd_disk->private_data;
2909         unsigned long flags;
2910         int rv = 0;
2911
2912         mutex_lock(&drbd_main_mutex);
2913         spin_lock_irqsave(&mdev->req_lock, flags);
2914         /* to have a stable mdev->state.role
2915          * and no race with updating open_cnt */
2916
2917         if (mdev->state.role != R_PRIMARY) {
2918                 if (mode & FMODE_WRITE)
2919                         rv = -EROFS;
2920                 else if (!allow_oos)
2921                         rv = -EMEDIUMTYPE;
2922         }
2923
2924         if (!rv)
2925                 mdev->open_cnt++;
2926         spin_unlock_irqrestore(&mdev->req_lock, flags);
2927         mutex_unlock(&drbd_main_mutex);
2928
2929         return rv;
2930 }
2931
2932 static int drbd_release(struct gendisk *gd, fmode_t mode)
2933 {
2934         struct drbd_conf *mdev = gd->private_data;
2935         mutex_lock(&drbd_main_mutex);
2936         mdev->open_cnt--;
2937         mutex_unlock(&drbd_main_mutex);
2938         return 0;
2939 }
2940
2941 static void drbd_set_defaults(struct drbd_conf *mdev)
2942 {
2943         /* This way we get a compile error when sync_conf grows,
2944            and we forgot to initialize it here */
2945         mdev->sync_conf = (struct syncer_conf) {
2946                 /* .rate = */           DRBD_RATE_DEF,
2947                 /* .after = */          DRBD_AFTER_DEF,
2948                 /* .al_extents = */     DRBD_AL_EXTENTS_DEF,
2949                 /* .verify_alg = */     {}, 0,
2950                 /* .cpu_mask = */       {}, 0,
2951                 /* .csums_alg = */      {}, 0,
2952                 /* .use_rle = */        0,
2953                 /* .on_no_data = */     DRBD_ON_NO_DATA_DEF,
2954                 /* .c_plan_ahead = */   DRBD_C_PLAN_AHEAD_DEF,
2955                 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
2956                 /* .c_fill_target = */  DRBD_C_FILL_TARGET_DEF,
2957                 /* .c_max_rate = */     DRBD_C_MAX_RATE_DEF,
2958                 /* .c_min_rate = */     DRBD_C_MIN_RATE_DEF
2959         };
2960
2961         /* Have to use that way, because the layout differs between
2962            big endian and little endian */
2963         mdev->state = (union drbd_state) {
2964                 { .role = R_SECONDARY,
2965                   .peer = R_UNKNOWN,
2966                   .conn = C_STANDALONE,
2967                   .disk = D_DISKLESS,
2968                   .pdsk = D_UNKNOWN,
2969                   .susp = 0,
2970                   .susp_nod = 0,
2971                   .susp_fen = 0
2972                 } };
2973 }
2974
2975 void drbd_init_set_defaults(struct drbd_conf *mdev)
2976 {
2977         /* the memset(,0,) did most of this.
2978          * note: only assignments, no allocation in here */
2979
2980         drbd_set_defaults(mdev);
2981
2982         atomic_set(&mdev->ap_bio_cnt, 0);
2983         atomic_set(&mdev->ap_pending_cnt, 0);
2984         atomic_set(&mdev->rs_pending_cnt, 0);
2985         atomic_set(&mdev->unacked_cnt, 0);
2986         atomic_set(&mdev->local_cnt, 0);
2987         atomic_set(&mdev->net_cnt, 0);
2988         atomic_set(&mdev->packet_seq, 0);
2989         atomic_set(&mdev->pp_in_use, 0);
2990         atomic_set(&mdev->pp_in_use_by_net, 0);
2991         atomic_set(&mdev->rs_sect_in, 0);
2992         atomic_set(&mdev->rs_sect_ev, 0);
2993         atomic_set(&mdev->ap_in_flight, 0);
2994
2995         mutex_init(&mdev->md_io_mutex);
2996         mutex_init(&mdev->data.mutex);
2997         mutex_init(&mdev->meta.mutex);
2998         sema_init(&mdev->data.work.s, 0);
2999         sema_init(&mdev->meta.work.s, 0);
3000         mutex_init(&mdev->state_mutex);
3001
3002         spin_lock_init(&mdev->data.work.q_lock);
3003         spin_lock_init(&mdev->meta.work.q_lock);
3004
3005         spin_lock_init(&mdev->al_lock);
3006         spin_lock_init(&mdev->req_lock);
3007         spin_lock_init(&mdev->peer_seq_lock);
3008         spin_lock_init(&mdev->epoch_lock);
3009
3010         INIT_LIST_HEAD(&mdev->active_ee);
3011         INIT_LIST_HEAD(&mdev->sync_ee);
3012         INIT_LIST_HEAD(&mdev->done_ee);
3013         INIT_LIST_HEAD(&mdev->read_ee);
3014         INIT_LIST_HEAD(&mdev->net_ee);
3015         INIT_LIST_HEAD(&mdev->resync_reads);
3016         INIT_LIST_HEAD(&mdev->data.work.q);
3017         INIT_LIST_HEAD(&mdev->meta.work.q);
3018         INIT_LIST_HEAD(&mdev->resync_work.list);
3019         INIT_LIST_HEAD(&mdev->unplug_work.list);
3020         INIT_LIST_HEAD(&mdev->go_diskless.list);
3021         INIT_LIST_HEAD(&mdev->md_sync_work.list);
3022         INIT_LIST_HEAD(&mdev->start_resync_work.list);
3023         INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
3024
3025         mdev->resync_work.cb  = w_resync_timer;
3026         mdev->unplug_work.cb  = w_send_write_hint;
3027         mdev->go_diskless.cb  = w_go_diskless;
3028         mdev->md_sync_work.cb = w_md_sync;
3029         mdev->bm_io_work.w.cb = w_bitmap_io;
3030         mdev->start_resync_work.cb = w_start_resync;
3031         init_timer(&mdev->resync_timer);
3032         init_timer(&mdev->md_sync_timer);
3033         init_timer(&mdev->start_resync_timer);
3034         mdev->resync_timer.function = resync_timer_fn;
3035         mdev->resync_timer.data = (unsigned long) mdev;
3036         mdev->md_sync_timer.function = md_sync_timer_fn;
3037         mdev->md_sync_timer.data = (unsigned long) mdev;
3038         mdev->start_resync_timer.function = start_resync_timer_fn;
3039         mdev->start_resync_timer.data = (unsigned long) mdev;
3040
3041         init_waitqueue_head(&mdev->misc_wait);
3042         init_waitqueue_head(&mdev->state_wait);
3043         init_waitqueue_head(&mdev->net_cnt_wait);
3044         init_waitqueue_head(&mdev->ee_wait);
3045         init_waitqueue_head(&mdev->al_wait);
3046         init_waitqueue_head(&mdev->seq_wait);
3047
3048         drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
3049         drbd_thread_init(mdev, &mdev->worker, drbd_worker);
3050         drbd_thread_init(mdev, &mdev->asender, drbd_asender);
3051
3052         mdev->agreed_pro_version = PRO_VERSION_MAX;
3053         mdev->write_ordering = WO_bdev_flush;
3054         mdev->resync_wenr = LC_FREE;
3055 }
3056
3057 void drbd_mdev_cleanup(struct drbd_conf *mdev)
3058 {
3059         int i;
3060         if (mdev->receiver.t_state != None)
3061                 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
3062                                 mdev->receiver.t_state);
3063
3064         /* no need to lock it, I'm the only thread alive */
3065         if (atomic_read(&mdev->current_epoch->epoch_size) !=  0)
3066                 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
3067         mdev->al_writ_cnt  =
3068         mdev->bm_writ_cnt  =
3069         mdev->read_cnt     =
3070         mdev->recv_cnt     =
3071         mdev->send_cnt     =
3072         mdev->writ_cnt     =
3073         mdev->p_size       =
3074         mdev->rs_start     =
3075         mdev->rs_total     =
3076         mdev->rs_failed    = 0;
3077         mdev->rs_last_events = 0;
3078         mdev->rs_last_sect_ev = 0;
3079         for (i = 0; i < DRBD_SYNC_MARKS; i++) {
3080                 mdev->rs_mark_left[i] = 0;
3081                 mdev->rs_mark_time[i] = 0;
3082         }
3083         D_ASSERT(mdev->net_conf == NULL);
3084
3085         drbd_set_my_capacity(mdev, 0);
3086         if (mdev->bitmap) {
3087                 /* maybe never allocated. */
3088                 drbd_bm_resize(mdev, 0, 1);
3089                 drbd_bm_cleanup(mdev);
3090         }
3091
3092         drbd_free_resources(mdev);
3093         clear_bit(AL_SUSPENDED, &mdev->flags);
3094
3095         /*
3096          * currently we drbd_init_ee only on module load, so
3097          * we may do drbd_release_ee only on module unload!
3098          */
3099         D_ASSERT(list_empty(&mdev->active_ee));
3100         D_ASSERT(list_empty(&mdev->sync_ee));
3101         D_ASSERT(list_empty(&mdev->done_ee));
3102         D_ASSERT(list_empty(&mdev->read_ee));
3103         D_ASSERT(list_empty(&mdev->net_ee));
3104         D_ASSERT(list_empty(&mdev->resync_reads));
3105         D_ASSERT(list_empty(&mdev->data.work.q));
3106         D_ASSERT(list_empty(&mdev->meta.work.q));
3107         D_ASSERT(list_empty(&mdev->resync_work.list));
3108         D_ASSERT(list_empty(&mdev->unplug_work.list));
3109         D_ASSERT(list_empty(&mdev->go_diskless.list));
3110
3111         drbd_set_defaults(mdev);
3112 }
3113
3114
3115 static void drbd_destroy_mempools(void)
3116 {
3117         struct page *page;
3118
3119         while (drbd_pp_pool) {
3120                 page = drbd_pp_pool;
3121                 drbd_pp_pool = (struct page *)page_private(page);
3122                 __free_page(page);
3123                 drbd_pp_vacant--;
3124         }
3125
3126         /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
3127
3128         if (drbd_ee_mempool)
3129                 mempool_destroy(drbd_ee_mempool);
3130         if (drbd_request_mempool)
3131                 mempool_destroy(drbd_request_mempool);
3132         if (drbd_ee_cache)
3133                 kmem_cache_destroy(drbd_ee_cache);
3134         if (drbd_request_cache)
3135                 kmem_cache_destroy(drbd_request_cache);
3136         if (drbd_bm_ext_cache)
3137                 kmem_cache_destroy(drbd_bm_ext_cache);
3138         if (drbd_al_ext_cache)
3139                 kmem_cache_destroy(drbd_al_ext_cache);
3140
3141         drbd_ee_mempool      = NULL;
3142         drbd_request_mempool = NULL;
3143         drbd_ee_cache        = NULL;
3144         drbd_request_cache   = NULL;
3145         drbd_bm_ext_cache    = NULL;
3146         drbd_al_ext_cache    = NULL;
3147
3148         return;
3149 }
3150
3151 static int drbd_create_mempools(void)
3152 {
3153         struct page *page;
3154         const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
3155         int i;
3156
3157         /* prepare our caches and mempools */
3158         drbd_request_mempool = NULL;
3159         drbd_ee_cache        = NULL;
3160         drbd_request_cache   = NULL;
3161         drbd_bm_ext_cache    = NULL;
3162         drbd_al_ext_cache    = NULL;
3163         drbd_pp_pool         = NULL;
3164
3165         /* caches */
3166         drbd_request_cache = kmem_cache_create(
3167                 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
3168         if (drbd_request_cache == NULL)
3169                 goto Enomem;
3170
3171         drbd_ee_cache = kmem_cache_create(
3172                 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
3173         if (drbd_ee_cache == NULL)
3174                 goto Enomem;
3175
3176         drbd_bm_ext_cache = kmem_cache_create(
3177                 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
3178         if (drbd_bm_ext_cache == NULL)
3179                 goto Enomem;
3180
3181         drbd_al_ext_cache = kmem_cache_create(
3182                 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
3183         if (drbd_al_ext_cache == NULL)
3184                 goto Enomem;
3185
3186         /* mempools */
3187         drbd_request_mempool = mempool_create(number,
3188                 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
3189         if (drbd_request_mempool == NULL)
3190                 goto Enomem;
3191
3192         drbd_ee_mempool = mempool_create(number,
3193                 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
3194         if (drbd_ee_mempool == NULL)
3195                 goto Enomem;
3196
3197         /* drbd's page pool */
3198         spin_lock_init(&drbd_pp_lock);
3199
3200         for (i = 0; i < number; i++) {
3201                 page = alloc_page(GFP_HIGHUSER);
3202                 if (!page)
3203                         goto Enomem;
3204                 set_page_private(page, (unsigned long)drbd_pp_pool);
3205                 drbd_pp_pool = page;
3206         }
3207         drbd_pp_vacant = number;
3208
3209         return 0;
3210
3211 Enomem:
3212         drbd_destroy_mempools(); /* in case we allocated some */
3213         return -ENOMEM;
3214 }
3215
3216 static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
3217         void *unused)
3218 {
3219         /* just so we have it.  you never know what interesting things we
3220          * might want to do here some day...
3221          */
3222
3223         return NOTIFY_DONE;
3224 }
3225
3226 static struct notifier_block drbd_notifier = {
3227         .notifier_call = drbd_notify_sys,
3228 };
3229
3230 static void drbd_release_ee_lists(struct drbd_conf *mdev)
3231 {
3232         int rr;
3233
3234         rr = drbd_release_ee(mdev, &mdev->active_ee);
3235         if (rr)
3236                 dev_err(DEV, "%d EEs in active list found!\n", rr);
3237
3238         rr = drbd_release_ee(mdev, &mdev->sync_ee);
3239         if (rr)
3240                 dev_err(DEV, "%d EEs in sync list found!\n", rr);
3241
3242         rr = drbd_release_ee(mdev, &mdev->read_ee);
3243         if (rr)
3244                 dev_err(DEV, "%d EEs in read list found!\n", rr);
3245
3246         rr = drbd_release_ee(mdev, &mdev->done_ee);
3247         if (rr)
3248                 dev_err(DEV, "%d EEs in done list found!\n", rr);
3249
3250         rr = drbd_release_ee(mdev, &mdev->net_ee);
3251         if (rr)
3252                 dev_err(DEV, "%d EEs in net list found!\n", rr);
3253 }
3254
3255 /* caution. no locking.
3256  * currently only used from module cleanup code. */
3257 static void drbd_delete_device(unsigned int minor)
3258 {
3259         struct drbd_conf *mdev = minor_to_mdev(minor);
3260
3261         if (!mdev)
3262                 return;
3263
3264         /* paranoia asserts */
3265         if (mdev->open_cnt != 0)
3266                 dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
3267                                 __FILE__ , __LINE__);
3268
3269         ERR_IF (!list_empty(&mdev->data.work.q)) {
3270                 struct list_head *lp;
3271                 list_for_each(lp, &mdev->data.work.q) {
3272                         dev_err(DEV, "lp = %p\n", lp);
3273                 }
3274         };
3275         /* end paranoia asserts */
3276
3277         del_gendisk(mdev->vdisk);
3278
3279         /* cleanup stuff that may have been allocated during
3280          * device (re-)configuration or state changes */
3281
3282         if (mdev->this_bdev)
3283                 bdput(mdev->this_bdev);
3284
3285         drbd_free_resources(mdev);
3286
3287         drbd_release_ee_lists(mdev);
3288
3289         /* should be free'd on disconnect? */
3290         kfree(mdev->ee_hash);
3291         /*
3292         mdev->ee_hash_s = 0;
3293         mdev->ee_hash = NULL;
3294         */
3295
3296         lc_destroy(mdev->act_log);
3297         lc_destroy(mdev->resync);
3298
3299         kfree(mdev->p_uuid);
3300         /* mdev->p_uuid = NULL; */
3301
3302         kfree(mdev->int_dig_out);
3303         kfree(mdev->int_dig_in);
3304         kfree(mdev->int_dig_vv);
3305
3306         /* cleanup the rest that has been
3307          * allocated from drbd_new_device
3308          * and actually free the mdev itself */
3309         drbd_free_mdev(mdev);
3310 }
3311
3312 static void drbd_cleanup(void)
3313 {
3314         unsigned int i;
3315
3316         unregister_reboot_notifier(&drbd_notifier);
3317
3318         /* first remove proc,
3319          * drbdsetup uses it's presence to detect
3320          * whether DRBD is loaded.
3321          * If we would get stuck in proc removal,
3322          * but have netlink already deregistered,
3323          * some drbdsetup commands may wait forever
3324          * for an answer.
3325          */
3326         if (drbd_proc)
3327                 remove_proc_entry("drbd", NULL);
3328
3329         drbd_nl_cleanup();
3330
3331         if (minor_table) {
3332                 i = minor_count;
3333                 while (i--)
3334                         drbd_delete_device(i);
3335                 drbd_destroy_mempools();
3336         }
3337
3338         kfree(minor_table);
3339
3340         unregister_blkdev(DRBD_MAJOR, "drbd");
3341
3342         printk(KERN_INFO "drbd: module cleanup done.\n");
3343 }
3344
3345 /**
3346  * drbd_congested() - Callback for pdflush
3347  * @congested_data:     User data
3348  * @bdi_bits:           Bits pdflush is currently interested in
3349  *
3350  * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3351  */
3352 static int drbd_congested(void *congested_data, int bdi_bits)
3353 {
3354         struct drbd_conf *mdev = congested_data;
3355         struct request_queue *q;
3356         char reason = '-';
3357         int r = 0;
3358
3359         if (!may_inc_ap_bio(mdev)) {
3360                 /* DRBD has frozen IO */
3361                 r = bdi_bits;
3362                 reason = 'd';
3363                 goto out;
3364         }
3365
3366         if (get_ldev(mdev)) {
3367                 q = bdev_get_queue(mdev->ldev->backing_bdev);
3368                 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3369                 put_ldev(mdev);
3370                 if (r)
3371                         reason = 'b';
3372         }
3373
3374         if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3375                 r |= (1 << BDI_async_congested);
3376                 reason = reason == 'b' ? 'a' : 'n';
3377         }
3378
3379 out:
3380         mdev->congestion_reason = reason;
3381         return r;
3382 }
3383
3384 struct drbd_conf *drbd_new_device(unsigned int minor)
3385 {
3386         struct drbd_conf *mdev;
3387         struct gendisk *disk;
3388         struct request_queue *q;
3389
3390         /* GFP_KERNEL, we are outside of all write-out paths */
3391         mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3392         if (!mdev)
3393                 return NULL;
3394         if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3395                 goto out_no_cpumask;
3396
3397         mdev->minor = minor;
3398
3399         drbd_init_set_defaults(mdev);
3400
3401         q = blk_alloc_queue(GFP_KERNEL);
3402         if (!q)
3403                 goto out_no_q;
3404         mdev->rq_queue = q;
3405         q->queuedata   = mdev;
3406
3407         disk = alloc_disk(1);
3408         if (!disk)
3409                 goto out_no_disk;
3410         mdev->vdisk = disk;
3411
3412         set_disk_ro(disk, true);
3413
3414         disk->queue = q;
3415         disk->major = DRBD_MAJOR;
3416         disk->first_minor = minor;
3417         disk->fops = &drbd_ops;
3418         sprintf(disk->disk_name, "drbd%d", minor);
3419         disk->private_data = mdev;
3420
3421         mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3422         /* we have no partitions. we contain only ourselves. */
3423         mdev->this_bdev->bd_contains = mdev->this_bdev;
3424
3425         q->backing_dev_info.congested_fn = drbd_congested;
3426         q->backing_dev_info.congested_data = mdev;
3427
3428         blk_queue_make_request(q, drbd_make_request);
3429         blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9);
3430         blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3431         blk_queue_merge_bvec(q, drbd_merge_bvec);
3432         q->queue_lock = &mdev->req_lock;
3433
3434         mdev->md_io_page = alloc_page(GFP_KERNEL);
3435         if (!mdev->md_io_page)
3436                 goto out_no_io_page;
3437
3438         if (drbd_bm_init(mdev))
3439                 goto out_no_bitmap;
3440         /* no need to lock access, we are still initializing this minor device. */
3441         if (!tl_init(mdev))
3442                 goto out_no_tl;
3443
3444         mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
3445         if (!mdev->app_reads_hash)
3446                 goto out_no_app_reads;
3447
3448         mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3449         if (!mdev->current_epoch)
3450                 goto out_no_epoch;
3451
3452         INIT_LIST_HEAD(&mdev->current_epoch->list);
3453         mdev->epochs = 1;
3454
3455         return mdev;
3456
3457 /* out_whatever_else:
3458         kfree(mdev->current_epoch); */
3459 out_no_epoch:
3460         kfree(mdev->app_reads_hash);
3461 out_no_app_reads:
3462         tl_cleanup(mdev);
3463 out_no_tl:
3464         drbd_bm_cleanup(mdev);
3465 out_no_bitmap:
3466         __free_page(mdev->md_io_page);
3467 out_no_io_page:
3468         put_disk(disk);
3469 out_no_disk:
3470         blk_cleanup_queue(q);
3471 out_no_q:
3472         free_cpumask_var(mdev->cpu_mask);
3473 out_no_cpumask:
3474         kfree(mdev);
3475         return NULL;
3476 }
3477
3478 /* counterpart of drbd_new_device.
3479  * last part of drbd_delete_device. */
3480 void drbd_free_mdev(struct drbd_conf *mdev)
3481 {
3482         kfree(mdev->current_epoch);
3483         kfree(mdev->app_reads_hash);
3484         tl_cleanup(mdev);
3485         if (mdev->bitmap) /* should no longer be there. */
3486                 drbd_bm_cleanup(mdev);
3487         __free_page(mdev->md_io_page);
3488         put_disk(mdev->vdisk);
3489         blk_cleanup_queue(mdev->rq_queue);
3490         free_cpumask_var(mdev->cpu_mask);
3491         drbd_free_tl_hash(mdev);
3492         kfree(mdev);
3493 }
3494
3495
3496 int __init drbd_init(void)
3497 {
3498         int err;
3499
3500         if (sizeof(struct p_handshake) != 80) {
3501                 printk(KERN_ERR
3502                        "drbd: never change the size or layout "
3503                        "of the HandShake packet.\n");
3504                 return -EINVAL;
3505         }
3506
3507         if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
3508                 printk(KERN_ERR
3509                         "drbd: invalid minor_count (%d)\n", minor_count);
3510 #ifdef MODULE
3511                 return -EINVAL;
3512 #else
3513                 minor_count = 8;
3514 #endif
3515         }
3516
3517         err = drbd_nl_init();
3518         if (err)
3519                 return err;
3520
3521         err = register_blkdev(DRBD_MAJOR, "drbd");
3522         if (err) {
3523                 printk(KERN_ERR
3524                        "drbd: unable to register block device major %d\n",
3525                        DRBD_MAJOR);
3526                 return err;
3527         }
3528
3529         register_reboot_notifier(&drbd_notifier);
3530
3531         /*
3532          * allocate all necessary structs
3533          */
3534         err = -ENOMEM;
3535
3536         init_waitqueue_head(&drbd_pp_wait);
3537
3538         drbd_proc = NULL; /* play safe for drbd_cleanup */
3539         minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3540                                 GFP_KERNEL);
3541         if (!minor_table)
3542                 goto Enomem;
3543
3544         err = drbd_create_mempools();
3545         if (err)
3546                 goto Enomem;
3547
3548         drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
3549         if (!drbd_proc) {
3550                 printk(KERN_ERR "drbd: unable to register proc file\n");
3551                 goto Enomem;
3552         }
3553
3554         rwlock_init(&global_state_lock);
3555
3556         printk(KERN_INFO "drbd: initialized. "
3557                "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3558                API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3559         printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3560         printk(KERN_INFO "drbd: registered as block device major %d\n",
3561                 DRBD_MAJOR);
3562         printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3563
3564         return 0; /* Success! */
3565
3566 Enomem:
3567         drbd_cleanup();
3568         if (err == -ENOMEM)
3569                 /* currently always the case */
3570                 printk(KERN_ERR "drbd: ran out of memory\n");
3571         else
3572                 printk(KERN_ERR "drbd: initialization failure\n");
3573         return err;
3574 }
3575
3576 void drbd_free_bc(struct drbd_backing_dev *ldev)
3577 {
3578         if (ldev == NULL)
3579                 return;
3580
3581         blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3582         blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3583
3584         kfree(ldev);
3585 }
3586
3587 void drbd_free_sock(struct drbd_conf *mdev)
3588 {
3589         if (mdev->data.socket) {
3590                 mutex_lock(&mdev->data.mutex);
3591                 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3592                 sock_release(mdev->data.socket);
3593                 mdev->data.socket = NULL;
3594                 mutex_unlock(&mdev->data.mutex);
3595         }
3596         if (mdev->meta.socket) {
3597                 mutex_lock(&mdev->meta.mutex);
3598                 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3599                 sock_release(mdev->meta.socket);
3600                 mdev->meta.socket = NULL;
3601                 mutex_unlock(&mdev->meta.mutex);
3602         }
3603 }
3604
3605
3606 void drbd_free_resources(struct drbd_conf *mdev)
3607 {
3608         crypto_free_hash(mdev->csums_tfm);
3609         mdev->csums_tfm = NULL;
3610         crypto_free_hash(mdev->verify_tfm);
3611         mdev->verify_tfm = NULL;
3612         crypto_free_hash(mdev->cram_hmac_tfm);
3613         mdev->cram_hmac_tfm = NULL;
3614         crypto_free_hash(mdev->integrity_w_tfm);
3615         mdev->integrity_w_tfm = NULL;
3616         crypto_free_hash(mdev->integrity_r_tfm);
3617         mdev->integrity_r_tfm = NULL;
3618
3619         drbd_free_sock(mdev);
3620
3621         __no_warn(local,
3622                   drbd_free_bc(mdev->ldev);
3623                   mdev->ldev = NULL;);
3624 }
3625
3626 /* meta data management */
3627
3628 struct meta_data_on_disk {
3629         u64 la_size;           /* last agreed size. */
3630         u64 uuid[UI_SIZE];   /* UUIDs. */
3631         u64 device_uuid;
3632         u64 reserved_u64_1;
3633         u32 flags;             /* MDF */
3634         u32 magic;
3635         u32 md_size_sect;
3636         u32 al_offset;         /* offset to this block */
3637         u32 al_nr_extents;     /* important for restoring the AL */
3638               /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3639         u32 bm_offset;         /* offset to the bitmap, from here */
3640         u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
3641         u32 reserved_u32[4];
3642
3643 } __packed;
3644
3645 /**
3646  * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3647  * @mdev:       DRBD device.
3648  */
3649 void drbd_md_sync(struct drbd_conf *mdev)
3650 {
3651         struct meta_data_on_disk *buffer;
3652         sector_t sector;
3653         int i;
3654
3655         del_timer(&mdev->md_sync_timer);
3656         /* timer may be rearmed by drbd_md_mark_dirty() now. */
3657         if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3658                 return;
3659
3660         /* We use here D_FAILED and not D_ATTACHING because we try to write
3661          * metadata even if we detach due to a disk failure! */
3662         if (!get_ldev_if_state(mdev, D_FAILED))
3663                 return;
3664
3665         mutex_lock(&mdev->md_io_mutex);
3666         buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3667         memset(buffer, 0, 512);
3668
3669         buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3670         for (i = UI_CURRENT; i < UI_SIZE; i++)
3671                 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3672         buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3673         buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3674
3675         buffer->md_size_sect  = cpu_to_be32(mdev->ldev->md.md_size_sect);
3676         buffer->al_offset     = cpu_to_be32(mdev->ldev->md.al_offset);
3677         buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3678         buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3679         buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3680
3681         buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
3682
3683         D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3684         sector = mdev->ldev->md.md_offset;
3685
3686         if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
3687                 /* this was a try anyways ... */
3688                 dev_err(DEV, "meta data update failed!\n");
3689                 drbd_chk_io_error(mdev, 1, true);
3690         }
3691
3692         /* Update mdev->ldev->md.la_size_sect,
3693          * since we updated it on metadata. */
3694         mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3695
3696         mutex_unlock(&mdev->md_io_mutex);
3697         put_ldev(mdev);
3698 }
3699
3700 /**
3701  * drbd_md_read() - Reads in the meta data super block
3702  * @mdev:       DRBD device.
3703  * @bdev:       Device from which the meta data should be read in.
3704  *
3705  * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
3706  * something goes wrong.  Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3707  */
3708 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3709 {
3710         struct meta_data_on_disk *buffer;
3711         int i, rv = NO_ERROR;
3712
3713         if (!get_ldev_if_state(mdev, D_ATTACHING))
3714                 return ERR_IO_MD_DISK;
3715
3716         mutex_lock(&mdev->md_io_mutex);
3717         buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3718
3719         if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
3720                 /* NOTE: cant do normal error processing here as this is
3721                    called BEFORE disk is attached */
3722                 dev_err(DEV, "Error while reading metadata.\n");
3723                 rv = ERR_IO_MD_DISK;
3724                 goto err;
3725         }
3726
3727         if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
3728                 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3729                 rv = ERR_MD_INVALID;
3730                 goto err;
3731         }
3732         if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3733                 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3734                     be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3735                 rv = ERR_MD_INVALID;
3736                 goto err;
3737         }
3738         if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3739                 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3740                     be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3741                 rv = ERR_MD_INVALID;
3742                 goto err;
3743         }
3744         if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3745                 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3746                     be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3747                 rv = ERR_MD_INVALID;
3748                 goto err;
3749         }
3750
3751         if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3752                 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3753                     be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3754                 rv = ERR_MD_INVALID;
3755                 goto err;
3756         }
3757
3758         bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3759         for (i = UI_CURRENT; i < UI_SIZE; i++)
3760                 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3761         bdev->md.flags = be32_to_cpu(buffer->flags);
3762         mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3763         bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3764
3765         if (mdev->sync_conf.al_extents < 7)
3766                 mdev->sync_conf.al_extents = 127;
3767
3768  err:
3769         mutex_unlock(&mdev->md_io_mutex);
3770         put_ldev(mdev);
3771
3772         return rv;
3773 }
3774
3775 /**
3776  * drbd_md_mark_dirty() - Mark meta data super block as dirty
3777  * @mdev:       DRBD device.
3778  *
3779  * Call this function if you change anything that should be written to
3780  * the meta-data super block. This function sets MD_DIRTY, and starts a
3781  * timer that ensures that within five seconds you have to call drbd_md_sync().
3782  */
3783 #ifdef DEBUG
3784 void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3785 {
3786         if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3787                 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3788                 mdev->last_md_mark_dirty.line = line;
3789                 mdev->last_md_mark_dirty.func = func;
3790         }
3791 }
3792 #else
3793 void drbd_md_mark_dirty(struct drbd_conf *mdev)
3794 {
3795         if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
3796                 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
3797 }
3798 #endif
3799
3800 static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3801 {
3802         int i;
3803
3804         for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
3805                 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
3806 }
3807
3808 void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3809 {
3810         if (idx == UI_CURRENT) {
3811                 if (mdev->state.role == R_PRIMARY)
3812                         val |= 1;
3813                 else
3814                         val &= ~((u64)1);
3815
3816                 drbd_set_ed_uuid(mdev, val);
3817         }
3818
3819         mdev->ldev->md.uuid[idx] = val;
3820         drbd_md_mark_dirty(mdev);
3821 }
3822
3823
3824 void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3825 {
3826         if (mdev->ldev->md.uuid[idx]) {
3827                 drbd_uuid_move_history(mdev);
3828                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
3829         }
3830         _drbd_uuid_set(mdev, idx, val);
3831 }
3832
3833 /**
3834  * drbd_uuid_new_current() - Creates a new current UUID
3835  * @mdev:       DRBD device.
3836  *
3837  * Creates a new current UUID, and rotates the old current UUID into
3838  * the bitmap slot. Causes an incremental resync upon next connect.
3839  */
3840 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3841 {
3842         u64 val;
3843         unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
3844
3845         if (bm_uuid)
3846                 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
3847
3848         mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
3849
3850         get_random_bytes(&val, sizeof(u64));
3851         _drbd_uuid_set(mdev, UI_CURRENT, val);
3852         drbd_print_uuids(mdev, "new current UUID");
3853         /* get it to stable storage _now_ */
3854         drbd_md_sync(mdev);
3855 }
3856
3857 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3858 {
3859         if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3860                 return;
3861
3862         if (val == 0) {
3863                 drbd_uuid_move_history(mdev);
3864                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3865                 mdev->ldev->md.uuid[UI_BITMAP] = 0;
3866         } else {
3867                 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
3868                 if (bm_uuid)
3869                         dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
3870
3871                 mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
3872         }
3873         drbd_md_mark_dirty(mdev);
3874 }
3875
3876 /**
3877  * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3878  * @mdev:       DRBD device.
3879  *
3880  * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3881  */
3882 int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3883 {
3884         int rv = -EIO;
3885
3886         if (get_ldev_if_state(mdev, D_ATTACHING)) {
3887                 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3888                 drbd_md_sync(mdev);
3889                 drbd_bm_set_all(mdev);
3890
3891                 rv = drbd_bm_write(mdev);
3892
3893                 if (!rv) {
3894                         drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3895                         drbd_md_sync(mdev);
3896                 }
3897
3898                 put_ldev(mdev);
3899         }
3900
3901         return rv;
3902 }
3903
3904 /**
3905  * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3906  * @mdev:       DRBD device.
3907  *
3908  * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3909  */
3910 int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3911 {
3912         int rv = -EIO;
3913
3914         drbd_resume_al(mdev);
3915         if (get_ldev_if_state(mdev, D_ATTACHING)) {
3916                 drbd_bm_clear_all(mdev);
3917                 rv = drbd_bm_write(mdev);
3918                 put_ldev(mdev);
3919         }
3920
3921         return rv;
3922 }
3923
3924 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3925 {
3926         struct bm_io_work *work = container_of(w, struct bm_io_work, w);
3927         int rv = -EIO;
3928
3929         D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3930
3931         if (get_ldev(mdev)) {
3932                 drbd_bm_lock(mdev, work->why);
3933                 rv = work->io_fn(mdev);
3934                 drbd_bm_unlock(mdev);
3935                 put_ldev(mdev);
3936         }
3937
3938         clear_bit(BITMAP_IO, &mdev->flags);
3939         smp_mb__after_clear_bit();
3940         wake_up(&mdev->misc_wait);
3941
3942         if (work->done)
3943                 work->done(mdev, rv);
3944
3945         clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3946         work->why = NULL;
3947
3948         return 1;
3949 }
3950
3951 void drbd_ldev_destroy(struct drbd_conf *mdev)
3952 {
3953         lc_destroy(mdev->resync);
3954         mdev->resync = NULL;
3955         lc_destroy(mdev->act_log);
3956         mdev->act_log = NULL;
3957         __no_warn(local,
3958                 drbd_free_bc(mdev->ldev);
3959                 mdev->ldev = NULL;);
3960
3961         if (mdev->md_io_tmpp) {
3962                 __free_page(mdev->md_io_tmpp);
3963                 mdev->md_io_tmpp = NULL;
3964         }
3965         clear_bit(GO_DISKLESS, &mdev->flags);
3966 }
3967
3968 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3969 {
3970         D_ASSERT(mdev->state.disk == D_FAILED);
3971         /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3972          * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
3973          * the protected members anymore, though, so once put_ldev reaches zero
3974          * again, it will be safe to free them. */
3975         drbd_force_state(mdev, NS(disk, D_DISKLESS));
3976         return 1;
3977 }
3978
3979 void drbd_go_diskless(struct drbd_conf *mdev)
3980 {
3981         D_ASSERT(mdev->state.disk == D_FAILED);
3982         if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
3983                 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
3984 }
3985
3986 /**
3987  * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3988  * @mdev:       DRBD device.
3989  * @io_fn:      IO callback to be called when bitmap IO is possible
3990  * @done:       callback to be called after the bitmap IO was performed
3991  * @why:        Descriptive text of the reason for doing the IO
3992  *
3993  * While IO on the bitmap happens we freeze application IO thus we ensure
3994  * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
3995  * called from worker context. It MUST NOT be used while a previous such
3996  * work is still pending!
3997  */
3998 void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3999                           int (*io_fn)(struct drbd_conf *),
4000                           void (*done)(struct drbd_conf *, int),
4001                           char *why)
4002 {
4003         D_ASSERT(current == mdev->worker.task);
4004
4005         D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
4006         D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
4007         D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
4008         if (mdev->bm_io_work.why)
4009                 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
4010                         why, mdev->bm_io_work.why);
4011
4012         mdev->bm_io_work.io_fn = io_fn;
4013         mdev->bm_io_work.done = done;
4014         mdev->bm_io_work.why = why;
4015
4016         spin_lock_irq(&mdev->req_lock);
4017         set_bit(BITMAP_IO, &mdev->flags);
4018         if (atomic_read(&mdev->ap_bio_cnt) == 0) {
4019                 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
4020                         drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
4021         }
4022         spin_unlock_irq(&mdev->req_lock);
4023 }
4024
4025 /**
4026  * drbd_bitmap_io() -  Does an IO operation on the whole bitmap
4027  * @mdev:       DRBD device.
4028  * @io_fn:      IO callback to be called when bitmap IO is possible
4029  * @why:        Descriptive text of the reason for doing the IO
4030  *
4031  * freezes application IO while that the actual IO operations runs. This
4032  * functions MAY NOT be called from worker context.
4033  */
4034 int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
4035 {
4036         int rv;
4037
4038         D_ASSERT(current != mdev->worker.task);
4039
4040         drbd_suspend_io(mdev);
4041
4042         drbd_bm_lock(mdev, why);
4043         rv = io_fn(mdev);
4044         drbd_bm_unlock(mdev);
4045
4046         drbd_resume_io(mdev);
4047
4048         return rv;
4049 }
4050
4051 void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4052 {
4053         if ((mdev->ldev->md.flags & flag) != flag) {
4054                 drbd_md_mark_dirty(mdev);
4055                 mdev->ldev->md.flags |= flag;
4056         }
4057 }
4058
4059 void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4060 {
4061         if ((mdev->ldev->md.flags & flag) != 0) {
4062                 drbd_md_mark_dirty(mdev);
4063                 mdev->ldev->md.flags &= ~flag;
4064         }
4065 }
4066 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
4067 {
4068         return (bdev->md.flags & flag) != 0;
4069 }
4070
4071 static void md_sync_timer_fn(unsigned long data)
4072 {
4073         struct drbd_conf *mdev = (struct drbd_conf *) data;
4074
4075         drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
4076 }
4077
4078 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4079 {
4080         dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
4081 #ifdef DEBUG
4082         dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
4083                 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
4084 #endif
4085         drbd_md_sync(mdev);
4086         return 1;
4087 }
4088
4089 #ifdef CONFIG_DRBD_FAULT_INJECTION
4090 /* Fault insertion support including random number generator shamelessly
4091  * stolen from kernel/rcutorture.c */
4092 struct fault_random_state {
4093         unsigned long state;
4094         unsigned long count;
4095 };
4096
4097 #define FAULT_RANDOM_MULT 39916801  /* prime */
4098 #define FAULT_RANDOM_ADD        479001701 /* prime */
4099 #define FAULT_RANDOM_REFRESH 10000
4100
4101 /*
4102  * Crude but fast random-number generator.  Uses a linear congruential
4103  * generator, with occasional help from get_random_bytes().
4104  */
4105 static unsigned long
4106 _drbd_fault_random(struct fault_random_state *rsp)
4107 {
4108         long refresh;
4109
4110         if (!rsp->count--) {
4111                 get_random_bytes(&refresh, sizeof(refresh));
4112                 rsp->state += refresh;
4113                 rsp->count = FAULT_RANDOM_REFRESH;
4114         }
4115         rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
4116         return swahw32(rsp->state);
4117 }
4118
4119 static char *
4120 _drbd_fault_str(unsigned int type) {
4121         static char *_faults[] = {
4122                 [DRBD_FAULT_MD_WR] = "Meta-data write",
4123                 [DRBD_FAULT_MD_RD] = "Meta-data read",
4124                 [DRBD_FAULT_RS_WR] = "Resync write",
4125                 [DRBD_FAULT_RS_RD] = "Resync read",
4126                 [DRBD_FAULT_DT_WR] = "Data write",
4127                 [DRBD_FAULT_DT_RD] = "Data read",
4128                 [DRBD_FAULT_DT_RA] = "Data read ahead",
4129                 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
4130                 [DRBD_FAULT_AL_EE] = "EE allocation",
4131                 [DRBD_FAULT_RECEIVE] = "receive data corruption",
4132         };
4133
4134         return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
4135 }
4136
4137 unsigned int
4138 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
4139 {
4140         static struct fault_random_state rrs = {0, 0};
4141
4142         unsigned int ret = (
4143                 (fault_devs == 0 ||
4144                         ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
4145                 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
4146
4147         if (ret) {
4148                 fault_count++;
4149
4150                 if (__ratelimit(&drbd_ratelimit_state))
4151                         dev_warn(DEV, "***Simulating %s failure\n",
4152                                 _drbd_fault_str(type));
4153         }
4154
4155         return ret;
4156 }
4157 #endif
4158
4159 const char *drbd_buildtag(void)
4160 {
4161         /* DRBD built from external sources has here a reference to the
4162            git hash of the source code. */
4163
4164         static char buildtag[38] = "\0uilt-in";
4165
4166         if (buildtag[0] == 0) {
4167 #ifdef CONFIG_MODULES
4168                 if (THIS_MODULE != NULL)
4169                         sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
4170                 else
4171 #endif
4172                         buildtag[0] = 'b';
4173         }
4174
4175         return buildtag;
4176 }
4177
4178 module_init(drbd_init)
4179 module_exit(drbd_cleanup)
4180
4181 EXPORT_SYMBOL(drbd_conn_str);
4182 EXPORT_SYMBOL(drbd_role_str);
4183 EXPORT_SYMBOL(drbd_disk_str);
4184 EXPORT_SYMBOL(drbd_set_st_err_str);