4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10 Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11 from Logicworks, Inc. for making SDP replication support possible.
13 drbd is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
18 drbd is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with drbd; see the file COPYING. If not, write to
25 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
29 #include <linux/module.h>
30 #include <linux/drbd.h>
31 #include <asm/uaccess.h>
32 #include <asm/types.h>
34 #include <linux/ctype.h>
35 #include <linux/smp_lock.h>
37 #include <linux/file.h>
38 #include <linux/proc_fs.h>
39 #include <linux/init.h>
41 #include <linux/memcontrol.h>
42 #include <linux/mm_inline.h>
43 #include <linux/slab.h>
44 #include <linux/random.h>
45 #include <linux/reboot.h>
46 #include <linux/notifier.h>
47 #include <linux/kthread.h>
49 #define __KERNEL_SYSCALLS__
50 #include <linux/unistd.h>
51 #include <linux/vmalloc.h>
53 #include <linux/drbd_limits.h>
55 #include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
59 struct after_state_chg_work {
63 enum chg_state_flags flags;
64 struct completion *done;
67 int drbdd_init(struct drbd_thread *);
68 int drbd_worker(struct drbd_thread *);
69 int drbd_asender(struct drbd_thread *);
72 static int drbd_open(struct block_device *bdev, fmode_t mode);
73 static int drbd_release(struct gendisk *gd, fmode_t mode);
74 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
75 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
76 union drbd_state ns, enum chg_state_flags flags);
77 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
78 static void md_sync_timer_fn(unsigned long data);
79 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
81 MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
82 "Lars Ellenberg <lars@linbit.com>");
83 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
84 MODULE_VERSION(REL_VERSION);
85 MODULE_LICENSE("GPL");
86 MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (1-255)");
87 MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
89 #include <linux/moduleparam.h>
90 /* allow_open_on_secondary */
91 MODULE_PARM_DESC(allow_oos, "DONT USE!");
92 /* thanks to these macros, if compiled into the kernel (not as a module),
93 * this becomes the boot parameter drbd.minor_count */
94 module_param(minor_count, uint, 0444);
95 module_param(disable_sendpage, bool, 0644);
96 module_param(allow_oos, bool, 0);
97 module_param(cn_idx, uint, 0444);
98 module_param(proc_details, int, 0644);
100 #ifdef CONFIG_DRBD_FAULT_INJECTION
103 static int fault_count;
105 /* bitmap of enabled faults */
106 module_param(enable_faults, int, 0664);
107 /* fault rate % value - applies to all enabled faults */
108 module_param(fault_rate, int, 0664);
109 /* count of faults inserted */
110 module_param(fault_count, int, 0664);
111 /* bitmap of devices to insert faults on */
112 module_param(fault_devs, int, 0644);
115 /* module parameter, defined */
116 unsigned int minor_count = 32;
117 int disable_sendpage;
119 unsigned int cn_idx = CN_IDX_DRBD;
120 int proc_details; /* Detail level in proc drbd*/
122 /* Module parameter for setting the user mode helper program
123 * to run. Default is /sbin/drbdadm */
124 char usermode_helper[80] = "/sbin/drbdadm";
126 module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
128 /* in 2.6.x, our device mapping and config info contains our virtual gendisks
129 * as member "struct gendisk *vdisk;"
131 struct drbd_conf **minor_table;
133 struct kmem_cache *drbd_request_cache;
134 struct kmem_cache *drbd_ee_cache; /* epoch entries */
135 struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
136 struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
137 mempool_t *drbd_request_mempool;
138 mempool_t *drbd_ee_mempool;
140 /* I do not use a standard mempool, because:
141 1) I want to hand out the pre-allocated objects first.
142 2) I want to be able to interrupt sleeping allocation with a signal.
143 Note: This is a singly linked list, the next pointer is the private
144 member of struct page.
146 struct page *drbd_pp_pool;
147 spinlock_t drbd_pp_lock;
149 wait_queue_head_t drbd_pp_wait;
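/* rate limit for DRBD log messages: at most 5 messages per 5*HZ interval */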
151 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
153 static const struct block_device_operations drbd_ops = {
154 .owner = THIS_MODULE,
156 .release = drbd_release,
159 #define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
162 /* When checking with sparse, and this is an inline function, sparse will
163 give tons of false positives. When this is a real function, sparse works.
165 int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
169 atomic_inc(&mdev->local_cnt);
170 io_allowed = (mdev->state.disk >= mins);
172 if (atomic_dec_and_test(&mdev->local_cnt))
173 wake_up(&mdev->misc_wait);
181 * DOC: The transfer log
183 The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
184 * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
185 * of the list. There is always at least one &struct drbd_tl_epoch object.
187 * Each &struct drbd_tl_epoch has a circular, doubly linked list of requests
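* (following ->next leads from mdev->oldest_tle to mdev->newest_tle; each epoch
* carries the requests that were issued while it was the newest one)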
190 static int tl_init(struct drbd_conf *mdev)
192 struct drbd_tl_epoch *b;
194 /* during device minor initialization, we may well use GFP_KERNEL */
195 b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
198 INIT_LIST_HEAD(&b->requests);
199 INIT_LIST_HEAD(&b->w.list);
203 b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
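/* the transfer log starts out with a single empty epoch: oldest_tle == newest_tle */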
205 mdev->oldest_tle = b;
206 mdev->newest_tle = b;
207 INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
209 mdev->tl_hash = NULL;
215 static void tl_cleanup(struct drbd_conf *mdev)
217 D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
218 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
219 kfree(mdev->oldest_tle);
220 mdev->oldest_tle = NULL;
221 kfree(mdev->unused_spare_tle);
222 mdev->unused_spare_tle = NULL;
223 kfree(mdev->tl_hash);
224 mdev->tl_hash = NULL;
229 * _tl_add_barrier() - Adds a barrier to the transfer log
230 * @mdev: DRBD device.
231 * @new: Barrier to be added before the current head of the TL.
233 * The caller must hold the req_lock.
235 void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
237 struct drbd_tl_epoch *newest_before;
239 INIT_LIST_HEAD(&new->requests);
240 INIT_LIST_HEAD(&new->w.list);
241 new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
245 newest_before = mdev->newest_tle;
246 /* never send a barrier number == 0, because that is special-cased
247 * when using TCQ for our write ordering code */
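/* gcc's "a ?: b" extension: take br_number+1 unless it wrapped to 0, in which case use 1 */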
248 new->br_number = (newest_before->br_number+1) ?: 1;
249 if (mdev->newest_tle != new) {
250 mdev->newest_tle->next = new;
251 mdev->newest_tle = new;
256 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
257 * @mdev: DRBD device.
258 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
259 * @set_size: Expected number of requests before that barrier.
261 * In case the passed barrier_nr or set_size does not match the oldest
262 * &struct drbd_tl_epoch object, this function will cause a termination
265 void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
266 unsigned int set_size)
268 struct drbd_tl_epoch *b, *nob; /* next old barrier */
269 struct list_head *le, *tle;
270 struct drbd_request *r;
272 spin_lock_irq(&mdev->req_lock);
274 b = mdev->oldest_tle;
276 /* first some paranoia code */
278 dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
282 if (b->br_number != barrier_nr) {
283 dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
284 barrier_nr, b->br_number);
287 if (b->n_writes != set_size) {
288 dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
289 barrier_nr, set_size, b->n_writes);
293 /* Clean up list of requests processed during current epoch */
294 list_for_each_safe(le, tle, &b->requests) {
295 r = list_entry(le, struct drbd_request, tl_requests);
296 _req_mod(r, barrier_acked);
298 /* There could be requests on the list waiting for completion
299 of the write to the local disk. To avoid corruption of
300 the slab's data structures we have to remove the list's head.
302 Also there could have been a barrier ack out of sequence, overtaking
303 the write acks - which would be a bug and violate write ordering.
304 To not deadlock in case we lose connection while such requests are
305 still pending, we need some way to find them for the
306 _req_mod(connection_lost_while_pending).
308 These have been list_move'd to the out_of_sequence_requests list in
309 _req_mod(, barrier_acked) above.
311 list_del_init(&b->requests);
314 if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
315 _tl_add_barrier(mdev, b);
317 mdev->oldest_tle = nob;
318 /* if nob == NULL b was the only barrier, and becomes the new
319 barrier. Therefore mdev->oldest_tle already points to b */
321 D_ASSERT(nob != NULL);
322 mdev->oldest_tle = nob;
326 spin_unlock_irq(&mdev->req_lock);
327 dec_ap_pending(mdev);
332 spin_unlock_irq(&mdev->req_lock);
333 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
337 * _tl_restart() - Walks the transfer log, and applies an action to all requests
338 * @mdev: DRBD device.
339 * @what: The action/event to perform with all request objects
341 * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
342 * restart_frozen_disk_io.
344 static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
346 struct drbd_tl_epoch *b, *tmp, **pn;
347 struct list_head *le, *tle, carry_reads;
348 struct drbd_request *req;
349 int rv, n_writes, n_reads;
351 b = mdev->oldest_tle;
352 pn = &mdev->oldest_tle;
356 INIT_LIST_HEAD(&carry_reads);
357 list_for_each_safe(le, tle, &b->requests) {
358 req = list_entry(le, struct drbd_request, tl_requests);
359 rv = _req_mod(req, what);
361 n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
362 n_reads += (rv & MR_READ) >> MR_READ_SHIFT;
367 if (what == resend) {
368 b->n_writes = n_writes;
369 if (b->w.cb == NULL) {
370 b->w.cb = w_send_barrier;
371 inc_ap_pending(mdev);
372 set_bit(CREATE_BARRIER, &mdev->flags);
375 drbd_queue_work(&mdev->data.work, &b->w);
380 list_add(&carry_reads, &b->requests);
381 /* there could still be requests on that ring list,
382 * in case local io is still pending */
383 list_del(&b->requests);
385 /* dec_ap_pending corresponding to queue_barrier.
386 * the newest barrier may not have been queued yet,
387 * in which case w.cb is still NULL. */
389 dec_ap_pending(mdev);
391 if (b == mdev->newest_tle) {
392 /* recycle, but reinit! */
393 D_ASSERT(tmp == NULL);
394 INIT_LIST_HEAD(&b->requests);
395 list_splice(&carry_reads, &b->requests);
396 INIT_LIST_HEAD(&b->w.list);
398 b->br_number = net_random();
408 list_splice(&carry_reads, &b->requests);
414 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
415 * @mdev: DRBD device.
417 * This is called after the connection to the peer was lost. The storage covered
418 by the requests on the transfer log gets marked as out of sync. Called from the
419 * receiver thread and the worker thread.
421 void tl_clear(struct drbd_conf *mdev)
423 struct list_head *le, *tle;
424 struct drbd_request *r;
426 spin_lock_irq(&mdev->req_lock);
428 _tl_restart(mdev, connection_lost_while_pending);
430 /* we expect this list to be empty. */
431 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
433 /* but just in case, clean it up anyways! */
434 list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
435 r = list_entry(le, struct drbd_request, tl_requests);
436 /* It would be nice to complete outside of spinlock.
437 * But this is easier for now. */
438 _req_mod(r, connection_lost_while_pending);
441 /* ensure bit indicating barrier is required is clear */
442 clear_bit(CREATE_BARRIER, &mdev->flags);
444 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
446 spin_unlock_irq(&mdev->req_lock);
449 void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
451 spin_lock_irq(&mdev->req_lock);
452 _tl_restart(mdev, what);
453 spin_unlock_irq(&mdev->req_lock);
457 * cl_wide_st_chg() - TRUE if the state change is a cluster wide one
458 * @mdev: DRBD device.
459 * @os: old (current) state.
460 * @ns: new (wanted) state.
462 static int cl_wide_st_chg(struct drbd_conf *mdev,
463 union drbd_state os, union drbd_state ns)
465 return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
466 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
467 (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
468 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
469 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
470 (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
471 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
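/* change the state to ((old state & ~mask) | val) under req_lock and return the resulting SS_ status code */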
474 int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
475 union drbd_state mask, union drbd_state val)
478 union drbd_state os, ns;
481 spin_lock_irqsave(&mdev->req_lock, flags);
483 ns.i = (os.i & ~mask.i) | val.i;
484 rv = _drbd_set_state(mdev, ns, f, NULL);
486 spin_unlock_irqrestore(&mdev->req_lock, flags);
492 * drbd_force_state() - Impose a change which happens outside our control on our state
493 * @mdev: DRBD device.
494 * @mask: mask of state bits to change.
495 * @val: value of new state bits.
497 void drbd_force_state(struct drbd_conf *mdev,
498 union drbd_state mask, union drbd_state val)
500 drbd_change_state(mdev, CS_HARD, mask, val);
503 static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns);
504 static int is_valid_state_transition(struct drbd_conf *,
505 union drbd_state, union drbd_state);
506 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
507 union drbd_state ns, int *warn_sync_abort);
508 int drbd_send_state_req(struct drbd_conf *,
509 union drbd_state, union drbd_state);
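/* wait_event() condition used by drbd_req_state(): returns 0 to keep waiting, otherwise an SS_ status code that ends the wait */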
511 static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev,
512 union drbd_state mask, union drbd_state val)
514 union drbd_state os, ns;
518 if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
519 return SS_CW_SUCCESS;
521 if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
522 return SS_CW_FAILED_BY_PEER;
525 spin_lock_irqsave(&mdev->req_lock, flags);
527 ns.i = (os.i & ~mask.i) | val.i;
528 ns = sanitize_state(mdev, os, ns, NULL);
530 if (!cl_wide_st_chg(mdev, os, ns))
533 rv = is_valid_state(mdev, ns);
534 if (rv == SS_SUCCESS) {
535 rv = is_valid_state_transition(mdev, ns, os);
536 if (rv == SS_SUCCESS)
537 rv = 0; /* cont waiting, otherwise fail. */
540 spin_unlock_irqrestore(&mdev->req_lock, flags);
546 * drbd_req_state() - Perform a possibly cluster-wide state change
547 * @mdev: DRBD device.
548 * @mask: mask of state bits to change.
549 * @val: value of new state bits.
552 * Should not be called directly, use drbd_request_state() or
553 * _drbd_request_state().
555 static int drbd_req_state(struct drbd_conf *mdev,
556 union drbd_state mask, union drbd_state val,
557 enum chg_state_flags f)
559 struct completion done;
561 union drbd_state os, ns;
564 init_completion(&done);
566 if (f & CS_SERIALIZE)
567 mutex_lock(&mdev->state_mutex);
569 spin_lock_irqsave(&mdev->req_lock, flags);
571 ns.i = (os.i & ~mask.i) | val.i;
572 ns = sanitize_state(mdev, os, ns, NULL);
574 if (cl_wide_st_chg(mdev, os, ns)) {
575 rv = is_valid_state(mdev, ns);
576 if (rv == SS_SUCCESS)
577 rv = is_valid_state_transition(mdev, ns, os);
578 spin_unlock_irqrestore(&mdev->req_lock, flags);
580 if (rv < SS_SUCCESS) {
582 print_st_err(mdev, os, ns, rv);
586 drbd_state_lock(mdev);
587 if (!drbd_send_state_req(mdev, mask, val)) {
588 drbd_state_unlock(mdev);
589 rv = SS_CW_FAILED_BY_PEER;
591 print_st_err(mdev, os, ns, rv);
595 wait_event(mdev->state_wait,
596 (rv = _req_st_cond(mdev, mask, val)));
598 if (rv < SS_SUCCESS) {
599 drbd_state_unlock(mdev);
601 print_st_err(mdev, os, ns, rv);
604 spin_lock_irqsave(&mdev->req_lock, flags);
606 ns.i = (os.i & ~mask.i) | val.i;
607 rv = _drbd_set_state(mdev, ns, f, &done);
608 drbd_state_unlock(mdev);
610 rv = _drbd_set_state(mdev, ns, f, &done);
613 spin_unlock_irqrestore(&mdev->req_lock, flags);
615 if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
616 D_ASSERT(current != mdev->worker.task);
617 wait_for_completion(&done);
621 if (f & CS_SERIALIZE)
622 mutex_unlock(&mdev->state_mutex);
628 * _drbd_request_state() - Request a state change (with flags)
629 * @mdev: DRBD device.
630 * @mask: mask of state bits to change.
631 * @val: value of new state bits.
634 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
635 * flag, or when logging of failed state change requests is not desired.
637 int _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
638 union drbd_state val, enum chg_state_flags f)
642 wait_event(mdev->state_wait,
643 (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
648 static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
650 dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
652 drbd_conn_str(ns.conn),
653 drbd_role_str(ns.role),
654 drbd_role_str(ns.peer),
655 drbd_disk_str(ns.disk),
656 drbd_disk_str(ns.pdsk),
658 ns.aftr_isp ? 'a' : '-',
659 ns.peer_isp ? 'p' : '-',
660 ns.user_isp ? 'u' : '-'
664 void print_st_err(struct drbd_conf *mdev,
665 union drbd_state os, union drbd_state ns, int err)
667 if (err == SS_IN_TRANSIENT_STATE)
669 dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
670 print_st(mdev, " state", os);
671 print_st(mdev, "wanted", ns);
675 #define drbd_peer_str drbd_role_str
676 #define drbd_pdsk_str drbd_disk_str
678 #define drbd_susp_str(A) ((A) ? "1" : "0")
679 #define drbd_aftr_isp_str(A) ((A) ? "1" : "0")
680 #define drbd_peer_isp_str(A) ((A) ? "1" : "0")
681 #define drbd_user_isp_str(A) ((A) ? "1" : "0")
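/* state-change logging helper: appends "field( old -> new ) " to the message buffer when the field differs between os and ns */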
684 ({ if (ns.A != os.A) { \
685 pbp += sprintf(pbp, #A "( %s -> %s ) ", \
686 drbd_##A##_str(os.A), \
687 drbd_##A##_str(ns.A)); \
691 * is_valid_state() - Returns an SS_ error code if ns is not valid
692 * @mdev: DRBD device.
693 * @ns: State to consider.
695 static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
697 /* See drbd_state_sw_errors in drbd_strings.c */
699 enum drbd_fencing_p fp;
703 if (get_ldev(mdev)) {
704 fp = mdev->ldev->dc.fencing;
708 if (get_net_conf(mdev)) {
709 if (!mdev->net_conf->two_primaries &&
710 ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
711 rv = SS_TWO_PRIMARIES;
716 /* already found a reason to abort */;
717 else if (ns.role == R_SECONDARY && mdev->open_cnt)
718 rv = SS_DEVICE_IN_USE;
720 else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
721 rv = SS_NO_UP_TO_DATE_DISK;
723 else if (fp >= FP_RESOURCE &&
724 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
727 else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
728 rv = SS_NO_UP_TO_DATE_DISK;
730 else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
731 rv = SS_NO_LOCAL_DISK;
733 else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
734 rv = SS_NO_REMOTE_DISK;
736 else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
737 rv = SS_NO_UP_TO_DATE_DISK;
739 else if ((ns.conn == C_CONNECTED ||
740 ns.conn == C_WF_BITMAP_S ||
741 ns.conn == C_SYNC_SOURCE ||
742 ns.conn == C_PAUSED_SYNC_S) &&
743 ns.disk == D_OUTDATED)
744 rv = SS_CONNECTED_OUTDATES;
746 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
747 (mdev->sync_conf.verify_alg[0] == 0))
748 rv = SS_NO_VERIFY_ALG;
750 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
751 mdev->agreed_pro_version < 88)
752 rv = SS_NOT_SUPPORTED;
758 * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
759 * @mdev: DRBD device.
763 static int is_valid_state_transition(struct drbd_conf *mdev,
764 union drbd_state ns, union drbd_state os)
768 if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
769 os.conn > C_CONNECTED)
770 rv = SS_RESYNC_RUNNING;
772 if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
773 rv = SS_ALREADY_STANDALONE;
775 if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
778 if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
779 rv = SS_NO_NET_CONFIG;
781 if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
782 rv = SS_LOWER_THAN_OUTDATED;
784 if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
785 rv = SS_IN_TRANSIENT_STATE;
787 if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
788 rv = SS_IN_TRANSIENT_STATE;
790 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
791 rv = SS_NEED_CONNECTION;
793 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
794 ns.conn != os.conn && os.conn > C_CONNECTED)
795 rv = SS_RESYNC_RUNNING;
797 if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
798 os.conn < C_CONNECTED)
799 rv = SS_NEED_CONNECTION;
805 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
806 * @mdev: DRBD device.
811 * When we lose connection, we have to set the state of the peer's disk (pdsk)
812 * to D_UNKNOWN. This rule and many more along those lines are in this function.
814 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
815 union drbd_state ns, int *warn_sync_abort)
817 enum drbd_fencing_p fp;
820 if (get_ldev(mdev)) {
821 fp = mdev->ldev->dc.fencing;
825 /* Do not let network-error states configure a device's network part */
826 if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
827 os.conn <= C_DISCONNECTING)
830 /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow */
831 if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
832 ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING)
835 /* After C_DISCONNECTING only C_STANDALONE may follow */
836 if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
839 if (ns.conn < C_CONNECTED) {
842 if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
846 /* Clear the aftr_isp when becoming unconfigured */
847 if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
850 /* Abort resync if a disk fails/detaches */
851 if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
852 (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
854 *warn_sync_abort = 1;
855 ns.conn = C_CONNECTED;
858 if (ns.conn >= C_CONNECTED &&
859 ((ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) ||
860 (ns.disk == D_NEGOTIATING && ns.conn == C_WF_BITMAP_T))) {
863 case C_PAUSED_SYNC_T:
864 ns.disk = D_OUTDATED;
869 case C_PAUSED_SYNC_S:
870 ns.disk = D_UP_TO_DATE;
873 ns.disk = D_INCONSISTENT;
874 dev_warn(DEV, "Implicitly set disk state Inconsistent!\n");
877 if (os.disk == D_OUTDATED && ns.disk == D_UP_TO_DATE)
878 dev_warn(DEV, "Implicitly set disk from Outdated to UpToDate\n");
881 if (ns.conn >= C_CONNECTED &&
882 (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)) {
886 case C_PAUSED_SYNC_T:
888 ns.pdsk = D_UP_TO_DATE;
891 case C_PAUSED_SYNC_S:
892 /* remap any consistent state to D_OUTDATED,
893 * but disallow "upgrade" of states that are not even consistent.
896 (D_DISKLESS < os.pdsk && os.pdsk < D_OUTDATED)
897 ? os.pdsk : D_OUTDATED;
900 ns.pdsk = D_INCONSISTENT;
901 dev_warn(DEV, "Implicitly set pdsk Inconsistent!\n");
904 if (os.pdsk == D_OUTDATED && ns.pdsk == D_UP_TO_DATE)
905 dev_warn(DEV, "Implicitly set pdsk from Outdated to UpToDate\n");
908 /* Connection breaks down before we finished "Negotiating" */
909 if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
910 get_ldev_if_state(mdev, D_NEGOTIATING)) {
911 if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
912 ns.disk = mdev->new_state_tmp.disk;
913 ns.pdsk = mdev->new_state_tmp.pdsk;
915 dev_alert(DEV, "Connection lost while negotiating, no data!\n");
916 ns.disk = D_DISKLESS;
922 if (fp == FP_STONITH &&
923 (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
924 !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
925 ns.susp = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
927 if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
928 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
929 !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
930 ns.susp = 1; /* Suspend IO while no accessible data is available */
932 if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
933 if (ns.conn == C_SYNC_SOURCE)
934 ns.conn = C_PAUSED_SYNC_S;
935 if (ns.conn == C_SYNC_TARGET)
936 ns.conn = C_PAUSED_SYNC_T;
938 if (ns.conn == C_PAUSED_SYNC_S)
939 ns.conn = C_SYNC_SOURCE;
940 if (ns.conn == C_PAUSED_SYNC_T)
941 ns.conn = C_SYNC_TARGET;
947 /* helper for __drbd_set_state */
948 static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
950 if (cs == C_VERIFY_T) {
951 /* starting online verify from an arbitrary position
952 * does not fit well into the existing protocol.
953 * on C_VERIFY_T, we initialize ov_left and friends
954 * implicitly in receive_DataRequest once the
955 * first P_OV_REQUEST is received */
956 mdev->ov_start_sector = ~(sector_t)0;
958 unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
959 if (bit >= mdev->rs_total)
960 mdev->ov_start_sector =
961 BM_BIT_TO_SECT(mdev->rs_total - 1);
962 mdev->ov_position = mdev->ov_start_sector;
967 * __drbd_set_state() - Set a new DRBD state
968 * @mdev: DRBD device.
971 * @done: Optional completion, that will get completed after the after_state_ch() finished
973 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
975 int __drbd_set_state(struct drbd_conf *mdev,
976 union drbd_state ns, enum chg_state_flags flags,
977 struct completion *done)
981 int warn_sync_abort = 0;
982 struct after_state_chg_work *ascw;
986 ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
989 return SS_NOTHING_TO_DO;
991 if (!(flags & CS_HARD)) {
992 /* pre-state-change checks ; only look at ns */
993 /* See drbd_state_sw_errors in drbd_strings.c */
995 rv = is_valid_state(mdev, ns);
996 if (rv < SS_SUCCESS) {
997 /* If the old state was illegal as well, then let
1000 if (is_valid_state(mdev, os) == rv)
1001 rv = is_valid_state_transition(mdev, ns, os);
1003 rv = is_valid_state_transition(mdev, ns, os);
1006 if (rv < SS_SUCCESS) {
1007 if (flags & CS_VERBOSE)
1008 print_st_err(mdev, os, ns, rv);
1012 if (warn_sync_abort)
1013 dev_warn(DEV, "Resync aborted.\n");
1028 dev_info(DEV, "%s\n", pb);
1031 /* solve the race between becoming unconfigured,
1032 * worker doing the cleanup, and
1033 * admin reconfiguring us:
1034 * on (re)configure, first set CONFIG_PENDING,
1035 * then wait for a potentially exiting worker,
1036 * start the worker, and schedule one no_op.
1037 * then proceed with configuration.
1039 if (ns.disk == D_DISKLESS &&
1040 ns.conn == C_STANDALONE &&
1041 ns.role == R_SECONDARY &&
1042 !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
1043 set_bit(DEVICE_DYING, &mdev->flags);
1045 mdev->state.i = ns.i;
1046 wake_up(&mdev->misc_wait);
1047 wake_up(&mdev->state_wait);
1049 /* post-state-change actions */
1050 if (os.conn >= C_SYNC_SOURCE && ns.conn <= C_CONNECTED) {
1051 set_bit(STOP_SYNC_TIMER, &mdev->flags);
1052 mod_timer(&mdev->resync_timer, jiffies);
1055 /* aborted verify run. log the last position */
1056 if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
1057 ns.conn < C_CONNECTED) {
1058 mdev->ov_start_sector =
1059 BM_BIT_TO_SECT(mdev->rs_total - mdev->ov_left);
1060 dev_info(DEV, "Online Verify reached sector %llu\n",
1061 (unsigned long long)mdev->ov_start_sector);
1064 if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
1065 (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
1066 dev_info(DEV, "Syncer continues.\n");
1067 mdev->rs_paused += (long)jiffies-(long)mdev->rs_mark_time;
1068 if (ns.conn == C_SYNC_TARGET) {
1069 if (!test_and_clear_bit(STOP_SYNC_TIMER, &mdev->flags))
1070 mod_timer(&mdev->resync_timer, jiffies);
1071 /* This if (!test_bit) is only needed for the case
1072 that a device that has ceased to use its timer,
1073 i.e. it is already in drbd_resync_finished() gets
1074 paused and resumed. */
1078 if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
1079 (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
1080 dev_info(DEV, "Resync suspended\n");
1081 mdev->rs_mark_time = jiffies;
1082 if (ns.conn == C_PAUSED_SYNC_T)
1083 set_bit(STOP_SYNC_TIMER, &mdev->flags);
1086 if (os.conn == C_CONNECTED &&
1087 (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
1088 mdev->ov_position = 0;
1090 mdev->rs_mark_left = drbd_bm_bits(mdev);
1091 if (mdev->agreed_pro_version >= 90)
1092 set_ov_position(mdev, ns.conn);
1094 mdev->ov_start_sector = 0;
1095 mdev->ov_left = mdev->rs_total
1096 - BM_SECT_TO_BIT(mdev->ov_position);
1098 mdev->rs_mark_time = jiffies;
1099 mdev->ov_last_oos_size = 0;
1100 mdev->ov_last_oos_start = 0;
1102 if (ns.conn == C_VERIFY_S) {
1103 dev_info(DEV, "Starting Online Verify from sector %llu\n",
1104 (unsigned long long)mdev->ov_position);
1105 mod_timer(&mdev->resync_timer, jiffies);
1109 if (get_ldev(mdev)) {
1110 u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
1111 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
1112 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
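/* re-derive the state dependent meta-data flags from the state we just committed */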
1114 if (test_bit(CRASHED_PRIMARY, &mdev->flags))
1115 mdf |= MDF_CRASHED_PRIMARY;
1116 if (mdev->state.role == R_PRIMARY ||
1117 (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
1118 mdf |= MDF_PRIMARY_IND;
1119 if (mdev->state.conn > C_WF_REPORT_PARAMS)
1120 mdf |= MDF_CONNECTED_IND;
1121 if (mdev->state.disk > D_INCONSISTENT)
1122 mdf |= MDF_CONSISTENT;
1123 if (mdev->state.disk > D_OUTDATED)
1124 mdf |= MDF_WAS_UP_TO_DATE;
1125 if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
1126 mdf |= MDF_PEER_OUT_DATED;
1127 if (mdf != mdev->ldev->md.flags) {
1128 mdev->ldev->md.flags = mdf;
1129 drbd_md_mark_dirty(mdev);
1131 if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
1132 drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
1136 /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
1137 if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
1138 os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
1139 set_bit(CONSIDER_RESYNC, &mdev->flags);
1141 /* Receiver should clean up itself */
1142 if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
1143 drbd_thread_stop_nowait(&mdev->receiver);
1145 /* Now the receiver finished cleaning up itself, it should die */
1146 if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
1147 drbd_thread_stop_nowait(&mdev->receiver);
1149 /* Upon network failure, we need to restart the receiver. */
1150 if (os.conn > C_TEAR_DOWN &&
1151 ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1152 drbd_thread_restart_nowait(&mdev->receiver);
1154 ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
1158 ascw->flags = flags;
1159 ascw->w.cb = w_after_state_ch;
1161 drbd_queue_work(&mdev->data.work, &ascw->w);
1163 dev_warn(DEV, "Could not kmalloc an ascw\n");
1169 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1171 struct after_state_chg_work *ascw =
1172 container_of(w, struct after_state_chg_work, w);
1173 after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
1174 if (ascw->flags & CS_WAIT_COMPLETE) {
1175 D_ASSERT(ascw->done != NULL);
1176 complete(ascw->done);
1183 static void abw_start_sync(struct drbd_conf *mdev, int rv)
1186 dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
1187 _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
1191 switch (mdev->state.conn) {
1192 case C_STARTING_SYNC_T:
1193 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
1195 case C_STARTING_SYNC_S:
1196 drbd_start_resync(mdev, C_SYNC_SOURCE);
1202 * after_state_ch() - Perform after state change actions that may sleep
1203 * @mdev: DRBD device.
1208 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1209 union drbd_state ns, enum chg_state_flags flags)
1211 enum drbd_fencing_p fp;
1213 if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1214 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1216 mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1220 if (get_ldev(mdev)) {
1221 fp = mdev->ldev->dc.fencing;
1225 /* Inform userspace about the change... */
1226 drbd_bcast_state(mdev, ns);
1228 if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1229 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1230 drbd_khelper(mdev, "pri-on-incon-degr");
1232 /* Here we have the actions that are performed after a
1233 state change. This function might sleep */
1235 if (os.susp && ns.susp && mdev->sync_conf.on_no_data == OND_SUSPEND_IO) {
1236 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1237 if (ns.conn == C_CONNECTED) {
1238 spin_lock_irq(&mdev->req_lock);
1239 _tl_restart(mdev, resend);
1240 _drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
1241 spin_unlock_irq(&mdev->req_lock);
1242 } else /* ns.conn > C_CONNECTED */
1243 dev_err(DEV, "Unexpected resync going on!\n");
1246 if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING) {
1247 spin_lock_irq(&mdev->req_lock);
1248 _tl_restart(mdev, restart_frozen_disk_io);
1249 _drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
1250 spin_unlock_irq(&mdev->req_lock);
1254 if (fp == FP_STONITH && ns.susp) {
1255 /* case1: The outdate peer handler is successful: */
1256 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
1258 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1259 drbd_uuid_new_current(mdev);
1260 clear_bit(NEW_CUR_UUID, &mdev->flags);
1263 spin_lock_irq(&mdev->req_lock);
1264 _drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
1265 spin_unlock_irq(&mdev->req_lock);
1267 /* case2: The connection was established again: */
1268 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1269 clear_bit(NEW_CUR_UUID, &mdev->flags);
1270 spin_lock_irq(&mdev->req_lock);
1271 _tl_restart(mdev, resend);
1272 _drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
1273 spin_unlock_irq(&mdev->req_lock);
1276 /* Do not change the order of the if above and the two below... */
1277 if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
1278 drbd_send_uuids(mdev);
1279 drbd_send_state(mdev);
1281 if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S)
1282 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)");
1284 /* Lost contact to peer's copy of the data */
1285 if ((os.pdsk >= D_INCONSISTENT &&
1286 os.pdsk != D_UNKNOWN &&
1287 os.pdsk != D_OUTDATED)
1288 && (ns.pdsk < D_INCONSISTENT ||
1289 ns.pdsk == D_UNKNOWN ||
1290 ns.pdsk == D_OUTDATED)) {
1291 if (get_ldev(mdev)) {
1292 if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
1293 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1294 if (mdev->state.susp) {
1295 set_bit(NEW_CUR_UUID, &mdev->flags);
1297 drbd_uuid_new_current(mdev);
1298 drbd_send_uuids(mdev);
1305 if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
1306 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
1307 drbd_uuid_new_current(mdev);
1308 drbd_send_uuids(mdev);
1311 /* D_DISKLESS Peer becomes secondary */
1312 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
1313 drbd_al_to_on_disk_bm(mdev);
1317 /* Last part of the attaching process ... */
1318 if (ns.conn >= C_CONNECTED &&
1319 os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
1320 drbd_send_sizes(mdev, 0, 0); /* to start sync... */
1321 drbd_send_uuids(mdev);
1322 drbd_send_state(mdev);
1325 /* We want to pause/continue resync, tell peer. */
1326 if (ns.conn >= C_CONNECTED &&
1327 ((os.aftr_isp != ns.aftr_isp) ||
1328 (os.user_isp != ns.user_isp)))
1329 drbd_send_state(mdev);
1331 /* In case one of the isp bits got set, suspend other devices. */
1332 if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1333 (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1334 suspend_other_sg(mdev);
1336 /* Make sure the peer gets informed about possible state
1337 changes (ISP bits) while we were in WFReportParams. */
1338 if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1339 drbd_send_state(mdev);
1341 /* We are in the process of starting a full sync... */
1342 if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1343 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
1344 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");
1346 /* We are invalidating ourselves... */
1347 if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1348 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1349 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
1351 if (os.disk > D_FAILED && ns.disk == D_FAILED) {
1352 enum drbd_io_error_p eh;
1355 if (get_ldev_if_state(mdev, D_FAILED)) {
1356 eh = mdev->ldev->dc.on_io_error;
1360 drbd_rs_cancel_all(mdev);
1361 /* since get_ldev() only works as long as disk>=D_INCONSISTENT,
1362 and it is D_DISKLESS here, local_cnt can only go down, it
1363 cannot increase... It will reach zero
1364 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1366 mdev->rs_failed = 0;
1367 atomic_set(&mdev->rs_pending_cnt, 0);
1369 spin_lock_irq(&mdev->req_lock);
1370 _drbd_set_state(_NS(mdev, disk, D_DISKLESS), CS_HARD, NULL);
1371 spin_unlock_irq(&mdev->req_lock);
1373 if (eh == EP_CALL_HELPER)
1374 drbd_khelper(mdev, "local-io-error");
1377 if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
1379 if (os.disk == D_FAILED) /* && ns.disk == D_DISKLESS*/ {
1380 if (drbd_send_state(mdev))
1381 dev_warn(DEV, "Notified peer that my disk is broken.\n");
1383 dev_err(DEV, "Sending state in drbd_io_error() failed\n");
1386 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1387 lc_destroy(mdev->resync);
1388 mdev->resync = NULL;
1389 lc_destroy(mdev->act_log);
1390 mdev->act_log = NULL;
1392 drbd_free_bc(mdev->ldev);
1393 mdev->ldev = NULL;);
1395 if (mdev->md_io_tmpp)
1396 __free_page(mdev->md_io_tmpp);
1399 /* Disks got bigger while they were detached */
1400 if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1401 test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1402 if (ns.conn == C_CONNECTED)
1403 resync_after_online_grow(mdev);
1406 /* A resync finished or aborted, wake paused devices... */
1407 if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1408 (os.peer_isp && !ns.peer_isp) ||
1409 (os.user_isp && !ns.user_isp))
1410 resume_next_sg(mdev);
1412 /* Upon network connection, we need to start the receiver */
1413 if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1414 drbd_thread_start(&mdev->receiver);
1416 /* Terminate worker thread if we are unconfigured - it will be
1417 restarted as needed... */
1418 if (ns.disk == D_DISKLESS &&
1419 ns.conn == C_STANDALONE &&
1420 ns.role == R_SECONDARY) {
1421 if (os.aftr_isp != ns.aftr_isp)
1422 resume_next_sg(mdev);
1423 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1424 if (test_bit(DEVICE_DYING, &mdev->flags))
1425 drbd_thread_stop_nowait(&mdev->worker);
1432 static int drbd_thread_setup(void *arg)
1434 struct drbd_thread *thi = (struct drbd_thread *) arg;
1435 struct drbd_conf *mdev = thi->mdev;
1436 unsigned long flags;
1440 retval = thi->function(thi);
1442 spin_lock_irqsave(&thi->t_lock, flags);
1444 /* if the receiver has been "Exiting", the last thing it did
1445 * was set the conn state to "StandAlone",
1446 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
1447 * and receiver thread will be "started".
1448 * drbd_thread_start needs to set "Restarting" in that case.
1449 * t_state check and assignment needs to be within the same spinlock,
1450 * so either thread_start sees Exiting, and can remap to Restarting,
1451 * or thread_start see None, and can proceed as normal.
1454 if (thi->t_state == Restarting) {
1455 dev_info(DEV, "Restarting %s\n", current->comm);
1456 thi->t_state = Running;
1457 spin_unlock_irqrestore(&thi->t_lock, flags);
1462 thi->t_state = None;
1464 complete(&thi->stop);
1465 spin_unlock_irqrestore(&thi->t_lock, flags);
1467 dev_info(DEV, "Terminating %s\n", current->comm);
1469 /* Release mod reference taken when thread was started */
1470 module_put(THIS_MODULE);
1474 static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1475 int (*func) (struct drbd_thread *))
1477 spin_lock_init(&thi->t_lock);
1479 thi->t_state = None;
1480 thi->function = func;
1484 int drbd_thread_start(struct drbd_thread *thi)
1486 struct drbd_conf *mdev = thi->mdev;
1487 struct task_struct *nt;
1488 unsigned long flags;
1491 thi == &mdev->receiver ? "receiver" :
1492 thi == &mdev->asender ? "asender" :
1493 thi == &mdev->worker ? "worker" : "NONSENSE";
1495 /* is used from state engine doing drbd_thread_stop_nowait,
1496 * while holding the req lock irqsave */
1497 spin_lock_irqsave(&thi->t_lock, flags);
1499 switch (thi->t_state) {
1501 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1502 me, current->comm, current->pid);
1504 /* Get ref on module for thread - this is released when thread exits */
1505 if (!try_module_get(THIS_MODULE)) {
1506 dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1507 spin_unlock_irqrestore(&thi->t_lock, flags);
1511 init_completion(&thi->stop);
1512 D_ASSERT(thi->task == NULL);
1513 thi->reset_cpu_mask = 1;
1514 thi->t_state = Running;
1515 spin_unlock_irqrestore(&thi->t_lock, flags);
1516 flush_signals(current); /* otherwise we may get -ERESTARTNOINTR */
1518 nt = kthread_create(drbd_thread_setup, (void *) thi,
1519 "drbd%d_%s", mdev_to_minor(mdev), me);
1522 dev_err(DEV, "Couldn't start thread\n");
1524 module_put(THIS_MODULE);
1527 spin_lock_irqsave(&thi->t_lock, flags);
1529 thi->t_state = Running;
1530 spin_unlock_irqrestore(&thi->t_lock, flags);
1531 wake_up_process(nt);
1534 thi->t_state = Restarting;
1535 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1536 me, current->comm, current->pid);
1541 spin_unlock_irqrestore(&thi->t_lock, flags);
1549 void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1551 unsigned long flags;
1553 enum drbd_thread_state ns = restart ? Restarting : Exiting;
1555 /* may be called from state engine, holding the req lock irqsave */
1556 spin_lock_irqsave(&thi->t_lock, flags);
1558 if (thi->t_state == None) {
1559 spin_unlock_irqrestore(&thi->t_lock, flags);
1561 drbd_thread_start(thi);
1565 if (thi->t_state != ns) {
1566 if (thi->task == NULL) {
1567 spin_unlock_irqrestore(&thi->t_lock, flags);
1573 init_completion(&thi->stop);
1574 if (thi->task != current)
1575 force_sig(DRBD_SIGKILL, thi->task);
1579 spin_unlock_irqrestore(&thi->t_lock, flags);
1582 wait_for_completion(&thi->stop);
1587 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1588 * @mdev: DRBD device.
1590 * Forces all threads of a device onto the same CPU. This is beneficial for
1591 * DRBD's performance. May be overridden by the user's configuration.
1593 void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1597 /* user override. */
1598 if (cpumask_weight(mdev->cpu_mask))
1601 ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1602 for_each_online_cpu(cpu) {
1604 cpumask_set_cpu(cpu, mdev->cpu_mask);
1608 /* should not be reached */
1609 cpumask_setall(mdev->cpu_mask);
1613 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1614 * @mdev: DRBD device.
1616 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
1619 void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1621 struct task_struct *p = current;
1622 struct drbd_thread *thi =
1623 p == mdev->asender.task ? &mdev->asender :
1624 p == mdev->receiver.task ? &mdev->receiver :
1625 p == mdev->worker.task ? &mdev->worker :
1629 if (!thi->reset_cpu_mask)
1631 thi->reset_cpu_mask = 0;
1632 set_cpus_allowed_ptr(p, mdev->cpu_mask);
1636 /* the appropriate socket mutex must be held already */
1637 int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
1638 enum drbd_packets cmd, struct p_header *h,
1639 size_t size, unsigned msg_flags)
1643 ERR_IF(!h) return FALSE;
1644 ERR_IF(!size) return FALSE;
1646 h->magic = BE_DRBD_MAGIC;
1647 h->command = cpu_to_be16(cmd);
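/* the length field on the wire counts only the payload, not the header itself */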
1648 h->length = cpu_to_be16(size-sizeof(struct p_header));
1650 sent = drbd_send(mdev, sock, h, size, msg_flags);
1652 ok = (sent == size);
1654 dev_err(DEV, "short sent %s size=%d sent=%d\n",
1655 cmdname(cmd), (int)size, sent);
1659 /* don't pass the socket. we may only look at it
1660 * when we hold the appropriate socket mutex.
1662 int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
1663 enum drbd_packets cmd, struct p_header *h, size_t size)
1666 struct socket *sock;
1668 if (use_data_socket) {
1669 mutex_lock(&mdev->data.mutex);
1670 sock = mdev->data.socket;
1672 mutex_lock(&mdev->meta.mutex);
1673 sock = mdev->meta.socket;
1676 /* drbd_disconnect() could have called drbd_free_sock()
1677 * while we were waiting in down()... */
1678 if (likely(sock != NULL))
1679 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1681 if (use_data_socket)
1682 mutex_unlock(&mdev->data.mutex);
1684 mutex_unlock(&mdev->meta.mutex);
1688 int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1694 h.magic = BE_DRBD_MAGIC;
1695 h.command = cpu_to_be16(cmd);
1696 h.length = cpu_to_be16(size);
1698 if (!drbd_get_data_sock(mdev))
1702 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1704 drbd_send(mdev, mdev->data.socket, data, size, 0));
1706 drbd_put_data_sock(mdev);
1711 int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1713 struct p_rs_param_89 *p;
1714 struct socket *sock;
1716 const int apv = mdev->agreed_pro_version;
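/* packet size depends on the agreed protocol version: <= 87 carries only the rate,
 * 88 adds verify_alg as a trailing string, 89+ uses the fixed-size p_rs_param_89
 * with both verify_alg and csums_alg */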
1718 size = apv <= 87 ? sizeof(struct p_rs_param)
1719 : apv == 88 ? sizeof(struct p_rs_param)
1720 + strlen(mdev->sync_conf.verify_alg) + 1
1721 : /* 89 */ sizeof(struct p_rs_param_89);
1723 /* used from admin command context and receiver/worker context.
1724 * to avoid kmalloc, grab the socket right here,
1725 * then use the pre-allocated sbuf there */
1726 mutex_lock(&mdev->data.mutex);
1727 sock = mdev->data.socket;
1729 if (likely(sock != NULL)) {
1730 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
1732 p = &mdev->data.sbuf.rs_param_89;
1734 /* initialize verify_alg and csums_alg */
1735 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
1737 p->rate = cpu_to_be32(sc->rate);
1740 strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
1742 strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
1744 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
1746 rv = 0; /* not ok */
1748 mutex_unlock(&mdev->data.mutex);
1753 int drbd_send_protocol(struct drbd_conf *mdev)
1755 struct p_protocol *p;
1758 size = sizeof(struct p_protocol);
1760 if (mdev->agreed_pro_version >= 87)
1761 size += strlen(mdev->net_conf->integrity_alg) + 1;
1763 /* we must not recurse into our own queue,
1764 * as that is blocked during handshake */
1765 p = kmalloc(size, GFP_NOIO);
1769 p->protocol = cpu_to_be32(mdev->net_conf->wire_protocol);
1770 p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
1771 p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
1772 p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
1773 p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
1776 if (mdev->net_conf->want_lose)
1778 if (mdev->net_conf->dry_run) {
1779 if (mdev->agreed_pro_version >= 92)
1782 dev_err(DEV, "--dry-run is not supported by peer");
1787 p->conn_flags = cpu_to_be32(cf);
1789 if (mdev->agreed_pro_version >= 87)
1790 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
1792 rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
1793 (struct p_header *)p, size);
1798 int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
1803 if (!get_ldev_if_state(mdev, D_NEGOTIATING))
1806 for (i = UI_CURRENT; i < UI_SIZE; i++)
1807 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
1809 mdev->comm_bm_set = drbd_bm_total_weight(mdev);
1810 p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
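/* uuid_flags bits: 1 = discard-my-data (want_lose), 2 = crashed primary, 4 = disk was inconsistent while negotiating */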
1811 uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
1812 uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
1813 uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
1814 p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
1818 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
1819 (struct p_header *)&p, sizeof(p));
1822 int drbd_send_uuids(struct drbd_conf *mdev)
1824 return _drbd_send_uuids(mdev, 0);
1827 int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
1829 return _drbd_send_uuids(mdev, 8);
1833 int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
1837 p.uuid = cpu_to_be64(val);
1839 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
1840 (struct p_header *)&p, sizeof(p));
1843 int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
1846 sector_t d_size, u_size;
1850 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
1851 D_ASSERT(mdev->ldev->backing_bdev);
1852 d_size = drbd_get_max_capacity(mdev->ldev);
1853 u_size = mdev->ldev->dc.disk_size;
1854 q_order_type = drbd_queue_order_type(mdev);
1859 q_order_type = QUEUE_ORDERED_NONE;
1862 p.d_size = cpu_to_be64(d_size);
1863 p.u_size = cpu_to_be64(u_size);
1864 p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
1865 p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue));
1866 p.queue_order_type = cpu_to_be16(q_order_type);
1867 p.dds_flags = cpu_to_be16(flags);
1869 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
1870 (struct p_header *)&p, sizeof(p));
1875 * drbd_send_state() - Sends the drbd state to the peer
1876 * @mdev: DRBD device.
1878 int drbd_send_state(struct drbd_conf *mdev)
1880 struct socket *sock;
1884 /* Grab state lock so we won't send state if we're in the middle
1885 * of a cluster wide state change on another thread */
1886 drbd_state_lock(mdev);
1888 mutex_lock(&mdev->data.mutex);
1890 p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
1891 sock = mdev->data.socket;
1893 if (likely(sock != NULL)) {
1894 ok = _drbd_send_cmd(mdev, sock, P_STATE,
1895 (struct p_header *)&p, sizeof(p), 0);
1898 mutex_unlock(&mdev->data.mutex);
1900 drbd_state_unlock(mdev);
1904 int drbd_send_state_req(struct drbd_conf *mdev,
1905 union drbd_state mask, union drbd_state val)
1907 struct p_req_state p;
1909 p.mask = cpu_to_be32(mask.i);
1910 p.val = cpu_to_be32(val.i);
1912 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
1913 (struct p_header *)&p, sizeof(p));
1916 int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode)
1918 struct p_req_state_reply p;
1920 p.retcode = cpu_to_be32(retcode);
1922 return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
1923 (struct p_header *)&p, sizeof(p));
1926 int fill_bitmap_rle_bits(struct drbd_conf *mdev,
1927 struct p_compressed_bm *p,
1928 struct bm_xfer_ctx *c)
1930 struct bitstream bs;
1931 unsigned long plain_bits;
1938 /* may we use this feature? */
1939 if ((mdev->sync_conf.use_rle == 0) ||
1940 (mdev->agreed_pro_version < 90))
1943 if (c->bit_offset >= c->bm_bits)
1944 return 0; /* nothing to do. */
1946 /* use at most this many bytes */
1947 bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
1948 memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
1949 /* plain bits covered in this code string */
1952 /* p->encoding & 0x80 stores whether the first run length is set.
1953 * bit offset is implicit.
1954 * start with toggle == 2 to be able to tell the first iteration */
1957 /* see how many plain bits we can stuff into one packet
1958 * using RLE and VLI. */
1960 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
1961 : _drbd_bm_find_next(mdev, c->bit_offset);
1964 rl = tmp - c->bit_offset;
1966 if (toggle == 2) { /* first iteration */
1968 /* the first checked bit was set,
1969 * store start value, */
1970 DCBP_set_start(p, 1);
1971 /* but skip encoding of zero run length */
1975 DCBP_set_start(p, 0);
1978 /* paranoia: catch zero runlength.
1979 * can only happen if bitmap is modified while we scan it. */
1981 dev_err(DEV, "unexpected zero runlength while encoding bitmap "
1982 "t:%u bo:%lu\n", toggle, c->bit_offset);
1986 bits = vli_encode_bits(&bs, rl);
1987 if (bits == -ENOBUFS) /* buffer full */
1990 dev_err(DEV, "error while encoding bitmap: %d\n", bits);
1996 c->bit_offset = tmp;
1997 } while (c->bit_offset < c->bm_bits);
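/* bytes used by the bit stream; a partially filled last byte counts as a full one */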
1999 len = bs.cur.b - p->code + !!bs.cur.bit;
2001 if (plain_bits < (len << 3)) {
2002 /* incompressible with this method.
2003 * we need to rewind both word and bit position. */
2004 c->bit_offset -= plain_bits;
2005 bm_xfer_ctx_bit_to_word_offset(c);
2006 c->bit_offset = c->word_offset * BITS_PER_LONG;
2010 /* RLE + VLI was able to compress it just fine.
2011 * update c->word_offset. */
2012 bm_xfer_ctx_bit_to_word_offset(c);
2014 /* store pad_bits */
2015 DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
2020 enum { OK, FAILED, DONE }
2021 send_bitmap_rle_or_plain(struct drbd_conf *mdev,
2022 struct p_header *h, struct bm_xfer_ctx *c)
2024 struct p_compressed_bm *p = (void*)h;
2025 unsigned long num_words;
2029 len = fill_bitmap_rle_bits(mdev, p, c);
2035 DCBP_set_code(p, RLE_VLI_Bits);
2036 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
2037 sizeof(*p) + len, 0);
2040 c->bytes[0] += sizeof(*p) + len;
2042 if (c->bit_offset >= c->bm_bits)
2045 /* was not compressible.
2046 * send a buffer full of plain text bits instead. */
2047 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
2048 len = num_words * sizeof(long);
2050 drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
2051 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
2052 h, sizeof(struct p_header) + len, 0);
2053 c->word_offset += num_words;
2054 c->bit_offset = c->word_offset * BITS_PER_LONG;
2057 c->bytes[1] += sizeof(struct p_header) + len;
2059 if (c->bit_offset > c->bm_bits)
2060 c->bit_offset = c->bm_bits;
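/* len == 0 at this point means the complete bitmap has been transferred */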
2062 ok = ok ? ((len == 0) ? DONE : OK) : FAILED;
2065 INFO_bm_xfer_stats(mdev, "send", c);
2069 /* See the comment at receive_bitmap() */
2070 int _drbd_send_bitmap(struct drbd_conf *mdev)
2072 struct bm_xfer_ctx c;
2076 ERR_IF(!mdev->bitmap) return FALSE;
2078 /* maybe we should use some per thread scratch page,
2079 * and allocate that during initial device creation? */
2080 p = (struct p_header *) __get_free_page(GFP_NOIO);
2082 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
2086 if (get_ldev(mdev)) {
2087 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2088 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2089 drbd_bm_set_all(mdev);
2090 if (drbd_bm_write(mdev)) {
2091 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
2092 * but otherwise process as per normal - need to tell other
2093 * side that a full resync is required! */
2094 dev_err(DEV, "Failed to write bitmap to disk!\n");
2096 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2103 c = (struct bm_xfer_ctx) {
2104 .bm_bits = drbd_bm_bits(mdev),
2105 .bm_words = drbd_bm_words(mdev),
2109 ret = send_bitmap_rle_or_plain(mdev, p, &c);
2110 } while (ret == OK);
2112 free_page((unsigned long) p);
2113 return (ret == DONE);
2116 int drbd_send_bitmap(struct drbd_conf *mdev)
2120 if (!drbd_get_data_sock(mdev))
2122 err = !_drbd_send_bitmap(mdev);
2123 drbd_put_data_sock(mdev);
2127 int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2130 struct p_barrier_ack p;
2132 p.barrier = barrier_nr;
2133 p.set_size = cpu_to_be32(set_size);
2135 if (mdev->state.conn < C_CONNECTED)
2137 ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
2138 (struct p_header *)&p, sizeof(p));
2143 * _drbd_send_ack() - Sends an ack packet
2144 * @mdev: DRBD device.
2145 * @cmd: Packet command code.
2146 * @sector: sector, needs to be in big endian byte order
2147 * @blksize: size in bytes, needs to be in big endian byte order
2148 * @block_id: Id, big endian byte order
2150 static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2156 struct p_block_ack p;
2159 p.block_id = block_id;
2160 p.blksize = blksize;
2161 p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2163 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
2165 ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
2166 (struct p_header *)&p, sizeof(p));
2170 int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
2173 const int header_size = sizeof(struct p_data)
2174 - sizeof(struct p_header);
2175 int data_size = ((struct p_header *)dp)->length - header_size;
2177 return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2181 int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2182 struct p_block_req *rp)
2184 return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2188 * drbd_send_ack() - Sends an ack packet
2189 * @mdev: DRBD device.
2190 * @cmd: Packet command code.
2193 int drbd_send_ack(struct drbd_conf *mdev,
2194 enum drbd_packets cmd, struct drbd_epoch_entry *e)
2196 return _drbd_send_ack(mdev, cmd,
2197 cpu_to_be64(e->sector),
2198 cpu_to_be32(e->size),
2202 /* This function misuses the block_id field to signal if the blocks
2203 * are in sync or not. */
2204 int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2205 sector_t sector, int blksize, u64 block_id)
2207 return _drbd_send_ack(mdev, cmd,
2208 cpu_to_be64(sector),
2209 cpu_to_be32(blksize),
2210 cpu_to_be64(block_id));
2213 int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2214 sector_t sector, int size, u64 block_id)
2217 struct p_block_req p;
2219 p.sector = cpu_to_be64(sector);
2220 p.block_id = block_id;
2221 p.blksize = cpu_to_be32(size);
2223 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
2224 (struct p_header *)&p, sizeof(p));
2228 int drbd_send_drequest_csum(struct drbd_conf *mdev,
2229 sector_t sector, int size,
2230 void *digest, int digest_size,
2231 enum drbd_packets cmd)
2234 struct p_block_req p;
2236 p.sector = cpu_to_be64(sector);
2237 p.block_id = BE_DRBD_MAGIC + 0xbeef;
2238 p.blksize = cpu_to_be32(size);
2240 p.head.magic = BE_DRBD_MAGIC;
2241 p.head.command = cpu_to_be16(cmd);
2242 p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header) + digest_size);
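	/* Added note: the digest is transmitted as a second chunk right after
	 * this request header, so the usual drbd_send_cmd() helper is not used
	 * here; instead the data mutex is taken manually around the two
	 * drbd_send() calls below. */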
2244 mutex_lock(&mdev->data.mutex);
2246 ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2247 ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2249 mutex_unlock(&mdev->data.mutex);
2254 int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2257 struct p_block_req p;
2259 p.sector = cpu_to_be64(sector);
2260 p.block_id = BE_DRBD_MAGIC + 0xbabe;
2261 p.blksize = cpu_to_be32(size);
2263 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
2264 (struct p_header *)&p, sizeof(p));
2268 /* called on sndtimeo
2269 * returns FALSE if we should retry,
2270 * TRUE if we think the connection is dead
2272 static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2275 /* long elapsed = (long)(jiffies - mdev->last_received); */
2277 drop_it = mdev->meta.socket == sock
2278 || !mdev->asender.task
2279 || get_t_state(&mdev->asender) != Running
2280 || mdev->state.conn < C_CONNECTED;
2285 drop_it = !--mdev->ko_count;
2287 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2288 current->comm, current->pid, mdev->ko_count);
2292 return drop_it; /* && (mdev->state == R_PRIMARY) */;
2295 /* The idea of sendpage seems to be to put some kind of reference
2296 * to the page into the skb, and to hand it over to the NIC. In
2297 * this process get_page() gets called.
2299 * As soon as the page was really sent over the network put_page()
2300 * gets called by some part of the network layer. [ NIC driver? ]
2302 * [ get_page() / put_page() increment/decrement the count. If count
2303 * reaches 0 the page will be freed. ]
2305 * This works nicely with pages from FSs.
2306 * But this means that in protocol A we might signal IO completion too early!
2308 * In order not to corrupt data during a resync we must make sure
2309 * that we do not reuse our own buffer pages (EEs) too early, therefore
2310 * we have the net_ee list.
2312 * XFS still seems to have problems: it submits pages with page_count == 0!
2313 * As a workaround, we disable sendpage on pages
2314 * with page_count == 0 or PageSlab.
2316 static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
2317 int offset, size_t size, unsigned msg_flags)
2319 int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
2322 mdev->send_cnt += size>>9;
2323 return sent == size;
2326 static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
2327 int offset, size_t size, unsigned msg_flags)
2329 mm_segment_t oldfs = get_fs();
2333 /* e.g. XFS meta- & log-data is in slab pages, which have a
2334 * page_count of 0 and/or have PageSlab() set.
2335 * we cannot use send_page for those, as that does get_page();
2336 * put_page(); and would cause either a VM_BUG directly, or
2337 * __page_cache_release a page that would actually still be referenced
2338 * by someone, leading to some obscure delayed Oops somewhere else. */
2339 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
2340 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
2342 msg_flags |= MSG_NOSIGNAL;
2343 drbd_update_congested(mdev);
2346 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2349 if (sent == -EAGAIN) {
2350 if (we_should_drop_the_connection(mdev,
2357 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2358 __func__, (int)size, len, sent);
2363 } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2365 clear_bit(NET_CONGESTED, &mdev->flags);
2369 mdev->send_cnt += size>>9;
2373 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2375 struct bio_vec *bvec;
2377 /* hint all but last page with MSG_MORE */
2378 __bio_for_each_segment(bvec, bio, i, 0) {
2379 if (!_drbd_no_send_page(mdev, bvec->bv_page,
2380 bvec->bv_offset, bvec->bv_len,
2381 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2387 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2389 struct bio_vec *bvec;
2391 /* hint all but last page with MSG_MORE */
2392 __bio_for_each_segment(bvec, bio, i, 0) {
2393 if (!_drbd_send_page(mdev, bvec->bv_page,
2394 bvec->bv_offset, bvec->bv_len,
2395 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2401 static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2403 struct page *page = e->pages;
2404 unsigned len = e->size;
2405 /* hint all but last page with MSG_MORE */
2406 page_chain_for_each(page) {
2407 unsigned l = min_t(unsigned, len, PAGE_SIZE);
2408 if (!_drbd_send_page(mdev, page, 0, l,
2409 page_chain_next(page) ? MSG_MORE : 0))
2416 /* Used to send write requests
2417 * R_PRIMARY -> Peer (P_DATA)
2419 int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2423 unsigned int dp_flags = 0;
2427 if (!drbd_get_data_sock(mdev))
2430 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2431 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2433 p.head.magic = BE_DRBD_MAGIC;
2434 p.head.command = cpu_to_be16(P_DATA);
2436 cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + req->size);
2438 p.sector = cpu_to_be64(req->sector);
2439 p.block_id = (unsigned long)req;
2440 p.seq_num = cpu_to_be32(req->seq_num =
2441 atomic_add_return(1, &mdev->packet_seq));
2444 /* NOTE: no need to check if barriers are supported here as we would
2445 * not pass the test in make_request_common in that case
2447 if (req->master_bio->bi_rw & REQ_HARDBARRIER) {
2448 dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n");
2449 /* dp_flags |= DP_HARDBARRIER; */
2451 if (req->master_bio->bi_rw & REQ_SYNC)
2452 dp_flags |= DP_RW_SYNC;
2453 /* for now handle SYNCIO and UNPLUG
2454 * as if they still were one and the same flag */
2455 if (req->master_bio->bi_rw & REQ_UNPLUG)
2456 dp_flags |= DP_RW_SYNC;
2457 if (mdev->state.conn >= C_SYNC_SOURCE &&
2458 mdev->state.conn <= C_PAUSED_SYNC_T)
2459 dp_flags |= DP_MAY_SET_IN_SYNC;
2461 p.dp_flags = cpu_to_be32(dp_flags);
2462 set_bit(UNPLUG_REMOTE, &mdev->flags);
2464 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
2466 dgb = mdev->int_dig_out;
2467 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
2468 ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
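	/* Added note: with protocol A the request may be completed towards the
	 * upper layers before the data has actually left the box (see the
	 * sendpage discussion above), so the copying _drbd_send_bio() path is
	 * used; other protocols take the zero-copy _drbd_send_zc_bio() path. */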
2471 if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
2472 ok = _drbd_send_bio(mdev, req->master_bio);
2474 ok = _drbd_send_zc_bio(mdev, req->master_bio);
2477 drbd_put_data_sock(mdev);
2482 /* answer packet, used to send data back for read requests:
2483 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
2484 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
2486 int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2487 struct drbd_epoch_entry *e)
2494 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2495 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2497 p.head.magic = BE_DRBD_MAGIC;
2498 p.head.command = cpu_to_be16(cmd);
2500 cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + e->size);
2502 p.sector = cpu_to_be64(e->sector);
2503 p.block_id = e->block_id;
2504 /* p.seq_num = 0; No sequence numbers here.. */
2506 /* Only called by our kernel thread.
2507 * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2508 * in response to admin command or module unload.
2510 if (!drbd_get_data_sock(mdev))
2513 ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p,
2514 sizeof(p), dgs ? MSG_MORE : 0);
2516 dgb = mdev->int_dig_out;
2517 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
2518 ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
2521 ok = _drbd_send_zc_ee(mdev, e);
2523 drbd_put_data_sock(mdev);
2529 drbd_send distinguishes two cases:
2531 Packets sent via the data socket "sock"
2532 and packets sent via the meta data socket "msock"
2535 -----------------+---------- sock ----------+----------- msock ------------
2536 timeout          | conf.timeout / 2         | conf.timeout / 2
2537 timeout action   | send a ping via msock    | Abort communication
2538                  |                          | and close all sockets
2542 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
2544 int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2545 void *buf, size_t size, unsigned msg_flags)
2554 /* THINK if (signal_pending) return ... ? */
2559 msg.msg_name = NULL;
2560 msg.msg_namelen = 0;
2561 msg.msg_control = NULL;
2562 msg.msg_controllen = 0;
2563 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
2565 if (sock == mdev->data.socket) {
2566 mdev->ko_count = mdev->net_conf->ko_count;
2567 drbd_update_congested(mdev);
2571 * tcp_sendmsg does _not_ use its size parameter at all ?
2573 * -EAGAIN on timeout, -EINTR on signal.
2576 * do we need to block DRBD_SIG if sock == &meta.socket ??
2577 * otherwise wake_asender() might interrupt some send_*Ack !
2579 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2580 if (rv == -EAGAIN) {
2581 if (we_should_drop_the_connection(mdev, sock))
2588 flush_signals(current);
2596 } while (sent < size);
2598 if (sock == mdev->data.socket)
2599 clear_bit(NET_CONGESTED, &mdev->flags);
2602 if (rv != -EAGAIN) {
2603 dev_err(DEV, "%s_sendmsg returned %d\n",
2604 sock == mdev->meta.socket ? "msock" : "sock",
2606 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
2608 drbd_force_state(mdev, NS(conn, C_TIMEOUT));
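/* Illustrative sketch only (not part of the driver): the calling
 * convention expected by drbd_send().  The data socket mutex must be
 * held while sending; drbd_get_data_sock()/drbd_put_data_sock() are
 * what the callers above use for that.  example_send_raw() is a
 * hypothetical helper, shown only to make the pattern explicit. */
static int example_send_raw(struct drbd_conf *mdev, void *buf, size_t size)
{
	int sent = 0;

	if (!drbd_get_data_sock(mdev))	/* takes mdev->data.mutex */
		return 0;
	sent = drbd_send(mdev, mdev->data.socket, buf, size, 0);
	drbd_put_data_sock(mdev);	/* releases mdev->data.mutex */

	return sent == (int)size;
}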
2614 static int drbd_open(struct block_device *bdev, fmode_t mode)
2616 struct drbd_conf *mdev = bdev->bd_disk->private_data;
2617 unsigned long flags;
2621 spin_lock_irqsave(&mdev->req_lock, flags);
2622 /* to have a stable mdev->state.role
2623 * and no race with updating open_cnt */
2625 if (mdev->state.role != R_PRIMARY) {
2626 if (mode & FMODE_WRITE)
2628 else if (!allow_oos)
2634 spin_unlock_irqrestore(&mdev->req_lock, flags);
2640 static int drbd_release(struct gendisk *gd, fmode_t mode)
2642 struct drbd_conf *mdev = gd->private_data;
2649 static void drbd_unplug_fn(struct request_queue *q)
2651 struct drbd_conf *mdev = q->queuedata;
2654 spin_lock_irq(q->queue_lock);
2656 spin_unlock_irq(q->queue_lock);
2658 /* only if connected */
2659 spin_lock_irq(&mdev->req_lock);
2660 if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) {
2661 D_ASSERT(mdev->state.role == R_PRIMARY);
2662 if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) {
2663 /* add to the data.work queue,
2664 * unless already queued.
2665 * XXX this might be a good addition to drbd_queue_work
2666 * anyways, to detect "double queuing" ... */
2667 if (list_empty(&mdev->unplug_work.list))
2668 drbd_queue_work(&mdev->data.work,
2669 &mdev->unplug_work);
2672 spin_unlock_irq(&mdev->req_lock);
2674 if (mdev->state.disk >= D_INCONSISTENT)
2678 static void drbd_set_defaults(struct drbd_conf *mdev)
2680 /* This way we get a compile error when sync_conf grows,
2681 and we forget to initialize it here */
2682 mdev->sync_conf = (struct syncer_conf) {
2683 /* .rate = */ DRBD_RATE_DEF,
2684 /* .after = */ DRBD_AFTER_DEF,
2685 /* .al_extents = */ DRBD_AL_EXTENTS_DEF,
2686 /* .verify_alg = */ {}, 0,
2687 /* .cpu_mask = */ {}, 0,
2688 /* .csums_alg = */ {}, 0,
2692 /* Have to use that way, because the layout differs between
2693 big endian and little endian */
2694 mdev->state = (union drbd_state) {
2695 { .role = R_SECONDARY,
2697 .conn = C_STANDALONE,
2704 void drbd_init_set_defaults(struct drbd_conf *mdev)
2706 /* the memset(,0,) did most of this.
2707 * note: only assignments, no allocation in here */
2709 drbd_set_defaults(mdev);
2711 /* for now, we do NOT yet support it,
2712 * even though we start some framework
2713 * to eventually support barriers */
2714 set_bit(NO_BARRIER_SUPP, &mdev->flags);
2716 atomic_set(&mdev->ap_bio_cnt, 0);
2717 atomic_set(&mdev->ap_pending_cnt, 0);
2718 atomic_set(&mdev->rs_pending_cnt, 0);
2719 atomic_set(&mdev->unacked_cnt, 0);
2720 atomic_set(&mdev->local_cnt, 0);
2721 atomic_set(&mdev->net_cnt, 0);
2722 atomic_set(&mdev->packet_seq, 0);
2723 atomic_set(&mdev->pp_in_use, 0);
2725 mutex_init(&mdev->md_io_mutex);
2726 mutex_init(&mdev->data.mutex);
2727 mutex_init(&mdev->meta.mutex);
2728 sema_init(&mdev->data.work.s, 0);
2729 sema_init(&mdev->meta.work.s, 0);
2730 mutex_init(&mdev->state_mutex);
2732 spin_lock_init(&mdev->data.work.q_lock);
2733 spin_lock_init(&mdev->meta.work.q_lock);
2735 spin_lock_init(&mdev->al_lock);
2736 spin_lock_init(&mdev->req_lock);
2737 spin_lock_init(&mdev->peer_seq_lock);
2738 spin_lock_init(&mdev->epoch_lock);
2740 INIT_LIST_HEAD(&mdev->active_ee);
2741 INIT_LIST_HEAD(&mdev->sync_ee);
2742 INIT_LIST_HEAD(&mdev->done_ee);
2743 INIT_LIST_HEAD(&mdev->read_ee);
2744 INIT_LIST_HEAD(&mdev->net_ee);
2745 INIT_LIST_HEAD(&mdev->resync_reads);
2746 INIT_LIST_HEAD(&mdev->data.work.q);
2747 INIT_LIST_HEAD(&mdev->meta.work.q);
2748 INIT_LIST_HEAD(&mdev->resync_work.list);
2749 INIT_LIST_HEAD(&mdev->unplug_work.list);
2750 INIT_LIST_HEAD(&mdev->md_sync_work.list);
2751 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
2753 mdev->resync_work.cb = w_resync_inactive;
2754 mdev->unplug_work.cb = w_send_write_hint;
2755 mdev->md_sync_work.cb = w_md_sync;
2756 mdev->bm_io_work.w.cb = w_bitmap_io;
2757 init_timer(&mdev->resync_timer);
2758 init_timer(&mdev->md_sync_timer);
2759 mdev->resync_timer.function = resync_timer_fn;
2760 mdev->resync_timer.data = (unsigned long) mdev;
2761 mdev->md_sync_timer.function = md_sync_timer_fn;
2762 mdev->md_sync_timer.data = (unsigned long) mdev;
2764 init_waitqueue_head(&mdev->misc_wait);
2765 init_waitqueue_head(&mdev->state_wait);
2766 init_waitqueue_head(&mdev->ee_wait);
2767 init_waitqueue_head(&mdev->al_wait);
2768 init_waitqueue_head(&mdev->seq_wait);
2770 drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
2771 drbd_thread_init(mdev, &mdev->worker, drbd_worker);
2772 drbd_thread_init(mdev, &mdev->asender, drbd_asender);
2774 mdev->agreed_pro_version = PRO_VERSION_MAX;
2775 mdev->write_ordering = WO_bio_barrier;
2776 mdev->resync_wenr = LC_FREE;
2779 void drbd_mdev_cleanup(struct drbd_conf *mdev)
2781 if (mdev->receiver.t_state != None)
2782 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
2783 mdev->receiver.t_state);
2785 /* no need to lock it, I'm the only thread alive */
2786 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
2787 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
2798 mdev->rs_mark_left =
2799 mdev->rs_mark_time = 0;
2800 D_ASSERT(mdev->net_conf == NULL);
2802 drbd_set_my_capacity(mdev, 0);
2804 /* maybe never allocated. */
2805 drbd_bm_resize(mdev, 0, 1);
2806 drbd_bm_cleanup(mdev);
2809 drbd_free_resources(mdev);
2812 * currently we call drbd_init_ee only on module load, so
2813 * we may call drbd_release_ee only on module unload!
2815 D_ASSERT(list_empty(&mdev->active_ee));
2816 D_ASSERT(list_empty(&mdev->sync_ee));
2817 D_ASSERT(list_empty(&mdev->done_ee));
2818 D_ASSERT(list_empty(&mdev->read_ee));
2819 D_ASSERT(list_empty(&mdev->net_ee));
2820 D_ASSERT(list_empty(&mdev->resync_reads));
2821 D_ASSERT(list_empty(&mdev->data.work.q));
2822 D_ASSERT(list_empty(&mdev->meta.work.q));
2823 D_ASSERT(list_empty(&mdev->resync_work.list));
2824 D_ASSERT(list_empty(&mdev->unplug_work.list));
2829 static void drbd_destroy_mempools(void)
2833 while (drbd_pp_pool) {
2834 page = drbd_pp_pool;
2835 drbd_pp_pool = (struct page *)page_private(page);
2840 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
2842 if (drbd_ee_mempool)
2843 mempool_destroy(drbd_ee_mempool);
2844 if (drbd_request_mempool)
2845 mempool_destroy(drbd_request_mempool);
2847 kmem_cache_destroy(drbd_ee_cache);
2848 if (drbd_request_cache)
2849 kmem_cache_destroy(drbd_request_cache);
2850 if (drbd_bm_ext_cache)
2851 kmem_cache_destroy(drbd_bm_ext_cache);
2852 if (drbd_al_ext_cache)
2853 kmem_cache_destroy(drbd_al_ext_cache);
2855 drbd_ee_mempool = NULL;
2856 drbd_request_mempool = NULL;
2857 drbd_ee_cache = NULL;
2858 drbd_request_cache = NULL;
2859 drbd_bm_ext_cache = NULL;
2860 drbd_al_ext_cache = NULL;
2865 static int drbd_create_mempools(void)
2868 const int number = (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE) * minor_count;
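	/* Added sizing note (assuming the usual 32 KiB maximum segment size and
	 * 4 KiB pages): "number" works out to 8 pages per allowed minor; it is
	 * used both as the minimum element count of the mempools below and as
	 * the number of pages pre-allocated into drbd's page pool. */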
2871 /* prepare our caches and mempools */
2872 drbd_request_mempool = NULL;
2873 drbd_ee_cache = NULL;
2874 drbd_request_cache = NULL;
2875 drbd_bm_ext_cache = NULL;
2876 drbd_al_ext_cache = NULL;
2877 drbd_pp_pool = NULL;
2880 drbd_request_cache = kmem_cache_create(
2881 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2882 if (drbd_request_cache == NULL)
2885 drbd_ee_cache = kmem_cache_create(
2886 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
2887 if (drbd_ee_cache == NULL)
2890 drbd_bm_ext_cache = kmem_cache_create(
2891 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2892 if (drbd_bm_ext_cache == NULL)
2895 drbd_al_ext_cache = kmem_cache_create(
2896 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2897 if (drbd_al_ext_cache == NULL)
2901 drbd_request_mempool = mempool_create(number,
2902 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
2903 if (drbd_request_mempool == NULL)
2906 drbd_ee_mempool = mempool_create(number,
2907 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
2908 if (drbd_ee_mempool == NULL)
2911 /* drbd's page pool */
2912 spin_lock_init(&drbd_pp_lock);
2914 for (i = 0; i < number; i++) {
2915 page = alloc_page(GFP_HIGHUSER);
2918 set_page_private(page, (unsigned long)drbd_pp_pool);
2919 drbd_pp_pool = page;
2921 drbd_pp_vacant = number;
2926 drbd_destroy_mempools(); /* in case we allocated some */
2930 static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
2933 /* just so we have it. you never know what interesting things we
2934 * might want to do here some day...
2940 static struct notifier_block drbd_notifier = {
2941 .notifier_call = drbd_notify_sys,
2944 static void drbd_release_ee_lists(struct drbd_conf *mdev)
2948 rr = drbd_release_ee(mdev, &mdev->active_ee);
2950 dev_err(DEV, "%d EEs in active list found!\n", rr);
2952 rr = drbd_release_ee(mdev, &mdev->sync_ee);
2954 dev_err(DEV, "%d EEs in sync list found!\n", rr);
2956 rr = drbd_release_ee(mdev, &mdev->read_ee);
2958 dev_err(DEV, "%d EEs in read list found!\n", rr);
2960 rr = drbd_release_ee(mdev, &mdev->done_ee);
2962 dev_err(DEV, "%d EEs in done list found!\n", rr);
2964 rr = drbd_release_ee(mdev, &mdev->net_ee);
2966 dev_err(DEV, "%d EEs in net list found!\n", rr);
2969 /* caution. no locking.
2970 * currently only used from module cleanup code. */
2971 static void drbd_delete_device(unsigned int minor)
2973 struct drbd_conf *mdev = minor_to_mdev(minor);
2978 /* paranoia asserts */
2979 if (mdev->open_cnt != 0)
2980 dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
2981 __FILE__ , __LINE__);
2983 ERR_IF (!list_empty(&mdev->data.work.q)) {
2984 struct list_head *lp;
2985 list_for_each(lp, &mdev->data.work.q) {
2986 dev_err(DEV, "lp = %p\n", lp);
2989 /* end paranoia asserts */
2991 del_gendisk(mdev->vdisk);
2993 /* cleanup stuff that may have been allocated during
2994 * device (re-)configuration or state changes */
2996 if (mdev->this_bdev)
2997 bdput(mdev->this_bdev);
2999 drbd_free_resources(mdev);
3001 drbd_release_ee_lists(mdev);
3003 /* should be free'd on disconnect? */
3004 kfree(mdev->ee_hash);
3006 mdev->ee_hash_s = 0;
3007 mdev->ee_hash = NULL;
3010 lc_destroy(mdev->act_log);
3011 lc_destroy(mdev->resync);
3013 kfree(mdev->p_uuid);
3014 /* mdev->p_uuid = NULL; */
3016 kfree(mdev->int_dig_out);
3017 kfree(mdev->int_dig_in);
3018 kfree(mdev->int_dig_vv);
3020 /* cleanup the rest that has been
3021 * allocated from drbd_new_device
3022 * and actually free the mdev itself */
3023 drbd_free_mdev(mdev);
3026 static void drbd_cleanup(void)
3030 unregister_reboot_notifier(&drbd_notifier);
3036 remove_proc_entry("drbd", NULL);
3039 drbd_delete_device(i);
3040 drbd_destroy_mempools();
3045 unregister_blkdev(DRBD_MAJOR, "drbd");
3047 printk(KERN_INFO "drbd: module cleanup done.\n");
3051 * drbd_congested() - Callback for pdflush
3052 * @congested_data: User data
3053 * @bdi_bits: Bits pdflush is currently interested in
3055 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3057 static int drbd_congested(void *congested_data, int bdi_bits)
3059 struct drbd_conf *mdev = congested_data;
3060 struct request_queue *q;
3064 if (!__inc_ap_bio_cond(mdev)) {
3065 /* DRBD has frozen IO */
3071 if (get_ldev(mdev)) {
3072 q = bdev_get_queue(mdev->ldev->backing_bdev);
3073 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3079 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3080 r |= (1 << BDI_async_congested);
3081 reason = reason == 'b' ? 'a' : 'n';
3085 mdev->congestion_reason = reason;
3089 struct drbd_conf *drbd_new_device(unsigned int minor)
3091 struct drbd_conf *mdev;
3092 struct gendisk *disk;
3093 struct request_queue *q;
3095 /* GFP_KERNEL, we are outside of all write-out paths */
3096 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3099 if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3100 goto out_no_cpumask;
3102 mdev->minor = minor;
3104 drbd_init_set_defaults(mdev);
3106 q = blk_alloc_queue(GFP_KERNEL);
3110 q->queuedata = mdev;
3112 disk = alloc_disk(1);
3117 set_disk_ro(disk, TRUE);
3120 disk->major = DRBD_MAJOR;
3121 disk->first_minor = minor;
3122 disk->fops = &drbd_ops;
3123 sprintf(disk->disk_name, "drbd%d", minor);
3124 disk->private_data = mdev;
3126 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3127 /* we have no partitions. we contain only ourselves. */
3128 mdev->this_bdev->bd_contains = mdev->this_bdev;
3130 q->backing_dev_info.congested_fn = drbd_congested;
3131 q->backing_dev_info.congested_data = mdev;
3133 blk_queue_make_request(q, drbd_make_request_26);
3134 blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
3135 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3136 blk_queue_merge_bvec(q, drbd_merge_bvec);
3137 q->queue_lock = &mdev->req_lock; /* needed since we use */
3138 /* plugging on a queue that actually has no requests! */
3139 q->unplug_fn = drbd_unplug_fn;
3141 mdev->md_io_page = alloc_page(GFP_KERNEL);
3142 if (!mdev->md_io_page)
3143 goto out_no_io_page;
3145 if (drbd_bm_init(mdev))
3147 /* no need to lock access, we are still initializing this minor device. */
3151 mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
3152 if (!mdev->app_reads_hash)
3153 goto out_no_app_reads;
3155 mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3156 if (!mdev->current_epoch)
3159 INIT_LIST_HEAD(&mdev->current_epoch->list);
3164 /* out_whatever_else:
3165 kfree(mdev->current_epoch); */
3167 kfree(mdev->app_reads_hash);
3171 drbd_bm_cleanup(mdev);
3173 __free_page(mdev->md_io_page);
3177 blk_cleanup_queue(q);
3179 free_cpumask_var(mdev->cpu_mask);
3185 /* counterpart of drbd_new_device.
3186 * last part of drbd_delete_device. */
3187 void drbd_free_mdev(struct drbd_conf *mdev)
3189 kfree(mdev->current_epoch);
3190 kfree(mdev->app_reads_hash);
3192 if (mdev->bitmap) /* should no longer be there. */
3193 drbd_bm_cleanup(mdev);
3194 __free_page(mdev->md_io_page);
3195 put_disk(mdev->vdisk);
3196 blk_cleanup_queue(mdev->rq_queue);
3197 free_cpumask_var(mdev->cpu_mask);
3202 int __init drbd_init(void)
3206 if (sizeof(struct p_handshake) != 80) {
3208 "drbd: never change the size or layout "
3209 "of the HandShake packet.\n");
3213 if (1 > minor_count || minor_count > 255) {
3215 "drbd: invalid minor_count (%d)\n", minor_count);
3223 err = drbd_nl_init();
3227 err = register_blkdev(DRBD_MAJOR, "drbd");
3230 "drbd: unable to register block device major %d\n",
3235 register_reboot_notifier(&drbd_notifier);
3238 * allocate all necessary structs
3242 init_waitqueue_head(&drbd_pp_wait);
3244 drbd_proc = NULL; /* play safe for drbd_cleanup */
3245 minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3250 err = drbd_create_mempools();
3254 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
3256 printk(KERN_ERR "drbd: unable to register proc file\n");
3260 rwlock_init(&global_state_lock);
3262 printk(KERN_INFO "drbd: initialized. "
3263 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3264 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3265 printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3266 printk(KERN_INFO "drbd: registered as block device major %d\n",
3268 printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3270 return 0; /* Success! */
3275 /* currently always the case */
3276 printk(KERN_ERR "drbd: ran out of memory\n");
3278 printk(KERN_ERR "drbd: initialization failure\n");
3282 void drbd_free_bc(struct drbd_backing_dev *ldev)
3287 bd_release(ldev->backing_bdev);
3288 bd_release(ldev->md_bdev);
3290 fput(ldev->lo_file);
3291 fput(ldev->md_file);
3296 void drbd_free_sock(struct drbd_conf *mdev)
3298 if (mdev->data.socket) {
3299 mutex_lock(&mdev->data.mutex);
3300 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3301 sock_release(mdev->data.socket);
3302 mdev->data.socket = NULL;
3303 mutex_unlock(&mdev->data.mutex);
3305 if (mdev->meta.socket) {
3306 mutex_lock(&mdev->meta.mutex);
3307 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3308 sock_release(mdev->meta.socket);
3309 mdev->meta.socket = NULL;
3310 mutex_unlock(&mdev->meta.mutex);
3315 void drbd_free_resources(struct drbd_conf *mdev)
3317 crypto_free_hash(mdev->csums_tfm);
3318 mdev->csums_tfm = NULL;
3319 crypto_free_hash(mdev->verify_tfm);
3320 mdev->verify_tfm = NULL;
3321 crypto_free_hash(mdev->cram_hmac_tfm);
3322 mdev->cram_hmac_tfm = NULL;
3323 crypto_free_hash(mdev->integrity_w_tfm);
3324 mdev->integrity_w_tfm = NULL;
3325 crypto_free_hash(mdev->integrity_r_tfm);
3326 mdev->integrity_r_tfm = NULL;
3328 drbd_free_sock(mdev);
3331 drbd_free_bc(mdev->ldev);
3332 mdev->ldev = NULL;);
3335 /* meta data management */
3337 struct meta_data_on_disk {
3338 u64 la_size; /* last agreed size. */
3339 u64 uuid[UI_SIZE]; /* UUIDs. */
3342 u32 flags; /* MDF */
3345 u32 al_offset; /* offset to this block */
3346 u32 al_nr_extents; /* important for restoring the AL */
3347 /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3348 u32 bm_offset; /* offset to the bitmap, from here */
3349 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
3350 u32 reserved_u32[4];
3355 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3356 * @mdev: DRBD device.
3358 void drbd_md_sync(struct drbd_conf *mdev)
3360 struct meta_data_on_disk *buffer;
3364 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3366 del_timer(&mdev->md_sync_timer);
3368 /* Here we use D_FAILED and not D_ATTACHING because we try to write
3369 * metadata even if we detach due to a disk failure! */
3370 if (!get_ldev_if_state(mdev, D_FAILED))
3373 mutex_lock(&mdev->md_io_mutex);
3374 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3375 memset(buffer, 0, 512);
3377 buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3378 for (i = UI_CURRENT; i < UI_SIZE; i++)
3379 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3380 buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3381 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3383 buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
3384 buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
3385 buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3386 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3387 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3389 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
3391 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3392 sector = mdev->ldev->md.md_offset;
3394 if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
3395 clear_bit(MD_DIRTY, &mdev->flags);
3397 /* this was a try anyways ... */
3398 dev_err(DEV, "meta data update failed!\n");
3400 drbd_chk_io_error(mdev, 1, TRUE);
3403 /* Update mdev->ldev->md.la_size_sect,
3404 * since we just updated it in the on-disk metadata. */
3405 mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3407 mutex_unlock(&mdev->md_io_mutex);
3412 * drbd_md_read() - Reads in the meta data super block
3413 * @mdev: DRBD device.
3414 * @bdev: Device from which the meta data should be read in.
3416 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_codes in case
3417 * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3419 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3421 struct meta_data_on_disk *buffer;
3422 int i, rv = NO_ERROR;
3424 if (!get_ldev_if_state(mdev, D_ATTACHING))
3425 return ERR_IO_MD_DISK;
3427 mutex_lock(&mdev->md_io_mutex);
3428 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3430 if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
3431 /* NOTE: can't do normal error processing here as this is
3432 called BEFORE disk is attached */
3433 dev_err(DEV, "Error while reading metadata.\n");
3434 rv = ERR_IO_MD_DISK;
3438 if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
3439 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3440 rv = ERR_MD_INVALID;
3443 if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3444 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3445 be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3446 rv = ERR_MD_INVALID;
3449 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3450 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3451 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3452 rv = ERR_MD_INVALID;
3455 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3456 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3457 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3458 rv = ERR_MD_INVALID;
3462 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3463 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3464 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3465 rv = ERR_MD_INVALID;
3469 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3470 for (i = UI_CURRENT; i < UI_SIZE; i++)
3471 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3472 bdev->md.flags = be32_to_cpu(buffer->flags);
3473 mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3474 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3476 if (mdev->sync_conf.al_extents < 7)
3477 mdev->sync_conf.al_extents = 127;
3480 mutex_unlock(&mdev->md_io_mutex);
3487 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3488 * @mdev: DRBD device.
3490 * Call this function if you change anything that should be written to
3491 * the meta-data super block. This function sets MD_DIRTY, and starts a
3492 * timer that ensures drbd_md_sync() gets called within five seconds.
3494 void drbd_md_mark_dirty(struct drbd_conf *mdev)
3496 set_bit(MD_DIRTY, &mdev->flags);
3497 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
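/* Illustrative sketch only: the intended life cycle around MD_DIRTY.
 * example_md_update() is hypothetical and assumes the caller already
 * holds a local-disk reference (get_ldev).  Changes to the in-memory
 * meta data are marked dirty; the super block is written out later by
 * drbd_md_sync(), either explicitly or, as a safety net, via
 * md_sync_timer -> w_md_sync() within about five seconds. */
static void example_md_update(struct drbd_conf *mdev, u64 new_device_uuid)
{
	mdev->ldev->md.device_uuid = new_device_uuid;	/* change something */
	drbd_md_mark_dirty(mdev);	/* sets MD_DIRTY, arms md_sync_timer */
	/* ... further changes may accumulate ... */
	drbd_md_sync(mdev);		/* writes the super block if still dirty */
}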
3501 static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3505 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
3506 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
3509 void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3511 if (idx == UI_CURRENT) {
3512 if (mdev->state.role == R_PRIMARY)
3517 drbd_set_ed_uuid(mdev, val);
3520 mdev->ldev->md.uuid[idx] = val;
3521 drbd_md_mark_dirty(mdev);
3525 void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3527 if (mdev->ldev->md.uuid[idx]) {
3528 drbd_uuid_move_history(mdev);
3529 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
3531 _drbd_uuid_set(mdev, idx, val);
3535 * drbd_uuid_new_current() - Creates a new current UUID
3536 * @mdev: DRBD device.
3538 * Creates a new current UUID, and rotates the old current UUID into
3539 * the bitmap slot. Causes an incremental resync upon next connect.
3541 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3545 dev_info(DEV, "Creating new current UUID\n");
3546 D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
3547 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
3549 get_random_bytes(&val, sizeof(u64));
3550 _drbd_uuid_set(mdev, UI_CURRENT, val);
3553 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3555 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3559 drbd_uuid_move_history(mdev);
3560 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3561 mdev->ldev->md.uuid[UI_BITMAP] = 0;
3563 if (mdev->ldev->md.uuid[UI_BITMAP])
3564 dev_warn(DEV, "bm UUID already set");
3566 mdev->ldev->md.uuid[UI_BITMAP] = val;
3567 mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);
3570 drbd_md_mark_dirty(mdev);
3574 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3575 * @mdev: DRBD device.
3577 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3579 int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3583 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3584 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3586 drbd_bm_set_all(mdev);
3588 rv = drbd_bm_write(mdev);
3591 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3602 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3603 * @mdev: DRBD device.
3605 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3607 int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3611 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3612 drbd_bm_clear_all(mdev);
3613 rv = drbd_bm_write(mdev);
3620 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3622 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
3625 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3627 drbd_bm_lock(mdev, work->why);
3628 rv = work->io_fn(mdev);
3629 drbd_bm_unlock(mdev);
3631 clear_bit(BITMAP_IO, &mdev->flags);
3632 wake_up(&mdev->misc_wait);
3635 work->done(mdev, rv);
3637 clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3644 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3645 * @mdev: DRBD device.
3646 * @io_fn: IO callback to be called when bitmap IO is possible
3647 * @done: callback to be called after the bitmap IO was performed
3648 * @why: Descriptive text of the reason for doing the IO
3650 * While IO on the bitmap happens we freeze application IO, thus ensuring
3651 * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
3652 * called from worker context. It MUST NOT be used while a previous such
3653 * work is still pending!
3655 void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3656 int (*io_fn)(struct drbd_conf *),
3657 void (*done)(struct drbd_conf *, int),
3660 D_ASSERT(current == mdev->worker.task);
3662 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
3663 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
3664 D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
3665 if (mdev->bm_io_work.why)
3666 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
3667 why, mdev->bm_io_work.why);
3669 mdev->bm_io_work.io_fn = io_fn;
3670 mdev->bm_io_work.done = done;
3671 mdev->bm_io_work.why = why;
3673 set_bit(BITMAP_IO, &mdev->flags);
3674 if (atomic_read(&mdev->ap_bio_cnt) == 0) {
3675 if (list_empty(&mdev->bm_io_work.w.list)) {
3676 set_bit(BITMAP_IO_QUEUED, &mdev->flags);
3677 drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
3679 dev_err(DEV, "FIXME avoided double queuing bm_io_work\n");
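/* Illustrative sketch only: queueing a full-bitmap write from worker
 * context.  example_bm_written() is a hypothetical completion handler;
 * real callers pass their own done callbacks and reason strings. */
static void example_bm_written(struct drbd_conf *mdev, int rv)
{
	dev_info(DEV, "example bitmap IO finished, rv=%d\n", rv);
}

static void example_queue_full_bm_write(struct drbd_conf *mdev)
{
	/* must run on the worker, with no other bitmap IO pending */
	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write,
			     &example_bm_written, "example full bitmap write");
}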
3684 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
3685 * @mdev: DRBD device.
3686 * @io_fn: IO callback to be called when bitmap IO is possible
3687 * @why: Descriptive text of the reason for doing the IO
3689 * Freezes application IO while the actual IO operation runs. This
3690 * function MAY NOT be called from worker context.
3692 int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
3696 D_ASSERT(current != mdev->worker.task);
3698 drbd_suspend_io(mdev);
3700 drbd_bm_lock(mdev, why);
3702 drbd_bm_unlock(mdev);
3704 drbd_resume_io(mdev);
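/* Illustrative sketch only: the synchronous counterpart, usable from
 * non-worker context (e.g. from a configuration request). */
static int example_clear_bitmap_now(struct drbd_conf *mdev)
{
	return drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			      "example clear bitmap");
}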
3709 void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3711 if ((mdev->ldev->md.flags & flag) != flag) {
3712 drbd_md_mark_dirty(mdev);
3713 mdev->ldev->md.flags |= flag;
3717 void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3719 if ((mdev->ldev->md.flags & flag) != 0) {
3720 drbd_md_mark_dirty(mdev);
3721 mdev->ldev->md.flags &= ~flag;
3724 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3726 return (bdev->md.flags & flag) != 0;
3729 static void md_sync_timer_fn(unsigned long data)
3731 struct drbd_conf *mdev = (struct drbd_conf *) data;
3733 drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
3736 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3738 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
3744 #ifdef CONFIG_DRBD_FAULT_INJECTION
3745 /* Fault insertion support including random number generator shamelessly
3746 * stolen from kernel/rcutorture.c */
3747 struct fault_random_state {
3748 unsigned long state;
3749 unsigned long count;
3752 #define FAULT_RANDOM_MULT 39916801 /* prime */
3753 #define FAULT_RANDOM_ADD 479001701 /* prime */
3754 #define FAULT_RANDOM_REFRESH 10000
3757 * Crude but fast random-number generator. Uses a linear congruential
3758 * generator, with occasional help from get_random_bytes().
3760 static unsigned long
3761 _drbd_fault_random(struct fault_random_state *rsp)
3765 if (!rsp->count--) {
3766 get_random_bytes(&refresh, sizeof(refresh));
3767 rsp->state += refresh;
3768 rsp->count = FAULT_RANDOM_REFRESH;
3770 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
3771 return swahw32(rsp->state);
3775 _drbd_fault_str(unsigned int type) {
3776 static char *_faults[] = {
3777 [DRBD_FAULT_MD_WR] = "Meta-data write",
3778 [DRBD_FAULT_MD_RD] = "Meta-data read",
3779 [DRBD_FAULT_RS_WR] = "Resync write",
3780 [DRBD_FAULT_RS_RD] = "Resync read",
3781 [DRBD_FAULT_DT_WR] = "Data write",
3782 [DRBD_FAULT_DT_RD] = "Data read",
3783 [DRBD_FAULT_DT_RA] = "Data read ahead",
3784 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
3785 [DRBD_FAULT_AL_EE] = "EE allocation",
3786 [DRBD_FAULT_RECEIVE] = "receive data corruption",
3789 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
3793 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
3795 static struct fault_random_state rrs = {0, 0};
3797 unsigned int ret = (
3799 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
3800 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
3805 if (__ratelimit(&drbd_ratelimit_state))
3806 dev_warn(DEV, "***Simulating %s failure\n",
3807 _drbd_fault_str(type));
3814 const char *drbd_buildtag(void)
3816 /* DRBD built from external sources has a reference here to the
3817 git hash of the source code. */
3819 static char buildtag[38] = "\0uilt-in";
3821 if (buildtag[0] == 0) {
3822 #ifdef CONFIG_MODULES
3823 if (THIS_MODULE != NULL)
3824 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
3833 module_init(drbd_init)
3834 module_exit(drbd_cleanup)
3836 EXPORT_SYMBOL(drbd_conn_str);
3837 EXPORT_SYMBOL(drbd_role_str);
3838 EXPORT_SYMBOL(drbd_disk_str);
3839 EXPORT_SYMBOL(drbd_set_st_err_str);