/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/connector.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_tag_magic.h>
#include <linux/drbd_limits.h>
#include <linux/compiler.h>
#include <linux/kthread.h>
static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);

/* see get_sb_bdev and bd_claim */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
/* Generate the tag_list to struct functions */
#define NL_PACKET(name, number, fields) \
static int name ## _from_tags(struct drbd_conf *mdev, \
        unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
static int name ## _from_tags(struct drbd_conf *mdev, \
        unsigned short *tags, struct name *arg) \
{ \
        int tag; \
        int dlen; \
        \
        while ((tag = get_unaligned(tags++)) != TT_END) { \
                dlen = get_unaligned(tags++); \
                switch (tag_number(tag)) { \
                fields \
                default: \
                        if (tag & T_MANDATORY) { \
                                dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
                                return 0; \
                        } \
                } \
                tags = (unsigned short *)((char *)tags + dlen); \
        } \
        return 1; \
}
#define NL_INTEGER(pn, pr, member) \
        case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
                arg->member = get_unaligned((int *)(tags)); \
                break;
#define NL_INT64(pn, pr, member) \
        case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
                arg->member = get_unaligned((u64 *)(tags)); \
                break;
#define NL_BIT(pn, pr, member) \
        case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
                arg->member = *(char *)(tags) ? 1 : 0; \
                break;
#define NL_STRING(pn, pr, member, len) \
        case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
                if (dlen > len) { \
                        dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
                                #member, dlen, (unsigned int)len); \
                        return 0; \
                } \
                arg->member ## _len = dlen; \
                memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
                break;
#include "linux/drbd_nl.h"
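
/*
 * Illustrative sketch (not part of the original source): the tag list
 * consumed by the generated *_from_tags() functions above is a flat
 * array of 16 bit words; each field is encoded as
 *
 *      [ tag (number|type|T_MANDATORY) ][ dlen ][ dlen bytes payload ]
 *
 * and the list is terminated by TT_END.  Assuming little endian and a
 * hypothetical integer field with tag number 1 and value 7, the words
 * would be:  { 1 | TT_INTEGER, sizeof(int), 0x0007, 0x0000, TT_END }.
 */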
/* Generate the struct to tag_list functions */
#define NL_PACKET(name, number, fields) \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
        struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
        struct name *arg, unsigned short *tags) \
{ \
        fields \
        return tags; \
}
#define NL_INTEGER(pn, pr, member) \
        put_unaligned(pn | pr | TT_INTEGER, tags++); \
        put_unaligned(sizeof(int), tags++); \
        put_unaligned(arg->member, (int *)tags); \
        tags = (unsigned short *)((char *)tags+sizeof(int));
#define NL_INT64(pn, pr, member) \
        put_unaligned(pn | pr | TT_INT64, tags++); \
        put_unaligned(sizeof(u64), tags++); \
        put_unaligned(arg->member, (u64 *)tags); \
        tags = (unsigned short *)((char *)tags+sizeof(u64));
#define NL_BIT(pn, pr, member) \
        put_unaligned(pn | pr | TT_BIT, tags++); \
        put_unaligned(sizeof(char), tags++); \
        *(char *)tags = arg->member; \
        tags = (unsigned short *)((char *)tags+sizeof(char));
#define NL_STRING(pn, pr, member, len) \
        put_unaligned(pn | pr | TT_STRING, tags++); \
        put_unaligned(arg->member ## _len, tags++); \
        memcpy(tags, arg->member, arg->member ## _len); \
        tags = (unsigned short *)((char *)tags + arg->member ## _len);
#include "linux/drbd_nl.h"
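
/*
 * Hypothetical round trip through the generated converters (sketch only;
 * "struct primary" and its primary_force member are the generated names
 * assumed here):
 *
 *      unsigned short tags[64], *tl;
 *      struct primary in = { .primary_force = 1 }, out;
 *
 *      tl = primary_to_tags(mdev, &in, tags);
 *      put_unaligned(TT_END, tl++);
 *      if (primary_from_tags(mdev, tags, &out))
 *              D_ASSERT(out.primary_force == 1);
 */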
void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
void drbd_nl_send_reply(struct cn_msg *, int);
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
        char *envp[] = { "HOME=/",
                        "TERM=linux",
                        "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
                        NULL, /* Will be set to address family */
                        NULL, /* Will be set to address */
                        NULL };
        char mb[12], af[20], ad[60], *afs;
        char *argv[] = {usermode_helper, cmd, mb, NULL };
        int ret;

        snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));

        if (get_net_conf(mdev)) {
                switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
                case AF_INET6:
                        afs = "ipv6";
                        snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
                                 &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
                        break;
                case AF_INET:
                        afs = "ipv4";
                        snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
                                 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
                        break;
                default:
                        afs = "ssocks";
                        snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
                                 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
                }
                snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
                envp[3] = af;
                envp[4] = ad;
                put_net_conf(mdev);
        }

        /* The helper may take some time.
         * write out any unsynced meta data changes now */
        drbd_md_sync(mdev);

        dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);

        drbd_bcast_ev_helper(mdev, cmd);
        ret = call_usermodehelper(usermode_helper, argv, envp, 1);
        if (ret)
                dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
                                usermode_helper, cmd, mb,
                                (ret >> 8) & 0xff, ret);
        else
                dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
                                usermode_helper, cmd, mb,
                                (ret >> 8) & 0xff, ret);

        if (ret < 0) /* Ignore any ERRNOs we got. */
                ret = 0;

        return ret;
}
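
/*
 * Sketch of what a drbd_khelper(mdev, "fence-peer") call amounts to for
 * minor 0 with an IPv4 peer (addresses made up for illustration):
 *
 *      argv = { usermode_helper, "fence-peer", "minor-0", NULL }
 *      envp = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
 *               "DRBD_PEER_AF=ipv4", "DRBD_PEER_ADDRESS=192.0.2.1", NULL }
 *
 * call_usermodehelper() returns a wait status, so the helper's exit code
 * is recovered with (ret >> 8) & 0xff: a helper that does "exit 7" yields
 * ret == 0x0700, and (0x0700 >> 8) & 0xff == 7.
 */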
enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
{
        char *ex_to_string;
        int r;
        enum drbd_disk_state nps;
        enum drbd_fencing_p fp;

        D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

        if (get_ldev_if_state(mdev, D_CONSISTENT)) {
                fp = mdev->ldev->dc.fencing;
                put_ldev(mdev);
        } else {
                dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
                nps = mdev->state.pdsk;
                goto out;
        }

        r = drbd_khelper(mdev, "fence-peer");

        switch ((r>>8) & 0xff) {
        case 3: /* peer is inconsistent */
                ex_to_string = "peer is inconsistent or worse";
                nps = D_INCONSISTENT;
                break;
        case 4: /* peer got outdated, or was already outdated */
                ex_to_string = "peer was fenced";
                nps = D_OUTDATED;
                break;
        case 5: /* peer was down */
                if (mdev->state.disk == D_UP_TO_DATE) {
                        /* we will(have) create(d) a new UUID anyways... */
                        ex_to_string = "peer is unreachable, assumed to be dead";
                        nps = D_OUTDATED;
                } else {
                        ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
                        nps = mdev->state.pdsk;
                }
                break;
        case 6: /* Peer is primary, voluntarily outdate myself.
                 * This is useful when an unconnected R_SECONDARY is asked to
                 * become R_PRIMARY, but finds the other peer being active. */
                ex_to_string = "peer is active";
                dev_warn(DEV, "Peer is primary, outdating myself.\n");
                nps = D_UNKNOWN;
                _drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
                break;
        case 7:
                if (fp != FP_STONITH)
                        dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
                ex_to_string = "peer was stonithed";
                nps = D_OUTDATED;
                break;
        default:
                /* The script is broken ... */
                nps = D_UNKNOWN;
                dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
                return nps;
        }

        dev_info(DEV, "fence-peer helper returned %d (%s)\n",
                        (r>>8) & 0xff, ex_to_string);

out:
        if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
                /* The handler was not successful... unfreeze here, the
                   state engine can not unfreeze... */
                _drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
        }

        return nps;
}
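
/*
 * Summary of the fence-peer exit code convention interpreted by the
 * switch above (sketch):
 *
 *   3  peer is inconsistent             -> nps = D_INCONSISTENT
 *   4  peer was fenced/already outdated -> nps = D_OUTDATED
 *   5  peer was down                    -> D_OUTDATED only if we are UpToDate
 *   6  peer is primary                  -> outdate our own disk instead
 *   7  peer was stonithed               -> expects fencing == Stonith
 *
 * anything else is treated as a broken helper script.
 */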
static int _try_outdate_peer_async(void *data)
{
        struct drbd_conf *mdev = (struct drbd_conf *)data;
        enum drbd_disk_state nps;

        nps = drbd_try_outdate_peer(mdev);
        drbd_request_state(mdev, NS(pdsk, nps));

        return 0;
}

void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
{
        struct task_struct *opa;

        opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
        if (IS_ERR(opa))
                dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
}
int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
        const int max_tries = 4;
        int r = 0;
        int try = 0;
        int forced = 0;
        union drbd_state mask, val;
        enum drbd_disk_state nps;

        if (new_role == R_PRIMARY)
                request_ping(mdev); /* Detect a dead peer ASAP */

        mutex_lock(&mdev->state_mutex);

        mask.i = 0; mask.role = R_MASK;
        val.i  = 0; val.role  = new_role;

        while (try++ < max_tries) {
                r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

                /* in case we first succeeded to outdate,
                 * but now suddenly could establish a connection */
                if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
                        val.pdsk = 0;
                        mask.pdsk = 0;
                        continue;
                }

                if (r == SS_NO_UP_TO_DATE_DISK && force &&
                    (mdev->state.disk < D_UP_TO_DATE &&
                     mdev->state.disk >= D_INCONSISTENT)) {
                        mask.disk = D_MASK;
                        val.disk = D_UP_TO_DATE;
                        forced = 1;
                        continue;
                }

                if (r == SS_NO_UP_TO_DATE_DISK &&
                    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
                        D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
                        nps = drbd_try_outdate_peer(mdev);

                        if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
                                val.disk = D_UP_TO_DATE;
                                mask.disk = D_MASK;
                        }
                        val.pdsk = nps;
                        mask.pdsk = D_MASK;
                        continue;
                }

                if (r == SS_NOTHING_TO_DO)
                        goto fail;
                if (r == SS_PRIMARY_NOP && mask.pdsk == 0) {
                        nps = drbd_try_outdate_peer(mdev);

                        if (force && nps > D_OUTDATED) {
                                dev_warn(DEV, "Forced into split brain situation!\n");
                                nps = D_OUTDATED;
                        }
                        mask.pdsk = D_MASK;
                        val.pdsk = nps;
                        continue;
                }
                if (r == SS_TWO_PRIMARIES) {
                        /* Maybe the peer is detected as dead very soon...
                           retry at most once more in this case. */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10);
                        if (try < max_tries)
                                try = max_tries - 1;
                        continue;
                }
                if (r < SS_SUCCESS) {
                        r = _drbd_request_state(mdev, mask, val,
                                                CS_VERBOSE + CS_WAIT_COMPLETE);
                        if (r < SS_SUCCESS)
                                goto fail;
                }
                break;
        }

        if (r < SS_SUCCESS)
                goto fail;

        if (forced)
                dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

        /* Wait until nothing is on the fly :) */
        wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

        if (new_role == R_SECONDARY) {
                set_disk_ro(mdev->vdisk, TRUE);
                if (get_ldev(mdev)) {
                        mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
                        put_ldev(mdev);
                }
        } else {
                if (get_net_conf(mdev)) {
                        mdev->net_conf->want_lose = 0;
                        put_net_conf(mdev);
                }
                set_disk_ro(mdev->vdisk, FALSE);
                if (get_ldev(mdev)) {
                        if (((mdev->state.conn < C_CONNECTED ||
                              mdev->state.pdsk <= D_FAILED)
                             && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
                                drbd_uuid_new_current(mdev);

                        mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
                        put_ldev(mdev);
                }
        }

        if ((new_role == R_SECONDARY) && get_ldev(mdev)) {
                drbd_al_to_on_disk_bm(mdev);
                put_ldev(mdev);
        }

        if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
                /* if this was forced, we should consider sync */
                if (forced)
                        drbd_send_uuids(mdev);
                drbd_send_state(mdev);
        }

        drbd_md_sync(mdev);

        kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
        mutex_unlock(&mdev->state_mutex);
        return r;
}
static struct drbd_conf *ensure_mdev(int minor, int create)
{
        struct drbd_conf *mdev;

        if (minor >= minor_count)
                return NULL;

        mdev = minor_to_mdev(minor);

        if (!mdev && create) {
                struct gendisk *disk = NULL;
                mdev = drbd_new_device(minor);

                spin_lock_irq(&drbd_pp_lock);
                if (minor_table[minor] == NULL) {
                        minor_table[minor] = mdev;
                        disk = mdev->vdisk;
                        mdev = NULL;
                } /* else: we lost the race */
                spin_unlock_irq(&drbd_pp_lock);

                if (disk) /* we won the race above */
                        /* in case we ever add a drbd_delete_device(),
                         * don't forget the del_gendisk! */
                        add_disk(disk);
                else /* we lost the race above */
                        drbd_free_mdev(mdev);

                mdev = minor_to_mdev(minor);
        }

        return mdev;
}
static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                           struct drbd_nl_cfg_reply *reply)
{
        struct primary primary_args;

        memset(&primary_args, 0, sizeof(struct primary));
        if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
                reply->ret_code = ERR_MANDATORY_TAG;
                return 0;
        }

        reply->ret_code =
                drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);

        return 0;
}

static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                             struct drbd_nl_cfg_reply *reply)
{
        reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);

        return 0;
}
/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
                                       struct drbd_backing_dev *bdev)
{
        sector_t md_size_sect = 0;
        switch (bdev->dc.meta_dev_idx) {
        default:
                /* v07 style fixed size indexed meta data */
                bdev->md.md_size_sect = MD_RESERVED_SECT;
                bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
                bdev->md.al_offset = MD_AL_OFFSET;
                bdev->md.bm_offset = MD_BM_OFFSET;
                break;
        case DRBD_MD_INDEX_FLEX_EXT:
                /* just occupy the full device; unit: sectors */
                bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
                bdev->md.md_offset = 0;
                bdev->md.al_offset = MD_AL_OFFSET;
                bdev->md.bm_offset = MD_BM_OFFSET;
                break;
        case DRBD_MD_INDEX_INTERNAL:
        case DRBD_MD_INDEX_FLEX_INT:
                bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
                /* al size is still fixed */
                bdev->md.al_offset = -MD_AL_MAX_SIZE;
                /* we need (slightly less than) ~ this much bitmap sectors: */
                md_size_sect = drbd_get_capacity(bdev->backing_bdev);
                md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
                md_size_sect = BM_SECT_TO_EXT(md_size_sect);
                md_size_sect = ALIGN(md_size_sect, 8);

                /* plus the "drbd meta data super block",
                 * and the activity log; */
                md_size_sect += MD_BM_OFFSET;

                bdev->md.md_size_sect = md_size_sect;
                /* bitmap offset is adjusted by 'super' block size */
                bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
                break;
        }
}
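
/*
 * Worked example for the internal/flex-internal branch above (sketch,
 * assuming 4 KiB bitmap granularity, i.e. one bitmap bit per 8 sectors):
 * a 1 TiB backing device has 2^31 sectors, so the bitmap needs about
 * 2^31 / 8 bits = 2^25 bytes = 32 MiB, i.e. 65536 sectors; the ALIGN()
 * and BM_SECT_TO_EXT() steps round that up to extent boundaries before
 * MD_BM_OFFSET (super block plus activity log) is added on top.
 */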
char *ppsize(char *buf, unsigned long long size)
{
        /* Needs 9 bytes at max. */
        static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
        int base = 0;
        while (size >= 10000) {
                /* shift + round */
                size = (size >> 10) + !!(size & (1<<9));
                base++;
        }
        sprintf(buf, "%lu %cB", (long)size, units[base]);

        return buf;
}
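
/*
 * Worked examples (sketch): the value passed in is in KB, and each loop
 * iteration divides by 1024, rounding to nearest via bit 9:
 *
 *      ppsize(buf, 2048)     ->  "2048 KB"  (below 10000, no shift)
 *      ppsize(buf, 2097152)  ->  "2048 MB"  (2097152 >> 10)
 *      ppsize(buf, 1048575)  ->  "1024 MB"  (1023 plus the round-up bit)
 */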
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
void drbd_suspend_io(struct drbd_conf *mdev)
{
        set_bit(SUSPEND_IO, &mdev->flags);
        if (is_susp(mdev->state))
                return;
        wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
        clear_bit(SUSPEND_IO, &mdev->flags);
        wake_up(&mdev->misc_wait);
}
/**
 * drbd_determin_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
        sector_t prev_first_sect, prev_size; /* previous meta location */
        sector_t la_size;
        sector_t size;
        char ppb[10];

        int md_moved, la_size_changed;
        enum determine_dev_size rv = unchanged;

        /* race:
         * application request passes inc_ap_bio,
         * but then cannot get an AL-reference.
         * this function later may wait on ap_bio_cnt == 0. -> deadlock.
         *
         * to avoid that:
         * Suspend IO right here.
         * still lock the act_log to not trigger ASSERTs there.
         */
        drbd_suspend_io(mdev);

        /* no wait necessary anymore, actually we could assert that */
        wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

        prev_first_sect = drbd_md_first_sector(mdev->ldev);
        prev_size = mdev->ldev->md.md_size_sect;
        la_size = mdev->ldev->md.la_size_sect;

        /* TODO: should only be some assert here, not (re)init... */
        drbd_md_set_sector_offsets(mdev, mdev->ldev);

        size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

        if (drbd_get_capacity(mdev->this_bdev) != size ||
            drbd_bm_capacity(mdev) != size) {
                int err;
                err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
                if (unlikely(err)) {
                        /* currently there is only one error: ENOMEM! */
                        size = drbd_bm_capacity(mdev)>>1;
                        if (size == 0) {
                                dev_err(DEV, "OUT OF MEMORY! "
                                    "Could not allocate bitmap!\n");
                        } else {
                                dev_err(DEV, "BM resizing failed. "
                                    "Leaving size unchanged at size = %lu KB\n",
                                    (unsigned long)size);
                        }
                        rv = dev_size_error;
                }
                /* racy, see comments above. */
                drbd_set_my_capacity(mdev, size);
                mdev->ldev->md.la_size_sect = size;
                dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
                     (unsigned long long)size>>1);
        }
        if (rv == dev_size_error)
                goto out;

        la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

        md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
                || prev_size       != mdev->ldev->md.md_size_sect;

        if (la_size_changed || md_moved) {
                drbd_al_shrink(mdev); /* All extents inactive. */
                dev_info(DEV, "Writing the whole bitmap, %s\n",
                         la_size_changed && md_moved ? "size changed and md moved" :
                         la_size_changed ? "size changed" : "md moved");
                rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */
                drbd_md_mark_dirty(mdev);
        }

        if (size > la_size)
                rv = grew;
        if (size < la_size)
                rv = shrunk;
out:
        lc_unlock(mdev->act_log);
        wake_up(&mdev->al_wait);
        drbd_resume_io(mdev);

        return rv;
}
static sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
        sector_t p_size = mdev->p_size;   /* partner's disk size. */
        sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
        sector_t m_size; /* my size */
        sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
        sector_t size = 0;

        m_size = drbd_get_max_capacity(bdev);

        if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
                dev_warn(DEV, "Resize while not connected was forced by the user!\n");
                p_size = m_size;
        }

        if (p_size && m_size) {
                size = min_t(sector_t, p_size, m_size);
        } else if (la_size) {
                size = la_size;
                if (m_size && m_size < size)
                        size = m_size;
                if (p_size && p_size < size)
                        size = p_size;
        } else {
                size = m_size ? m_size : p_size;
        }

        if (size == 0)
                dev_err(DEV, "Both nodes diskless!\n");

        if (u_size) {
                if (u_size > size)
                        dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
                                (unsigned long)u_size>>1, (unsigned long)size>>1);
                else
                        size = u_size;
        }

        return size;
}
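
/*
 * In short (sketch of the policy above): with both sizes known the result
 * is min(my size, peer size); with only one side known we fall back to
 * the last agreed size, clamped by whatever is known; and an explicit
 * user-requested disk_size may only select a value at or below that
 * result, never enlarge it.
 */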
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev)
{
        struct lru_cache *n, *t;
        struct lc_element *e;
        unsigned int in_use;
        int i;

        ERR_IF(mdev->sync_conf.al_extents < 7)
                mdev->sync_conf.al_extents = 127;

        if (mdev->act_log &&
            mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
                return 0;

        in_use = 0;
        t = mdev->act_log;
        n = lc_create("act_log", drbd_al_ext_cache,
                mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);

        if (n == NULL) {
                dev_err(DEV, "Cannot allocate act_log lru!\n");
                return -ENOMEM;
        }
        spin_lock_irq(&mdev->al_lock);
        if (t) {
                for (i = 0; i < t->nr_elements; i++) {
                        e = lc_element_by_index(t, i);
                        if (e->refcnt)
                                dev_err(DEV, "refcnt(%d)==%d\n",
                                    e->lc_number, e->refcnt);
                        in_use += e->refcnt;
                }
        }
        if (!in_use)
                mdev->act_log = n;
        spin_unlock_irq(&mdev->al_lock);
        if (in_use) {
                dev_err(DEV, "Activity log still in use!\n");
                lc_destroy(n);
                return -EBUSY;
        } else {
                if (t)
                        lc_destroy(t);
        }
        drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
        return 0;
}
void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local)
{
        struct request_queue * const q = mdev->rq_queue;
        struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
        int max_segments = mdev->ldev->dc.max_bio_bvecs;

        max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);

        blk_queue_max_hw_sectors(q, max_seg_s >> 9);
        blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
        blk_queue_max_segment_size(q, max_seg_s);
        blk_queue_logical_block_size(q, 512);
        blk_queue_segment_boundary(q, PAGE_SIZE-1);
        blk_stack_limits(&q->limits, &b->limits, 0);

        dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q));

        if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
                dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
                         q->backing_dev_info.ra_pages,
                         b->backing_dev_info.ra_pages);
                q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
        }
}
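
/*
 * Illustrative numbers (made up): if the backing queue advertises
 * max_sectors = 128 at a 512 byte logical block size, the min() above
 * caps max_seg_s at 128 * 512 = 64 KiB, so the DRBD queue ends up with
 * blk_queue_max_hw_sectors(q, 128) and a 64 KiB max segment size, no
 * matter how large the requested max_seg_s was.
 */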
/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * Wait for a potentially exiting worker, then restart it,
 * or start a new one. Flush any pending work, there may still be an
 * after_state_change queued.
 */
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
        wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
        wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
        drbd_thread_start(&mdev->worker);
        drbd_flush_workqueue(mdev);
}
/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void drbd_reconfig_done(struct drbd_conf *mdev)
{
        spin_lock_irq(&mdev->req_lock);
        if (mdev->state.disk == D_DISKLESS &&
            mdev->state.conn == C_STANDALONE &&
            mdev->state.role == R_SECONDARY) {
                set_bit(DEVICE_DYING, &mdev->flags);
                drbd_thread_stop_nowait(&mdev->worker);
        } else
                clear_bit(CONFIG_PENDING, &mdev->flags);
        spin_unlock_irq(&mdev->req_lock);
        wake_up(&mdev->state_wait);
}
/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
        int s = 0;

        if (lc_try_lock(mdev->act_log)) {
                drbd_al_shrink(mdev);
                lc_unlock(mdev->act_log);
        } else {
                dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
                return;
        }

        spin_lock_irq(&mdev->req_lock);
        if (mdev->state.conn < C_CONNECTED)
                s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);

        spin_unlock_irq(&mdev->req_lock);

        if (s)
                dev_info(DEV, "Suspended AL updates\n");
}
/* does always return 0;
 * interesting return code is in reply->ret_code */
static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                             struct drbd_nl_cfg_reply *reply)
{
        enum drbd_ret_codes retcode;
        enum determine_dev_size dd;
        sector_t max_possible_sectors;
        sector_t min_md_device_sectors;
        struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
        struct inode *inode, *inode2;
        struct lru_cache *resync_lru = NULL;
        union drbd_state ns, os;
        unsigned int max_seg_s;
        int rv;
        int cp_discovered = 0;
        int logical_block_size;

        drbd_reconfig_start(mdev);

        /* if you want to reconfigure, please tear down first */
        if (mdev->state.disk > D_DISKLESS) {
                retcode = ERR_DISK_CONFIGURED;
                goto fail;
        }

        /* allocation not in the IO path, cqueue thread context */
        nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
        if (!nbc) {
                retcode = ERR_NOMEM;
                goto fail;
        }

        nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
        nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
        nbc->dc.fencing       = DRBD_FENCING_DEF;
        nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;

        if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
                retcode = ERR_MANDATORY_TAG;
                goto fail;
        }

        if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
                retcode = ERR_MD_IDX_INVALID;
                goto fail;
        }

        if (get_net_conf(mdev)) {
                int prot = mdev->net_conf->wire_protocol;
                put_net_conf(mdev);
                if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
                        retcode = ERR_STONITH_AND_PROT_A;
                        goto fail;
                }
        }

        nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0);
        if (IS_ERR(nbc->lo_file)) {
                dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
                    PTR_ERR(nbc->lo_file));
                nbc->lo_file = NULL;
                retcode = ERR_OPEN_DISK;
                goto fail;
        }

        inode = nbc->lo_file->f_dentry->d_inode;

        if (!S_ISBLK(inode->i_mode)) {
                retcode = ERR_DISK_NOT_BDEV;
                goto fail;
        }

        nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0);
        if (IS_ERR(nbc->md_file)) {
                dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
                    PTR_ERR(nbc->md_file));
                nbc->md_file = NULL;
                retcode = ERR_OPEN_MD_DISK;
                goto fail;
        }

        inode2 = nbc->md_file->f_dentry->d_inode;

        if (!S_ISBLK(inode2->i_mode)) {
                retcode = ERR_MD_NOT_BDEV;
                goto fail;
        }

        nbc->backing_bdev = inode->i_bdev;
        if (bd_claim(nbc->backing_bdev, mdev)) {
                printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n",
                       nbc->backing_bdev, mdev,
                       nbc->backing_bdev->bd_holder,
                       nbc->backing_bdev->bd_contains->bd_holder,
                       nbc->backing_bdev->bd_holders);
                retcode = ERR_BDCLAIM_DISK;
                goto fail;
        }

        resync_lru = lc_create("resync", drbd_bm_ext_cache,
                        61, sizeof(struct bm_extent),
                        offsetof(struct bm_extent, lce));
        if (!resync_lru) {
                retcode = ERR_NOMEM;
                goto release_bdev_fail;
        }

        /* meta_dev_idx >= 0: external fixed size,
         * possibly multiple drbd sharing one meta device.
         * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is
         * not yet used by some other drbd minor!
         * (if you use drbd.conf + drbdadm,
         * that should check it for you already; but if you don't, or someone
         * fooled it, we need to double check here) */
        nbc->md_bdev = inode2->i_bdev;
        if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev
                                : (void *) drbd_m_holder)) {
                retcode = ERR_BDCLAIM_MD_DISK;
                goto release_bdev_fail;
        }

        if ((nbc->backing_bdev == nbc->md_bdev) !=
            (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
             nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
                retcode = ERR_MD_IDX_INVALID;
                goto release_bdev2_fail;
        }

        /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
        drbd_md_set_sector_offsets(mdev, nbc);

        if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
                dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
                        (unsigned long long) drbd_get_max_capacity(nbc),
                        (unsigned long long) nbc->dc.disk_size);
                retcode = ERR_DISK_TO_SMALL;
                goto release_bdev2_fail;
        }

        if (nbc->dc.meta_dev_idx < 0) {
                max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
                /* at least one MB, otherwise it does not make sense */
                min_md_device_sectors = (2<<10);
        } else {
                max_possible_sectors = DRBD_MAX_SECTORS;
                min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
        }

        if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
                retcode = ERR_MD_DISK_TO_SMALL;
                dev_warn(DEV, "refusing attach: md-device too small, "
                         "at least %llu sectors needed for this meta-disk type\n",
                         (unsigned long long) min_md_device_sectors);
                goto release_bdev2_fail;
        }

        /* Make sure the new disk is big enough
         * (we may currently be R_PRIMARY with no local disk...) */
        if (drbd_get_max_capacity(nbc) <
            drbd_get_capacity(mdev->this_bdev)) {
                retcode = ERR_DISK_TO_SMALL;
                goto release_bdev2_fail;
        }

        nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

        if (nbc->known_size > max_possible_sectors) {
                dev_warn(DEV, "==> truncating very big lower level device "
                        "to currently maximum possible %llu sectors <==\n",
                        (unsigned long long) max_possible_sectors);
                if (nbc->dc.meta_dev_idx >= 0)
                        dev_warn(DEV, "==>> using internal or flexible "
                                      "meta data may help <<==\n");
        }

        drbd_suspend_io(mdev);
        /* also wait for the last barrier ack. */
        wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
        /* and for any other previously queued work */
        drbd_flush_workqueue(mdev);

        retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
        drbd_resume_io(mdev);
        if (retcode < SS_SUCCESS)
                goto release_bdev2_fail;

        if (!get_ldev_if_state(mdev, D_ATTACHING))
                goto force_diskless;

        drbd_md_set_sector_offsets(mdev, nbc);

        /* allocate a second IO page if logical_block_size != 512 */
        logical_block_size = bdev_logical_block_size(nbc->md_bdev);
        if (logical_block_size == 0)
                logical_block_size = MD_SECTOR_SIZE;

        if (logical_block_size != MD_SECTOR_SIZE) {
                if (!mdev->md_io_tmpp) {
                        struct page *page = alloc_page(GFP_NOIO);
                        if (!page)
                                goto force_diskless_dec;

                        dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
                             logical_block_size, MD_SECTOR_SIZE);
                        dev_warn(DEV, "Workaround engaged (has performance impact).\n");

                        mdev->md_io_tmpp = page;
                }
        }

        if (!mdev->bitmap) {
                if (drbd_bm_init(mdev)) {
                        retcode = ERR_NOMEM;
                        goto force_diskless_dec;
                }
        }

        retcode = drbd_md_read(mdev, nbc);
        if (retcode != NO_ERROR)
                goto force_diskless_dec;

        if (mdev->state.conn < C_CONNECTED &&
            mdev->state.role == R_PRIMARY &&
            (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
                dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
                    (unsigned long long)mdev->ed_uuid);
                retcode = ERR_DATA_NOT_CURRENT;
                goto force_diskless_dec;
        }

        /* Since we are diskless, fix the activity log first... */
        if (drbd_check_al_size(mdev)) {
                retcode = ERR_NOMEM;
                goto force_diskless_dec;
        }

        /* Prevent shrinking of consistent devices ! */
        if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
            drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
                dev_warn(DEV, "refusing to truncate a consistent device\n");
                retcode = ERR_DISK_TO_SMALL;
                goto force_diskless_dec;
        }

        if (!drbd_al_read_log(mdev, nbc)) {
                retcode = ERR_IO_MD_DISK;
                goto force_diskless_dec;
        }

        /* Reset the "barriers don't work" bits here, then force meta data to
         * be written, to ensure we determine if barriers are supported. */
        if (nbc->dc.no_md_flush)
                set_bit(MD_NO_BARRIER, &mdev->flags);
        else
                clear_bit(MD_NO_BARRIER, &mdev->flags);

        /* Point of no return reached.
         * Devices and memory are no longer released by error cleanup below.
         * now mdev takes over responsibility, and the state engine should
         * clean it up somewhere. */
        D_ASSERT(mdev->ldev == NULL);
        mdev->ldev = nbc;
        mdev->resync = resync_lru;
        nbc = NULL;
        resync_lru = NULL;

        mdev->write_ordering = WO_bio_barrier;
        drbd_bump_write_ordering(mdev, WO_bio_barrier);

        if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
                set_bit(CRASHED_PRIMARY, &mdev->flags);
        else
                clear_bit(CRASHED_PRIMARY, &mdev->flags);

        if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
            !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
                set_bit(CRASHED_PRIMARY, &mdev->flags);
                cp_discovered = 1;
        }

        max_seg_s = DRBD_MAX_SEGMENT_SIZE;
        if (mdev->state.conn == C_CONNECTED) {
                /* We are Primary, Connected, and now attach a new local
                 * backing store. We must not increase the user visible maximum
                 * bio size on this device to something the peer may not be
                 * able to handle. */
                if (mdev->agreed_pro_version < 94)
                        max_seg_s = queue_max_segment_size(mdev->rq_queue);
                else if (mdev->agreed_pro_version == 94)
                        max_seg_s = DRBD_MAX_SIZE_H80_PACKET;
                /* else: drbd 8.3.9 and later, stay with default */
        }

        drbd_setup_queue_param(mdev, max_seg_s);

        /* If I am currently not R_PRIMARY,
         * but meta data primary indicator is set,
         * I just now recover from a hard crash,
         * and have been R_PRIMARY before that crash.
         *
         * Now, if I had no connection before that crash
         * (have been degraded R_PRIMARY), chances are that
         * I won't find my peer now either.
         *
         * In that case, and _only_ in that case,
         * we use the degr-wfc-timeout instead of the default,
         * so we can automatically recover from a crash of a
         * degraded but active "cluster" after a certain timeout.
         */
        clear_bit(USE_DEGR_WFC_T, &mdev->flags);
        if (mdev->state.role != R_PRIMARY &&
             drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
            !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
                set_bit(USE_DEGR_WFC_T, &mdev->flags);

        dd = drbd_determin_dev_size(mdev, 0);
        if (dd == dev_size_error) {
                retcode = ERR_NOMEM_BITMAP;
                goto force_diskless_dec;
        } else if (dd == grew)
                set_bit(RESYNC_AFTER_NEG, &mdev->flags);

        if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
                dev_info(DEV, "Assuming that all blocks are out of sync "
                     "(aka FullSync)\n");
                if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) {
                        retcode = ERR_IO_MD_DISK;
                        goto force_diskless_dec;
                }
        } else {
                if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) {
                        retcode = ERR_IO_MD_DISK;
                        goto force_diskless_dec;
                }
        }

        if (cp_discovered) {
                drbd_al_apply_to_bm(mdev);
                drbd_al_to_on_disk_bm(mdev);
        }

        if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
                drbd_suspend_al(mdev); /* IO is still suspended here... */

        spin_lock_irq(&mdev->req_lock);
        os = mdev->state;
        ns.i = os.i;
        /* If MDF_CONSISTENT is not set go into inconsistent state,
           otherwise investigate MDF_WasUpToDate...
           If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
           otherwise into D_CONSISTENT state.
        */
        if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
                if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
                        ns.disk = D_CONSISTENT;
                else
                        ns.disk = D_OUTDATED;
        } else {
                ns.disk = D_INCONSISTENT;
        }

        if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
                ns.pdsk = D_OUTDATED;

        if ( ns.disk == D_CONSISTENT &&
            (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
                ns.disk = D_UP_TO_DATE;

        /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
           MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
           this point, because drbd_request_state() modifies these
           flags. */

        /* In case we are C_CONNECTED postpone any decision on the new disk
           state after the negotiation phase. */
        if (mdev->state.conn == C_CONNECTED) {
                mdev->new_state_tmp.i = ns.i;
                ns.i = os.i;
                ns.disk = D_NEGOTIATING;

                /* We expect to receive up-to-date UUIDs soon.
                   To avoid a race in receive_state, free p_uuid while
                   holding req_lock. I.e. atomic with the state change */
                kfree(mdev->p_uuid);
                mdev->p_uuid = NULL;
        }

        rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
        ns = mdev->state;
        spin_unlock_irq(&mdev->req_lock);

        if (rv < SS_SUCCESS)
                goto force_diskless_dec;

        if (mdev->state.role == R_PRIMARY)
                mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
        else
                mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

        drbd_md_mark_dirty(mdev);
        drbd_md_sync(mdev);

        kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
        put_ldev(mdev);
        reply->ret_code = retcode;
        drbd_reconfig_done(mdev);
        return 0;

 force_diskless_dec:
        put_ldev(mdev);
 force_diskless:
        drbd_force_state(mdev, NS(disk, D_DISKLESS));
        drbd_md_sync(mdev);
 release_bdev2_fail:
        if (nbc)
                bd_release(nbc->md_bdev);
        /* fall through */
 release_bdev_fail:
        if (nbc)
                bd_release(nbc->backing_bdev);
        /* fall through */
 fail:
        if (nbc) {
                if (nbc->lo_file)
                        fput(nbc->lo_file);
                if (nbc->md_file)
                        fput(nbc->md_file);
                kfree(nbc);
        }
        lc_destroy(resync_lru);

        reply->ret_code = retcode;
        drbd_reconfig_done(mdev);
        return 0;
}
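
/*
 * Error unwind summary for the attach path above (sketch): the goto
 * targets undo in reverse order of setup - force_diskless_dec drops the
 * get_ldev() reference, force_diskless resets the disk state to
 * D_DISKLESS, release_bdev2_fail additionally gives up the bd_claim on
 * the meta data bdev, release_bdev_fail the claim on the backing bdev,
 * and the final fail path frees nbc and resync_lru before reporting
 * ret_code back to drbdsetup.
 */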
static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                          struct drbd_nl_cfg_reply *reply)
{
        reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
        return 0;
}
static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                            struct drbd_nl_cfg_reply *reply)
{
        int i, ns;
        enum drbd_ret_codes retcode;
        struct net_conf *new_conf = NULL;
        struct crypto_hash *tfm = NULL;
        struct crypto_hash *integrity_w_tfm = NULL;
        struct crypto_hash *integrity_r_tfm = NULL;
        struct hlist_head *new_tl_hash = NULL;
        struct hlist_head *new_ee_hash = NULL;
        struct drbd_conf *odev;
        char hmac_name[CRYPTO_MAX_ALG_NAME];
        void *int_dig_out = NULL;
        void *int_dig_in = NULL;
        void *int_dig_vv = NULL;
        struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;

        drbd_reconfig_start(mdev);

        if (mdev->state.conn > C_STANDALONE) {
                retcode = ERR_NET_CONFIGURED;
                goto fail;
        }

        /* allocation not in the IO path, cqueue thread context */
        new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
        if (!new_conf) {
                retcode = ERR_NOMEM;
                goto fail;
        }

        new_conf->timeout          = DRBD_TIMEOUT_DEF;
        new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
        new_conf->ping_int         = DRBD_PING_INT_DEF;
        new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
        new_conf->max_buffers      = DRBD_MAX_BUFFERS_DEF;
        new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
        new_conf->sndbuf_size      = DRBD_SNDBUF_SIZE_DEF;
        new_conf->rcvbuf_size      = DRBD_RCVBUF_SIZE_DEF;
        new_conf->ko_count         = DRBD_KO_COUNT_DEF;
        new_conf->after_sb_0p      = DRBD_AFTER_SB_0P_DEF;
        new_conf->after_sb_1p      = DRBD_AFTER_SB_1P_DEF;
        new_conf->after_sb_2p      = DRBD_AFTER_SB_2P_DEF;
        new_conf->want_lose        = 0;
        new_conf->two_primaries    = 0;
        new_conf->wire_protocol    = DRBD_PROT_C;
        new_conf->ping_timeo       = DRBD_PING_TIMEO_DEF;
        new_conf->rr_conflict      = DRBD_RR_CONFLICT_DEF;

        if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
                retcode = ERR_MANDATORY_TAG;
                goto fail;
        }

        if (new_conf->two_primaries
            && (new_conf->wire_protocol != DRBD_PROT_C)) {
                retcode = ERR_NOT_PROTO_C;
                goto fail;
        }

        if (get_ldev(mdev)) {
                enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
                put_ldev(mdev);
                if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
                        retcode = ERR_STONITH_AND_PROT_A;
                        goto fail;
                }
        }

        if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
                retcode = ERR_DISCARD;
                goto fail;
        }

        retcode = NO_ERROR;

        new_my_addr = (struct sockaddr *)&new_conf->my_addr;
        new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
        for (i = 0; i < minor_count; i++) {
                odev = minor_to_mdev(i);
                if (!odev || odev == mdev)
                        continue;
                if (get_net_conf(odev)) {
                        taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
                        if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
                            !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
                                retcode = ERR_LOCAL_ADDR;

                        taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
                        if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
                            !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
                                retcode = ERR_PEER_ADDR;

                        put_net_conf(odev);
                        if (retcode != NO_ERROR)
                                goto fail;
                }
        }

        if (new_conf->cram_hmac_alg[0] != 0) {
                snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
                        new_conf->cram_hmac_alg);
                tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
                if (IS_ERR(tfm)) {
                        tfm = NULL;
                        retcode = ERR_AUTH_ALG;
                        goto fail;
                }

                if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
                        retcode = ERR_AUTH_ALG_ND;
                        goto fail;
                }
        }

        if (new_conf->integrity_alg[0]) {
                integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
                if (IS_ERR(integrity_w_tfm)) {
                        integrity_w_tfm = NULL;
                        retcode = ERR_INTEGRITY_ALG;
                        goto fail;
                }

                if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
                        retcode = ERR_INTEGRITY_ALG_ND;
                        goto fail;
                }

                integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
                if (IS_ERR(integrity_r_tfm)) {
                        integrity_r_tfm = NULL;
                        retcode = ERR_INTEGRITY_ALG;
                        goto fail;
                }
        }

        ns = new_conf->max_epoch_size/8;
        if (mdev->tl_hash_s != ns) {
                new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
                if (!new_tl_hash) {
                        retcode = ERR_NOMEM;
                        goto fail;
                }
        }

        ns = new_conf->max_buffers/8;
        if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
                new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
                if (!new_ee_hash) {
                        retcode = ERR_NOMEM;
                        goto fail;
                }
        }

        ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

        if (integrity_w_tfm) {
                i = crypto_hash_digestsize(integrity_w_tfm);
                int_dig_out = kmalloc(i, GFP_KERNEL);
                if (!int_dig_out) {
                        retcode = ERR_NOMEM;
                        goto fail;
                }
                int_dig_in = kmalloc(i, GFP_KERNEL);
                if (!int_dig_in) {
                        retcode = ERR_NOMEM;
                        goto fail;
                }
                int_dig_vv = kmalloc(i, GFP_KERNEL);
                if (!int_dig_vv) {
                        retcode = ERR_NOMEM;
                        goto fail;
                }
        }

        if (!mdev->bitmap) {
                if (drbd_bm_init(mdev)) {
                        retcode = ERR_NOMEM;
                        goto fail;
                }
        }

        drbd_flush_workqueue(mdev);
        spin_lock_irq(&mdev->req_lock);
        if (mdev->net_conf != NULL) {
                retcode = ERR_NET_CONFIGURED;
                spin_unlock_irq(&mdev->req_lock);
                goto fail;
        }
        mdev->net_conf = new_conf;

        if (new_tl_hash) {
                kfree(mdev->tl_hash);
                mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
                mdev->tl_hash = new_tl_hash;
        }

        if (new_ee_hash) {
                kfree(mdev->ee_hash);
                mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
                mdev->ee_hash = new_ee_hash;
        }

        crypto_free_hash(mdev->cram_hmac_tfm);
        mdev->cram_hmac_tfm = tfm;

        crypto_free_hash(mdev->integrity_w_tfm);
        mdev->integrity_w_tfm = integrity_w_tfm;

        crypto_free_hash(mdev->integrity_r_tfm);
        mdev->integrity_r_tfm = integrity_r_tfm;

        kfree(mdev->int_dig_out);
        kfree(mdev->int_dig_in);
        kfree(mdev->int_dig_vv);
        mdev->int_dig_out = int_dig_out;
        mdev->int_dig_in = int_dig_in;
        mdev->int_dig_vv = int_dig_vv;
        retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
        spin_unlock_irq(&mdev->req_lock);

        kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
        reply->ret_code = retcode;
        drbd_reconfig_done(mdev);
        return 0;

fail:
        kfree(int_dig_out);
        kfree(int_dig_in);
        kfree(int_dig_vv);
        crypto_free_hash(tfm);
        crypto_free_hash(integrity_w_tfm);
        crypto_free_hash(integrity_r_tfm);
        kfree(new_tl_hash);
        kfree(new_ee_hash);
        kfree(new_conf);

        reply->ret_code = retcode;
        drbd_reconfig_done(mdev);
        return 0;
}
static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                              struct drbd_nl_cfg_reply *reply)
{
        int retcode;

        retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);

        if (retcode == SS_NOTHING_TO_DO)
                goto done;
        else if (retcode == SS_ALREADY_STANDALONE)
                goto done;
        else if (retcode == SS_PRIMARY_NOP) {
                /* Our state checking code wants to see the peer outdated. */
                retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
                                                       pdsk, D_OUTDATED));
        } else if (retcode == SS_CW_FAILED_BY_PEER) {
                /* The peer probably wants to see us outdated. */
                retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
                                                        disk, D_OUTDATED),
                                              CS_ORDERED);
                if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
                        drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
                        retcode = SS_SUCCESS;
                }
        }

        if (retcode < SS_SUCCESS)
                goto fail;

        if (wait_event_interruptible(mdev->state_wait,
                                     mdev->state.conn != C_DISCONNECTING)) {
                /* Do not test for mdev->state.conn == C_STANDALONE, since
                   someone else might connect us in the mean time! */
                retcode = ERR_INTR;
                goto fail;
        }

 done:
        retcode = NO_ERROR;
 fail:
        drbd_md_sync(mdev);
        reply->ret_code = retcode;
        return 0;
}
void resync_after_online_grow(struct drbd_conf *mdev)
{
        int iass; /* I am sync source */

        dev_info(DEV, "Resync of new storage after online grow\n");
        if (mdev->state.role != mdev->state.peer)
                iass = (mdev->state.role == R_PRIMARY);
        else
                iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);

        if (iass)
                drbd_start_resync(mdev, C_SYNC_SOURCE);
        else
                _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                          struct drbd_nl_cfg_reply *reply)
{
        struct resize rs;
        int retcode = NO_ERROR;
        enum determine_dev_size dd;
        enum dds_flags ddsf;

        memset(&rs, 0, sizeof(struct resize));
        if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
                retcode = ERR_MANDATORY_TAG;
                goto fail;
        }

        if (mdev->state.conn > C_CONNECTED) {
                retcode = ERR_RESIZE_RESYNC;
                goto fail;
        }

        if (mdev->state.role == R_SECONDARY &&
            mdev->state.peer == R_SECONDARY) {
                retcode = ERR_NO_PRIMARY;
                goto fail;
        }

        if (!get_ldev(mdev)) {
                retcode = ERR_NO_DISK;
                goto fail;
        }

        if (rs.no_resync && mdev->agreed_pro_version < 93) {
                retcode = ERR_NEED_APV_93;
                goto fail;
        }

        if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
                mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

        mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
        ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
        dd = drbd_determin_dev_size(mdev, ddsf);
        drbd_md_sync(mdev);
        put_ldev(mdev);
        if (dd == dev_size_error) {
                retcode = ERR_NOMEM_BITMAP;
                goto fail;
        }

        if (mdev->state.conn == C_CONNECTED) {
                if (dd == grew)
                        set_bit(RESIZE_PENDING, &mdev->flags);

                drbd_send_uuids(mdev);
                drbd_send_sizes(mdev, 1, ddsf);
        }

 fail:
        reply->ret_code = retcode;
        return 0;
}
static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                               struct drbd_nl_cfg_reply *reply)
{
        int retcode = NO_ERROR;
        int err;
        int ovr; /* online verify running */
        int rsr; /* re-sync running */
        struct crypto_hash *verify_tfm = NULL;
        struct crypto_hash *csums_tfm = NULL;
        struct syncer_conf sc;
        cpumask_var_t new_cpu_mask;
        int *rs_plan_s = NULL;
        int fifo_size;

        if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
                retcode = ERR_NOMEM;
                goto fail;
        }

        if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
                memset(&sc, 0, sizeof(struct syncer_conf));
                sc.rate       = DRBD_RATE_DEF;
                sc.after      = DRBD_AFTER_DEF;
                sc.al_extents = DRBD_AL_EXTENTS_DEF;
                sc.on_no_data = DRBD_ON_NO_DATA_DEF;
                sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
                sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
                sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
                sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
                sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
        } else
                memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));

        if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
                retcode = ERR_MANDATORY_TAG;
                goto fail;
        }

        /* re-sync running */
        rsr = ( mdev->state.conn == C_SYNC_SOURCE ||
                mdev->state.conn == C_SYNC_TARGET ||
                mdev->state.conn == C_PAUSED_SYNC_S ||
                mdev->state.conn == C_PAUSED_SYNC_T );

        if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
                retcode = ERR_CSUMS_RESYNC_RUNNING;
                goto fail;
        }

        if (!rsr && sc.csums_alg[0]) {
                csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
                if (IS_ERR(csums_tfm)) {
                        csums_tfm = NULL;
                        retcode = ERR_CSUMS_ALG;
                        goto fail;
                }

                if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
                        retcode = ERR_CSUMS_ALG_ND;
                        goto fail;
                }
        }

        /* online verify running */
        ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);

        if (ovr) {
                if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
                        retcode = ERR_VERIFY_RUNNING;
                        goto fail;
                }
        }

        if (!ovr && sc.verify_alg[0]) {
                verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
                if (IS_ERR(verify_tfm)) {
                        verify_tfm = NULL;
                        retcode = ERR_VERIFY_ALG;
                        goto fail;
                }

                if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
                        retcode = ERR_VERIFY_ALG_ND;
                        goto fail;
                }
        }

        /* silently ignore cpu mask on UP kernel */
        if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
                err = __bitmap_parse(sc.cpu_mask, 32, 0,
                                cpumask_bits(new_cpu_mask), nr_cpu_ids);
                if (err) {
                        dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
                        retcode = ERR_CPU_MASK_PARSE;
                        goto fail;
                }
        }

        ERR_IF (sc.rate < 1) sc.rate = 1;
        ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */
#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
        if (sc.al_extents > AL_MAX) {
                dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
                sc.al_extents = AL_MAX;
        }
#undef AL_MAX

        /* to avoid spurious errors when configuring minors before configuring
         * the minors they depend on: if necessary, first create the minor we
         * depend on */
        if (sc.after >= 0)
                ensure_mdev(sc.after, 1);

        /* most sanity checks done, try to assign the new sync-after
         * dependency. need to hold the global lock in there,
         * to avoid a race in the dependency loop check. */
        retcode = drbd_alter_sa(mdev, sc.after);
        if (retcode != NO_ERROR)
                goto fail;

        fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
        if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
                rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
                if (!rs_plan_s) {
                        dev_err(DEV, "kmalloc of fifo_buffer failed");
                        retcode = ERR_NOMEM;
                        goto fail;
                }
        }

        /* ok, assign the rest of it as well.
         * lock against receive_SyncParam() */
        spin_lock(&mdev->peer_seq_lock);
        mdev->sync_conf = sc;

        if (!rsr) {
                crypto_free_hash(mdev->csums_tfm);
                mdev->csums_tfm = csums_tfm;
                csums_tfm = NULL;
        }

        if (!ovr) {
                crypto_free_hash(mdev->verify_tfm);
                mdev->verify_tfm = verify_tfm;
                verify_tfm = NULL;
        }

        if (fifo_size != mdev->rs_plan_s.size) {
                kfree(mdev->rs_plan_s.values);
                mdev->rs_plan_s.values = rs_plan_s;
                mdev->rs_plan_s.size = fifo_size;
                mdev->rs_planed = 0;
                rs_plan_s = NULL;
        }

        spin_unlock(&mdev->peer_seq_lock);

        if (get_ldev(mdev)) {
                wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
                drbd_al_shrink(mdev);
                err = drbd_check_al_size(mdev);
                lc_unlock(mdev->act_log);
                wake_up(&mdev->al_wait);

                put_ldev(mdev);
                drbd_md_sync(mdev);

                if (err) {
                        retcode = ERR_NOMEM;
                        goto fail;
                }
        }

        if (mdev->state.conn >= C_CONNECTED)
                drbd_send_sync_param(mdev, &sc);

        if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
                cpumask_copy(mdev->cpu_mask, new_cpu_mask);
                drbd_calc_cpu_mask(mdev);
                mdev->receiver.reset_cpu_mask = 1;
                mdev->asender.reset_cpu_mask = 1;
                mdev->worker.reset_cpu_mask = 1;
        }

        kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
        kfree(rs_plan_s);
        free_cpumask_var(new_cpu_mask);
        crypto_free_hash(csums_tfm);
        crypto_free_hash(verify_tfm);
        reply->ret_code = retcode;
        return 0;
}
static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                              struct drbd_nl_cfg_reply *reply)
{
        int retcode;

        retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

        if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
                retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

        while (retcode == SS_NEED_CONNECTION) {
                spin_lock_irq(&mdev->req_lock);
                if (mdev->state.conn < C_CONNECTED)
                        retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
                spin_unlock_irq(&mdev->req_lock);

                if (retcode != SS_NEED_CONNECTION)
                        break;

                retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
        }

        reply->ret_code = retcode;
        return 0;
}
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
        int rv;

        rv = drbd_bmio_set_n_write(mdev);
        drbd_suspend_al(mdev);
        return rv;
}
static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                                   struct drbd_nl_cfg_reply *reply)
{
        int retcode;

        retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);

        if (retcode < SS_SUCCESS) {
                if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
                        /* The peer will get a resync upon connect anyways. Just make that
                           into a full resync. */
                        retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
                        if (retcode >= SS_SUCCESS) {
                                /* open coded drbd_bitmap_io() */
                                if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
                                                   "set_n_write from invalidate_peer"))
                                        retcode = ERR_IO_MD_DISK;
                        }
                } else
                        retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
        }

        reply->ret_code = retcode;
        return 0;
}
static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                              struct drbd_nl_cfg_reply *reply)
{
        int retcode = NO_ERROR;

        if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
                retcode = ERR_PAUSE_IS_SET;

        reply->ret_code = retcode;
        return 0;
}

static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                               struct drbd_nl_cfg_reply *reply)
{
        int retcode = NO_ERROR;

        if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO)
                retcode = ERR_PAUSE_IS_CLEAR;

        reply->ret_code = retcode;
        return 0;
}
static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                              struct drbd_nl_cfg_reply *reply)
{
        reply->ret_code = drbd_request_state(mdev, NS(susp, 1));

        return 0;
}

static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                             struct drbd_nl_cfg_reply *reply)
{
        if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
                drbd_uuid_new_current(mdev);
                clear_bit(NEW_CUR_UUID, &mdev->flags);
        }
        drbd_suspend_io(mdev);
        reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
        if (reply->ret_code == SS_SUCCESS) {
                if (mdev->state.conn < C_CONNECTED)
                        tl_clear(mdev);
                if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
                        tl_restart(mdev, fail_frozen_disk_io);
        }
        drbd_resume_io(mdev);

        return 0;
}
static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                           struct drbd_nl_cfg_reply *reply)
{
        reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
        return 0;
}
static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                              struct drbd_nl_cfg_reply *reply)
{
        unsigned short *tl;

        tl = reply->tag_list;

        if (get_ldev(mdev)) {
                tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
                put_ldev(mdev);
        }

        if (get_net_conf(mdev)) {
                tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
                put_net_conf(mdev);
        }
        tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);

        put_unaligned(TT_END, tl++); /* Close the tag list */

        return (int)((char *)tl - (char *)reply->tag_list);
}
static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                             struct drbd_nl_cfg_reply *reply)
{
        unsigned short *tl = reply->tag_list;
        union drbd_state s = mdev->state;
        unsigned long rs_left;
        unsigned int res;

        tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);

        /* no local ref, no bitmap, no syncer progress. */
        if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
                if (get_ldev(mdev)) {
                        drbd_get_syncer_progress(mdev, &rs_left, &res);
                        tl = tl_add_int(tl, T_sync_progress, &res);
                        put_ldev(mdev);
                }
        }
        put_unaligned(TT_END, tl++); /* Close the tag list */

        return (int)((char *)tl - (char *)reply->tag_list);
}
static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                             struct drbd_nl_cfg_reply *reply)
{
        unsigned short *tl;

        tl = reply->tag_list;

        if (get_ldev(mdev)) {
                tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
                tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
                put_ldev(mdev);
        }
        put_unaligned(TT_END, tl++); /* Close the tag list */

        return (int)((char *)tl - (char *)reply->tag_list);
}
/**
 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 */
static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                                    struct drbd_nl_cfg_reply *reply)
{
        unsigned short *tl;
        char rv;

        tl = reply->tag_list;

        rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
             test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;

        tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
        put_unaligned(TT_END, tl++); /* Close the tag list */

        return (int)((char *)tl - (char *)reply->tag_list);
}
static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                            struct drbd_nl_cfg_reply *reply)
{
        /* default to resume from last known position, if possible */
        struct start_ov args =
                { .start_sector = mdev->ov_start_sector };

        if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
                reply->ret_code = ERR_MANDATORY_TAG;
                return 0;
        }

        /* w_make_ov_request expects position to be aligned */
        mdev->ov_start_sector = args.start_sector & ~(sector_t)(BM_SECT_PER_BIT-1);
        reply->ret_code = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
        return 0;
}
static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                              struct drbd_nl_cfg_reply *reply)
{
        int retcode = NO_ERROR;
        int skip_initial_sync = 0;
        int err;
        struct new_c_uuid args;

        memset(&args, 0, sizeof(struct new_c_uuid));
        if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
                reply->ret_code = ERR_MANDATORY_TAG;
                return 0;
        }

        mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */

        if (!get_ldev(mdev)) {
                retcode = ERR_NO_DISK;
                goto out;
        }

        /* this is "skip initial sync", assume to be clean */
        if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
            mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
                dev_info(DEV, "Preparing to skip initial sync\n");
                skip_initial_sync = 1;
        } else if (mdev->state.conn != C_STANDALONE) {
                retcode = ERR_CONNECTED;
                goto out_dec;
        }

        drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
        drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

        if (args.clear_bm) {
                err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid");
                if (err) {
                        dev_err(DEV, "Writing bitmap failed with %d\n", err);
                        retcode = ERR_IO_MD_DISK;
                }
                if (skip_initial_sync) {
                        drbd_send_uuids_skip_initial_sync(mdev);
                        _drbd_uuid_set(mdev, UI_BITMAP, 0);
                        spin_lock_irq(&mdev->req_lock);
                        _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
                                        CS_VERBOSE, NULL);
                        spin_unlock_irq(&mdev->req_lock);
                }
        }

        drbd_md_sync(mdev);
out_dec:
        put_ldev(mdev);
out:
        mutex_unlock(&mdev->state_mutex);

        reply->ret_code = retcode;
        return 0;
}
struct cn_handler_struct {
        int (*function)(struct drbd_conf *,
                        struct drbd_nl_cfg_req *,
                        struct drbd_nl_cfg_reply *);
        int reply_body_size;
};
static struct cn_handler_struct cnd_table[] = {
	[ P_primary ]		= { &drbd_nl_primary,		0 },
	[ P_secondary ]		= { &drbd_nl_secondary,		0 },
	[ P_disk_conf ]		= { &drbd_nl_disk_conf,		0 },
	[ P_detach ]		= { &drbd_nl_detach,		0 },
	[ P_net_conf ]		= { &drbd_nl_net_conf,		0 },
	[ P_disconnect ]	= { &drbd_nl_disconnect,	0 },
	[ P_resize ]		= { &drbd_nl_resize,		0 },
	[ P_syncer_conf ]	= { &drbd_nl_syncer_conf,	0 },
	[ P_invalidate ]	= { &drbd_nl_invalidate,	0 },
	[ P_invalidate_peer ]	= { &drbd_nl_invalidate_peer,	0 },
	[ P_pause_sync ]	= { &drbd_nl_pause_sync,	0 },
	[ P_resume_sync ]	= { &drbd_nl_resume_sync,	0 },
	[ P_suspend_io ]	= { &drbd_nl_suspend_io,	0 },
	[ P_resume_io ]		= { &drbd_nl_resume_io,		0 },
	[ P_outdate ]		= { &drbd_nl_outdate,		0 },
	[ P_get_config ]	= { &drbd_nl_get_config,
				    sizeof(struct syncer_conf_tag_len_struct) +
				    sizeof(struct disk_conf_tag_len_struct) +
				    sizeof(struct net_conf_tag_len_struct) },
	[ P_get_state ]		= { &drbd_nl_get_state,
				    sizeof(struct get_state_tag_len_struct) +
				    sizeof(struct sync_progress_tag_len_struct) },
	[ P_get_uuids ]		= { &drbd_nl_get_uuids,
				    sizeof(struct get_uuids_tag_len_struct) },
	[ P_get_timeout_flag ]	= { &drbd_nl_get_timeout_flag,
				    sizeof(struct get_timeout_flag_tag_len_struct) },
	[ P_start_ov ]		= { &drbd_nl_start_ov,		0 },
	[ P_new_c_uuid ]	= { &drbd_nl_new_c_uuid,	0 },
};
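/*
 * Dispatch table note: the designated initializers above index handlers
 * by packet type. A nonzero reply_body_size marks a query whose reply
 * carries a tag list (it is added to the reply allocation in
 * drbd_connector_callback); plain commands reply with ret_code only.
 * Unpopulated slots (such as packet number 0) remain NULL and are
 * rejected with ERR_PACKET_NR by the callback.
 */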
static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
{
	struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
	struct cn_handler_struct *cm;
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	struct drbd_conf *mdev;
	int retcode, rr;
	int reply_size = sizeof(struct cn_msg)
		+ sizeof(struct drbd_nl_cfg_reply)
		+ sizeof(short int);

	if (!try_module_get(THIS_MODULE)) {
		printk(KERN_ERR "drbd: try_module_get() failed!\n");
		return;
	}

	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
		retcode = ERR_PERM;
		goto fail;
	}

	mdev = ensure_mdev(nlp->drbd_minor,
			(nlp->flags & DRBD_NL_CREATE_DEVICE));
	if (!mdev) {
		retcode = ERR_MINOR_INVALID;
		goto fail;
	}

	if (nlp->packet_type >= P_nl_after_last_packet) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	cm = cnd_table + nlp->packet_type;

	/* This may happen if packet number is 0: */
	if (cm->function == NULL) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	reply_size += cm->reply_body_size;

	/* allocation not in the IO path, cqueue thread context */
	cn_reply = kmalloc(reply_size, GFP_KERNEL);
	if (!cn_reply) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	reply = (struct drbd_nl_cfg_reply *) cn_reply->data;

	reply->packet_type =
		cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
	reply->minor = nlp->drbd_minor;
	reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */
	/* reply->tag_list; might be modified by cm->function. */

	rr = cm->function(mdev, nlp, reply);

	cn_reply->id = req->id;
	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
	cn_reply->flags = 0;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
	if (rr && rr != -ESRCH) /* -ESRCH: no userspace listener; not an error */
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);

	kfree(cn_reply);
	module_put(THIS_MODULE);
	return;
fail:
	drbd_nl_send_reply(req, retcode);
	module_put(THIS_MODULE);
}
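/*
 * For orientation, a minimal userspace sketch (illustration only, not
 * part of this module; error handling elided) of how a request reaches
 * the callback above. It sends an empty-tag-list command for a given
 * packet type and minor over the connector; CN_IDX_DRBD and CN_VAL_DRBD
 * come from linux/connector.h, the request layout from linux/drbd.h:
 *
 *	#include <linux/netlink.h>
 *	#include <linux/connector.h>
 *	#include <linux/drbd.h>
 *	#include <sys/socket.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int send_drbd_cmd(int packet_type, int minor)
 *	{
 *		char buf[NLMSG_SPACE(sizeof(struct cn_msg) +
 *			 sizeof(struct drbd_nl_cfg_req) + sizeof(short))];
 *		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
 *		struct cn_msg *msg = NLMSG_DATA(nlh);
 *		struct drbd_nl_cfg_req *req = (void *)msg->data;
 *		struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
 *		int sk = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
 *
 *		memset(buf, 0, sizeof(buf));
 *		nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*msg) + sizeof(*req) +
 *					      sizeof(short));
 *		nlh->nlmsg_type = NLMSG_DONE;
 *		msg->id.idx = CN_IDX_DRBD;
 *		msg->id.val = CN_VAL_DRBD;
 *		msg->len = sizeof(*req) + sizeof(short);
 *		req->packet_type = packet_type;
 *		req->drbd_minor = minor;
 *		req->tag_list[0] = TT_END;  (empty tag list)
 *		sendto(sk, buf, nlh->nlmsg_len, 0,
 *		       (struct sockaddr *)&sa, sizeof(sa));
 *		close(sk);
 *		return 0;
 *	}
 */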
static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* sequence counter for broadcasts; starts at two. */
static unsigned short *
__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
	unsigned short len, int nul_terminated)
{
	unsigned short l = tag_descriptions[tag_number(tag)].max_len;
	len = (len < l) ? len : l;	/* truncate to the tag's declared max_len */
	put_unaligned(tag, tl++);
	put_unaligned(len, tl++);
	memcpy(tl, data, len);
	tl = (unsigned short *)((char *)tl + len);
	if (nul_terminated)
		*((char *)tl - 1) = 0;	/* force a terminating NUL, even if truncated */
	return tl;
}
static unsigned short *
tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
{
	return __tl_add_blob(tl, tag, data, len, 0);
}

static unsigned short *
tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
{
	/* the terminating NUL is counted in the length */
	return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
}
static unsigned short *
tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
{
	put_unaligned(tag, tl++);
	switch (tag_type(tag)) {
	case TT_INTEGER:
		put_unaligned(sizeof(int), tl++);
		put_unaligned(*(int *)val, (int *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(int));
		break;
	case TT_INT64:
		put_unaligned(sizeof(u64), tl++);
		put_unaligned(*(u64 *)val, (u64 *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(u64));
		break;
	default:
		/* someone did something stupid. */
		;
	}
	return tl;
}
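/*
 * On-the-wire layout of a tag list, as produced by the helpers above
 * (an illustrative sketch; tag and length are native-endian u16):
 *
 *	+-----+--------+---------------+-----+--------+-----+--------+
 *	| tag | length | payload bytes | tag | length | ... | TT_END |
 *	+-----+--------+---------------+-----+--------+-----+--------+
 *
 * The tag's high bits encode the value type (TT_INTEGER, TT_INT64,
 * TT_BIT, TT_STRING), the low bits the tag number; T_MANDATORY marks
 * tags a receiver must not silently skip. Unknown non-mandatory tags
 * are stepped over via the length field, which is what keeps the
 * format forward and backward compatible.
 */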
void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct get_state_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_state() got called\n"); */

	tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_get_state;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}
void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct call_helper_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_ev_helper() got called\n"); */

	tl = tl_add_str(tl, T_helper, helper_name);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_call_helper;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}
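/*
 * Buffer strategy: drbd_bcast_state(), drbd_bcast_ev_helper() and
 * drbd_bcast_sync_progress() build their messages in fixed-size stack
 * buffers, since each payload is bounded by the corresponding
 * *_tag_len_struct. drbd_bcast_ee() below cannot do that: its payload
 * includes e->size bytes of block data, so it kmallocs the buffer
 * (GFP_NOIO, see the comment there).
 */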
void drbd_bcast_ee(struct drbd_conf *mdev,
		const char *reason, const int dgs,
		const char *seen_hash, const char *calc_hash,
		const struct drbd_epoch_entry *e)
{
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	unsigned short *tl;
	struct page *page;
	unsigned len;

	if (!e)
		return;
	if (!reason || !reason[0])
		return;

	/* apparently we have to memcpy twice, first to prepare the data for the
	 * struct cn_msg, then within cn_netlink_send from the cn_msg to the
	 * netlink skb. */
	/* receiver thread context, which is not in the writeout path (of this node),
	 * but may be in the writeout path of the _other_ node.
	 * GFP_NOIO to avoid potential "distributed deadlock". */
	cn_reply = kmalloc(
		sizeof(struct cn_msg)+
		sizeof(struct drbd_nl_cfg_reply)+
		sizeof(struct dump_ee_tag_len_struct)+
		e->size, GFP_NOIO);
	if (!cn_reply) {
		dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
			(unsigned long long)e->sector, e->size);
		return;
	}

	reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
	tl = reply->tag_list;

	tl = tl_add_str(tl, T_dump_ee_reason, reason);
	tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
	tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
	tl = tl_add_int(tl, T_ee_sector, &e->sector);
	tl = tl_add_int(tl, T_ee_block_id, &e->block_id);

	/* emit the data tag by hand, the payload is copied page by page */
	put_unaligned(T_ee_data, tl++);
	put_unaligned(e->size, tl++);

	len = e->size;
	page = e->pages;
	page_chain_for_each(page) {
		void *d = kmap_atomic(page, KM_USER0);
		unsigned l = min_t(unsigned, len, PAGE_SIZE);
		memcpy(tl, d, l);
		kunmap_atomic(d, KM_USER0);
		tl = (unsigned short *)((char *)tl + l);
		len -= l;
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_dump_ee;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	kfree(cn_reply);
}
void drbd_bcast_sync_progress(struct drbd_conf *mdev)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct sync_progress_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;
	unsigned long rs_left;
	unsigned int res;

	/* no local ref, no bitmap, no syncer progress, no broadcast. */
	if (!get_ldev(mdev))
		return;
	drbd_get_syncer_progress(mdev, &rs_left, &res);
	put_ldev(mdev);

	tl = tl_add_int(tl, T_sync_progress, &res);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_sync_progress;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}
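/*
 * The broadcast helpers above all follow the same pattern: build a tag
 * list in a drbd_nl_cfg_reply and multicast it on the CN_IDX_DRBD
 * connector group. A minimal userspace listener sketch (illustration
 * only; error handling elided):
 *
 *	#include <linux/netlink.h>
 *	#include <linux/connector.h>
 *	#include <linux/drbd.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	void listen_drbd_events(void)
 *	{
 *		struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
 *		int grp = CN_IDX_DRBD;
 *		char buf[4096];
 *		int sk = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
 *
 *		bind(sk, (struct sockaddr *)&sa, sizeof(sa));
 *		setsockopt(sk, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *			   &grp, sizeof(grp));
 *		for (;;) {
 *			struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
 *			struct cn_msg *msg;
 *			struct drbd_nl_cfg_reply *r;
 *
 *			if (recv(sk, buf, sizeof(buf), 0) <= 0)
 *				break;
 *			msg = NLMSG_DATA(nlh);
 *			r = (struct drbd_nl_cfg_reply *)msg->data;
 *			(dispatch on r->packet_type: P_get_state,
 *			 P_call_helper, P_dump_ee, P_sync_progress;
 *			 r->tag_list holds the payload, r->minor the device)
 *		}
 *		close(sk);
 *	}
 */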
int __init drbd_nl_init(void)
{
	static struct cb_id cn_id_drbd;
	int err, try = 10;

	cn_id_drbd.val = CN_VAL_DRBD;
	do {
		cn_id_drbd.idx = cn_idx;
		err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
		if (!err)
			break;
		/* the chosen connector index is taken; step to the next one */
		cn_idx = (cn_idx + CN_IDX_STEP);
	} while (try--);
	if (err) {
		printk(KERN_ERR "drbd: cn_drbd failed to register\n");
		return err;
	}
	return 0;
}
void drbd_nl_cleanup(void)
{
	static struct cb_id cn_id_drbd;

	cn_id_drbd.idx = cn_idx;
	cn_id_drbd.val = CN_VAL_DRBD;

	cn_del_callback(&cn_id_drbd);
}
void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
{
	char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	int rr;

	cn_reply->id = req->id;
	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
	cn_reply->flags = 0;

	reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
	reply->ret_code = ret_code;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	if (rr && rr != -ESRCH) /* -ESRCH: no userspace listener; not an error */
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
}