4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/module.h>
28 #include <asm/uaccess.h>
31 #include <linux/drbd.h>
33 #include <linux/file.h>
36 #include <linux/memcontrol.h>
37 #include <linux/mm_inline.h>
38 #include <linux/slab.h>
39 #include <linux/smp_lock.h>
40 #include <linux/pkt_sched.h>
41 #define __KERNEL_SYSCALLS__
42 #include <linux/unistd.h>
43 #include <linux/vmalloc.h>
44 #include <linux/random.h>
45 #include <linux/string.h>
46 #include <linux/scatterlist.h>
54 struct drbd_epoch *epoch;
63 static int drbd_do_handshake(struct drbd_conf *mdev);
64 static int drbd_do_auth(struct drbd_conf *mdev);
66 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
67 static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
69 static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
71 struct drbd_epoch *prev;
72 spin_lock(&mdev->epoch_lock);
73 prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
74 if (prev == epoch || prev == mdev->current_epoch)
76 spin_unlock(&mdev->epoch_lock);
80 #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
83 * some helper functions to deal with single linked page lists,
84 * page->private being our "next" pointer.
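/* A minimal illustrative sketch (not used by the driver): walking such a
 * chain, assuming page_private() carries the "next" pointer as described
 * above. The helper name is hypothetical, for illustration only. */
static unsigned int page_chain_count_sketch(struct page *page)
{
	unsigned int n = 0;

	while (page) {
		n++;
		page = (struct page *)page_private(page);
	}
	return n;
}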
87 /* If at least n pages are linked at head, get n pages off.
88 * Otherwise, don't modify head, and return NULL.
89 * Locking is the responsibility of the caller.
91 static struct page *page_chain_del(struct page **head, int n)
105 tmp = page_chain_next(page);
107 break; /* found sufficient pages */
109 /* insufficient pages, don't use any of them. */
114 /* add end of list marker for the returned list */
115 set_page_private(page, 0);
116 /* actual return value, and adjustment of head */
122 /* may be used outside of locks to find the tail of a (usually short)
123 * "private" page chain, before adding it back to a global chain head
124 * with page_chain_add() under a spinlock. */
125 static struct page *page_chain_tail(struct page *page, int *len)
129 while ((tmp = page_chain_next(page)))
136 static int page_chain_free(struct page *page)
140 page_chain_for_each_safe(page, tmp) {
147 static void page_chain_add(struct page **head,
148 struct page *chain_first, struct page *chain_last)
152 tmp = page_chain_tail(chain_first, NULL);
153 BUG_ON(tmp != chain_last);
156 /* add chain to head */
157 set_page_private(chain_last, (unsigned long)*head);
161 static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
163 struct page *page = NULL;
164 struct page *tmp = NULL;
167 /* Yes, testing drbd_pp_vacant outside the lock is racy.
168 * So what. It saves a spin_lock. */
169 if (drbd_pp_vacant >= number) {
170 spin_lock(&drbd_pp_lock);
171 page = page_chain_del(&drbd_pp_pool, number);
173 drbd_pp_vacant -= number;
174 spin_unlock(&drbd_pp_lock);
179 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
180 * "criss-cross" setup, that might cause write-out on some other DRBD,
181 * which in turn might block on the other node at this very place. */
182 for (i = 0; i < number; i++) {
183 tmp = alloc_page(GFP_TRY);
186 set_page_private(tmp, (unsigned long)page);
193 /* Not enough pages immediately available this time.
194 * No need to jump around here, drbd_pp_alloc will retry this
195 * function "soon". */
197 tmp = page_chain_tail(page, NULL);
198 spin_lock(&drbd_pp_lock);
199 page_chain_add(&drbd_pp_pool, page, tmp);
201 spin_unlock(&drbd_pp_lock);
206 /* kick lower level device, if we have more than (arbitrary number)
207 * reference counts on it, which typically are locally submitted io
208 * requests. don't use unacked_cnt, so we speed up proto A and B, too. */
209 static void maybe_kick_lo(struct drbd_conf *mdev)
211 if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
215 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
217 struct drbd_epoch_entry *e;
218 struct list_head *le, *tle;
220 /* The EEs are always appended to the end of the list. Since
221 they are sent in order over the wire, they have to finish
222 in order. As soon as we see the first one that has not finished, we can
223 stop examining the list...
225 list_for_each_safe(le, tle, &mdev->net_ee) {
226 e = list_entry(le, struct drbd_epoch_entry, w.list);
227 if (drbd_ee_has_active_page(e))
229 list_move(le, to_be_freed);
233 static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
235 LIST_HEAD(reclaimed);
236 struct drbd_epoch_entry *e, *t;
239 spin_lock_irq(&mdev->req_lock);
240 reclaim_net_ee(mdev, &reclaimed);
241 spin_unlock_irq(&mdev->req_lock);
243 list_for_each_entry_safe(e, t, &reclaimed, w.list)
244 drbd_free_ee(mdev, e);
248 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
249 * @mdev: DRBD device.
250 * @number: number of pages requested
251 * @retry: whether to retry, if not enough pages are available right now
253 * Tries to allocate number pages, first from our own page pool, then from
254 * the kernel, unless this allocation would exceed the max_buffers setting.
255 * Possibly retry until DRBD frees sufficient pages somewhere else.
257 * Returns a page chain linked via page->private.
259 static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
261 struct page *page = NULL;
264 /* Yes, we may run up to @number over max_buffers. If we
265 * follow it strictly, the admin will get it wrong anyways. */
266 if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
267 page = drbd_pp_first_pages_or_try_alloc(mdev, number);
269 while (page == NULL) {
270 prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
272 drbd_kick_lo_and_reclaim_net(mdev);
274 if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
275 page = drbd_pp_first_pages_or_try_alloc(mdev, number);
283 if (signal_pending(current)) {
284 dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
290 finish_wait(&drbd_pp_wait, &wait);
293 atomic_add(number, &mdev->pp_in_use);
297 /* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
298 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
299 * Either links the page chain back to the global pool,
300 * or returns all pages to the system. */
301 static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
304 if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
305 i = page_chain_free(page);
308 tmp = page_chain_tail(page, &i);
309 spin_lock(&drbd_pp_lock);
310 page_chain_add(&drbd_pp_pool, page, tmp);
312 spin_unlock(&drbd_pp_lock);
314 atomic_sub(i, &mdev->pp_in_use);
315 i = atomic_read(&mdev->pp_in_use);
317 dev_warn(DEV, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
318 wake_up(&drbd_pp_wait);
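/* A minimal illustrative sketch (not used by the driver) of how
 * drbd_pp_alloc() and drbd_pp_free() pair up, assuming retry == true so that
 * a NULL return only happens when we were interrupted by a signal. */
static int pp_roundtrip_sketch(struct drbd_conf *mdev, unsigned nr_pages)
{
	struct page *page = drbd_pp_alloc(mdev, nr_pages, true);

	if (!page)
		return -EINTR;
	/* ... receive data into the page chain ... */
	drbd_pp_free(mdev, page);
	return 0;
}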
322 You need to hold the req_lock:
323 _drbd_wait_ee_list_empty()
325 You must not have the req_lock:
331 drbd_process_done_ee()
333 drbd_wait_ee_list_empty()
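/* A minimal illustrative sketch (not used by the driver) of the two calling
 * conventions listed above: the "_" variant expects req_lock to be held,
 * while the plain variant takes req_lock itself. */
static void ee_wait_sketch(struct drbd_conf *mdev)
{
	/* caller already holds req_lock */
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	spin_unlock_irq(&mdev->req_lock);

	/* caller holds no locks; the wrapper locks internally */
	drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
}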
336 struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
339 unsigned int data_size,
340 gfp_t gfp_mask) __must_hold(local)
342 struct drbd_epoch_entry *e;
344 unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
346 if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
349 e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
351 if (!(gfp_mask & __GFP_NOWARN))
352 dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
356 page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
360 INIT_HLIST_NODE(&e->colision);
364 atomic_set(&e->pending_bios, 0);
374 mempool_free(e, drbd_ee_mempool);
378 void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
380 drbd_pp_free(mdev, e->pages);
381 D_ASSERT(atomic_read(&e->pending_bios) == 0);
382 D_ASSERT(hlist_unhashed(&e->colision));
383 mempool_free(e, drbd_ee_mempool);
386 int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
388 LIST_HEAD(work_list);
389 struct drbd_epoch_entry *e, *t;
392 spin_lock_irq(&mdev->req_lock);
393 list_splice_init(list, &work_list);
394 spin_unlock_irq(&mdev->req_lock);
396 list_for_each_entry_safe(e, t, &work_list, w.list) {
397 drbd_free_ee(mdev, e);
405 * This function is called from _asender only_
406 * but see also comments in _req_mod(,barrier_acked)
407 * and receive_Barrier.
409 * Move entries from net_ee to done_ee, if ready.
410 * Grab done_ee, call all callbacks, free the entries.
411 * The callbacks typically send out ACKs.
413 static int drbd_process_done_ee(struct drbd_conf *mdev)
415 LIST_HEAD(work_list);
416 LIST_HEAD(reclaimed);
417 struct drbd_epoch_entry *e, *t;
418 int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
420 spin_lock_irq(&mdev->req_lock);
421 reclaim_net_ee(mdev, &reclaimed);
422 list_splice_init(&mdev->done_ee, &work_list);
423 spin_unlock_irq(&mdev->req_lock);
425 list_for_each_entry_safe(e, t, &reclaimed, w.list)
426 drbd_free_ee(mdev, e);
428 /* possible callbacks here:
429 * e_end_block, and e_end_resync_block, e_send_discard_ack.
430 * all ignore the last argument.
432 list_for_each_entry_safe(e, t, &work_list, w.list) {
433 /* list_del not necessary, next/prev members not touched */
434 ok = e->w.cb(mdev, &e->w, !ok) && ok;
435 drbd_free_ee(mdev, e);
437 wake_up(&mdev->ee_wait);
442 void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
446 /* avoids spin_lock/unlock
447 * and calling prepare_to_wait in the fast path */
448 while (!list_empty(head)) {
449 prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
450 spin_unlock_irq(&mdev->req_lock);
453 finish_wait(&mdev->ee_wait, &wait);
454 spin_lock_irq(&mdev->req_lock);
458 void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
460 spin_lock_irq(&mdev->req_lock);
461 _drbd_wait_ee_list_empty(mdev, head);
462 spin_unlock_irq(&mdev->req_lock);
465 /* see also kernel_accept(), which is only present since 2.6.18.
466 * Also, we want to log exactly which part of it failed. */
467 static int drbd_accept(struct drbd_conf *mdev, const char **what,
468 struct socket *sock, struct socket **newsock)
470 struct sock *sk = sock->sk;
474 err = sock->ops->listen(sock, 5);
478 *what = "sock_create_lite";
479 err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
485 err = sock->ops->accept(sock, *newsock, 0);
487 sock_release(*newsock);
491 (*newsock)->ops = sock->ops;
497 static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
498 void *buf, size_t size, int flags)
505 struct msghdr msg = {
507 .msg_iov = (struct iovec *)&iov,
508 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
514 rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
520 static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
527 struct msghdr msg = {
529 .msg_iov = (struct iovec *)&iov,
530 .msg_flags = MSG_WAITALL | MSG_NOSIGNAL
538 rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
543 * ECONNRESET other side closed the connection
544 * ERESTARTSYS (on sock) we got a signal
548 if (rv == -ECONNRESET)
549 dev_info(DEV, "sock was reset by peer\n");
550 else if (rv != -ERESTARTSYS)
551 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
553 } else if (rv == 0) {
554 dev_info(DEV, "sock was shut down by peer\n");
557 /* signal came in, or peer/link went down,
558 * after we read a partial message
560 /* D_ASSERT(signal_pending(current)); */
568 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
574 * On individual connections, the socket buffer size must be set prior to the
575 * listen(2) or connect(2) calls in order to have it take effect.
576 * This is our wrapper to do so.
578 static void drbd_setbufsize(struct socket *sock, unsigned int snd,
581 /* open coded SO_SNDBUF, SO_RCVBUF */
583 sock->sk->sk_sndbuf = snd;
584 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
587 sock->sk->sk_rcvbuf = rcv;
588 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
592 static struct socket *drbd_try_connect(struct drbd_conf *mdev)
596 struct sockaddr_in6 src_in6;
598 int disconnect_on_error = 1;
600 if (!get_net_conf(mdev))
603 what = "sock_create_kern";
604 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
605 SOCK_STREAM, IPPROTO_TCP, &sock);
611 sock->sk->sk_rcvtimeo =
612 sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
613 drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
614 mdev->net_conf->rcvbuf_size);
616 /* explicitly bind to the configured IP as source IP
617 * for the outgoing connections.
618 * This is needed for multihomed hosts and to be
619 * able to use lo: interfaces for drbd.
620 * Make sure to use 0 as port number, so linux selects
621 * a free one dynamically.
623 memcpy(&src_in6, mdev->net_conf->my_addr,
624 min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
625 if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
626 src_in6.sin6_port = 0;
628 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
630 what = "bind before connect";
631 err = sock->ops->bind(sock,
632 (struct sockaddr *) &src_in6,
633 mdev->net_conf->my_addr_len);
637 /* connect may fail, peer not yet available.
638 * stay C_WF_CONNECTION, don't go Disconnecting! */
639 disconnect_on_error = 0;
641 err = sock->ops->connect(sock,
642 (struct sockaddr *)mdev->net_conf->peer_addr,
643 mdev->net_conf->peer_addr_len, 0);
652 /* timeout, busy, signal pending */
653 case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
654 case EINTR: case ERESTARTSYS:
655 /* peer not (yet) available, network problem */
656 case ECONNREFUSED: case ENETUNREACH:
657 case EHOSTDOWN: case EHOSTUNREACH:
658 disconnect_on_error = 0;
661 dev_err(DEV, "%s failed, err = %d\n", what, err);
663 if (disconnect_on_error)
664 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
670 static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
673 struct socket *s_estab = NULL, *s_listen;
676 if (!get_net_conf(mdev))
679 what = "sock_create_kern";
680 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
681 SOCK_STREAM, IPPROTO_TCP, &s_listen);
687 timeo = mdev->net_conf->try_connect_int * HZ;
688 timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
690 s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
691 s_listen->sk->sk_rcvtimeo = timeo;
692 s_listen->sk->sk_sndtimeo = timeo;
693 drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
694 mdev->net_conf->rcvbuf_size);
696 what = "bind before listen";
697 err = s_listen->ops->bind(s_listen,
698 (struct sockaddr *) mdev->net_conf->my_addr,
699 mdev->net_conf->my_addr_len);
703 err = drbd_accept(mdev, &what, s_listen, &s_estab);
707 sock_release(s_listen);
709 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
710 dev_err(DEV, "%s failed, err = %d\n", what, err);
711 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
719 static int drbd_send_fp(struct drbd_conf *mdev,
720 struct socket *sock, enum drbd_packets cmd)
722 struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;
724 return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
727 static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
729 struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;
732 rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
734 if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
735 return be16_to_cpu(h->command);
741 * drbd_socket_okay() - Free the socket if its connection is not okay
742 * @mdev: DRBD device.
743 * @sock: pointer to the pointer to the socket.
745 static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
753 rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
755 if (rr > 0 || rr == -EAGAIN) {
766 * 1 yes, we have a valid connection
767 * 0 oops, did not work out, please try again
768 * -1 peer talks different language,
769 * no point in trying again, please go standalone.
770 * -2 We do not have a network config...
772 static int drbd_connect(struct drbd_conf *mdev)
774 struct socket *s, *sock, *msock;
777 D_ASSERT(!mdev->data.socket);
779 if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
782 clear_bit(DISCARD_CONCURRENT, &mdev->flags);
789 /* 3 tries, this should take less than a second! */
790 s = drbd_try_connect(mdev);
793 /* give the other side time to call bind() & listen() */
794 __set_current_state(TASK_INTERRUPTIBLE);
795 schedule_timeout(HZ / 10);
800 drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
804 drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
808 dev_err(DEV, "Logic error in drbd_connect()\n");
809 goto out_release_sockets;
814 __set_current_state(TASK_INTERRUPTIBLE);
815 schedule_timeout(HZ / 10);
816 ok = drbd_socket_okay(mdev, &sock);
817 ok = drbd_socket_okay(mdev, &msock) && ok;
823 s = drbd_wait_for_connect(mdev);
825 try = drbd_recv_fp(mdev, s);
826 drbd_socket_okay(mdev, &sock);
827 drbd_socket_okay(mdev, &msock);
831 dev_warn(DEV, "initial packet S crossed\n");
838 dev_warn(DEV, "initial packet M crossed\n");
842 set_bit(DISCARD_CONCURRENT, &mdev->flags);
845 dev_warn(DEV, "Error receiving initial packet\n");
852 if (mdev->state.conn <= C_DISCONNECTING)
853 goto out_release_sockets;
854 if (signal_pending(current)) {
855 flush_signals(current);
857 if (get_t_state(&mdev->receiver) == Exiting)
858 goto out_release_sockets;
862 ok = drbd_socket_okay(mdev, &sock);
863 ok = drbd_socket_okay(mdev, &msock) && ok;
869 msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
870 sock->sk->sk_reuse = 1; /* SO_REUSEADDR */
872 sock->sk->sk_allocation = GFP_NOIO;
873 msock->sk->sk_allocation = GFP_NOIO;
875 sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
876 msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
879 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
880 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
881 * first set it to the P_HAND_SHAKE timeout,
882 * which we set to 4x the configured ping_timeout. */
883 sock->sk->sk_sndtimeo =
884 sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;
886 msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
887 msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
889 /* we don't want delays.
890 * we use TCP_CORK where appropriate, though */
891 drbd_tcp_nodelay(sock);
892 drbd_tcp_nodelay(msock);
894 mdev->data.socket = sock;
895 mdev->meta.socket = msock;
896 mdev->last_received = jiffies;
898 D_ASSERT(mdev->asender.task == NULL);
900 h = drbd_do_handshake(mdev);
904 if (mdev->cram_hmac_tfm) {
905 /* drbd_request_state(mdev, NS(conn, WFAuth)); */
906 switch (drbd_do_auth(mdev)) {
908 dev_err(DEV, "Authentication of peer failed\n");
911 dev_err(DEV, "Authentication of peer failed, trying again.\n");
916 if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
919 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
920 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
922 atomic_set(&mdev->packet_seq, 0);
925 drbd_thread_start(&mdev->asender);
927 if (!drbd_send_protocol(mdev))
929 drbd_send_sync_param(mdev, &mdev->sync_conf);
930 drbd_send_sizes(mdev, 0, 0);
931 drbd_send_uuids(mdev);
932 drbd_send_state(mdev);
933 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
934 clear_bit(RESIZE_PENDING, &mdev->flags);
946 static int drbd_recv_header(struct drbd_conf *mdev, struct p_header *h)
950 r = drbd_recv(mdev, h, sizeof(*h));
952 if (unlikely(r != sizeof(*h))) {
953 dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
956 h->command = be16_to_cpu(h->command);
957 h->length = be16_to_cpu(h->length);
958 if (unlikely(h->magic != BE_DRBD_MAGIC)) {
959 dev_err(DEV, "magic?? on data m: 0x%lx c: %d l: %d\n",
960 (long)be32_to_cpu(h->magic),
961 h->command, h->length);
964 mdev->last_received = jiffies;
969 static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
973 if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
974 rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
975 NULL, BLKDEV_IFL_WAIT);
977 dev_err(DEV, "local disk flush failed with status %d\n", rv);
978 /* would rather check on EOPNOTSUPP, but that is not reliable.
979 * don't try again for ANY return value != 0
980 * if (rv == -EOPNOTSUPP) */
981 drbd_bump_write_ordering(mdev, WO_drain_io);
986 return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
989 static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
991 struct flush_work *fw = (struct flush_work *)w;
992 struct drbd_epoch *epoch = fw->epoch;
996 if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
997 drbd_flush_after_epoch(mdev, epoch);
999 drbd_may_finish_epoch(mdev, epoch, EV_PUT |
1000 (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
1006 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishing it.
1007 * @mdev: DRBD device.
1008 * @epoch: Epoch object.
1011 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1012 struct drbd_epoch *epoch,
1013 enum epoch_event ev)
1015 int finish, epoch_size;
1016 struct drbd_epoch *next_epoch;
1017 int schedule_flush = 0;
1018 enum finish_epoch rv = FE_STILL_LIVE;
1020 spin_lock(&mdev->epoch_lock);
1025 epoch_size = atomic_read(&epoch->epoch_size);
1027 switch (ev & ~EV_CLEANUP) {
1029 atomic_dec(&epoch->active);
1031 case EV_GOT_BARRIER_NR:
1032 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
1034 /* Special case: If we just switched from WO_bio_barrier to
1035 WO_bdev_flush we should not finish the current epoch */
1036 if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
1037 mdev->write_ordering != WO_bio_barrier &&
1038 epoch == mdev->current_epoch)
1039 clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
1041 case EV_BARRIER_DONE:
1042 set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
1044 case EV_BECAME_LAST:
1049 if (epoch_size != 0 &&
1050 atomic_read(&epoch->active) == 0 &&
1051 test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
1052 epoch->list.prev == &mdev->current_epoch->list &&
1053 !test_bit(DE_IS_FINISHING, &epoch->flags)) {
1054 /* Nearly all conditions are met to finish that epoch... */
1055 if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
1056 mdev->write_ordering == WO_none ||
1057 (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
1060 set_bit(DE_IS_FINISHING, &epoch->flags);
1061 } else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
1062 mdev->write_ordering == WO_bio_barrier) {
1063 atomic_inc(&epoch->active);
1068 if (!(ev & EV_CLEANUP)) {
1069 spin_unlock(&mdev->epoch_lock);
1070 drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
1071 spin_lock(&mdev->epoch_lock);
1075 if (mdev->current_epoch != epoch) {
1076 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1077 list_del(&epoch->list);
1078 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
1082 if (rv == FE_STILL_LIVE)
1086 atomic_set(&epoch->epoch_size, 0);
1087 /* atomic_set(&epoch->active, 0); is already zero */
1088 if (rv == FE_STILL_LIVE)
1099 spin_unlock(&mdev->epoch_lock);
1101 if (schedule_flush) {
1102 struct flush_work *fw;
1103 fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
1107 drbd_queue_work(&mdev->data.work, &fw->w);
1109 dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
1110 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1111 /* That is not a recursion, only one level */
1112 drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
1113 drbd_may_finish_epoch(mdev, epoch, EV_PUT);
1121 * drbd_bump_write_ordering() - Fall back to another write ordering method
1122 * @mdev: DRBD device.
1123 * @wo: Write ordering method to try.
1125 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
1127 enum write_ordering_e pwo;
1128 static char *write_ordering_str[] = {
1130 [WO_drain_io] = "drain",
1131 [WO_bdev_flush] = "flush",
1132 [WO_bio_barrier] = "barrier",
1135 pwo = mdev->write_ordering;
1137 if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
1139 if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
1141 if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
1143 mdev->write_ordering = wo;
1144 if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
1145 dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
1150 * @mdev: DRBD device.
1152 * @rw: flag field, see bio->bi_rw
1154 /* TODO allocate from our own bio_set. */
1155 int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
1156 const unsigned rw, const int fault_type)
1158 struct bio *bios = NULL;
1160 struct page *page = e->pages;
1161 sector_t sector = e->sector;
1162 unsigned ds = e->size;
1163 unsigned n_bios = 0;
1164 unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
1166 /* In most cases, we will only need one bio. But in case the lower
1167 * level restrictions happen to be different at this offset on this
1168 * side than those of the sending peer, we may need to submit the
1169 * request in more than one bio. */
1171 bio = bio_alloc(GFP_NOIO, nr_pages);
1173 dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
1176 /* > e->sector, unless this is the first bio */
1177 bio->bi_sector = sector;
1178 bio->bi_bdev = mdev->ldev->backing_bdev;
1179 /* we special case some flags in the multi-bio case, see below
1180 * (REQ_UNPLUG, REQ_HARDBARRIER) */
1182 bio->bi_private = e;
1183 bio->bi_end_io = drbd_endio_sec;
1185 bio->bi_next = bios;
1189 page_chain_for_each(page) {
1190 unsigned len = min_t(unsigned, ds, PAGE_SIZE);
1191 if (!bio_add_page(bio, page, len, 0)) {
1192 /* a single page must always be possible! */
1193 BUG_ON(bio->bi_vcnt == 0);
1200 D_ASSERT(page == NULL);
1203 atomic_set(&e->pending_bios, n_bios);
1206 bios = bios->bi_next;
1207 bio->bi_next = NULL;
1209 /* strip off REQ_UNPLUG unless it is the last bio */
1211 bio->bi_rw &= ~REQ_UNPLUG;
1213 drbd_generic_make_request(mdev, fault_type, bio);
1215 /* strip off REQ_HARDBARRIER,
1216 * unless it is the first or last bio */
1217 if (bios && bios->bi_next)
1218 bios->bi_rw &= ~REQ_HARDBARRIER;
1220 maybe_kick_lo(mdev);
1226 bios = bios->bi_next;
1233 * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
1234 * @mdev: DRBD device.
1236 * @cancel: The connection will be closed anyways (unused in this callback)
1238 int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
1240 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1241 /* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
1242 (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
1243 so that we can finish that epoch in drbd_may_finish_epoch().
1244 That is necessary if we already have a long chain of Epochs, before
1245 we realize that REQ_HARDBARRIER is actually not supported */
1247 /* As long as the -ENOTSUPP on the barrier is reported immediately
1248 that will never trigger. If it is reported late, we will just
1249 print that warning and continue correctly for all future requests
1250 with WO_bdev_flush */
1251 if (previous_epoch(mdev, e->epoch))
1252 dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
1254 /* we still have a local reference,
1255 * get_ldev was done in receive_Data. */
1257 e->w.cb = e_end_block;
1258 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
1259 /* drbd_submit_ee fails for one reason only:
1260 * it was not able to allocate sufficient bios.
1261 * requeue, try again later. */
1262 e->w.cb = w_e_reissue;
1263 drbd_queue_work(&mdev->data.work, &e->w);
1268 static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
1270 int rv, issue_flush;
1271 struct p_barrier *p = (struct p_barrier *)h;
1272 struct drbd_epoch *epoch;
1274 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
1276 rv = drbd_recv(mdev, h->payload, h->length);
1277 ERR_IF(rv != h->length) return FALSE;
1281 if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
1284 mdev->current_epoch->barrier_nr = p->barrier;
1285 rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
1287 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1288 * the activity log, which means it would not be resynced in case the
1289 * R_PRIMARY crashes now.
1290 * Therefore we must send the barrier_ack after the barrier request was
1292 switch (mdev->write_ordering) {
1293 case WO_bio_barrier:
1295 if (rv == FE_RECYCLED)
1301 if (rv == FE_STILL_LIVE) {
1302 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1303 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1304 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1306 if (rv == FE_RECYCLED)
1309 /* The asender will send all the ACKs and barrier ACKs out, since
1310 all EEs moved from the active_ee to the done_ee. We need to
1311 provide a new epoch object for the EEs that come in soon */
1315 /* receiver context, in the writeout path of the other node.
1316 * avoid potential distributed deadlock */
1317 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1319 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1320 issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1321 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1323 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1324 if (rv == FE_RECYCLED)
1328 drbd_wait_ee_list_empty(mdev, &mdev->done_ee);
1334 atomic_set(&epoch->epoch_size, 0);
1335 atomic_set(&epoch->active, 0);
1337 spin_lock(&mdev->epoch_lock);
1338 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1339 list_add(&epoch->list, &mdev->current_epoch->list);
1340 mdev->current_epoch = epoch;
1343 /* The current_epoch got recycled while we allocated this one... */
1346 spin_unlock(&mdev->epoch_lock);
1351 /* used from receive_RSDataReply (recv_resync_read)
1352 * and from receive_Data */
1353 static struct drbd_epoch_entry *
1354 read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
1356 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1357 struct drbd_epoch_entry *e;
1360 void *dig_in = mdev->int_dig_in;
1361 void *dig_vv = mdev->int_dig_vv;
1362 unsigned long *data;
1364 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1365 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1368 rr = drbd_recv(mdev, dig_in, dgs);
1370 dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
1378 ERR_IF(data_size & 0x1ff) return NULL;
1379 ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;
1381 /* even though we trust our peer,
1382 * we sometimes have to double check. */
1383 if (sector + (data_size>>9) > capacity) {
1384 dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
1385 (unsigned long long)capacity,
1386 (unsigned long long)sector, data_size);
1390 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1391 * "criss-cross" setup, that might cause write-out on some other DRBD,
1392 * which in turn might block on the other node at this very place. */
1393 e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
1399 page_chain_for_each(page) {
1400 unsigned len = min_t(int, ds, PAGE_SIZE);
1402 rr = drbd_recv(mdev, data, len);
1403 if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
1404 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1405 data[0] = data[0] ^ (unsigned long)-1;
1409 drbd_free_ee(mdev, e);
1410 dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1418 drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
1419 if (memcmp(dig_in, dig_vv, dgs)) {
1420 dev_err(DEV, "Digest integrity check FAILED.\n");
1421 drbd_bcast_ee(mdev, "digest failed",
1422 dgs, dig_in, dig_vv, e);
1423 drbd_free_ee(mdev, e);
1427 mdev->recv_cnt += data_size>>9;
1431 /* drbd_drain_block() just takes a data block
1432 * out of the socket input buffer, and discards it.
1434 static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1443 page = drbd_pp_alloc(mdev, 1, 1);
1447 rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
1448 if (rr != min_t(int, data_size, PAGE_SIZE)) {
1450 dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1451 rr, min_t(int, data_size, PAGE_SIZE));
1457 drbd_pp_free(mdev, page);
1461 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1462 sector_t sector, int data_size)
1464 struct bio_vec *bvec;
1466 int dgs, rr, i, expect;
1467 void *dig_in = mdev->int_dig_in;
1468 void *dig_vv = mdev->int_dig_vv;
1470 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1471 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1474 rr = drbd_recv(mdev, dig_in, dgs);
1476 dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
1484 /* optimistically update recv_cnt. if receiving fails below,
1485 * we disconnect anyways, and counters will be reset. */
1486 mdev->recv_cnt += data_size>>9;
1488 bio = req->master_bio;
1489 D_ASSERT(sector == bio->bi_sector);
1491 bio_for_each_segment(bvec, bio, i) {
1492 expect = min_t(int, data_size, bvec->bv_len);
1493 rr = drbd_recv(mdev,
1494 kmap(bvec->bv_page)+bvec->bv_offset,
1496 kunmap(bvec->bv_page);
1498 dev_warn(DEV, "short read receiving data reply: "
1499 "read %d expected %d\n",
1507 drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
1508 if (memcmp(dig_in, dig_vv, dgs)) {
1509 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1514 D_ASSERT(data_size == 0);
1518 /* e_end_resync_block() is called via
1519 * drbd_process_done_ee() by asender only */
1520 static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1522 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1523 sector_t sector = e->sector;
1526 D_ASSERT(hlist_unhashed(&e->colision));
1528 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1529 drbd_set_in_sync(mdev, sector, e->size);
1530 ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
1532 /* Record failure to sync */
1533 drbd_rs_failed_io(mdev, sector, e->size);
1535 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1542 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1544 struct drbd_epoch_entry *e;
1546 e = read_in_block(mdev, ID_SYNCER, sector, data_size);
1550 dec_rs_pending(mdev);
1553 /* corresponding dec_unacked() in e_end_resync_block()
1554 * respective _drbd_clear_done_ee */
1556 e->w.cb = e_end_resync_block;
1558 spin_lock_irq(&mdev->req_lock);
1559 list_add(&e->w.list, &mdev->sync_ee);
1560 spin_unlock_irq(&mdev->req_lock);
1562 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
1565 drbd_free_ee(mdev, e);
1571 static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h)
1573 struct drbd_request *req;
1575 unsigned int header_size, data_size;
1577 struct p_data *p = (struct p_data *)h;
1579 header_size = sizeof(*p) - sizeof(*h);
1580 data_size = h->length - header_size;
1582 ERR_IF(data_size == 0) return FALSE;
1584 if (drbd_recv(mdev, h->payload, header_size) != header_size)
1587 sector = be64_to_cpu(p->sector);
1589 spin_lock_irq(&mdev->req_lock);
1590 req = _ar_id_to_req(mdev, p->block_id, sector);
1591 spin_unlock_irq(&mdev->req_lock);
1592 if (unlikely(!req)) {
1593 dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
1597 /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
1598 * special casing it there for the various failure cases.
1599 * still no race with drbd_fail_pending_reads */
1600 ok = recv_dless_read(mdev, req, sector, data_size);
1603 req_mod(req, data_received);
1604 /* else: nothing. handled from drbd_disconnect...
1605 * I don't think we may complete this just yet
1606 * in case we are "on-disconnect: freeze" */
1611 static int receive_RSDataReply(struct drbd_conf *mdev, struct p_header *h)
1614 unsigned int header_size, data_size;
1616 struct p_data *p = (struct p_data *)h;
1618 header_size = sizeof(*p) - sizeof(*h);
1619 data_size = h->length - header_size;
1621 ERR_IF(data_size == 0) return FALSE;
1623 if (drbd_recv(mdev, h->payload, header_size) != header_size)
1626 sector = be64_to_cpu(p->sector);
1627 D_ASSERT(p->block_id == ID_SYNCER);
1629 if (get_ldev(mdev)) {
1630 /* data is submitted to disk within recv_resync_read.
1631 * corresponding put_ldev done below on error,
1632 * or in drbd_endio_write_sec. */
1633 ok = recv_resync_read(mdev, sector, data_size);
1635 if (__ratelimit(&drbd_ratelimit_state))
1636 dev_err(DEV, "Can not write resync data to local disk.\n");
1638 ok = drbd_drain_block(mdev, data_size);
1640 drbd_send_ack_dp(mdev, P_NEG_ACK, p);
1646 /* e_end_block() is called via drbd_process_done_ee().
1647 * this means this function only runs in the asender thread
1649 static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1651 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1652 sector_t sector = e->sector;
1653 struct drbd_epoch *epoch;
1656 if (e->flags & EE_IS_BARRIER) {
1657 epoch = previous_epoch(mdev, e->epoch);
1659 drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
1662 if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
1663 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1664 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1665 mdev->state.conn <= C_PAUSED_SYNC_T &&
1666 e->flags & EE_MAY_SET_IN_SYNC) ?
1667 P_RS_WRITE_ACK : P_WRITE_ACK;
1668 ok &= drbd_send_ack(mdev, pcmd, e);
1669 if (pcmd == P_RS_WRITE_ACK)
1670 drbd_set_in_sync(mdev, sector, e->size);
1672 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1673 /* we expect it to be marked out of sync anyways...
1674 * maybe assert this? */
1678 /* we delete from the conflict detection hash _after_ we sent out the
1679 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
1680 if (mdev->net_conf->two_primaries) {
1681 spin_lock_irq(&mdev->req_lock);
1682 D_ASSERT(!hlist_unhashed(&e->colision));
1683 hlist_del_init(&e->colision);
1684 spin_unlock_irq(&mdev->req_lock);
1686 D_ASSERT(hlist_unhashed(&e->colision));
1689 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1694 static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1696 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1699 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1700 ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1702 spin_lock_irq(&mdev->req_lock);
1703 D_ASSERT(!hlist_unhashed(&e->colision));
1704 hlist_del_init(&e->colision);
1705 spin_unlock_irq(&mdev->req_lock);
1712 /* Called from receive_Data.
1713 * Synchronize packets on sock with packets on msock.
1715 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1716 * packet traveling on msock, they are still processed in the order they have been sent.
1719 * Note: we don't care for Ack packets overtaking P_DATA packets.
1721 * In case packet_seq is larger than mdev->peer_seq number, there are
1722 * outstanding packets on the msock. We wait for them to arrive.
1723 * In case we are the logically next packet, we update mdev->peer_seq
1724 * ourselves. Correctly handles 32bit wrap around.
1726 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1727 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1728 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1729 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1731 * returns 0 if we may process the packet,
1732 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1733 static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1739 spin_lock(&mdev->peer_seq_lock);
1741 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1742 if (seq_le(packet_seq, mdev->peer_seq+1))
1744 if (signal_pending(current)) {
1748 p_seq = mdev->peer_seq;
1749 spin_unlock(&mdev->peer_seq_lock);
1750 timeout = schedule_timeout(30*HZ);
1751 spin_lock(&mdev->peer_seq_lock);
1752 if (timeout == 0 && p_seq == mdev->peer_seq) {
1754 dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1758 finish_wait(&mdev->seq_wait, &wait);
1759 if (mdev->peer_seq+1 == packet_seq)
1761 spin_unlock(&mdev->peer_seq_lock);
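/* A minimal illustrative sketch (not used by the driver): a wrap-safe
 * "less than or equal" for 32bit sequence numbers, in the spirit of the
 * seq_le() used above. The signed difference tolerates the 32bit wrap
 * around discussed in the comment before drbd_wait_peer_seq(). */
static inline int seq_le_sketch(u32 a, u32 b)
{
	return (s32)(a - b) <= 0;
}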
1765 /* mirrored write */
1766 static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
1769 struct drbd_epoch_entry *e;
1770 struct p_data *p = (struct p_data *)h;
1771 int header_size, data_size;
1775 header_size = sizeof(*p) - sizeof(*h);
1776 data_size = h->length - header_size;
1778 ERR_IF(data_size == 0) return FALSE;
1780 if (drbd_recv(mdev, h->payload, header_size) != header_size)
1783 if (!get_ldev(mdev)) {
1784 if (__ratelimit(&drbd_ratelimit_state))
1785 dev_err(DEV, "Can not write mirrored data block "
1786 "to local disk.\n");
1787 spin_lock(&mdev->peer_seq_lock);
1788 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1790 spin_unlock(&mdev->peer_seq_lock);
1792 drbd_send_ack_dp(mdev, P_NEG_ACK, p);
1793 atomic_inc(&mdev->current_epoch->epoch_size);
1794 return drbd_drain_block(mdev, data_size);
1797 /* get_ldev(mdev) successful.
1798 * Corresponding put_ldev done either below (on various errors),
1799 * or in drbd_endio_write_sec, if we successfully submit the data at
1800 * the end of this function. */
1802 sector = be64_to_cpu(p->sector);
1803 e = read_in_block(mdev, p->block_id, sector, data_size);
1809 e->w.cb = e_end_block;
1811 spin_lock(&mdev->epoch_lock);
1812 e->epoch = mdev->current_epoch;
1813 atomic_inc(&e->epoch->epoch_size);
1814 atomic_inc(&e->epoch->active);
1816 if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
1817 struct drbd_epoch *epoch;
1818 /* Issue a barrier if we start a new epoch, and the previous epoch
1819 was not an epoch containing a single request which already was
1821 epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
1822 if (epoch == e->epoch) {
1823 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1824 rw |= REQ_HARDBARRIER;
1825 e->flags |= EE_IS_BARRIER;
1827 if (atomic_read(&epoch->epoch_size) > 1 ||
1828 !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
1829 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1830 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1831 rw |= REQ_HARDBARRIER;
1832 e->flags |= EE_IS_BARRIER;
1836 spin_unlock(&mdev->epoch_lock);
1838 dp_flags = be32_to_cpu(p->dp_flags);
1839 if (dp_flags & DP_HARDBARRIER) {
1840 dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
1841 /* rw |= REQ_HARDBARRIER; */
1843 if (dp_flags & DP_RW_SYNC)
1844 rw |= REQ_SYNC | REQ_UNPLUG;
1845 if (dp_flags & DP_MAY_SET_IN_SYNC)
1846 e->flags |= EE_MAY_SET_IN_SYNC;
1848 /* I'm the receiver, I do hold a net_cnt reference. */
1849 if (!mdev->net_conf->two_primaries) {
1850 spin_lock_irq(&mdev->req_lock);
1852 /* don't get the req_lock yet,
1853 * we may sleep in drbd_wait_peer_seq */
1854 const int size = e->size;
1855 const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1857 struct drbd_request *i;
1858 struct hlist_node *n;
1859 struct hlist_head *slot;
1862 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1863 BUG_ON(mdev->ee_hash == NULL);
1864 BUG_ON(mdev->tl_hash == NULL);
1866 /* conflict detection and handling:
1867 * 1. wait on the sequence number,
1868 * in case this data packet overtook ACK packets.
1869 * 2. check our hash tables for conflicting requests.
1870 * we only need to walk the tl_hash, since an ee can not
1871 * have a conflict with another ee: on the submitting
1872 * node, the corresponding req had already been conflicting,
1873 * and a conflicting req is never sent.
1875 * Note: for two_primaries, we are protocol C,
1876 * so there cannot be any request that is DONE
1877 * but still on the transfer log.
1879 * unconditionally add to the ee_hash.
1881 * if no conflicting request is found:
1884 * if any conflicting request is found
1885 * that has not yet been acked,
1886 * AND I have the "discard concurrent writes" flag:
1887 * queue (via done_ee) the P_DISCARD_ACK; OUT.
1889 * if any conflicting request is found:
1890 * block the receiver, waiting on misc_wait
1891 * until no more conflicting requests are there,
1892 * or we get interrupted (disconnect).
1894 * we do not just write after local io completion of those
1895 * requests, but only after req is done completely, i.e.
1896 * we wait for the P_DISCARD_ACK to arrive!
1898 * then proceed normally, i.e. submit.
1900 if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1901 goto out_interrupted;
1903 spin_lock_irq(&mdev->req_lock);
1905 hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));
1907 #define OVERLAPS overlaps(i->sector, i->size, sector, size)
1908 slot = tl_hash_slot(mdev, sector);
1911 int have_unacked = 0;
1912 int have_conflict = 0;
1913 prepare_to_wait(&mdev->misc_wait, &wait,
1914 TASK_INTERRUPTIBLE);
1915 hlist_for_each_entry(i, n, slot, colision) {
1917 /* only ALERT on first iteration,
1918 * we may be woken up early... */
1920 dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1921 " new: %llus +%u; pending: %llus +%u\n",
1922 current->comm, current->pid,
1923 (unsigned long long)sector, size,
1924 (unsigned long long)i->sector, i->size);
1925 if (i->rq_state & RQ_NET_PENDING)
1934 /* Discard Ack only for the _first_ iteration */
1935 if (first && discard && have_unacked) {
1936 dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1937 (unsigned long long)sector);
1939 e->w.cb = e_send_discard_ack;
1940 list_add_tail(&e->w.list, &mdev->done_ee);
1942 spin_unlock_irq(&mdev->req_lock);
1944 /* we could probably send that P_DISCARD_ACK ourselves,
1945 * but I don't like the receiver using the msock */
1949 finish_wait(&mdev->misc_wait, &wait);
1953 if (signal_pending(current)) {
1954 hlist_del_init(&e->colision);
1956 spin_unlock_irq(&mdev->req_lock);
1958 finish_wait(&mdev->misc_wait, &wait);
1959 goto out_interrupted;
1962 spin_unlock_irq(&mdev->req_lock);
1965 dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1966 "sec=%llus\n", (unsigned long long)sector);
1967 } else if (discard) {
1968 /* we had none on the first iteration.
1969 * there must be none now. */
1970 D_ASSERT(have_unacked == 0);
1973 spin_lock_irq(&mdev->req_lock);
1975 finish_wait(&mdev->misc_wait, &wait);
1978 list_add(&e->w.list, &mdev->active_ee);
1979 spin_unlock_irq(&mdev->req_lock);
1981 switch (mdev->net_conf->wire_protocol) {
1984 /* corresponding dec_unacked() in e_end_block()
1985 * respective _drbd_clear_done_ee */
1988 /* I really don't like it that the receiver thread
1989 * sends on the msock, but anyways */
1990 drbd_send_ack(mdev, P_RECV_ACK, e);
1997 if (mdev->state.pdsk == D_DISKLESS) {
1998 /* In case we have the only disk of the cluster, */
1999 drbd_set_out_of_sync(mdev, e->sector, e->size);
2000 e->flags |= EE_CALL_AL_COMPLETE_IO;
2001 drbd_al_begin_io(mdev, e->sector);
2004 if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
2008 /* yes, the epoch_size now is imbalanced.
2009 * but we drop the connection anyways, so we don't have a chance to
2010 * receive a barrier... atomic_inc(&mdev->epoch_size); */
2012 drbd_free_ee(mdev, e);
2016 static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
2019 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
2020 struct drbd_epoch_entry *e;
2021 struct digest_info *di = NULL;
2022 int size, digest_size;
2023 unsigned int fault_type;
2024 struct p_block_req *p =
2025 (struct p_block_req *)h;
2026 const int brps = sizeof(*p)-sizeof(*h);
2028 if (drbd_recv(mdev, h->payload, brps) != brps)
2031 sector = be64_to_cpu(p->sector);
2032 size = be32_to_cpu(p->blksize);
2034 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
2035 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2036 (unsigned long long)sector, size);
2039 if (sector + (size>>9) > capacity) {
2040 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2041 (unsigned long long)sector, size);
2045 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
2046 if (__ratelimit(&drbd_ratelimit_state))
2047 dev_err(DEV, "Can not satisfy peer's read request, "
2048 "no local data.\n");
2049 drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY :
2050 P_NEG_RS_DREPLY , p);
2051 return drbd_drain_block(mdev, h->length - brps);
2054 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2055 * "criss-cross" setup, that might cause write-out on some other DRBD,
2056 * which in turn might block on the other node at this very place. */
2057 e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
2063 switch (h->command) {
2064 case P_DATA_REQUEST:
2065 e->w.cb = w_e_end_data_req;
2066 fault_type = DRBD_FAULT_DT_RD;
2068 case P_RS_DATA_REQUEST:
2069 e->w.cb = w_e_end_rsdata_req;
2070 fault_type = DRBD_FAULT_RS_RD;
2071 /* Eventually this should become asynchronous. Currently it
2072 * blocks the whole receiver just to delay the reading of a
2073 * resync data block.
2074 * the drbd_work_queue mechanism is made for this...
2076 if (!drbd_rs_begin_io(mdev, sector)) {
2077 /* we have been interrupted,
2078 * probably connection lost! */
2079 D_ASSERT(signal_pending(current));
2085 case P_CSUM_RS_REQUEST:
2086 fault_type = DRBD_FAULT_RS_RD;
2087 digest_size = h->length - brps ;
2088 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2092 di->digest_size = digest_size;
2093 di->digest = (((char *)di)+sizeof(struct digest_info));
2095 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2098 e->block_id = (u64)(unsigned long)di;
2099 if (h->command == P_CSUM_RS_REQUEST) {
2100 D_ASSERT(mdev->agreed_pro_version >= 89);
2101 e->w.cb = w_e_end_csum_rs_req;
2102 } else if (h->command == P_OV_REPLY) {
2103 e->w.cb = w_e_end_ov_reply;
2104 dec_rs_pending(mdev);
2108 if (!drbd_rs_begin_io(mdev, sector)) {
2109 /* we have been interrupted, probably connection lost! */
2110 D_ASSERT(signal_pending(current));
2116 if (mdev->state.conn >= C_CONNECTED &&
2117 mdev->state.conn != C_VERIFY_T)
2118 dev_warn(DEV, "ASSERT FAILED: got P_OV_REQUEST while being %s\n",
2119 drbd_conn_str(mdev->state.conn));
2120 if (mdev->ov_start_sector == ~(sector_t)0 &&
2121 mdev->agreed_pro_version >= 90) {
2122 mdev->ov_start_sector = sector;
2123 mdev->ov_position = sector;
2124 mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector);
2125 dev_info(DEV, "Online Verify start sector: %llu\n",
2126 (unsigned long long)sector);
2128 e->w.cb = w_e_end_ov_req;
2129 fault_type = DRBD_FAULT_RS_RD;
2130 /* Eventually this should become asynchronous. Currently it
2131 * blocks the whole receiver just to delay the reading of a
2132 * resync data block.
2133 * the drbd_work_queue mechanism is made for this...
2135 if (!drbd_rs_begin_io(mdev, sector)) {
2136 /* we have been interrupted,
2137 * probably connection lost! */
2138 D_ASSERT(signal_pending(current));
2145 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2146 cmdname(h->command));
2147 fault_type = DRBD_FAULT_MAX;
2150 spin_lock_irq(&mdev->req_lock);
2151 list_add(&e->w.list, &mdev->read_ee);
2152 spin_unlock_irq(&mdev->req_lock);
2156 if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
2162 drbd_free_ee(mdev, e);
2166 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2168 int self, peer, rv = -100;
2169 unsigned long ch_self, ch_peer;
2171 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2172 peer = mdev->p_uuid[UI_BITMAP] & 1;
2174 ch_peer = mdev->p_uuid[UI_SIZE];
2175 ch_self = mdev->comm_bm_set;
2177 switch (mdev->net_conf->after_sb_0p) {
2179 case ASB_DISCARD_SECONDARY:
2180 case ASB_CALL_HELPER:
2181 dev_err(DEV, "Configuration error.\n");
2183 case ASB_DISCONNECT:
2185 case ASB_DISCARD_YOUNGER_PRI:
2186 if (self == 0 && peer == 1) {
2190 if (self == 1 && peer == 0) {
2194 /* Else fall through to one of the other strategies... */
2195 case ASB_DISCARD_OLDER_PRI:
2196 if (self == 0 && peer == 1) {
2200 if (self == 1 && peer == 0) {
2204 /* Else fall through to one of the other strategies... */
2205 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2206 "Using discard-least-changes instead\n");
2207 case ASB_DISCARD_ZERO_CHG:
2208 if (ch_peer == 0 && ch_self == 0) {
2209 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2213 if (ch_peer == 0) { rv = 1; break; }
2214 if (ch_self == 0) { rv = -1; break; }
2216 if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2218 case ASB_DISCARD_LEAST_CHG:
2219 if (ch_self < ch_peer)
2221 else if (ch_self > ch_peer)
2223 else /* ( ch_self == ch_peer ) */
2224 /* Well, then use something else. */
2225 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2228 case ASB_DISCARD_LOCAL:
2231 case ASB_DISCARD_REMOTE:
2238 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2240 int self, peer, hg, rv = -100;
2242 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2243 peer = mdev->p_uuid[UI_BITMAP] & 1;
2245 switch (mdev->net_conf->after_sb_1p) {
2246 case ASB_DISCARD_YOUNGER_PRI:
2247 case ASB_DISCARD_OLDER_PRI:
2248 case ASB_DISCARD_LEAST_CHG:
2249 case ASB_DISCARD_LOCAL:
2250 case ASB_DISCARD_REMOTE:
2251 dev_err(DEV, "Configuration error.\n");
2253 case ASB_DISCONNECT:
2256 hg = drbd_asb_recover_0p(mdev);
2257 if (hg == -1 && mdev->state.role == R_SECONDARY)
2259 if (hg == 1 && mdev->state.role == R_PRIMARY)
2263 rv = drbd_asb_recover_0p(mdev);
2265 case ASB_DISCARD_SECONDARY:
2266 return mdev->state.role == R_PRIMARY ? 1 : -1;
2267 case ASB_CALL_HELPER:
2268 hg = drbd_asb_recover_0p(mdev);
2269 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2270 self = drbd_set_role(mdev, R_SECONDARY, 0);
2271 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2272 * we might be here in C_WF_REPORT_PARAMS which is transient.
2273 * we do not need to wait for the after state change work either. */
2274 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2275 if (self != SS_SUCCESS) {
2276 drbd_khelper(mdev, "pri-lost-after-sb");
2278 dev_warn(DEV, "Successfully gave up primary role.\n");
2288 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2290 int self, peer, hg, rv = -100;
2292 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2293 peer = mdev->p_uuid[UI_BITMAP] & 1;
2295 switch (mdev->net_conf->after_sb_2p) {
2296 case ASB_DISCARD_YOUNGER_PRI:
2297 case ASB_DISCARD_OLDER_PRI:
2298 case ASB_DISCARD_LEAST_CHG:
2299 case ASB_DISCARD_LOCAL:
2300 case ASB_DISCARD_REMOTE:
2302 case ASB_DISCARD_SECONDARY:
2303 dev_err(DEV, "Configuration error.\n");
2306 rv = drbd_asb_recover_0p(mdev);
2308 case ASB_DISCONNECT:
2310 case ASB_CALL_HELPER:
2311 hg = drbd_asb_recover_0p(mdev);
2313 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2314 * we might be here in C_WF_REPORT_PARAMS which is transient.
2315 * we do not need to wait for the after state change work either. */
2316 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2317 if (self != SS_SUCCESS) {
2318 drbd_khelper(mdev, "pri-lost-after-sb");
2320 dev_warn(DEV, "Successfully gave up primary role.\n");
2330 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2331 u64 bits, u64 flags)
2334 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2337 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2339 (unsigned long long)uuid[UI_CURRENT],
2340 (unsigned long long)uuid[UI_BITMAP],
2341 (unsigned long long)uuid[UI_HISTORY_START],
2342 (unsigned long long)uuid[UI_HISTORY_END],
2343 (unsigned long long)bits,
2344 (unsigned long long)flags);
2348 100 after split brain try auto recover
2349 2 C_SYNC_SOURCE set BitMap
2350 1 C_SYNC_SOURCE use BitMap
2352 -1 C_SYNC_TARGET use BitMap
2353 -2 C_SYNC_TARGET set BitMap
2354 -100 after split brain, disconnect
2355 -1000 unrelated data
2357 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2362 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2363 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2366 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2370 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2371 peer != UUID_JUST_CREATED)
2375 if (self != UUID_JUST_CREATED &&
2376 (peer == UUID_JUST_CREATED || peer == (u64)0))
2380 int rct, dc; /* roles at crash time */
2382 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2384 if (mdev->agreed_pro_version < 91)
2387 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2388 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2389 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2390 drbd_uuid_set_bm(mdev, 0UL);
2392 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2393 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2396 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2403 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2405 if (mdev->agreed_pro_version < 91)
2408 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2409 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2410 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2412 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2413 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2414 mdev->p_uuid[UI_BITMAP] = 0UL;
2416 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2419 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2426 /* Common power [off|failure] */
2427 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2428 (mdev->p_uuid[UI_FLAGS] & 2);
2429 /* lowest bit is set when we were primary,
2430 * next bit (weight 2) is set when peer was primary */
2434 case 0: /* !self_pri && !peer_pri */ return 0;
2435 case 1: /* self_pri && !peer_pri */ return 1;
2436 case 2: /* !self_pri && peer_pri */ return -1;
2437 case 3: /* self_pri && peer_pri */
2438 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2444 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2449 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2451 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2452 peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
2454 /* The last P_SYNC_UUID did not get through. Undo the modifications
2455 the peer made to its UUIDs when it last started a resync as sync source. */
2457 if (mdev->agreed_pro_version < 91)
2460 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2461 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2467 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2468 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2469 peer = mdev->p_uuid[i] & ~((u64)1);
2475 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2476 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2481 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2483 self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
2484 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2486 /* The last P_SYNC_UUID did not get through. Undo the modifications
2487 we made to our own UUIDs when we last started a resync as sync source. */
2489 if (mdev->agreed_pro_version < 91)
2492 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2493 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2495 dev_info(DEV, "Undid last start of resync:\n");
2497 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2498 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2506 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2507 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2508 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2514 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2515 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2516 if (self == peer && self != ((u64)0))
2520 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2521 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2522 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2523 peer = mdev->p_uuid[j] & ~((u64)1);
2532 /* drbd_sync_handshake() returns the new conn state on success, or
2533 CONN_MASK (-1) on failure.
2535 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2536 enum drbd_disk_state peer_disk) __must_hold(local)
2539 enum drbd_conns rv = C_MASK;
2540 enum drbd_disk_state mydisk;
2542 mydisk = mdev->state.disk;
2543 if (mydisk == D_NEGOTIATING)
2544 mydisk = mdev->new_state_tmp.disk;
2546 dev_info(DEV, "drbd_sync_handshake:\n");
2547 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2548 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2549 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2551 hg = drbd_uuid_compare(mdev, &rule_nr);
2553 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2556 dev_alert(DEV, "Unrelated data, aborting!\n");
2560 dev_alert(DEV, "To resolve this both sides have to support at least protocol\n");
2564 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2565 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2566 int f = (hg == -100) || abs(hg) == 2;
2567 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2570 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2571 hg > 0 ? "source" : "target");
2575 drbd_khelper(mdev, "initial-split-brain");
2577 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2578 int pcount = (mdev->state.role == R_PRIMARY)
2579 + (peer_role == R_PRIMARY);
2580 int forced = (hg == -100);
2584 hg = drbd_asb_recover_0p(mdev);
2587 hg = drbd_asb_recover_1p(mdev);
2590 hg = drbd_asb_recover_2p(mdev);
2593 if (abs(hg) < 100) {
2594 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2595 "automatically solved. Sync from %s node\n",
2596 pcount, (hg < 0) ? "peer" : "this");
2598 dev_warn(DEV, "Doing a full sync, since"
2599 " UUIDs where ambiguous.\n");
2606 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2608 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2612 dev_warn(DEV, "Split-Brain detected, manually solved. "
2613 "Sync from %s node\n",
2614 (hg < 0) ? "peer" : "this");
2618 /* FIXME this log message is not correct if we end up here
2619 * after an attempted attach on a diskless node.
2620 * We just refuse to attach -- well, we drop the "connection"
2621 * to that disk, in a way... */
2622 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
2623 drbd_khelper(mdev, "split-brain");
2627 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2628 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2632 if (hg < 0 && /* by intention we do not use mydisk here. */
2633 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2634 switch (mdev->net_conf->rr_conflict) {
2635 case ASB_CALL_HELPER:
2636 drbd_khelper(mdev, "pri-lost");
2638 case ASB_DISCONNECT:
2639 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2642 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2647 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2649 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2651 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2652 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2653 abs(hg) >= 2 ? "full" : "bit-map based");
2658 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2659 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
2663 if (hg > 0) { /* become sync source. */
2665 } else if (hg < 0) { /* become sync target */
2669 if (drbd_bm_total_weight(mdev)) {
2670 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2671 drbd_bm_total_weight(mdev));
2678 /* returns 1 if invalid */
2679 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2681 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2682 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2683 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2686 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2687 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2688 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2691 /* everything else is valid if they are equal on both sides. */
2695 /* everything else is invalid. */
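/*
 * A few illustrative calls of the checks above (sketch only):
 *
 *	cmp_after_sb(ASB_DISCARD_REMOTE, ASB_DISCARD_LOCAL)  == 0
 *		valid: both sides agree on discarding this node's data
 *	cmp_after_sb(ASB_DISCARD_REMOTE, ASB_DISCARD_REMOTE) == 1
 *		invalid: each side wants to discard the other one's data
 *	cmp_after_sb(ASB_DISCONNECT, ASB_DISCONNECT)         == 0
 *		valid: identical setting on both sides
 *	cmp_after_sb(ASB_DISCONNECT, ASB_CALL_HELPER)        == 1
 *		invalid: settings differ
 */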
2699 static int receive_protocol(struct drbd_conf *mdev, struct p_header *h)
2701 struct p_protocol *p = (struct p_protocol *)h;
2702 int header_size, data_size;
2703 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2704 int p_want_lose, p_two_primaries, cf;
2705 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2707 header_size = sizeof(*p) - sizeof(*h);
2708 data_size = h->length - header_size;
2710 if (drbd_recv(mdev, h->payload, header_size) != header_size)
2713 p_proto = be32_to_cpu(p->protocol);
2714 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2715 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2716 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
2717 p_two_primaries = be32_to_cpu(p->two_primaries);
2718 cf = be32_to_cpu(p->conn_flags);
2719 p_want_lose = cf & CF_WANT_LOSE;
2721 clear_bit(CONN_DRY_RUN, &mdev->flags);
2723 if (cf & CF_DRY_RUN)
2724 set_bit(CONN_DRY_RUN, &mdev->flags);
2726 if (p_proto != mdev->net_conf->wire_protocol) {
2727 dev_err(DEV, "incompatible communication protocols\n");
2731 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2732 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2736 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2737 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2741 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2742 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2746 if (p_want_lose && mdev->net_conf->want_lose) {
2747 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2751 if (p_two_primaries != mdev->net_conf->two_primaries) {
2752 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2756 if (mdev->agreed_pro_version >= 87) {
2757 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2759 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2762 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2763 if (strcmp(p_integrity_alg, my_alg)) {
2764 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2767 dev_info(DEV, "data-integrity-alg: %s\n",
2768 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2774 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2779 * input: alg name, feature name
2780 * return: NULL (alg name was "")
2781 * ERR_PTR(error) if something goes wrong
2782 * or the crypto hash ptr, if it worked out ok. */
2783 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2784 const char *alg, const char *name)
2786 struct crypto_hash *tfm;
2791 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2793 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2794 alg, name, PTR_ERR(tfm));
2797 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2798 crypto_free_hash(tfm);
2799 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2800 return ERR_PTR(-EINVAL);
2805 static int receive_SyncParam(struct drbd_conf *mdev, struct p_header *h)
2808 struct p_rs_param_95 *p = (struct p_rs_param_95 *)h;
2809 unsigned int header_size, data_size, exp_max_sz;
2810 struct crypto_hash *verify_tfm = NULL;
2811 struct crypto_hash *csums_tfm = NULL;
2812 const int apv = mdev->agreed_pro_version;
2814 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2815 : apv == 88 ? sizeof(struct p_rs_param)
2817 : apv <= 94 ? sizeof(struct p_rs_param_89)
2818 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
2820 if (h->length > exp_max_sz) {
2821 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2822 h->length, exp_max_sz);
2827 header_size = sizeof(struct p_rs_param) - sizeof(*h);
2828 data_size = h->length - header_size;
2829 } else if (apv <= 94) {
2830 header_size = sizeof(struct p_rs_param_89) - sizeof(*h);
2831 data_size = h->length - header_size;
2832 D_ASSERT(data_size == 0);
2834 header_size = sizeof(struct p_rs_param_95) - sizeof(*h);
2835 data_size = h->length - header_size;
2836 D_ASSERT(data_size == 0);
2839 /* initialize verify_alg and csums_alg */
2840 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2842 if (drbd_recv(mdev, h->payload, header_size) != header_size)
2845 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2849 if (data_size > SHARED_SECRET_MAX) {
2850 dev_err(DEV, "verify-alg too long, "
2851 "peer wants %u, accepting only %u byte\n",
2852 data_size, SHARED_SECRET_MAX);
2856 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2859 /* we expect NUL terminated string */
2860 /* but just in case someone tries to be evil */
2861 D_ASSERT(p->verify_alg[data_size-1] == 0);
2862 p->verify_alg[data_size-1] = 0;
2864 } else /* apv >= 89 */ {
2865 /* we still expect NUL terminated strings */
2866 /* but just in case someone tries to be evil */
2867 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2868 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2869 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2870 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2873 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2874 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2875 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2876 mdev->sync_conf.verify_alg, p->verify_alg);
2879 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2880 p->verify_alg, "verify-alg");
2881 if (IS_ERR(verify_tfm)) {
2887 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2888 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2889 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2890 mdev->sync_conf.csums_alg, p->csums_alg);
2893 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2894 p->csums_alg, "csums-alg");
2895 if (IS_ERR(csums_tfm)) {
2902 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2903 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2904 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2905 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2906 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
2909 spin_lock(&mdev->peer_seq_lock);
2910 /* lock against drbd_nl_syncer_conf() */
2912 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2913 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2914 crypto_free_hash(mdev->verify_tfm);
2915 mdev->verify_tfm = verify_tfm;
2916 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2919 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2920 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2921 crypto_free_hash(mdev->csums_tfm);
2922 mdev->csums_tfm = csums_tfm;
2923 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2925 spin_unlock(&mdev->peer_seq_lock);
2930 /* just for completeness: actually not needed,
2931 * as this is not reached if csums_tfm was ok. */
2932 crypto_free_hash(csums_tfm);
2933 /* but free the verify_tfm again, if csums_tfm did not work out */
2934 crypto_free_hash(verify_tfm);
2935 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2939 static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
2941 /* sorry, we currently have no working implementation
2942 * of distributed TCQ */
2945 /* warn if the arguments differ by more than 12.5% */
2946 static void warn_if_differ_considerably(struct drbd_conf *mdev,
2947 const char *s, sector_t a, sector_t b)
2950 if (a == 0 || b == 0)
2952 d = (a > b) ? (a - b) : (b - a);
2953 if (d > (a>>3) || d > (b>>3))
2954 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2955 (unsigned long long)a, (unsigned long long)b);
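/*
 * Worked example for the 12.5% check above (numbers for illustration only):
 * with a = 1000 and b = 800 sectors, d = 200 while a>>3 = 125 and b>>3 = 100,
 * so the difference exceeds 12.5% of either value and we warn; with a = 1000
 * and b = 900, d = 100 stays below both thresholds and nothing is logged.
 */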
2958 static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
2960 struct p_sizes *p = (struct p_sizes *)h;
2961 enum determine_dev_size dd = unchanged;
2962 unsigned int max_seg_s;
2963 sector_t p_size, p_usize, my_usize;
2964 int ldsc = 0; /* local disk size changed */
2965 enum dds_flags ddsf;
2967 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
2968 if (drbd_recv(mdev, h->payload, h->length) != h->length)
2971 p_size = be64_to_cpu(p->d_size);
2972 p_usize = be64_to_cpu(p->u_size);
2974 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2975 dev_err(DEV, "some backing storage is needed\n");
2976 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2980 /* just store the peer's disk size for now.
2981 * we still need to figure out whether we accept that. */
2982 mdev->p_size = p_size;
2984 #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
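/* Illustrative behaviour of the helper above: min_not_zero(0, 7) yields 7,
 * min_not_zero(5, 0) yields 5 and min_not_zero(5, 7) yields min(5, 7) == 5;
 * a value of 0 means "no explicit limit" and must never win the comparison. */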
2985 if (get_ldev(mdev)) {
2986 warn_if_differ_considerably(mdev, "lower level device sizes",
2987 p_size, drbd_get_max_capacity(mdev->ldev));
2988 warn_if_differ_considerably(mdev, "user requested size",
2989 p_usize, mdev->ldev->dc.disk_size);
2991 /* if this is the first connect, or an otherwise expected
2992 * param exchange, choose the minimum */
2993 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2994 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2997 my_usize = mdev->ldev->dc.disk_size;
2999 if (mdev->ldev->dc.disk_size != p_usize) {
3000 mdev->ldev->dc.disk_size = p_usize;
3001 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3002 (unsigned long)mdev->ldev->dc.disk_size);
3005 /* Never shrink a device with usable data during connect.
3006 But allow online shrinking if we are connected. */
3007 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
3008 drbd_get_capacity(mdev->this_bdev) &&
3009 mdev->state.disk >= D_OUTDATED &&
3010 mdev->state.conn < C_CONNECTED) {
3011 dev_err(DEV, "The peer's disk size is too small!\n");
3012 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3013 mdev->ldev->dc.disk_size = my_usize;
3021 ddsf = be16_to_cpu(p->dds_flags);
3022 if (get_ldev(mdev)) {
3023 dd = drbd_determin_dev_size(mdev, ddsf);
3025 if (dd == dev_size_error)
3029 /* I am diskless, need to accept the peer's size. */
3030 drbd_set_my_capacity(mdev, p_size);
3033 if (get_ldev(mdev)) {
3034 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3035 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3039 if (mdev->agreed_pro_version < 94)
3040 max_seg_s = be32_to_cpu(p->max_segment_size);
3041 else /* drbd 8.3.8 onwards */
3042 max_seg_s = DRBD_MAX_SEGMENT_SIZE;
3044 if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
3045 drbd_setup_queue_param(mdev, max_seg_s);
3047 drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
3051 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3052 if (be64_to_cpu(p->c_size) !=
3053 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3054 /* we have different sizes, probably peer
3055 * needs to know my new size... */
3056 drbd_send_sizes(mdev, 0, ddsf);
3058 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3059 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3060 if (mdev->state.pdsk >= D_INCONSISTENT &&
3061 mdev->state.disk >= D_INCONSISTENT) {
3062 if (ddsf & DDSF_NO_RESYNC)
3063 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3065 resync_after_online_grow(mdev);
3067 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3074 static int receive_uuids(struct drbd_conf *mdev, struct p_header *h)
3076 struct p_uuids *p = (struct p_uuids *)h;
3080 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3081 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3084 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3086 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3087 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3089 kfree(mdev->p_uuid);
3090 mdev->p_uuid = p_uuid;
3092 if (mdev->state.conn < C_CONNECTED &&
3093 mdev->state.disk < D_INCONSISTENT &&
3094 mdev->state.role == R_PRIMARY &&
3095 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3096 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3097 (unsigned long long)mdev->ed_uuid);
3098 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3102 if (get_ldev(mdev)) {
3103 int skip_initial_sync =
3104 mdev->state.conn == C_CONNECTED &&
3105 mdev->agreed_pro_version >= 90 &&
3106 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3107 (p_uuid[UI_FLAGS] & 8);
3108 if (skip_initial_sync) {
3109 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3110 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3111 "clear_n_write from receive_uuids");
3112 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3113 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3114 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3119 } else if (mdev->state.disk < D_INCONSISTENT &&
3120 mdev->state.role == R_PRIMARY) {
3121 /* I am a diskless primary, the peer just created a new current UUID
3123 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3126 /* Before we test for the disk state, we should wait until a possibly
3127 ongoing cluster wide state change is finished. That is important if
3128 we are primary and are detaching from our disk. We need to see the
3129 new disk state... */
3130 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3131 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3132 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3138 * convert_state() - Converts the peer's view of the cluster state to our point of view
3139 * @ps: The state as seen by the peer.
3141 static union drbd_state convert_state(union drbd_state ps)
3143 union drbd_state ms;
3145 static enum drbd_conns c_tab[] = {
3146 [C_CONNECTED] = C_CONNECTED,
3148 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3149 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3150 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3151 [C_VERIFY_S] = C_VERIFY_T,
3157 ms.conn = c_tab[ps.conn];
3162 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
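/*
 * Example of the conversion (a sketch, no extra logic): if the peer reports
 * conn == C_STARTING_SYNC_S, i.e. it intends to be the sync source, then
 * seen from this node the connection state is C_STARTING_SYNC_T, as per
 * c_tab above; role/peer and the two disk states are mirrored in the same
 * spirit so that the result describes the cluster from our point of view.
 */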
3167 static int receive_req_state(struct drbd_conf *mdev, struct p_header *h)
3169 struct p_req_state *p = (struct p_req_state *)h;
3170 union drbd_state mask, val;
3173 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3174 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3177 mask.i = be32_to_cpu(p->mask);
3178 val.i = be32_to_cpu(p->val);
3180 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3181 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3182 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3186 mask = convert_state(mask);
3187 val = convert_state(val);
3189 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3191 drbd_send_sr_reply(mdev, rv);
3197 static int receive_state(struct drbd_conf *mdev, struct p_header *h)
3199 struct p_state *p = (struct p_state *)h;
3200 enum drbd_conns nconn, oconn;
3201 union drbd_state ns, peer_state;
3202 enum drbd_disk_state real_peer_disk;
3203 enum chg_state_flags cs_flags;
3206 ERR_IF(h->length != (sizeof(*p)-sizeof(*h)))
3209 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3212 peer_state.i = be32_to_cpu(p->state);
3214 real_peer_disk = peer_state.disk;
3215 if (peer_state.disk == D_NEGOTIATING) {
3216 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3217 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3220 spin_lock_irq(&mdev->req_lock);
3222 oconn = nconn = mdev->state.conn;
3223 spin_unlock_irq(&mdev->req_lock);
3225 if (nconn == C_WF_REPORT_PARAMS)
3226 nconn = C_CONNECTED;
3228 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3229 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3230 int cr; /* consider resync */
3232 /* if we established a new connection */
3233 cr = (oconn < C_CONNECTED);
3234 /* if we had an established connection
3235 * and one of the nodes newly attaches a disk */
3236 cr |= (oconn == C_CONNECTED &&
3237 (peer_state.disk == D_NEGOTIATING ||
3238 mdev->state.disk == D_NEGOTIATING));
3239 /* if we have both been inconsistent, and the peer has been
3240 * forced to be UpToDate with --overwrite-data */
3241 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3242 /* if we had been plain connected, and the admin requested to
3243 * start a sync by "invalidate" or "invalidate-remote" */
3244 cr |= (oconn == C_CONNECTED &&
3245 (peer_state.conn >= C_STARTING_SYNC_S &&
3246 peer_state.conn <= C_WF_BITMAP_T));
3249 nconn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3252 if (nconn == C_MASK) {
3253 nconn = C_CONNECTED;
3254 if (mdev->state.disk == D_NEGOTIATING) {
3255 drbd_force_state(mdev, NS(disk, D_DISKLESS));
3256 } else if (peer_state.disk == D_NEGOTIATING) {
3257 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3258 peer_state.disk = D_DISKLESS;
3259 real_peer_disk = D_DISKLESS;
3261 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
3263 D_ASSERT(oconn == C_WF_REPORT_PARAMS);
3264 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3270 spin_lock_irq(&mdev->req_lock);
3271 if (mdev->state.conn != oconn)
3273 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3274 ns.i = mdev->state.i;
3276 ns.peer = peer_state.role;
3277 ns.pdsk = real_peer_disk;
3278 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3279 if ((nconn == C_CONNECTED || nconn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3280 ns.disk = mdev->new_state_tmp.disk;
3281 cs_flags = CS_VERBOSE + (oconn < C_CONNECTED && nconn >= C_CONNECTED ? 0 : CS_HARD);
3282 if (ns.pdsk == D_CONSISTENT && ns.susp && nconn == C_CONNECTED && oconn < C_CONNECTED &&
3283 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3284 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3285 for temporary network outages! */
3286 spin_unlock_irq(&mdev->req_lock);
3287 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3289 drbd_uuid_new_current(mdev);
3290 clear_bit(NEW_CUR_UUID, &mdev->flags);
3291 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
3294 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3296 spin_unlock_irq(&mdev->req_lock);
3298 if (rv < SS_SUCCESS) {
3299 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3303 if (oconn > C_WF_REPORT_PARAMS) {
3304 if (nconn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3305 peer_state.disk != D_NEGOTIATING ) {
3306 /* we want resync, peer has not yet decided to sync... */
3307 /* Nowadays only used when forcing a node into primary role and
3308 setting its disk to UpToDate with that */
3309 drbd_send_uuids(mdev);
3310 drbd_send_state(mdev);
3314 mdev->net_conf->want_lose = 0;
3316 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3321 static int receive_sync_uuid(struct drbd_conf *mdev, struct p_header *h)
3323 struct p_rs_uuid *p = (struct p_rs_uuid *)h;
3325 wait_event(mdev->misc_wait,
3326 mdev->state.conn == C_WF_SYNC_UUID ||
3327 mdev->state.conn < C_CONNECTED ||
3328 mdev->state.disk < D_NEGOTIATING);
3330 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3332 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3333 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3336 /* Here the _drbd_uuid_ functions are right, current should
3337 _not_ be rotated into the history */
3338 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3339 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3340 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3342 drbd_start_resync(mdev, C_SYNC_TARGET);
3346 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3351 enum receive_bitmap_ret { OK, DONE, FAILED };
3353 static enum receive_bitmap_ret
3354 receive_bitmap_plain(struct drbd_conf *mdev, struct p_header *h,
3355 unsigned long *buffer, struct bm_xfer_ctx *c)
3357 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3358 unsigned want = num_words * sizeof(long);
3360 if (want != h->length) {
3361 dev_err(DEV, "%s:want (%u) != h->length (%u)\n", __func__, want, h->length);
3366 if (drbd_recv(mdev, buffer, want) != want)
3369 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3371 c->word_offset += num_words;
3372 c->bit_offset = c->word_offset * BITS_PER_LONG;
3373 if (c->bit_offset > c->bm_bits)
3374 c->bit_offset = c->bm_bits;
3379 static enum receive_bitmap_ret
3380 recv_bm_rle_bits(struct drbd_conf *mdev,
3381 struct p_compressed_bm *p,
3382 struct bm_xfer_ctx *c)
3384 struct bitstream bs;
3388 unsigned long s = c->bit_offset;
3390 int len = p->head.length - (sizeof(*p) - sizeof(p->head));
3391 int toggle = DCBP_get_start(p);
3395 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3397 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3401 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3402 bits = vli_decode_bits(&rl, look_ahead);
3408 if (e >= c->bm_bits) {
3409 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3412 _drbd_bm_set_bits(mdev, s, e);
3416 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3417 have, bits, look_ahead,
3418 (unsigned int)(bs.cur.b - p->code),
3419 (unsigned int)bs.buf_len);
3422 look_ahead >>= bits;
3425 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3428 look_ahead |= tmp << have;
3433 bm_xfer_ctx_bit_to_word_offset(c);
3435 return (s == c->bm_bits) ? DONE : OK;
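/*
 * Worked example of the RLE decoding above (numbers purely illustrative):
 * starting at bit_offset 0 with DCBP_get_start() == 0 and decoded run
 * lengths 10, 4 and 6, the first 10 bits stay clear, bits 10..13 are set
 * via _drbd_bm_set_bits(), the next 6 bits stay clear and s ends up at 20.
 * Only runs with toggle set touch the bitmap; clear runs merely advance
 * the offset.
 */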
3438 static enum receive_bitmap_ret
3439 decode_bitmap_c(struct drbd_conf *mdev,
3440 struct p_compressed_bm *p,
3441 struct bm_xfer_ctx *c)
3443 if (DCBP_get_code(p) == RLE_VLI_Bits)
3444 return recv_bm_rle_bits(mdev, p, c);
3446 /* other variants had been implemented for evaluation,
3447 * but have been dropped as this one turned out to be "best"
3448 * during all our tests. */
3450 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3451 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3455 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3456 const char *direction, struct bm_xfer_ctx *c)
3458 /* what would it take to transfer it "plaintext" */
3459 unsigned plain = sizeof(struct p_header) *
3460 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3461 + c->bm_words * sizeof(long);
3462 unsigned total = c->bytes[0] + c->bytes[1];
3465 /* total can not be zero. but just in case: */
3469 /* don't report if not compressed */
3473 /* total < plain. check for overflow, still */
3474 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3475 : (1000 * total / plain);
3481 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3482 "total %u; compression: %u.%u%%\n",
3484 c->bytes[1], c->packets[1],
3485 c->bytes[0], c->packets[0],
3486 total, r/10, r % 10);
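/*
 * Numerical example for the ratio computed above (illustrative only): with
 * plain == 1000000 bytes and total == 42000 bytes, total is far below
 * UINT_MAX/1000, so r = 1000 * 42000 / 1000000 = 42, i.e. the actual
 * transfer amounted to 4.2% of what an uncompressed bitmap exchange would
 * have cost; the other branch divides plain by 1000 instead, only to avoid
 * the 32 bit overflow of the multiplication for very large totals.
 */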
3489 /* Since we are processing the bitfield from lower addresses to higher,
3490 it does not matter whether we process it in 32 bit chunks or 64 bit
3491 chunks, as long as it is little endian. (Understand it as a byte stream,
3492 beginning with the lowest byte...) If we used big endian,
3493 we would need to process it from the highest address to the lowest,
3494 in order to be agnostic to the 32 vs 64 bits issue.
3496 returns 0 on failure, 1 if we successfully received it. */
3497 static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h)
3499 struct bm_xfer_ctx c;
3501 enum receive_bitmap_ret ret;
3504 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3506 drbd_bm_lock(mdev, "receive bitmap");
3508 /* maybe we should use some per thread scratch page,
3509 * and allocate that during initial device creation? */
3510 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3512 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3516 c = (struct bm_xfer_ctx) {
3517 .bm_bits = drbd_bm_bits(mdev),
3518 .bm_words = drbd_bm_words(mdev),
3522 if (h->command == P_BITMAP) {
3523 ret = receive_bitmap_plain(mdev, h, buffer, &c);
3524 } else if (h->command == P_COMPRESSED_BITMAP) {
3525 /* MAYBE: sanity check that we speak proto >= 90,
3526 * and the feature is enabled! */
3527 struct p_compressed_bm *p;
3529 if (h->length > BM_PACKET_PAYLOAD_BYTES) {
3530 dev_err(DEV, "ReportCBitmap packet too large\n");
3533 /* use the page buff */
3535 memcpy(p, h, sizeof(*h));
3536 if (drbd_recv(mdev, p->head.payload, h->length) != h->length)
3538 if (p->head.length <= (sizeof(*p) - sizeof(p->head))) {
3539 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", p->head.length);
3542 ret = decode_bitmap_c(mdev, p, &c);
3544 dev_warn(DEV, "receive_bitmap: h->command neither ReportBitMap nor ReportCBitMap (is 0x%x)", h->command);
3548 c.packets[h->command == P_BITMAP]++;
3549 c.bytes[h->command == P_BITMAP] += sizeof(struct p_header) + h->length;
3554 if (!drbd_recv_header(mdev, h))
3556 } while (ret == OK);
3560 INFO_bm_xfer_stats(mdev, "receive", &c);
3562 if (mdev->state.conn == C_WF_BITMAP_T) {
3563 ok = !drbd_send_bitmap(mdev);
3566 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3567 ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3568 D_ASSERT(ok == SS_SUCCESS);
3569 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3570 /* admin may have requested C_DISCONNECTING,
3571 * other threads may have noticed network errors */
3572 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3573 drbd_conn_str(mdev->state.conn));
3578 drbd_bm_unlock(mdev);
3579 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3580 drbd_start_resync(mdev, C_SYNC_SOURCE);
3581 free_page((unsigned long) buffer);
3585 static int receive_skip_(struct drbd_conf *mdev, struct p_header *h, int silent)
3587 /* TODO zero copy sink :) */
3588 static char sink[128];
3592 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3593 h->command, h->length);
3597 want = min_t(int, size, sizeof(sink));
3598 r = drbd_recv(mdev, sink, want);
3599 ERR_IF(r <= 0) break;
3605 static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
3607 return receive_skip_(mdev, h, 0);
3610 static int receive_skip_silent(struct drbd_conf *mdev, struct p_header *h)
3612 return receive_skip_(mdev, h, 1);
3615 static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
3617 if (mdev->state.disk >= D_INCONSISTENT)
3620 /* Make sure we've acked all the TCP data associated
3621 * with the data requests being unplugged */
3622 drbd_tcp_quickack(mdev->data.socket);
3627 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, struct p_header *);
3629 static drbd_cmd_handler_f drbd_default_handler[] = {
3630 [P_DATA] = receive_Data,
3631 [P_DATA_REPLY] = receive_DataReply,
3632 [P_RS_DATA_REPLY] = receive_RSDataReply,
3633 [P_BARRIER] = receive_Barrier,
3634 [P_BITMAP] = receive_bitmap,
3635 [P_COMPRESSED_BITMAP] = receive_bitmap,
3636 [P_UNPLUG_REMOTE] = receive_UnplugRemote,
3637 [P_DATA_REQUEST] = receive_DataRequest,
3638 [P_RS_DATA_REQUEST] = receive_DataRequest,
3639 [P_SYNC_PARAM] = receive_SyncParam,
3640 [P_SYNC_PARAM89] = receive_SyncParam,
3641 [P_PROTOCOL] = receive_protocol,
3642 [P_UUIDS] = receive_uuids,
3643 [P_SIZES] = receive_sizes,
3644 [P_STATE] = receive_state,
3645 [P_STATE_CHG_REQ] = receive_req_state,
3646 [P_SYNC_UUID] = receive_sync_uuid,
3647 [P_OV_REQUEST] = receive_DataRequest,
3648 [P_OV_REPLY] = receive_DataRequest,
3649 [P_CSUM_RS_REQUEST] = receive_DataRequest,
3650 [P_DELAY_PROBE] = receive_skip_silent,
3651 /* anything missing from this table is in
3652 * the asender_tbl, see get_asender_cmd */
3656 static drbd_cmd_handler_f *drbd_cmd_handler = drbd_default_handler;
3657 static drbd_cmd_handler_f *drbd_opt_cmd_handler;
3659 static void drbdd(struct drbd_conf *mdev)
3661 drbd_cmd_handler_f handler;
3662 struct p_header *header = &mdev->data.rbuf.header;
3664 while (get_t_state(&mdev->receiver) == Running) {
3665 drbd_thread_current_set_cpu(mdev);
3666 if (!drbd_recv_header(mdev, header)) {
3667 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3671 if (header->command < P_MAX_CMD)
3672 handler = drbd_cmd_handler[header->command];
3673 else if (P_MAY_IGNORE < header->command
3674 && header->command < P_MAX_OPT_CMD)
3675 handler = drbd_opt_cmd_handler[header->command-P_MAY_IGNORE];
3676 else if (header->command > P_MAX_OPT_CMD)
3677 handler = receive_skip;
3681 if (unlikely(!handler)) {
3682 dev_err(DEV, "unknown packet type %d, l: %d!\n",
3683 header->command, header->length);
3684 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3687 if (unlikely(!handler(mdev, header))) {
3688 dev_err(DEV, "error receiving %s, l: %d!\n",
3689 cmdname(header->command), header->length);
3690 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3696 void drbd_flush_workqueue(struct drbd_conf *mdev)
3698 struct drbd_wq_barrier barr;
3700 barr.w.cb = w_prev_work_done;
3701 init_completion(&barr.done);
3702 drbd_queue_work(&mdev->data.work, &barr.w);
3703 wait_for_completion(&barr.done);
3706 void drbd_free_tl_hash(struct drbd_conf *mdev)
3708 struct hlist_head *h;
3710 spin_lock_irq(&mdev->req_lock);
3712 if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
3713 spin_unlock_irq(&mdev->req_lock);
3717 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3719 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3720 (int)(h - mdev->ee_hash), h->first);
3721 kfree(mdev->ee_hash);
3722 mdev->ee_hash = NULL;
3723 mdev->ee_hash_s = 0;
3726 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3728 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3729 (int)(h - mdev->tl_hash), h->first);
3730 kfree(mdev->tl_hash);
3731 mdev->tl_hash = NULL;
3732 mdev->tl_hash_s = 0;
3733 spin_unlock_irq(&mdev->req_lock);
3736 static void drbd_disconnect(struct drbd_conf *mdev)
3738 enum drbd_fencing_p fp;
3739 union drbd_state os, ns;
3740 int rv = SS_UNKNOWN_ERROR;
3743 if (mdev->state.conn == C_STANDALONE)
3745 if (mdev->state.conn >= C_WF_CONNECTION)
3746 dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
3747 drbd_conn_str(mdev->state.conn));
3749 /* asender does not clean up anything. it must not interfere, either */
3750 drbd_thread_stop(&mdev->asender);
3751 drbd_free_sock(mdev);
3753 spin_lock_irq(&mdev->req_lock);
3754 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3755 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3756 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3757 spin_unlock_irq(&mdev->req_lock);
3759 /* We do not have data structures that would allow us to
3760 * get the rs_pending_cnt down to 0 again.
3761 * * On C_SYNC_TARGET we do not have any data structures describing
3762 * the pending RSDataRequest's we have sent.
3763 * * On C_SYNC_SOURCE there is no data structure that tracks
3764 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3765 * And no, it is not the sum of the reference counts in the
3766 * resync_LRU. The resync_LRU tracks the whole operation including
3767 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3769 drbd_rs_cancel_all(mdev);
3771 mdev->rs_failed = 0;
3772 atomic_set(&mdev->rs_pending_cnt, 0);
3773 wake_up(&mdev->misc_wait);
3775 /* make sure syncer is stopped and w_resume_next_sg queued */
3776 del_timer_sync(&mdev->resync_timer);
3777 set_bit(STOP_SYNC_TIMER, &mdev->flags);
3778 resync_timer_fn((unsigned long)mdev);
3780 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3781 * w_make_resync_request etc. which may still be on the worker queue
3782 * to be "canceled" */
3783 drbd_flush_workqueue(mdev);
3785 /* This also does reclaim_net_ee(). If we do this too early, we might
3786 * miss some resync ee and pages.*/
3787 drbd_process_done_ee(mdev);
3789 kfree(mdev->p_uuid);
3790 mdev->p_uuid = NULL;
3792 if (!mdev->state.susp)
3795 dev_info(DEV, "Connection closed\n");
3800 if (get_ldev(mdev)) {
3801 fp = mdev->ldev->dc.fencing;
3805 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3806 drbd_try_outdate_peer_async(mdev);
3808 spin_lock_irq(&mdev->req_lock);
3810 if (os.conn >= C_UNCONNECTED) {
3811 /* Do not restart in case we are C_DISCONNECTING */
3813 ns.conn = C_UNCONNECTED;
3814 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3816 spin_unlock_irq(&mdev->req_lock);
3818 if (os.conn == C_DISCONNECTING) {
3819 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
3821 if (!mdev->state.susp) {
3822 /* we must not free the tl_hash
3823 * while application io is still on the fly */
3824 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3825 drbd_free_tl_hash(mdev);
3828 crypto_free_hash(mdev->cram_hmac_tfm);
3829 mdev->cram_hmac_tfm = NULL;
3831 kfree(mdev->net_conf);
3832 mdev->net_conf = NULL;
3833 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3836 /* tcp_close and release of sendpage pages can be deferred. I don't
3837 * want to use SO_LINGER, because apparently it can be deferred for
3838 * more than 20 seconds (longest time I checked).
3840 * Actually we don't care exactly when the network stack does its
3841 * put_page(), but release our reference on these pages right here.
3843 i = drbd_release_ee(mdev, &mdev->net_ee);
3845 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
3846 i = atomic_read(&mdev->pp_in_use);
3848 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
3850 D_ASSERT(list_empty(&mdev->read_ee));
3851 D_ASSERT(list_empty(&mdev->active_ee));
3852 D_ASSERT(list_empty(&mdev->sync_ee));
3853 D_ASSERT(list_empty(&mdev->done_ee));
3855 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3856 atomic_set(&mdev->current_epoch->epoch_size, 0);
3857 D_ASSERT(list_empty(&mdev->current_epoch->list));
3861 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3862 * we can agree on is stored in agreed_pro_version.
3864 * feature flags and the reserved array should be enough room for future
3865 * enhancements of the handshake protocol, and possible plugins...
3867 * for now, they are expected to be zero, but ignored.
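/*
 * Example of the version negotiation outcome (the numbers are made up for
 * illustration and are not the real PRO_VERSION_MIN/MAX values): if we
 * support protocols 86..94 and the peer announces 88..97, the ranges
 * overlap and agreed_pro_version becomes min(94, 97) == 94; had the peer
 * announced 95..97 instead, the ranges would not overlap and
 * drbd_do_handshake() would report incompatible dialects.
 */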
3869 static int drbd_send_handshake(struct drbd_conf *mdev)
3871 /* ASSERT current == mdev->receiver ... */
3872 struct p_handshake *p = &mdev->data.sbuf.handshake;
3875 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3876 dev_err(DEV, "interrupted during initial handshake\n");
3877 return 0; /* interrupted. not ok. */
3880 if (mdev->data.socket == NULL) {
3881 mutex_unlock(&mdev->data.mutex);
3885 memset(p, 0, sizeof(*p));
3886 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3887 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3888 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
3889 (struct p_header *)p, sizeof(*p), 0 );
3890 mutex_unlock(&mdev->data.mutex);
3896 * 1 yes, we have a valid connection
3897 * 0 oops, did not work out, please try again
3898 * -1 peer talks different language,
3899 * no point in trying again, please go standalone.
3901 static int drbd_do_handshake(struct drbd_conf *mdev)
3903 /* ASSERT current == mdev->receiver ... */
3904 struct p_handshake *p = &mdev->data.rbuf.handshake;
3905 const int expect = sizeof(struct p_handshake)
3906 -sizeof(struct p_header);
3909 rv = drbd_send_handshake(mdev);
3913 rv = drbd_recv_header(mdev, &p->head);
3917 if (p->head.command != P_HAND_SHAKE) {
3918 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
3919 cmdname(p->head.command), p->head.command);
3923 if (p->head.length != expect) {
3924 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
3925 expect, p->head.length);
3929 rv = drbd_recv(mdev, &p->head.payload, expect);
3932 dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
3936 p->protocol_min = be32_to_cpu(p->protocol_min);
3937 p->protocol_max = be32_to_cpu(p->protocol_max);
3938 if (p->protocol_max == 0)
3939 p->protocol_max = p->protocol_min;
3941 if (PRO_VERSION_MAX < p->protocol_min ||
3942 PRO_VERSION_MIN > p->protocol_max)
3945 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3947 dev_info(DEV, "Handshake successful: "
3948 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3953 dev_err(DEV, "incompatible DRBD dialects: "
3954 "I support %d-%d, peer supports %d-%d\n",
3955 PRO_VERSION_MIN, PRO_VERSION_MAX,
3956 p->protocol_min, p->protocol_max);
3960 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
3961 static int drbd_do_auth(struct drbd_conf *mdev)
3963 dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
3964 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
3968 #define CHALLENGE_LEN 64
3972 0 - failed, try again (network error),
3973 -1 - auth failed, don't try again.
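/*
 * Rough outline of the challenge/response exchange implemented below
 * (a sketch; both peers run the same steps symmetrically):
 *
 *	send P_AUTH_CHALLENGE carrying our random my_challenge
 *	recv P_AUTH_CHALLENGE carrying the peer's challenge (peers_ch)
 *	send P_AUTH_RESPONSE  carrying HMAC(shared_secret, peers_ch)
 *	recv P_AUTH_RESPONSE  and compare it against the locally computed
 *	                      HMAC(shared_secret, my_challenge)
 *
 * Authentication succeeds only if the received response matches our own
 * computation, i.e. if both sides know the same shared secret.
 */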
3976 static int drbd_do_auth(struct drbd_conf *mdev)
3978 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
3979 struct scatterlist sg;
3980 char *response = NULL;
3981 char *right_response = NULL;
3982 char *peers_ch = NULL;
3984 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
3985 unsigned int resp_size;
3986 struct hash_desc desc;
3989 desc.tfm = mdev->cram_hmac_tfm;
3992 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
3993 (u8 *)mdev->net_conf->shared_secret, key_len);
3995 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
4000 get_random_bytes(my_challenge, CHALLENGE_LEN);
4002 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4006 rv = drbd_recv_header(mdev, &p);
4010 if (p.command != P_AUTH_CHALLENGE) {
4011 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
4012 cmdname(p.command), p.command);
4017 if (p.length > CHALLENGE_LEN*2) {
4018 dev_err(DEV, "expected AuthChallenge payload too big.\n");
4023 peers_ch = kmalloc(p.length, GFP_NOIO);
4024 if (peers_ch == NULL) {
4025 dev_err(DEV, "kmalloc of peers_ch failed\n");
4030 rv = drbd_recv(mdev, peers_ch, p.length);
4032 if (rv != p.length) {
4033 dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
4038 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4039 response = kmalloc(resp_size, GFP_NOIO);
4040 if (response == NULL) {
4041 dev_err(DEV, "kmalloc of response failed\n");
4046 sg_init_table(&sg, 1);
4047 sg_set_buf(&sg, peers_ch, p.length);
4049 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4051 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4056 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4060 rv = drbd_recv_header(mdev, &p);
4064 if (p.command != P_AUTH_RESPONSE) {
4065 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
4066 cmdname(p.command), p.command);
4071 if (p.length != resp_size) {
4072 dev_err(DEV, "expected AuthResponse payload of wrong size\n");
4077 rv = drbd_recv(mdev, response , resp_size);
4079 if (rv != resp_size) {
4080 dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
4085 right_response = kmalloc(resp_size, GFP_NOIO);
4086 if (right_response == NULL) {
4087 dev_err(DEV, "kmalloc of right_response failed\n");
4092 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4094 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4096 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4101 rv = !memcmp(response, right_response, resp_size);
4104 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4105 resp_size, mdev->net_conf->cram_hmac_alg);
4112 kfree(right_response);
4118 int drbdd_init(struct drbd_thread *thi)
4120 struct drbd_conf *mdev = thi->mdev;
4121 unsigned int minor = mdev_to_minor(mdev);
4124 sprintf(current->comm, "drbd%d_receiver", minor);
4126 dev_info(DEV, "receiver (re)started\n");
4129 h = drbd_connect(mdev);
4131 drbd_disconnect(mdev);
4132 __set_current_state(TASK_INTERRUPTIBLE);
4133 schedule_timeout(HZ);
4136 dev_warn(DEV, "Discarding network configuration.\n");
4137 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4142 if (get_net_conf(mdev)) {
4148 drbd_disconnect(mdev);
4150 dev_info(DEV, "receiver terminated\n");
4154 /* ********* acknowledge sender ******** */
4156 static int got_RqSReply(struct drbd_conf *mdev, struct p_header *h)
4158 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4160 int retcode = be32_to_cpu(p->retcode);
4162 if (retcode >= SS_SUCCESS) {
4163 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4165 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4166 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4167 drbd_set_st_err_str(retcode), retcode);
4169 wake_up(&mdev->state_wait);
4174 static int got_Ping(struct drbd_conf *mdev, struct p_header *h)
4176 return drbd_send_ping_ack(mdev);
4180 static int got_PingAck(struct drbd_conf *mdev, struct p_header *h)
4182 /* restore idle timeout */
4183 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4184 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4185 wake_up(&mdev->misc_wait);
4190 static int got_IsInSync(struct drbd_conf *mdev, struct p_header *h)
4192 struct p_block_ack *p = (struct p_block_ack *)h;
4193 sector_t sector = be64_to_cpu(p->sector);
4194 int blksize = be32_to_cpu(p->blksize);
4196 D_ASSERT(mdev->agreed_pro_version >= 89);
4198 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4200 drbd_rs_complete_io(mdev, sector);
4201 drbd_set_in_sync(mdev, sector, blksize);
4202 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4203 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4204 dec_rs_pending(mdev);
4209 /* when we receive the ACK for a write request,
4210 * verify that we actually know about it */
4211 static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4212 u64 id, sector_t sector)
4214 struct hlist_head *slot = tl_hash_slot(mdev, sector);
4215 struct hlist_node *n;
4216 struct drbd_request *req;
4218 hlist_for_each_entry(req, n, slot, colision) {
4219 if ((unsigned long)req == (unsigned long)id) {
4220 if (req->sector != sector) {
4221 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4222 "wrong sector (%llus versus %llus)\n", req,
4223 (unsigned long long)req->sector,
4224 (unsigned long long)sector);
4230 dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
4231 (void *)(unsigned long)id, (unsigned long long)sector);
4235 typedef struct drbd_request *(req_validator_fn)
4236 (struct drbd_conf *mdev, u64 id, sector_t sector);
4238 static int validate_req_change_req_state(struct drbd_conf *mdev,
4239 u64 id, sector_t sector, req_validator_fn validator,
4240 const char *func, enum drbd_req_event what)
4242 struct drbd_request *req;
4243 struct bio_and_error m;
4245 spin_lock_irq(&mdev->req_lock);
4246 req = validator(mdev, id, sector);
4247 if (unlikely(!req)) {
4248 spin_unlock_irq(&mdev->req_lock);
4249 dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
4252 __req_mod(req, what, &m);
4253 spin_unlock_irq(&mdev->req_lock);
4256 complete_master_bio(mdev, &m);
4260 static int got_BlockAck(struct drbd_conf *mdev, struct p_header *h)
4262 struct p_block_ack *p = (struct p_block_ack *)h;
4263 sector_t sector = be64_to_cpu(p->sector);
4264 int blksize = be32_to_cpu(p->blksize);
4265 enum drbd_req_event what;
4267 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4269 if (is_syncer_block_id(p->block_id)) {
4270 drbd_set_in_sync(mdev, sector, blksize);
4271 dec_rs_pending(mdev);
4274 switch (be16_to_cpu(h->command)) {
4275 case P_RS_WRITE_ACK:
4276 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4277 what = write_acked_by_peer_and_sis;
4280 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4281 what = write_acked_by_peer;
4284 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4285 what = recv_acked_by_peer;
4288 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4289 what = conflict_discarded_by_peer;
4296 return validate_req_change_req_state(mdev, p->block_id, sector,
4297 _ack_id_to_req, __func__ , what);
4300 static int got_NegAck(struct drbd_conf *mdev, struct p_header *h)
4302 struct p_block_ack *p = (struct p_block_ack *)h;
4303 sector_t sector = be64_to_cpu(p->sector);
4305 if (__ratelimit(&drbd_ratelimit_state))
4306 dev_warn(DEV, "Got NegAck packet. Peer is in troubles?\n");
4308 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4310 if (is_syncer_block_id(p->block_id)) {
4311 int size = be32_to_cpu(p->blksize);
4312 dec_rs_pending(mdev);
4313 drbd_rs_failed_io(mdev, sector, size);
4316 return validate_req_change_req_state(mdev, p->block_id, sector,
4317 _ack_id_to_req, __func__ , neg_acked);
4320 static int got_NegDReply(struct drbd_conf *mdev, struct p_header *h)
4322 struct p_block_ack *p = (struct p_block_ack *)h;
4323 sector_t sector = be64_to_cpu(p->sector);
4325 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4326 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4327 (unsigned long long)sector, be32_to_cpu(p->blksize));
4329 return validate_req_change_req_state(mdev, p->block_id, sector,
4330 _ar_id_to_req, __func__ , neg_acked);
4333 static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h)
4337 struct p_block_ack *p = (struct p_block_ack *)h;
4339 sector = be64_to_cpu(p->sector);
4340 size = be32_to_cpu(p->blksize);
4342 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4344 dec_rs_pending(mdev);
4346 if (get_ldev_if_state(mdev, D_FAILED)) {
4347 drbd_rs_complete_io(mdev, sector);
4348 drbd_rs_failed_io(mdev, sector, size);
4355 static int got_BarrierAck(struct drbd_conf *mdev, struct p_header *h)
4357 struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4359 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4364 static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
4366 struct p_block_ack *p = (struct p_block_ack *)h;
4367 struct drbd_work *w;
4371 sector = be64_to_cpu(p->sector);
4372 size = be32_to_cpu(p->blksize);
4374 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4376 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4377 drbd_ov_oos_found(mdev, sector, size);
4381 drbd_rs_complete_io(mdev, sector);
4382 dec_rs_pending(mdev);
4384 if (--mdev->ov_left == 0) {
4385 w = kmalloc(sizeof(*w), GFP_NOIO);
4387 w->cb = w_ov_finished;
4388 drbd_queue_work_front(&mdev->data.work, w);
4390 dev_err(DEV, "kmalloc(w) failed.");
4392 drbd_resync_finished(mdev);
4398 static int got_something_to_ignore_m(struct drbd_conf *mdev, struct p_header *h)
4404 struct asender_cmd {
4406 int (*process)(struct drbd_conf *mdev, struct p_header *h);
4409 static struct asender_cmd *get_asender_cmd(int cmd)
4411 static struct asender_cmd asender_tbl[] = {
4412 /* anything missing from this table is in
4413 * the drbd_cmd_handler (drbd_default_handler) table,
4414 * see the beginning of drbdd() */
4415 [P_PING] = { sizeof(struct p_header), got_Ping },
4416 [P_PING_ACK] = { sizeof(struct p_header), got_PingAck },
4417 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4418 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4419 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4420 [P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4421 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
4422 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
4423 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply},
4424 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
4425 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
4426 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
4427 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
4428 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe), got_something_to_ignore_m },
4429 [P_MAX_CMD] = { 0, NULL },
4431 if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
4433 return &asender_tbl[cmd];
4436 int drbd_asender(struct drbd_thread *thi)
4438 struct drbd_conf *mdev = thi->mdev;
4439 struct p_header *h = &mdev->meta.rbuf.header;
4440 struct asender_cmd *cmd = NULL;
4445 int expect = sizeof(struct p_header);
4448 sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
4450 current->policy = SCHED_RR; /* Make this a realtime task! */
4451 current->rt_priority = 2; /* more important than all other tasks */
4453 while (get_t_state(thi) == Running) {
4454 drbd_thread_current_set_cpu(mdev);
4455 if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
4456 ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
4457 mdev->meta.socket->sk->sk_rcvtimeo =
4458 mdev->net_conf->ping_timeo*HZ/10;
4461 /* conditionally cork;
4462 * it may hurt latency if we cork without much to send */
4463 if (!mdev->net_conf->no_cork &&
4464 3 < atomic_read(&mdev->unacked_cnt))
4465 drbd_tcp_cork(mdev->meta.socket);
4467 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4468 flush_signals(current);
4469 if (!drbd_process_done_ee(mdev)) {
4470 dev_err(DEV, "process_done_ee() = NOT_OK\n");
4473 /* to avoid race with newly queued ACKs */
4474 set_bit(SIGNAL_ASENDER, &mdev->flags);
4475 spin_lock_irq(&mdev->req_lock);
4476 empty = list_empty(&mdev->done_ee);
4477 spin_unlock_irq(&mdev->req_lock);
4478 /* new ack may have been queued right here,
4479 * but then there is also a signal pending,
4480 * and we start over... */
4484 /* but unconditionally uncork unless disabled */
4485 if (!mdev->net_conf->no_cork)
4486 drbd_tcp_uncork(mdev->meta.socket);
4488 /* short circuit, recv_msg would return EINTR anyways. */
4489 if (signal_pending(current))
4492 rv = drbd_recv_short(mdev, mdev->meta.socket,
4493 buf, expect-received, 0);
4494 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4496 flush_signals(current);
4499 * -EINTR (on meta) we got a signal
4500 * -EAGAIN (on meta) rcvtimeo expired
4501 * -ECONNRESET other side closed the connection
4502 * -ERESTARTSYS (on data) we got a signal
4503 * rv < 0 other than above: unexpected error!
4504 * rv == expected: full header or command
4505 * rv < expected: "woken" by signal during receive
4506 * rv == 0 : "connection shut down by peer"
4508 if (likely(rv > 0)) {
4511 } else if (rv == 0) {
4512 dev_err(DEV, "meta connection shut down by peer.\n");
4514 } else if (rv == -EAGAIN) {
4515 if (mdev->meta.socket->sk->sk_rcvtimeo ==
4516 mdev->net_conf->ping_timeo*HZ/10) {
4517 dev_err(DEV, "PingAck did not arrive in time.\n");
4520 set_bit(SEND_PING, &mdev->flags);
4522 } else if (rv == -EINTR) {
4525 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
4529 if (received == expect && cmd == NULL) {
4530 if (unlikely(h->magic != BE_DRBD_MAGIC)) {
4531 dev_err(DEV, "magic?? on meta m: 0x%lx c: %d l: %d\n",
4532 (long)be32_to_cpu(h->magic),
4533 h->command, h->length);
4536 cmd = get_asender_cmd(be16_to_cpu(h->command));
4537 len = be16_to_cpu(h->length);
4538 if (unlikely(cmd == NULL)) {
4539 dev_err(DEV, "unknown command?? on meta m: 0x%lx c: %d l: %d\n",
4540 (long)be32_to_cpu(h->magic),
4541 h->command, h->length);
4544 expect = cmd->pkt_size;
4545 ERR_IF(len != expect-sizeof(struct p_header))
4548 if (received == expect) {
4549 D_ASSERT(cmd != NULL);
4550 if (!cmd->process(mdev, h))
4555 expect = sizeof(struct p_header);
4562 drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
4566 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4568 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4570 D_ASSERT(mdev->state.conn < C_CONNECTED);
4571 dev_info(DEV, "asender terminated\n");