4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/module.h>
28 #include <asm/uaccess.h>
31 #include <linux/drbd.h>
33 #include <linux/file.h>
36 #include <linux/memcontrol.h>
37 #include <linux/mm_inline.h>
38 #include <linux/slab.h>
39 #include <linux/smp_lock.h>
40 #include <linux/pkt_sched.h>
41 #define __KERNEL_SYSCALLS__
42 #include <linux/unistd.h>
43 #include <linux/vmalloc.h>
44 #include <linux/random.h>
45 #include <linux/string.h>
46 #include <linux/scatterlist.h>
58 static int drbd_do_handshake(struct drbd_conf *mdev);
59 static int drbd_do_auth(struct drbd_conf *mdev);
61 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
62 static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
65 #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
68 * some helper functions to deal with singly linked page lists,
69 * page->private being our "next" pointer.
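/*
 * Editor's sketch (not from this file): the chain-walking helpers used
 * below are defined in drbd_int.h. A minimal version, assuming
 * page->private is used for nothing but the "next" pointer here, could
 * look like this:
 */
#if 0	/* illustrative only */
#define page_chain_next(page) \
	((struct page *)page_private(page))
#define page_chain_for_each(page) \
	for (; page; page = page_chain_next(page))
#define page_chain_for_each_safe(page, n) \
	for (; page && ({ n = page_chain_next(page); 1; }); page = n)
#endif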
72 /* If at least n pages are linked at head, get n pages off.
73 * Otherwise, don't modify head, and return NULL.
74 * Locking is the responsibility of the caller.
76 static struct page *page_chain_del(struct page **head, int n)
90 tmp = page_chain_next(page);
92 break; /* found sufficient pages */
94 /* insufficient pages, don't use any of them. */
99 /* add end of list marker for the returned list */
100 set_page_private(page, 0);
101 /* actual return value, and adjustment of head */
107 /* may be used outside of locks to find the tail of a (usually short)
108 * "private" page chain, before adding it back to a global chain head
109 * with page_chain_add() under a spinlock. */
110 static struct page *page_chain_tail(struct page *page, int *len)
114 while ((tmp = page_chain_next(page)))
121 static int page_chain_free(struct page *page)
125 page_chain_for_each_safe(page, tmp) {
132 static void page_chain_add(struct page **head,
133 struct page *chain_first, struct page *chain_last)
137 tmp = page_chain_tail(chain_first, NULL);
138 BUG_ON(tmp != chain_last);
141 /* add chain to head */
142 set_page_private(chain_last, (unsigned long)*head);
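/*
 * Editor's sketch (hypothetical caller, not from this file): the
 * intended pattern is to find the tail of a "private" chain outside of
 * any lock, then splice the chain back into the global pool under
 * drbd_pp_lock, as drbd_pp_free() below does:
 */
#if 0	/* illustrative only */
static void example_return_chain(struct page *chain)
{
	int n;
	struct page *tail = page_chain_tail(chain, &n);

	spin_lock(&drbd_pp_lock);
	page_chain_add(&drbd_pp_pool, chain, tail);
	drbd_pp_vacant += n;
	spin_unlock(&drbd_pp_lock);
}
#endif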
146 static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
148 struct page *page = NULL;
149 struct page *tmp = NULL;
152 /* Yes, testing drbd_pp_vacant outside the lock is racy.
153 * So what. It saves a spin_lock. */
154 if (drbd_pp_vacant >= number) {
155 spin_lock(&drbd_pp_lock);
156 page = page_chain_del(&drbd_pp_pool, number);
158 drbd_pp_vacant -= number;
159 spin_unlock(&drbd_pp_lock);
164 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
165 * "criss-cross" setup, that might cause write-out on some other DRBD,
166 * which in turn might block on the other node at this very place. */
167 for (i = 0; i < number; i++) {
168 tmp = alloc_page(GFP_TRY);
171 set_page_private(tmp, (unsigned long)page);
178 /* Not enough pages immediately available this time.
179 * No need to jump around here, drbd_pp_alloc will retry this
180 * function "soon". */
182 tmp = page_chain_tail(page, NULL);
183 spin_lock(&drbd_pp_lock);
184 page_chain_add(&drbd_pp_pool, page, tmp);
186 spin_unlock(&drbd_pp_lock);
191 /* kick the lower level device if we hold more than an (arbitrary) number
192 * of references on it; these typically are locally submitted io
193 * requests. don't use unacked_cnt, so we speed up proto A and B, too. */
194 static void maybe_kick_lo(struct drbd_conf *mdev)
196 if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
200 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
202 struct drbd_epoch_entry *e;
203 struct list_head *le, *tle;
205 /* The EEs are always appended to the end of the list. Since
206 they are sent in order over the wire, they have to finish
207 in order. As soon as we see the first unfinished one we can
208 stop examining the list...
210 list_for_each_safe(le, tle, &mdev->net_ee) {
211 e = list_entry(le, struct drbd_epoch_entry, w.list);
212 if (drbd_ee_has_active_page(e))
214 list_move(le, to_be_freed);
218 static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
220 LIST_HEAD(reclaimed);
221 struct drbd_epoch_entry *e, *t;
224 spin_lock_irq(&mdev->req_lock);
225 reclaim_net_ee(mdev, &reclaimed);
226 spin_unlock_irq(&mdev->req_lock);
228 list_for_each_entry_safe(e, t, &reclaimed, w.list)
229 drbd_free_net_ee(mdev, e);
233 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
234 * @mdev: DRBD device.
235 * @number: number of pages requested
236 * @retry: whether to retry, if not enough pages are available right now
238 * Tries to allocate @number pages, first from our own page pool, then from
239 * the kernel, unless this allocation would exceed the max_buffers setting.
240 * Possibly retry until DRBD frees sufficient pages somewhere else.
242 * Returns a page chain linked via page->private.
244 static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
246 struct page *page = NULL;
249 /* Yes, we may run up to @number over max_buffers. If we
250 * follow it strictly, the admin will get it wrong anyways. */
251 if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
252 page = drbd_pp_first_pages_or_try_alloc(mdev, number);
254 while (page == NULL) {
255 prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
257 drbd_kick_lo_and_reclaim_net(mdev);
259 if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
260 page = drbd_pp_first_pages_or_try_alloc(mdev, number);
268 if (signal_pending(current)) {
269 dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
275 finish_wait(&drbd_pp_wait, &wait);
278 atomic_add(number, &mdev->pp_in_use);
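/*
 * Editor's sketch (hypothetical usage snippet, not from this file): a
 * consumer of drbd_pp_alloc() walks the returned chain via
 * page->private and eventually hands all pages back through
 * drbd_pp_free():
 */
#if 0	/* illustrative only */
	struct page *chain = drbd_pp_alloc(mdev, nr_pages, true);
	struct page *p;

	if (chain) {
		for (p = chain; p; p = page_chain_next(p))
			clear_highpage(p);	/* touch each page once */
		drbd_pp_free(mdev, chain, 0);
	}
#endif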
282 /* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
283 * It is also used from inside another spin_lock_irq(&mdev->req_lock);
284 * Either links the page chain back to the global pool,
285 * or returns all pages to the system. */
286 static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
288 atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
291 if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
292 i = page_chain_free(page);
295 tmp = page_chain_tail(page, &i);
296 spin_lock(&drbd_pp_lock);
297 page_chain_add(&drbd_pp_pool, page, tmp);
299 spin_unlock(&drbd_pp_lock);
301 i = atomic_sub_return(i, a);
303 dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
304 is_net ? "pp_in_use_by_net" : "pp_in_use", i);
305 wake_up(&drbd_pp_wait);
309 You need to hold the req_lock:
310 _drbd_wait_ee_list_empty()
312 You must not have the req_lock:
318 drbd_process_done_ee()
320 drbd_wait_ee_list_empty()
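/*
 * Editor's sketch of the locking rules stated above (hypothetical
 * call-site snippet, not from this file):
 */
#if 0	/* illustrative only */
	/* _drbd_wait_ee_list_empty(): caller holds req_lock */
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	spin_unlock_irq(&mdev->req_lock);

	/* drbd_wait_ee_list_empty(): caller must NOT hold req_lock,
	 * it takes and releases the lock itself */
	drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
#endif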
323 struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
326 unsigned int data_size,
327 gfp_t gfp_mask) __must_hold(local)
329 struct drbd_epoch_entry *e;
331 unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
333 if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
336 e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
338 if (!(gfp_mask & __GFP_NOWARN))
339 dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
343 page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
347 INIT_HLIST_NODE(&e->colision);
351 atomic_set(&e->pending_bios, 0);
360 mempool_free(e, drbd_ee_mempool);
364 void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
366 if (e->flags & EE_HAS_DIGEST)
368 drbd_pp_free(mdev, e->pages, is_net);
369 D_ASSERT(atomic_read(&e->pending_bios) == 0);
370 D_ASSERT(hlist_unhashed(&e->colision));
371 mempool_free(e, drbd_ee_mempool);
374 int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
376 LIST_HEAD(work_list);
377 struct drbd_epoch_entry *e, *t;
379 int is_net = list == &mdev->net_ee;
381 spin_lock_irq(&mdev->req_lock);
382 list_splice_init(list, &work_list);
383 spin_unlock_irq(&mdev->req_lock);
385 list_for_each_entry_safe(e, t, &work_list, w.list) {
386 drbd_free_some_ee(mdev, e, is_net);
394 * This function is called from _asender only_
395 * but see also comments in _req_mod(,barrier_acked)
396 * and receive_Barrier.
398 * Move entries from net_ee to done_ee, if ready.
399 * Grab done_ee, call all callbacks, free the entries.
400 * The callbacks typically send out ACKs.
402 static int drbd_process_done_ee(struct drbd_conf *mdev)
404 LIST_HEAD(work_list);
405 LIST_HEAD(reclaimed);
406 struct drbd_epoch_entry *e, *t;
407 int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
409 spin_lock_irq(&mdev->req_lock);
410 reclaim_net_ee(mdev, &reclaimed);
411 list_splice_init(&mdev->done_ee, &work_list);
412 spin_unlock_irq(&mdev->req_lock);
414 list_for_each_entry_safe(e, t, &reclaimed, w.list)
415 drbd_free_net_ee(mdev, e);
417 /* possible callbacks here:
418 * e_end_block, and e_end_resync_block, e_send_discard_ack.
419 * all ignore the last argument.
421 list_for_each_entry_safe(e, t, &work_list, w.list) {
422 /* list_del not necessary, next/prev members not touched */
423 ok = e->w.cb(mdev, &e->w, !ok) && ok;
424 drbd_free_ee(mdev, e);
426 wake_up(&mdev->ee_wait);
431 void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
435 /* avoids spin_lock/unlock
436 * and calling prepare_to_wait in the fast path */
437 while (!list_empty(head)) {
438 prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
439 spin_unlock_irq(&mdev->req_lock);
442 finish_wait(&mdev->ee_wait, &wait);
443 spin_lock_irq(&mdev->req_lock);
447 void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
449 spin_lock_irq(&mdev->req_lock);
450 _drbd_wait_ee_list_empty(mdev, head);
451 spin_unlock_irq(&mdev->req_lock);
454 /* see also kernel_accept(), which is only present since 2.6.18.
455 * we also want to log exactly which part of it failed */
456 static int drbd_accept(struct drbd_conf *mdev, const char **what,
457 struct socket *sock, struct socket **newsock)
459 struct sock *sk = sock->sk;
463 err = sock->ops->listen(sock, 5);
467 *what = "sock_create_lite";
468 err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
474 err = sock->ops->accept(sock, *newsock, 0);
476 sock_release(*newsock);
480 (*newsock)->ops = sock->ops;
486 static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
487 void *buf, size_t size, int flags)
494 struct msghdr msg = {
496 .msg_iov = (struct iovec *)&iov,
497 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
503 rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
509 static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
516 struct msghdr msg = {
518 .msg_iov = (struct iovec *)&iov,
519 .msg_flags = MSG_WAITALL | MSG_NOSIGNAL
527 rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
532 * ECONNRESET other side closed the connection
533 * ERESTARTSYS (on sock) we got a signal
537 if (rv == -ECONNRESET)
538 dev_info(DEV, "sock was reset by peer\n");
539 else if (rv != -ERESTARTSYS)
540 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
542 } else if (rv == 0) {
543 dev_info(DEV, "sock was shut down by peer\n");
546 /* signal came in, or peer/link went down,
547 * after we read a partial message
549 /* D_ASSERT(signal_pending(current)); */
557 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
563 * On individual connections, the socket buffer size must be set prior to the
564 * listen(2) or connect(2) calls in order to have it take effect.
565 * This is our wrapper to do so.
567 static void drbd_setbufsize(struct socket *sock, unsigned int snd,
570 /* open coded SO_SNDBUF, SO_RCVBUF */
572 sock->sk->sk_sndbuf = snd;
573 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
576 sock->sk->sk_rcvbuf = rcv;
577 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
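/*
 * Editor's note: this open-coded variant is the in-kernel analogue of
 * what userspace would request with setsockopt(2) (illustration only):
 *
 *	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, sizeof(snd));
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof(rcv));
 *
 * Setting the SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK bits in sk_userlocks
 * keeps the kernel's buffer auto-tuning from overriding our sizes.
 */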
581 static struct socket *drbd_try_connect(struct drbd_conf *mdev)
585 struct sockaddr_in6 src_in6;
587 int disconnect_on_error = 1;
589 if (!get_net_conf(mdev))
592 what = "sock_create_kern";
593 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
594 SOCK_STREAM, IPPROTO_TCP, &sock);
600 sock->sk->sk_rcvtimeo =
601 sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
602 drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
603 mdev->net_conf->rcvbuf_size);
605 /* explicitly bind to the configured IP as source IP
606 * for the outgoing connections.
607 * This is needed for multihomed hosts and to be
608 * able to use lo: interfaces for drbd.
609 * Make sure to use 0 as port number, so linux selects
610 * a free one dynamically.
612 memcpy(&src_in6, mdev->net_conf->my_addr,
613 min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
614 if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
615 src_in6.sin6_port = 0;
617 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
619 what = "bind before connect";
620 err = sock->ops->bind(sock,
621 (struct sockaddr *) &src_in6,
622 mdev->net_conf->my_addr_len);
626 /* connect may fail, peer not yet available.
627 * stay C_WF_CONNECTION, don't go Disconnecting! */
628 disconnect_on_error = 0;
630 err = sock->ops->connect(sock,
631 (struct sockaddr *)mdev->net_conf->peer_addr,
632 mdev->net_conf->peer_addr_len, 0);
641 /* timeout, busy, signal pending */
642 case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
643 case EINTR: case ERESTARTSYS:
644 /* peer not (yet) available, network problem */
645 case ECONNREFUSED: case ENETUNREACH:
646 case EHOSTDOWN: case EHOSTUNREACH:
647 disconnect_on_error = 0;
650 dev_err(DEV, "%s failed, err = %d\n", what, err);
652 if (disconnect_on_error)
653 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
659 static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
662 struct socket *s_estab = NULL, *s_listen;
665 if (!get_net_conf(mdev))
668 what = "sock_create_kern";
669 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
670 SOCK_STREAM, IPPROTO_TCP, &s_listen);
676 timeo = mdev->net_conf->try_connect_int * HZ;
677 timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
679 s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
680 s_listen->sk->sk_rcvtimeo = timeo;
681 s_listen->sk->sk_sndtimeo = timeo;
682 drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
683 mdev->net_conf->rcvbuf_size);
685 what = "bind before listen";
686 err = s_listen->ops->bind(s_listen,
687 (struct sockaddr *) mdev->net_conf->my_addr,
688 mdev->net_conf->my_addr_len);
692 err = drbd_accept(mdev, &what, s_listen, &s_estab);
696 sock_release(s_listen);
698 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
699 dev_err(DEV, "%s failed, err = %d\n", what, err);
700 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
708 static int drbd_send_fp(struct drbd_conf *mdev,
709 struct socket *sock, enum drbd_packets cmd)
711 struct p_header80 *h = &mdev->data.sbuf.header.h80;
713 return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
716 static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
718 struct p_header80 *h = &mdev->data.rbuf.header.h80;
721 rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
723 if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
724 return be16_to_cpu(h->command);
730 * drbd_socket_okay() - Free the socket if its connection is not okay
731 * @mdev: DRBD device.
732 * @sock: pointer to the pointer to the socket.
734 static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
742 rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
744 if (rr > 0 || rr == -EAGAIN) {
755 *  1: yes, we have a valid connection
756 *  0: oops, did not work out, please try again
757 * -1: peer talks different language,
758 *     no point in trying again, please go standalone.
759 * -2: we do not have a network config...
761 static int drbd_connect(struct drbd_conf *mdev)
763 struct socket *s, *sock, *msock;
766 D_ASSERT(!mdev->data.socket);
768 if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
771 clear_bit(DISCARD_CONCURRENT, &mdev->flags);
778 /* 3 tries, this should take less than a second! */
779 s = drbd_try_connect(mdev);
782 /* give the other side time to call bind() & listen() */
783 __set_current_state(TASK_INTERRUPTIBLE);
784 schedule_timeout(HZ / 10);
789 drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
793 drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
797 dev_err(DEV, "Logic error in drbd_connect()\n");
798 goto out_release_sockets;
803 __set_current_state(TASK_INTERRUPTIBLE);
804 schedule_timeout(HZ / 10);
805 ok = drbd_socket_okay(mdev, &sock);
806 ok = drbd_socket_okay(mdev, &msock) && ok;
812 s = drbd_wait_for_connect(mdev);
814 try = drbd_recv_fp(mdev, s);
815 drbd_socket_okay(mdev, &sock);
816 drbd_socket_okay(mdev, &msock);
820 dev_warn(DEV, "initial packet S crossed\n");
827 dev_warn(DEV, "initial packet M crossed\n");
831 set_bit(DISCARD_CONCURRENT, &mdev->flags);
834 dev_warn(DEV, "Error receiving initial packet\n");
841 if (mdev->state.conn <= C_DISCONNECTING)
842 goto out_release_sockets;
843 if (signal_pending(current)) {
844 flush_signals(current);
846 if (get_t_state(&mdev->receiver) == Exiting)
847 goto out_release_sockets;
851 ok = drbd_socket_okay(mdev, &sock);
852 ok = drbd_socket_okay(mdev, &msock) && ok;
858 msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
859 sock->sk->sk_reuse = 1; /* SO_REUSEADDR */
861 sock->sk->sk_allocation = GFP_NOIO;
862 msock->sk->sk_allocation = GFP_NOIO;
864 sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
865 msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
868 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
869 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
870 * first set it to the P_HAND_SHAKE timeout,
871 * which we set to 4x the configured ping_timeout. */
872 sock->sk->sk_sndtimeo =
873 sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;
875 msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
876 msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
878 /* we don't want delays.
879 * we use TCP_CORK where appropriate, though */
880 drbd_tcp_nodelay(sock);
881 drbd_tcp_nodelay(msock);
883 mdev->data.socket = sock;
884 mdev->meta.socket = msock;
885 mdev->last_received = jiffies;
887 D_ASSERT(mdev->asender.task == NULL);
889 h = drbd_do_handshake(mdev);
893 if (mdev->cram_hmac_tfm) {
894 /* drbd_request_state(mdev, NS(conn, WFAuth)); */
895 switch (drbd_do_auth(mdev)) {
897 dev_err(DEV, "Authentication of peer failed\n");
900 dev_err(DEV, "Authentication of peer failed, trying again.\n");
905 if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
908 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
909 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
911 atomic_set(&mdev->packet_seq, 0);
914 drbd_thread_start(&mdev->asender);
916 if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
917 drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
921 if (!drbd_send_protocol(mdev))
923 drbd_send_sync_param(mdev, &mdev->sync_conf);
924 drbd_send_sizes(mdev, 0, 0);
925 drbd_send_uuids(mdev);
926 drbd_send_state(mdev);
927 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
928 clear_bit(RESIZE_PENDING, &mdev->flags);
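/*
 * Editor's sketch: how the return legend above drbd_connect() is
 * typically consumed by the receiver main loop (hypothetical snippet,
 * modeled on drbdd_init(); not a verbatim copy):
 */
#if 0	/* illustrative only */
	int h;

	do {
		h = drbd_connect(mdev);
		if (h == 0) {
			/* transient failure: clean up and retry */
			drbd_disconnect(mdev);
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ);
		}
	} while (h == 0);
	if (h > 0)		/* connected, handshake done */
		drbdd(mdev);	/* enter the packet receive loop */
#endif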
940 static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
942 union p_header *h = &mdev->data.rbuf.header;
945 r = drbd_recv(mdev, h, sizeof(*h));
946 if (unlikely(r != sizeof(*h))) {
947 dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
951 if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
952 *cmd = be16_to_cpu(h->h80.command);
953 *packet_size = be16_to_cpu(h->h80.length);
954 } else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
955 *cmd = be16_to_cpu(h->h95.command);
956 *packet_size = be32_to_cpu(h->h95.length);
958 dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
959 be32_to_cpu(h->h80.magic),
960 be16_to_cpu(h->h80.command),
961 be16_to_cpu(h->h80.length));
964 mdev->last_received = jiffies;
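/*
 * Editor's sketch of the two on-wire header layouts decoded above;
 * field order inferred from the h80/h95 accessors (the authoritative
 * definitions live in the DRBD headers):
 */
#if 0	/* illustrative only */
struct example_header80 {	/* magic matches BE_DRBD_MAGIC */
	u32 magic;
	u16 command;
	u16 length;		/* 16 bit payload length */
};
struct example_header95 {	/* magic matches BE_DRBD_MAGIC_BIG */
	u16 magic;
	u16 command;
	u32 length;		/* 32 bit payload length */
};
#endif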
969 static void drbd_flush(struct drbd_conf *mdev)
973 if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
974 rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
975 NULL, BLKDEV_IFL_WAIT);
977 dev_err(DEV, "local disk flush failed with status %d\n", rv);
978 /* would rather check on EOPNOTSUPP, but that is not reliable.
979 * don't try again for ANY return value != 0
980 * if (rv == -EOPNOTSUPP) */
981 drbd_bump_write_ordering(mdev, WO_drain_io);
988 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishing it.
989 * @mdev: DRBD device.
990 * @epoch: Epoch object.
993 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
994 struct drbd_epoch *epoch,
998 struct drbd_epoch *next_epoch;
999 enum finish_epoch rv = FE_STILL_LIVE;
1001 spin_lock(&mdev->epoch_lock);
1005 epoch_size = atomic_read(&epoch->epoch_size);
1007 switch (ev & ~EV_CLEANUP) {
1009 atomic_dec(&epoch->active);
1011 case EV_GOT_BARRIER_NR:
1012 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
1014 case EV_BECAME_LAST:
1019 if (epoch_size != 0 &&
1020 atomic_read(&epoch->active) == 0 &&
1021 test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
1022 if (!(ev & EV_CLEANUP)) {
1023 spin_unlock(&mdev->epoch_lock);
1024 drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
1025 spin_lock(&mdev->epoch_lock);
1029 if (mdev->current_epoch != epoch) {
1030 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1031 list_del(&epoch->list);
1032 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
1036 if (rv == FE_STILL_LIVE)
1040 atomic_set(&epoch->epoch_size, 0);
1041 /* atomic_set(&epoch->active, 0); is already zero */
1042 if (rv == FE_STILL_LIVE)
1044 wake_up(&mdev->ee_wait);
1054 spin_unlock(&mdev->epoch_lock);
1060 * drbd_bump_write_ordering() - Fall back to another write ordering method
1061 * @mdev: DRBD device.
1062 * @wo: Write ordering method to try.
1064 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
1066 enum write_ordering_e pwo;
1067 static char *write_ordering_str[] = {
1069 [WO_drain_io] = "drain",
1070 [WO_bdev_flush] = "flush",
1073 pwo = mdev->write_ordering;
1075 if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
1077 if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
1079 mdev->write_ordering = wo;
1080 if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
1081 dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
1086 * @mdev: DRBD device.
1088 * @rw: flag field, see bio->bi_rw
1090 /* TODO allocate from our own bio_set. */
1091 int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
1092 const unsigned rw, const int fault_type)
1094 struct bio *bios = NULL;
1096 struct page *page = e->pages;
1097 sector_t sector = e->sector;
1098 unsigned ds = e->size;
1099 unsigned n_bios = 0;
1100 unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
1102 /* In most cases, we will only need one bio. But in case the lower
1103 * level restrictions happen to be different at this offset on this
1104 * side than those of the sending peer, we may need to submit the
1105 * request in more than one bio. */
1107 bio = bio_alloc(GFP_NOIO, nr_pages);
1109 dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
1112 /* > e->sector, unless this is the first bio */
1113 bio->bi_sector = sector;
1114 bio->bi_bdev = mdev->ldev->backing_bdev;
1115 /* we special case some flags in the multi-bio case, see below
1118 bio->bi_private = e;
1119 bio->bi_end_io = drbd_endio_sec;
1121 bio->bi_next = bios;
1125 page_chain_for_each(page) {
1126 unsigned len = min_t(unsigned, ds, PAGE_SIZE);
1127 if (!bio_add_page(bio, page, len, 0)) {
1128 /* a single page must always be possible! */
1129 BUG_ON(bio->bi_vcnt == 0);
1136 D_ASSERT(page == NULL);
1139 atomic_set(&e->pending_bios, n_bios);
1142 bios = bios->bi_next;
1143 bio->bi_next = NULL;
1145 /* strip off REQ_UNPLUG unless it is the last bio */
1147 bio->bi_rw &= ~REQ_UNPLUG;
1149 drbd_generic_make_request(mdev, fault_type, bio);
1151 maybe_kick_lo(mdev);
1157 bios = bios->bi_next;
1163 static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1166 struct p_barrier *p = &mdev->data.rbuf.barrier;
1167 struct drbd_epoch *epoch;
1171 if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
1174 mdev->current_epoch->barrier_nr = p->barrier;
1175 rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
1177 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1178 * the activity log, which means it would not be resynced in case the
1179 * R_PRIMARY crashes now.
1180 * Therefore we must send the barrier_ack after the barrier request was
1182 switch (mdev->write_ordering) {
1184 if (rv == FE_RECYCLED)
1187 /* receiver context, in the writeout path of the other node.
1188 * avoid potential distributed deadlock */
1189 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1193 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1198 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1201 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1202 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1207 epoch = mdev->current_epoch;
1208 wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
1210 D_ASSERT(atomic_read(&epoch->active) == 0);
1211 D_ASSERT(epoch->flags == 0);
1215 dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
1220 atomic_set(&epoch->epoch_size, 0);
1221 atomic_set(&epoch->active, 0);
1223 spin_lock(&mdev->epoch_lock);
1224 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1225 list_add(&epoch->list, &mdev->current_epoch->list);
1226 mdev->current_epoch = epoch;
1229 /* The current_epoch got recycled while we allocated this one... */
1232 spin_unlock(&mdev->epoch_lock);
1237 /* used from receive_RSDataReply (recv_resync_read)
1238 * and from receive_Data */
1239 static struct drbd_epoch_entry *
1240 read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
1242 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1243 struct drbd_epoch_entry *e;
1246 void *dig_in = mdev->int_dig_in;
1247 void *dig_vv = mdev->int_dig_vv;
1248 unsigned long *data;
1250 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1251 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1254 rr = drbd_recv(mdev, dig_in, dgs);
1256 dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
1264 ERR_IF(data_size & 0x1ff) return NULL;
1265 ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;
1267 /* even though we trust our peer,
1268 * we sometimes have to double check. */
1269 if (sector + (data_size>>9) > capacity) {
1270 dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
1271 (unsigned long long)capacity,
1272 (unsigned long long)sector, data_size);
1276 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1277 * "criss-cross" setup, that might cause write-out on some other DRBD,
1278 * which in turn might block on the other node at this very place. */
1279 e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
1285 page_chain_for_each(page) {
1286 unsigned len = min_t(int, ds, PAGE_SIZE);
1288 rr = drbd_recv(mdev, data, len);
1289 if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
1290 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1291 data[0] = data[0] ^ (unsigned long)-1;
1295 drbd_free_ee(mdev, e);
1296 dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1304 drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
1305 if (memcmp(dig_in, dig_vv, dgs)) {
1306 dev_err(DEV, "Digest integrity check FAILED.\n");
1307 drbd_bcast_ee(mdev, "digest failed",
1308 dgs, dig_in, dig_vv, e);
1309 drbd_free_ee(mdev, e);
1313 mdev->recv_cnt += data_size>>9;
1317 /* drbd_drain_block() just takes a data block
1318 * out of the socket input buffer, and discards it.
1320 static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1329 page = drbd_pp_alloc(mdev, 1, 1);
1333 rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
1334 if (rr != min_t(int, data_size, PAGE_SIZE)) {
1336 dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1337 rr, min_t(int, data_size, PAGE_SIZE));
1343 drbd_pp_free(mdev, page, 0);
1347 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1348 sector_t sector, int data_size)
1350 struct bio_vec *bvec;
1352 int dgs, rr, i, expect;
1353 void *dig_in = mdev->int_dig_in;
1354 void *dig_vv = mdev->int_dig_vv;
1356 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1357 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1360 rr = drbd_recv(mdev, dig_in, dgs);
1362 dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
1370 /* optimistically update recv_cnt. if receiving fails below,
1371 * we disconnect anyways, and counters will be reset. */
1372 mdev->recv_cnt += data_size>>9;
1374 bio = req->master_bio;
1375 D_ASSERT(sector == bio->bi_sector);
1377 bio_for_each_segment(bvec, bio, i) {
1378 expect = min_t(int, data_size, bvec->bv_len);
1379 rr = drbd_recv(mdev,
1380 kmap(bvec->bv_page)+bvec->bv_offset,
1382 kunmap(bvec->bv_page);
1384 dev_warn(DEV, "short read receiving data reply: "
1385 "read %d expected %d\n",
1393 drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
1394 if (memcmp(dig_in, dig_vv, dgs)) {
1395 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1400 D_ASSERT(data_size == 0);
1404 /* e_end_resync_block() is called via
1405 * drbd_process_done_ee() by asender only */
1406 static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1408 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1409 sector_t sector = e->sector;
1412 D_ASSERT(hlist_unhashed(&e->colision));
1414 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1415 drbd_set_in_sync(mdev, sector, e->size);
1416 ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
1418 /* Record failure to sync */
1419 drbd_rs_failed_io(mdev, sector, e->size);
1421 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1428 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1430 struct drbd_epoch_entry *e;
1432 e = read_in_block(mdev, ID_SYNCER, sector, data_size);
1436 dec_rs_pending(mdev);
1439 /* corresponding dec_unacked() in e_end_resync_block()
1440 * or in _drbd_clear_done_ee, respectively */
1442 e->w.cb = e_end_resync_block;
1444 spin_lock_irq(&mdev->req_lock);
1445 list_add(&e->w.list, &mdev->sync_ee);
1446 spin_unlock_irq(&mdev->req_lock);
1448 atomic_add(data_size >> 9, &mdev->rs_sect_ev);
1449 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
1452 /* drbd_submit_ee currently fails for one reason only:
1453 * not being able to allocate enough bios.
1454 * Is dropping the connection going to help? */
1455 spin_lock_irq(&mdev->req_lock);
1456 list_del(&e->w.list);
1457 spin_unlock_irq(&mdev->req_lock);
1459 drbd_free_ee(mdev, e);
1465 static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1467 struct drbd_request *req;
1470 struct p_data *p = &mdev->data.rbuf.data;
1472 sector = be64_to_cpu(p->sector);
1474 spin_lock_irq(&mdev->req_lock);
1475 req = _ar_id_to_req(mdev, p->block_id, sector);
1476 spin_unlock_irq(&mdev->req_lock);
1477 if (unlikely(!req)) {
1478 dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
1482 /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
1483 * special casing it there for the various failure cases.
1484 * still no race with drbd_fail_pending_reads */
1485 ok = recv_dless_read(mdev, req, sector, data_size);
1488 req_mod(req, data_received);
1489 /* else: nothing. handled from drbd_disconnect...
1490 * I don't think we may complete this just yet
1491 * in case we are "on-disconnect: freeze" */
1496 static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1500 struct p_data *p = &mdev->data.rbuf.data;
1502 sector = be64_to_cpu(p->sector);
1503 D_ASSERT(p->block_id == ID_SYNCER);
1505 if (get_ldev(mdev)) {
1506 /* data is submitted to disk within recv_resync_read.
1507 * corresponding put_ldev done below on error,
1508 * or in drbd_endio_write_sec. */
1509 ok = recv_resync_read(mdev, sector, data_size);
1511 if (__ratelimit(&drbd_ratelimit_state))
1512 dev_err(DEV, "Can not write resync data to local disk.\n");
1514 ok = drbd_drain_block(mdev, data_size);
1516 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
1519 atomic_add(data_size >> 9, &mdev->rs_sect_in);
1524 /* e_end_block() is called via drbd_process_done_ee().
1525 * this means this function only runs in the asender thread
1527 static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1529 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1530 sector_t sector = e->sector;
1533 if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
1534 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1535 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1536 mdev->state.conn <= C_PAUSED_SYNC_T &&
1537 e->flags & EE_MAY_SET_IN_SYNC) ?
1538 P_RS_WRITE_ACK : P_WRITE_ACK;
1539 ok &= drbd_send_ack(mdev, pcmd, e);
1540 if (pcmd == P_RS_WRITE_ACK)
1541 drbd_set_in_sync(mdev, sector, e->size);
1543 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1544 /* we expect it to be marked out of sync anyways...
1545 * maybe assert this? */
1549 /* we delete from the conflict detection hash _after_ we sent out the
1550 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
1551 if (mdev->net_conf->two_primaries) {
1552 spin_lock_irq(&mdev->req_lock);
1553 D_ASSERT(!hlist_unhashed(&e->colision));
1554 hlist_del_init(&e->colision);
1555 spin_unlock_irq(&mdev->req_lock);
1557 D_ASSERT(hlist_unhashed(&e->colision));
1560 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1565 static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1567 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1570 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1571 ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1573 spin_lock_irq(&mdev->req_lock);
1574 D_ASSERT(!hlist_unhashed(&e->colision));
1575 hlist_del_init(&e->colision);
1576 spin_unlock_irq(&mdev->req_lock);
1583 /* Called from receive_Data.
1584 * Synchronize packets on sock with packets on msock.
1586 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1587 * packet traveling on msock, they are still processed in the order they have
1590 * Note: we don't care about Ack packets overtaking P_DATA packets.
1592 * In case packet_seq is larger than mdev->peer_seq number, there are
1593 * outstanding packets on the msock. We wait for them to arrive.
1594 * In case we are the logically next packet, we update mdev->peer_seq
1595 * ourselves. Correctly handles 32bit wrap around.
1597 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1598 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1599 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1600 * 1<<11 == 2048 seconds, aka ages, for the 32bit wrap around...
1602 * returns 0 if we may process the packet,
1603 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1604 static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1610 spin_lock(&mdev->peer_seq_lock);
1612 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1613 if (seq_le(packet_seq, mdev->peer_seq+1))
1615 if (signal_pending(current)) {
1619 p_seq = mdev->peer_seq;
1620 spin_unlock(&mdev->peer_seq_lock);
1621 timeout = schedule_timeout(30*HZ);
1622 spin_lock(&mdev->peer_seq_lock);
1623 if (timeout == 0 && p_seq == mdev->peer_seq) {
1625 dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1629 finish_wait(&mdev->seq_wait, &wait);
1630 if (mdev->peer_seq+1 == packet_seq)
1632 spin_unlock(&mdev->peer_seq_lock);
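/*
 * Editor's sketch: the wrap-safe comparison assumed by the loop above.
 * DRBD defines seq_le() elsewhere; a minimal version handles 32bit
 * wrap around by looking at the sign of the difference:
 */
#if 0	/* illustrative only */
static inline int example_seq_le(u32 a, u32 b)
{
	return (s32)(a - b) <= 0;
}
/* example_seq_le(0xfffffffe, 5) == 1: 0xfffffffe counts as "before" 5
 * across the wrap, while example_seq_le(5, 0xfffffffe) == 0. */
#endif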
1636 static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
1638 if (mdev->agreed_pro_version >= 95)
1639 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1640 (dpf & DP_UNPLUG ? REQ_UNPLUG : 0) |
1641 (dpf & DP_FUA ? REQ_FUA : 0) |
1642 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
1643 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
1645 return dpf & DP_RW_SYNC ? (REQ_SYNC | REQ_UNPLUG) : 0;
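/*
 * Editor's note: the DP_* wire flags are assumed to mirror the
 * like-named REQ_* bio flags. E.g. a peer submitting a flush+FUA
 * write would set DP_FLUSH | DP_FUA in its dp_flags, which the
 * mapping above turns back into REQ_FLUSH | REQ_FUA locally
 * (pre-95 peers only convey a combined "sync" hint).
 */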
1648 /* mirrored write */
1649 static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1652 struct drbd_epoch_entry *e;
1653 struct p_data *p = &mdev->data.rbuf.data;
1657 if (!get_ldev(mdev)) {
1658 if (__ratelimit(&drbd_ratelimit_state))
1659 dev_err(DEV, "Can not write mirrored data block "
1660 "to local disk.\n");
1661 spin_lock(&mdev->peer_seq_lock);
1662 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1664 spin_unlock(&mdev->peer_seq_lock);
1666 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
1667 atomic_inc(&mdev->current_epoch->epoch_size);
1668 return drbd_drain_block(mdev, data_size);
1671 /* get_ldev(mdev) successful.
1672 * Corresponding put_ldev done either below (on various errors),
1673 * or in drbd_endio_write_sec, if we successfully submit the data at
1674 * the end of this function. */
1676 sector = be64_to_cpu(p->sector);
1677 e = read_in_block(mdev, p->block_id, sector, data_size);
1683 e->w.cb = e_end_block;
1685 spin_lock(&mdev->epoch_lock);
1686 e->epoch = mdev->current_epoch;
1687 atomic_inc(&e->epoch->epoch_size);
1688 atomic_inc(&e->epoch->active);
1689 spin_unlock(&mdev->epoch_lock);
1691 dp_flags = be32_to_cpu(p->dp_flags);
1692 rw |= write_flags_to_bio(mdev, dp_flags);
1694 if (dp_flags & DP_MAY_SET_IN_SYNC)
1695 e->flags |= EE_MAY_SET_IN_SYNC;
1697 /* I'm the receiver, I do hold a net_cnt reference. */
1698 if (!mdev->net_conf->two_primaries) {
1699 spin_lock_irq(&mdev->req_lock);
1701 /* don't get the req_lock yet,
1702 * we may sleep in drbd_wait_peer_seq */
1703 const int size = e->size;
1704 const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1706 struct drbd_request *i;
1707 struct hlist_node *n;
1708 struct hlist_head *slot;
1711 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1712 BUG_ON(mdev->ee_hash == NULL);
1713 BUG_ON(mdev->tl_hash == NULL);
1715 /* conflict detection and handling:
1716 * 1. wait on the sequence number,
1717 * in case this data packet overtook ACK packets.
1718 * 2. check our hash tables for conflicting requests.
1719 * we only need to walk the tl_hash, since an ee can not
1720 * have a conflict with another ee: on the submitting
1721 * node, the corresponding req had already been conflicting,
1722 * and a conflicting req is never sent.
1724 * Note: for two_primaries, we are protocol C,
1725 * so there cannot be any request that is DONE
1726 * but still on the transfer log.
1728 * unconditionally add to the ee_hash.
1730 * if no conflicting request is found:
1733 * if any conflicting request is found
1734 * that has not yet been acked,
1735 * AND I have the "discard concurrent writes" flag:
1736 * queue (via done_ee) the P_DISCARD_ACK; OUT.
1738 * if any conflicting request is found:
1739 * block the receiver, waiting on misc_wait
1740 * until no more conflicting requests are there,
1741 * or we get interrupted (disconnect).
1743 * we do not just write after local io completion of those
1744 * requests, but only after req is done completely, i.e.
1745 * we wait for the P_DISCARD_ACK to arrive!
1747 * then proceed normally, i.e. submit.
1749 if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1750 goto out_interrupted;
1752 spin_lock_irq(&mdev->req_lock);
1754 hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));
1756 #define OVERLAPS overlaps(i->sector, i->size, sector, size)
1757 slot = tl_hash_slot(mdev, sector);
1760 int have_unacked = 0;
1761 int have_conflict = 0;
1762 prepare_to_wait(&mdev->misc_wait, &wait,
1763 TASK_INTERRUPTIBLE);
1764 hlist_for_each_entry(i, n, slot, colision) {
1766 /* only ALERT on first iteration,
1767 * we may be woken up early... */
1769 dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1770 " new: %llus +%u; pending: %llus +%u\n",
1771 current->comm, current->pid,
1772 (unsigned long long)sector, size,
1773 (unsigned long long)i->sector, i->size);
1774 if (i->rq_state & RQ_NET_PENDING)
1783 /* Discard Ack only for the _first_ iteration */
1784 if (first && discard && have_unacked) {
1785 dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1786 (unsigned long long)sector);
1788 e->w.cb = e_send_discard_ack;
1789 list_add_tail(&e->w.list, &mdev->done_ee);
1791 spin_unlock_irq(&mdev->req_lock);
1793 /* we could probably send that P_DISCARD_ACK ourselves,
1794 * but I don't like the receiver using the msock */
1798 finish_wait(&mdev->misc_wait, &wait);
1802 if (signal_pending(current)) {
1803 hlist_del_init(&e->colision);
1805 spin_unlock_irq(&mdev->req_lock);
1807 finish_wait(&mdev->misc_wait, &wait);
1808 goto out_interrupted;
1811 spin_unlock_irq(&mdev->req_lock);
1814 dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1815 "sec=%llus\n", (unsigned long long)sector);
1816 } else if (discard) {
1817 /* we had none on the first iteration.
1818 * there must be none now. */
1819 D_ASSERT(have_unacked == 0);
1822 spin_lock_irq(&mdev->req_lock);
1824 finish_wait(&mdev->misc_wait, &wait);
1827 list_add(&e->w.list, &mdev->active_ee);
1828 spin_unlock_irq(&mdev->req_lock);
1830 switch (mdev->net_conf->wire_protocol) {
1833 /* corresponding dec_unacked() in e_end_block()
1834 * or in _drbd_clear_done_ee, respectively */
1837 /* I really don't like it that the receiver thread
1838 * sends on the msock, but anyways */
1839 drbd_send_ack(mdev, P_RECV_ACK, e);
1846 if (mdev->state.pdsk < D_INCONSISTENT) {
1847 /* In case we have the only disk of the cluster, */
1848 drbd_set_out_of_sync(mdev, e->sector, e->size);
1849 e->flags |= EE_CALL_AL_COMPLETE_IO;
1850 e->flags &= ~EE_MAY_SET_IN_SYNC;
1851 drbd_al_begin_io(mdev, e->sector);
1854 if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
1857 /* drbd_submit_ee currently fails for one reason only:
1858 * not being able to allocate enough bios.
1859 * Is dropping the connection going to help? */
1860 spin_lock_irq(&mdev->req_lock);
1861 list_del(&e->w.list);
1862 hlist_del_init(&e->colision);
1863 spin_unlock_irq(&mdev->req_lock);
1864 if (e->flags & EE_CALL_AL_COMPLETE_IO)
1865 drbd_al_complete_io(mdev, e->sector);
1868 /* yes, the epoch_size now is imbalanced.
1869 * but we drop the connection anyways, so we don't have a chance to
1870 * receive a barrier... atomic_inc(&mdev->epoch_size); */
1872 drbd_free_ee(mdev, e);
1876 /* We may throttle resync, if the lower device seems to be busy,
1877 * and current sync rate is above c_min_rate.
1879 * To decide whether or not the lower device is busy, we use a scheme similar
1880 * to MD RAID is_mddev_idle(): if the partition stats reveal a "significant"
1881 * amount (more than 64 sectors) of activity we cannot account for with our own resync
1882 * activity, it obviously is "busy".
1884 * The sync rate estimated here uses only the most recent two step marks,
1885 * to have a short time average so we can react faster.
1887 int drbd_rs_should_slow_down(struct drbd_conf *mdev)
1889 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
1890 unsigned long db, dt, dbdt;
1894 /* feature disabled? */
1895 if (mdev->sync_conf.c_min_rate == 0)
1898 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
1899 (int)part_stat_read(&disk->part0, sectors[1]) -
1900 atomic_read(&mdev->rs_sect_ev);
1901 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
1902 unsigned long rs_left;
1905 mdev->rs_last_events = curr_events;
1907 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
1909 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-2) % DRBD_SYNC_MARKS;
1910 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
1912 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
1915 db = mdev->rs_mark_left[i] - rs_left;
1916 dbdt = Bit2KB(db/dt);
1918 if (dbdt > mdev->sync_conf.c_min_rate)
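/*
 * Editor's worked example for the decision above (hypothetical
 * numbers, assuming the usual 4KiB bitmap granularity): if the two
 * most recent sync marks are dt = 6 seconds apart and rs_mark_left
 * dropped by db = 6144 bitmap bits in that time, then
 * dbdt = Bit2KB(6144 / 6) = 1024 * 4 = 4096 KB/s. With
 * c_min_rate = 4000 KB/s this exceeds the floor, so we throttle,
 * provided the disk also showed significant foreign activity.
 */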
1925 static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
1928 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1929 struct drbd_epoch_entry *e;
1930 struct digest_info *di = NULL;
1932 unsigned int fault_type;
1933 struct p_block_req *p = &mdev->data.rbuf.block_req;
1935 sector = be64_to_cpu(p->sector);
1936 size = be32_to_cpu(p->blksize);
1938 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
1939 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1940 (unsigned long long)sector, size);
1943 if (sector + (size>>9) > capacity) {
1944 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1945 (unsigned long long)sector, size);
1949 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
1952 case P_DATA_REQUEST:
1953 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
1955 case P_RS_DATA_REQUEST:
1956 case P_CSUM_RS_REQUEST:
1958 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
1962 dec_rs_pending(mdev);
1963 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
1966 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
1969 if (verb && __ratelimit(&drbd_ratelimit_state))
1970 dev_err(DEV, "Can not satisfy peer's read request, "
1971 "no local data.\n");
1973 /* drain the payload, if any */
1974 return drbd_drain_block(mdev, digest_size);
1977 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1978 * "criss-cross" setup, that might cause write-out on some other DRBD,
1979 * which in turn might block on the other node at this very place. */
1980 e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
1987 case P_DATA_REQUEST:
1988 e->w.cb = w_e_end_data_req;
1989 fault_type = DRBD_FAULT_DT_RD;
1990 /* application IO, don't drbd_rs_begin_io */
1993 case P_RS_DATA_REQUEST:
1994 e->w.cb = w_e_end_rsdata_req;
1995 fault_type = DRBD_FAULT_RS_RD;
1999 case P_CSUM_RS_REQUEST:
2000 fault_type = DRBD_FAULT_RS_RD;
2001 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2005 di->digest_size = digest_size;
2006 di->digest = (((char *)di)+sizeof(struct digest_info));
2009 e->flags |= EE_HAS_DIGEST;
2011 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2014 if (cmd == P_CSUM_RS_REQUEST) {
2015 D_ASSERT(mdev->agreed_pro_version >= 89);
2016 e->w.cb = w_e_end_csum_rs_req;
2017 } else if (cmd == P_OV_REPLY) {
2018 e->w.cb = w_e_end_ov_reply;
2019 dec_rs_pending(mdev);
2020 /* drbd_rs_begin_io done when we sent this request,
2021 * but accounting still needs to be done. */
2022 goto submit_for_resync;
2027 if (mdev->ov_start_sector == ~(sector_t)0 &&
2028 mdev->agreed_pro_version >= 90) {
2029 mdev->ov_start_sector = sector;
2030 mdev->ov_position = sector;
2031 mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector);
2032 dev_info(DEV, "Online Verify start sector: %llu\n",
2033 (unsigned long long)sector);
2035 e->w.cb = w_e_end_ov_req;
2036 fault_type = DRBD_FAULT_RS_RD;
2040 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2042 fault_type = DRBD_FAULT_MAX;
2046 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2047 * wrt the receiver, but it is not as straightforward as it may seem.
2048 * Various places in the resync start and stop logic assume resync
2049 * requests are processed in order, requeuing this on the worker thread
2050 * introduces a bunch of new code for synchronization between threads.
2052 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2053 * "forever", throttling after drbd_rs_begin_io will lock that extent
2054 * for application writes for the same time. For now, just throttle
2055 * here, where the rest of the code expects the receiver to sleep for
2059 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2060 * this defers syncer requests for some time, before letting at least
2061 * one request through. The resync controller on the receiving side
2062 * will adapt to the incoming rate accordingly.
2064 * We cannot throttle here if remote is Primary/SyncTarget:
2065 * we would also throttle its application reads.
2066 * In that case, throttling is done on the SyncTarget only.
2068 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev))
2070 if (drbd_rs_begin_io(mdev, e->sector))
2074 atomic_add(size >> 9, &mdev->rs_sect_ev);
2078 spin_lock_irq(&mdev->req_lock);
2079 list_add_tail(&e->w.list, &mdev->read_ee);
2080 spin_unlock_irq(&mdev->req_lock);
2082 if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
2085 /* drbd_submit_ee currently fails for one reason only:
2086 * not being able to allocate enough bios.
2087 * Is dropping the connection going to help? */
2088 spin_lock_irq(&mdev->req_lock);
2089 list_del(&e->w.list);
2090 spin_unlock_irq(&mdev->req_lock);
2091 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2095 drbd_free_ee(mdev, e);
2099 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2101 int self, peer, rv = -100;
2102 unsigned long ch_self, ch_peer;
2104 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2105 peer = mdev->p_uuid[UI_BITMAP] & 1;
2107 ch_peer = mdev->p_uuid[UI_SIZE];
2108 ch_self = mdev->comm_bm_set;
2110 switch (mdev->net_conf->after_sb_0p) {
2112 case ASB_DISCARD_SECONDARY:
2113 case ASB_CALL_HELPER:
2114 dev_err(DEV, "Configuration error.\n");
2116 case ASB_DISCONNECT:
2118 case ASB_DISCARD_YOUNGER_PRI:
2119 if (self == 0 && peer == 1) {
2123 if (self == 1 && peer == 0) {
2127 /* Else fall through to one of the other strategies... */
2128 case ASB_DISCARD_OLDER_PRI:
2129 if (self == 0 && peer == 1) {
2133 if (self == 1 && peer == 0) {
2137 /* Else fall through to one of the other strategies... */
2138 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2139 "Using discard-least-changes instead\n");
2140 case ASB_DISCARD_ZERO_CHG:
2141 if (ch_peer == 0 && ch_self == 0) {
2142 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2146 if (ch_peer == 0) { rv = 1; break; }
2147 if (ch_self == 0) { rv = -1; break; }
2149 if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2151 case ASB_DISCARD_LEAST_CHG:
2152 if (ch_self < ch_peer)
2154 else if (ch_self > ch_peer)
2156 else /* ( ch_self == ch_peer ) */
2157 /* Well, then use something else. */
2158 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2161 case ASB_DISCARD_LOCAL:
2164 case ASB_DISCARD_REMOTE:
2171 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2173 int self, peer, hg, rv = -100;
2175 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2176 peer = mdev->p_uuid[UI_BITMAP] & 1;
2178 switch (mdev->net_conf->after_sb_1p) {
2179 case ASB_DISCARD_YOUNGER_PRI:
2180 case ASB_DISCARD_OLDER_PRI:
2181 case ASB_DISCARD_LEAST_CHG:
2182 case ASB_DISCARD_LOCAL:
2183 case ASB_DISCARD_REMOTE:
2184 dev_err(DEV, "Configuration error.\n");
2186 case ASB_DISCONNECT:
2189 hg = drbd_asb_recover_0p(mdev);
2190 if (hg == -1 && mdev->state.role == R_SECONDARY)
2192 if (hg == 1 && mdev->state.role == R_PRIMARY)
2196 rv = drbd_asb_recover_0p(mdev);
2198 case ASB_DISCARD_SECONDARY:
2199 return mdev->state.role == R_PRIMARY ? 1 : -1;
2200 case ASB_CALL_HELPER:
2201 hg = drbd_asb_recover_0p(mdev);
2202 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2203 self = drbd_set_role(mdev, R_SECONDARY, 0);
2204 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2205 * we might be here in C_WF_REPORT_PARAMS which is transient.
2206 * we do not need to wait for the after state change work either. */
2207 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2208 if (self != SS_SUCCESS) {
2209 drbd_khelper(mdev, "pri-lost-after-sb");
2211 dev_warn(DEV, "Successfully gave up primary role.\n");
2221 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2223 int self, peer, hg, rv = -100;
2225 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2226 peer = mdev->p_uuid[UI_BITMAP] & 1;
2228 switch (mdev->net_conf->after_sb_2p) {
2229 case ASB_DISCARD_YOUNGER_PRI:
2230 case ASB_DISCARD_OLDER_PRI:
2231 case ASB_DISCARD_LEAST_CHG:
2232 case ASB_DISCARD_LOCAL:
2233 case ASB_DISCARD_REMOTE:
2235 case ASB_DISCARD_SECONDARY:
2236 dev_err(DEV, "Configuration error.\n");
2239 rv = drbd_asb_recover_0p(mdev);
2241 case ASB_DISCONNECT:
2243 case ASB_CALL_HELPER:
2244 hg = drbd_asb_recover_0p(mdev);
2246 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2247 * we might be here in C_WF_REPORT_PARAMS which is transient.
2248 * we do not need to wait for the after state change work either. */
2249 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2250 if (self != SS_SUCCESS) {
2251 drbd_khelper(mdev, "pri-lost-after-sb");
2253 dev_warn(DEV, "Successfully gave up primary role.\n");
2263 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2264 u64 bits, u64 flags)
2267 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2270 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2272 (unsigned long long)uuid[UI_CURRENT],
2273 (unsigned long long)uuid[UI_BITMAP],
2274 (unsigned long long)uuid[UI_HISTORY_START],
2275 (unsigned long long)uuid[UI_HISTORY_END],
2276 (unsigned long long)bits,
2277 (unsigned long long)flags);
2281 100    after split brain try auto recover
2282 2      C_SYNC_SOURCE set BitMap
2283 1      C_SYNC_SOURCE use BitMap
2285 -1     C_SYNC_TARGET use BitMap
2286 -2     C_SYNC_TARGET set BitMap
2287 -100   after split brain, disconnect
2288 -1000  unrelated data
2290 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2295 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2296 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2299 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2303 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2304 peer != UUID_JUST_CREATED)
2308 if (self != UUID_JUST_CREATED &&
2309 (peer == UUID_JUST_CREATED || peer == (u64)0))
2313 int rct, dc; /* roles at crash time */
2315 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2317 if (mdev->agreed_pro_version < 91)
2320 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2321 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2322 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2323 drbd_uuid_set_bm(mdev, 0UL);
2325 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2326 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2329 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2336 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2338 if (mdev->agreed_pro_version < 91)
2341 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2342 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2343 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2345 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2346 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2347 mdev->p_uuid[UI_BITMAP] = 0UL;
2349 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2352 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2359 /* Common power [off|failure] */
2360 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2361 (mdev->p_uuid[UI_FLAGS] & 2);
2362 /* lowest bit is set when we were primary,
2363 * next bit (weight 2) is set when peer was primary */
2367 case 0: /* !self_pri && !peer_pri */ return 0;
2368 case 1: /* self_pri && !peer_pri */ return 1;
2369 case 2: /* !self_pri && peer_pri */ return -1;
2370 case 3: /* self_pri && peer_pri */
2371 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2377 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2382 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2384 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2385 peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
2387 /* The last P_SYNC_UUID did not get through. Undo the modifications
2388    the peer, as sync source, made to its UUIDs at the last resync start. */
2390 if (mdev->agreed_pro_version < 91)
2393 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2394 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2400 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2401 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2402 peer = mdev->p_uuid[i] & ~((u64)1);
2408 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2409 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2414 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2416 self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
2417 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2419 /* The last P_SYNC_UUID did not get through. Undo the modifications
2420    we, as sync source, made to our UUIDs at the last start of resync. */
2422 if (mdev->agreed_pro_version < 91)
2425 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2426 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2428 dev_info(DEV, "Undid last start of resync:\n");
2430 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2431 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2439 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2440 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2441 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2447 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2448 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2449 if (self == peer && self != ((u64)0))
2453 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2454 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2455 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2456 peer = mdev->p_uuid[j] & ~((u64)1);
2465 /* drbd_sync_handshake() returns the new conn state on success, or
2466    C_MASK (-1) on failure.
2468 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2469 enum drbd_disk_state peer_disk) __must_hold(local)
2472 enum drbd_conns rv = C_MASK;
2473 enum drbd_disk_state mydisk;
2475 mydisk = mdev->state.disk;
2476 if (mydisk == D_NEGOTIATING)
2477 mydisk = mdev->new_state_tmp.disk;
2479 dev_info(DEV, "drbd_sync_handshake:\n");
2480 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2481 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2482 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2484 hg = drbd_uuid_compare(mdev, &rule_nr);
2486 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2489 dev_alert(DEV, "Unrelated data, aborting!\n");
2493 dev_alert(DEV, "To resolve this both sides have to support at least protocol\n");
2497 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2498 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2499 int f = (hg == -100) || abs(hg) == 2;
2500 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2503 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2504 hg > 0 ? "source" : "target");
2508 drbd_khelper(mdev, "initial-split-brain");
2510 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2511 int pcount = (mdev->state.role == R_PRIMARY)
2512 + (peer_role == R_PRIMARY);
2513 int forced = (hg == -100);
2517 hg = drbd_asb_recover_0p(mdev);
2520 hg = drbd_asb_recover_1p(mdev);
2523 hg = drbd_asb_recover_2p(mdev);
2526 if (abs(hg) < 100) {
2527 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2528 "automatically solved. Sync from %s node\n",
2529 pcount, (hg < 0) ? "peer" : "this");
2531 dev_warn(DEV, "Doing a full sync, since"
2532 " UUIDs where ambiguous.\n");
2539 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2541 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2545 dev_warn(DEV, "Split-Brain detected, manually solved. "
2546 "Sync from %s node\n",
2547 (hg < 0) ? "peer" : "this");
2551 /* FIXME this log message is not correct if we end up here
2552 * after an attempted attach on a diskless node.
2553 * We just refuse to attach -- well, we drop the "connection"
2554 * to that disk, in a way... */
2555 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
2556 drbd_khelper(mdev, "split-brain");
2560 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2561 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2565 if (hg < 0 && /* by intention we do not use mydisk here. */
2566 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2567 switch (mdev->net_conf->rr_conflict) {
2568 case ASB_CALL_HELPER:
2569 drbd_khelper(mdev, "pri-lost");
2571 case ASB_DISCONNECT:
2572 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2575 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2580 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2582 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2584 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2585 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2586 abs(hg) >= 2 ? "full" : "bit-map based");
2591 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2592 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
2596 if (hg > 0) { /* become sync source. */
2598 } else if (hg < 0) { /* become sync target */
2602 if (drbd_bm_total_weight(mdev)) {
2603 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2604 drbd_bm_total_weight(mdev));
2611 /* returns 1 if invalid */
2612 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2614 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2615 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2616 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2619 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2620 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2621 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2624 /* everything else is valid if they are equal on both sides. */
2628 /* everything else is invalid. */
2632 static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2634 struct p_protocol *p = &mdev->data.rbuf.protocol;
2635 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2636 int p_want_lose, p_two_primaries, cf;
2637 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2639 p_proto = be32_to_cpu(p->protocol);
2640 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2641 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2642 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
2643 p_two_primaries = be32_to_cpu(p->two_primaries);
2644 cf = be32_to_cpu(p->conn_flags);
2645 p_want_lose = cf & CF_WANT_LOSE;
2647 clear_bit(CONN_DRY_RUN, &mdev->flags);
2649 if (cf & CF_DRY_RUN)
2650 set_bit(CONN_DRY_RUN, &mdev->flags);
2652 if (p_proto != mdev->net_conf->wire_protocol) {
2653 dev_err(DEV, "incompatible communication protocols\n");
2657 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2658 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2662 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2663 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2667 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2668 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2672 if (p_want_lose && mdev->net_conf->want_lose) {
2673 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2677 if (p_two_primaries != mdev->net_conf->two_primaries) {
2678 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2682 if (mdev->agreed_pro_version >= 87) {
2683 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2685 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2688 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2689 if (strcmp(p_integrity_alg, my_alg)) {
2690 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2693 dev_info(DEV, "data-integrity-alg: %s\n",
2694 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2700 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2705 * input: alg name, feature name
2706 * return: NULL (alg name was "")
2707 * ERR_PTR(error) if something goes wrong
2708 * or the crypto hash ptr, if it worked out ok. */
2709 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2710 const char *alg, const char *name)
2712 struct crypto_hash *tfm;
2717 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2719 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2720 alg, name, PTR_ERR(tfm));
2723 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2724 crypto_free_hash(tfm);
2725 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2726 return ERR_PTR(-EINVAL);
2731 static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
2734 struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
2735 unsigned int header_size, data_size, exp_max_sz;
2736 struct crypto_hash *verify_tfm = NULL;
2737 struct crypto_hash *csums_tfm = NULL;
2738 const int apv = mdev->agreed_pro_version;
2739 int *rs_plan_s = NULL;
2742 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2743 : apv == 88 ? sizeof(struct p_rs_param)
2745 : apv <= 94 ? sizeof(struct p_rs_param_89)
2746 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
2748 if (packet_size > exp_max_sz) {
2749 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2750 packet_size, exp_max_sz);
2755 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2756 data_size = packet_size - header_size;
2757 } else if (apv <= 94) {
2758 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2759 data_size = packet_size - header_size;
2760 D_ASSERT(data_size == 0);
2762 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2763 data_size = packet_size - header_size;
2764 D_ASSERT(data_size == 0);
2767 /* initialize verify_alg and csums_alg */
2768 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2770 if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
2773 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2777 if (data_size > SHARED_SECRET_MAX) {
2778 dev_err(DEV, "verify-alg too long, "
2779 "peer wants %u, accepting only %u byte\n",
2780 data_size, SHARED_SECRET_MAX);
2784 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2787 /* we expect NUL terminated string */
2788 /* but just in case someone tries to be evil */
2789 D_ASSERT(p->verify_alg[data_size-1] == 0);
2790 p->verify_alg[data_size-1] = 0;
2792 } else /* apv >= 89 */ {
2793 /* we still expect NUL terminated strings */
2794 /* but just in case someone tries to be evil */
2795 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2796 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2797 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2798 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2801 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2802 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2803 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2804 mdev->sync_conf.verify_alg, p->verify_alg);
2807 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2808 p->verify_alg, "verify-alg");
2809 if (IS_ERR(verify_tfm)) {
2815 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2816 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2817 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2818 mdev->sync_conf.csums_alg, p->csums_alg);
2821 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2822 p->csums_alg, "csums-alg");
2823 if (IS_ERR(csums_tfm)) {
2830 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2831 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2832 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2833 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2834 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
2836 fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
2837 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2838 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
2840 dev_err(DEV, "kmalloc of fifo_buffer failed");
2846 spin_lock(&mdev->peer_seq_lock);
2847 /* lock against drbd_nl_syncer_conf() */
2849 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2850 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2851 crypto_free_hash(mdev->verify_tfm);
2852 mdev->verify_tfm = verify_tfm;
2853 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2856 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2857 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2858 crypto_free_hash(mdev->csums_tfm);
2859 mdev->csums_tfm = csums_tfm;
2860 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2862 if (fifo_size != mdev->rs_plan_s.size) {
2863 kfree(mdev->rs_plan_s.values);
2864 mdev->rs_plan_s.values = rs_plan_s;
2865 mdev->rs_plan_s.size = fifo_size;
2866 mdev->rs_planed = 0;
2868 spin_unlock(&mdev->peer_seq_lock);
2873 /* just for completeness: actually not needed,
2874 * as this is not reached if csums_tfm was ok. */
2875 crypto_free_hash(csums_tfm);
2876 /* but free the verify_tfm again, if csums_tfm did not work out */
2877 crypto_free_hash(verify_tfm);
2878 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2882 static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
2884 /* sorry, we currently have no working implementation
2885 * of distributed TCQ */
2888 /* warn if the arguments differ by more than 12.5% */
2889 static void warn_if_differ_considerably(struct drbd_conf *mdev,
2890 const char *s, sector_t a, sector_t b)
2893 if (a == 0 || b == 0)
2895 d = (a > b) ? (a - b) : (b - a);
2896 if (d > (a>>3) || d > (b>>3))
2897 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2898 (unsigned long long)a, (unsigned long long)b);
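
/* Worked example for the 12.5% rule above, for illustration only:
 * a = 1000, b = 1100: d = 100, not above a>>3 == 125  -> no warning;
 * a = 1000, b = 1200: d = 200, above 125              -> warning. */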
2901 static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2903 struct p_sizes *p = &mdev->data.rbuf.sizes;
2904 enum determine_dev_size dd = unchanged;
2905 unsigned int max_seg_s;
2906 sector_t p_size, p_usize, my_usize;
2907 int ldsc = 0; /* local disk size changed */
2908 enum dds_flags ddsf;
2910 p_size = be64_to_cpu(p->d_size);
2911 p_usize = be64_to_cpu(p->u_size);
2913 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2914 dev_err(DEV, "some backing storage is needed\n");
2915 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2919 /* just store the peer's disk size for now.
2920 * we still need to figure out whether we accept that. */
2921 mdev->p_size = p_size;
2923 #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r))))
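/* For illustration: min_not_zero(0, 5) == 5, min_not_zero(3, 0) == 3,
 * min_not_zero(3, 5) == 3. A size of 0 means "not configured" here,
 * so it must not win the minimum. */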
2924 if (get_ldev(mdev)) {
2925 warn_if_differ_considerably(mdev, "lower level device sizes",
2926 p_size, drbd_get_max_capacity(mdev->ldev));
2927 warn_if_differ_considerably(mdev, "user requested size",
2928 p_usize, mdev->ldev->dc.disk_size);
2930 /* if this is the first connect, or an otherwise expected
2931 * param exchange, choose the minimum */
2932 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2933 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2936 my_usize = mdev->ldev->dc.disk_size;
2938 if (mdev->ldev->dc.disk_size != p_usize) {
2939 mdev->ldev->dc.disk_size = p_usize;
2940 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
2941 (unsigned long)mdev->ldev->dc.disk_size);
2944 /* Never shrink a device with usable data during connect.
2945 But allow online shrinking if we are connected. */
2946 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
2947 drbd_get_capacity(mdev->this_bdev) &&
2948 mdev->state.disk >= D_OUTDATED &&
2949 mdev->state.conn < C_CONNECTED) {
2950 dev_err(DEV, "The peer's disk size is too small!\n");
2951 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2952 mdev->ldev->dc.disk_size = my_usize;
2960 ddsf = be16_to_cpu(p->dds_flags);
2961 if (get_ldev(mdev)) {
2962 dd = drbd_determin_dev_size(mdev, ddsf);
2964 if (dd == dev_size_error)
2968 /* I am diskless, need to accept the peer's size. */
2969 drbd_set_my_capacity(mdev, p_size);
2972 if (get_ldev(mdev)) {
2973 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
2974 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
2978 if (mdev->agreed_pro_version < 94)
2979 max_seg_s = be32_to_cpu(p->max_segment_size);
2980 else if (mdev->agreed_pro_version == 94)
2981 max_seg_s = DRBD_MAX_SIZE_H80_PACKET;
2982 else /* drbd 8.3.8 onwards */
2983 max_seg_s = DRBD_MAX_SEGMENT_SIZE;
2985 if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
2986 drbd_setup_queue_param(mdev, max_seg_s);
2988 drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
2992 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
2993 if (be64_to_cpu(p->c_size) !=
2994 drbd_get_capacity(mdev->this_bdev) || ldsc) {
2995 /* we have different sizes, probably peer
2996 * needs to know my new size... */
2997 drbd_send_sizes(mdev, 0, ddsf);
2999 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3000 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3001 if (mdev->state.pdsk >= D_INCONSISTENT &&
3002 mdev->state.disk >= D_INCONSISTENT) {
3003 if (ddsf & DDSF_NO_RESYNC)
3004 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3006 resync_after_online_grow(mdev);
3008 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3015 static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3017 struct p_uuids *p = &mdev->data.rbuf.uuids;
3021 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3023 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3024 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3026 kfree(mdev->p_uuid);
3027 mdev->p_uuid = p_uuid;
3029 if (mdev->state.conn < C_CONNECTED &&
3030 mdev->state.disk < D_INCONSISTENT &&
3031 mdev->state.role == R_PRIMARY &&
3032 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3033 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3034 (unsigned long long)mdev->ed_uuid);
3035 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3039 if (get_ldev(mdev)) {
3040 int skip_initial_sync =
3041 mdev->state.conn == C_CONNECTED &&
3042 mdev->agreed_pro_version >= 90 &&
3043 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3044 (p_uuid[UI_FLAGS] & 8);
3045 if (skip_initial_sync) {
3046 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3047 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3048 "clear_n_write from receive_uuids");
3049 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3050 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3051 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3056 } else if (mdev->state.disk < D_INCONSISTENT &&
3057 mdev->state.role == R_PRIMARY) {
3058 /* I am a diskless primary, the peer just created a new current UUID
3060 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3063 /* Before we test for the disk state, we should wait until a possibly
3064    ongoing cluster wide state change has finished. That is important if
3065    we are primary and are detaching from our disk. We need to see the
3066    new disk state... */
3067 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3068 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3069 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3075 * convert_state() - Converts the peer's view of the cluster state to our point of view
3076 * @ps: The state as seen by the peer.
3078 static union drbd_state convert_state(union drbd_state ps)
3080 union drbd_state ms;
3082 static enum drbd_conns c_tab[] = {
3083 [C_CONNECTED] = C_CONNECTED,
3085 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3086 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3087 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3088 [C_VERIFY_S] = C_VERIFY_T,
3094 ms.conn = c_tab[ps.conn];
3099 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3104 static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3106 struct p_req_state *p = &mdev->data.rbuf.req_state;
3107 union drbd_state mask, val;
3110 mask.i = be32_to_cpu(p->mask);
3111 val.i = be32_to_cpu(p->val);
3113 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3114 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3115 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3119 mask = convert_state(mask);
3120 val = convert_state(val);
3122 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3124 drbd_send_sr_reply(mdev, rv);
3130 static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3132 struct p_state *p = &mdev->data.rbuf.state;
3133 union drbd_state os, ns, peer_state;
3134 enum drbd_disk_state real_peer_disk;
3135 enum chg_state_flags cs_flags;
3138 peer_state.i = be32_to_cpu(p->state);
3140 real_peer_disk = peer_state.disk;
3141 if (peer_state.disk == D_NEGOTIATING) {
3142 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3143 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3146 spin_lock_irq(&mdev->req_lock);
3148 os = ns = mdev->state;
3149 spin_unlock_irq(&mdev->req_lock);
3151 /* peer says his disk is uptodate, while we think it is inconsistent,
3152 * and this happens while we think we have a sync going on. */
3153 if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
3154 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3155 /* If we are (becoming) SyncSource, but peer is still in sync
3156 * preparation, ignore its uptodate-ness to avoid flapping, it
3157 * will change to inconsistent once the peer reaches active
3159 * It may have changed syncer-paused flags, however, so we
3160 * cannot ignore this completely. */
3161 if (peer_state.conn > C_CONNECTED &&
3162 peer_state.conn < C_SYNC_SOURCE)
3163 real_peer_disk = D_INCONSISTENT;
3165 /* if peer_state changes to connected at the same time,
3166 * it explicitly notifies us that it finished resync.
3167 * Maybe we should finish it up, too? */
3168 else if (os.conn >= C_SYNC_SOURCE &&
3169 peer_state.conn == C_CONNECTED) {
3170 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3171 drbd_resync_finished(mdev);
3176 /* peer says his disk is inconsistent, while we think it is uptodate,
3177 * and this happens while the peer still thinks we have a sync going on,
3178 * but we think we are already done with the sync.
3179 * We ignore this to avoid flapping pdsk.
3180 * This should not happen, if the peer is a recent version of drbd. */
3181 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3182 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3183 real_peer_disk = D_UP_TO_DATE;
3185 if (ns.conn == C_WF_REPORT_PARAMS)
3186 ns.conn = C_CONNECTED;
3188 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3189 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3190 int cr; /* consider resync */
3192 /* if we established a new connection */
3193 cr = (os.conn < C_CONNECTED);
3194 /* if we had an established connection
3195 * and one of the nodes newly attaches a disk */
3196 cr |= (os.conn == C_CONNECTED &&
3197 (peer_state.disk == D_NEGOTIATING ||
3198 os.disk == D_NEGOTIATING));
3199 /* if we have both been inconsistent, and the peer has been
3200 * forced to be UpToDate with --overwrite-data */
3201 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3202 /* if we had been plain connected, and the admin requested to
3203 * start a sync by "invalidate" or "invalidate-remote" */
3204 cr |= (os.conn == C_CONNECTED &&
3205 (peer_state.conn >= C_STARTING_SYNC_S &&
3206 peer_state.conn <= C_WF_BITMAP_T));
3209 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3212 if (ns.conn == C_MASK) {
3213 ns.conn = C_CONNECTED;
3214 if (mdev->state.disk == D_NEGOTIATING) {
3215 drbd_force_state(mdev, NS(disk, D_FAILED));
3216 } else if (peer_state.disk == D_NEGOTIATING) {
3217 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3218 peer_state.disk = D_DISKLESS;
3219 real_peer_disk = D_DISKLESS;
3221 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
3223 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3224 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3230 spin_lock_irq(&mdev->req_lock);
3231 if (mdev->state.i != os.i)
3233 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3234 ns.peer = peer_state.role;
3235 ns.pdsk = real_peer_disk;
3236 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3237 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3238 ns.disk = mdev->new_state_tmp.disk;
3239 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3240 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3241 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3242 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3243    for temporary network outages! */
3244 spin_unlock_irq(&mdev->req_lock);
3245 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3247 drbd_uuid_new_current(mdev);
3248 clear_bit(NEW_CUR_UUID, &mdev->flags);
3249 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
3252 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3254 spin_unlock_irq(&mdev->req_lock);
3256 if (rv < SS_SUCCESS) {
3257 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3261 if (os.conn > C_WF_REPORT_PARAMS) {
3262 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3263 peer_state.disk != D_NEGOTIATING) {
3264 /* we want resync, peer has not yet decided to sync... */
3265 /* Nowadays only used when forcing a node into primary role and
3266 setting its disk to UpToDate with that */
3267 drbd_send_uuids(mdev);
3268 drbd_send_state(mdev);
3272 mdev->net_conf->want_lose = 0;
3274 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3279 static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3281 struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
3283 wait_event(mdev->misc_wait,
3284 mdev->state.conn == C_WF_SYNC_UUID ||
3285 mdev->state.conn < C_CONNECTED ||
3286 mdev->state.disk < D_NEGOTIATING);
3288 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3290 /* Here the _drbd_uuid_ functions are right, current should
3291 _not_ be rotated into the history */
3292 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3293 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3294 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3296 drbd_start_resync(mdev, C_SYNC_TARGET);
3300 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3305 enum receive_bitmap_ret { OK, DONE, FAILED };
3307 static enum receive_bitmap_ret
3308 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3309 unsigned long *buffer, struct bm_xfer_ctx *c)
3311 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3312 unsigned want = num_words * sizeof(long);
3314 if (want != data_size) {
3315 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
3320 if (drbd_recv(mdev, buffer, want) != want)
3323 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3325 c->word_offset += num_words;
3326 c->bit_offset = c->word_offset * BITS_PER_LONG;
3327 if (c->bit_offset > c->bm_bits)
3328 c->bit_offset = c->bm_bits;
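
/* Sketch of the compressed encoding decoded below, as implemented (not a
 * normative description): the payload is a bitstream of VLI-encoded run
 * lengths; runs alternate between clear and set bits, starting with the
 * polarity given by DCBP_get_start(p), and only the set runs are applied
 * to the bitmap via _drbd_bm_set_bits(). */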
3333 static enum receive_bitmap_ret
3334 recv_bm_rle_bits(struct drbd_conf *mdev,
3335 struct p_compressed_bm *p,
3336 struct bm_xfer_ctx *c)
3338 struct bitstream bs;
3342 unsigned long s = c->bit_offset;
3344 int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
3345 int toggle = DCBP_get_start(p);
3349 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3351 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3355 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3356 bits = vli_decode_bits(&rl, look_ahead);
3362 if (e >= c->bm_bits) {
3363 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3366 _drbd_bm_set_bits(mdev, s, e);
3370 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3371 have, bits, look_ahead,
3372 (unsigned int)(bs.cur.b - p->code),
3373 (unsigned int)bs.buf_len);
3376 look_ahead >>= bits;
3379 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3382 look_ahead |= tmp << have;
3387 bm_xfer_ctx_bit_to_word_offset(c);
3389 return (s == c->bm_bits) ? DONE : OK;
3392 static enum receive_bitmap_ret
3393 decode_bitmap_c(struct drbd_conf *mdev,
3394 struct p_compressed_bm *p,
3395 struct bm_xfer_ctx *c)
3397 if (DCBP_get_code(p) == RLE_VLI_Bits)
3398 return recv_bm_rle_bits(mdev, p, c);
3400 /* other variants had been implemented for evaluation,
3401 * but have been dropped as this one turned out to be "best"
3402 * during all our tests. */
3404 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3405 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3409 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3410 const char *direction, struct bm_xfer_ctx *c)
3412 /* what would it take to transfer it "plaintext" */
3413 unsigned plain = sizeof(struct p_header80) *
3414 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3415 + c->bm_words * sizeof(long);
3416 unsigned total = c->bytes[0] + c->bytes[1];
3419 /* total cannot be zero, but just in case: */
3423 /* don't report if not compressed */
3427 /* total < plain. check for overflow, still */
3428 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3429 : (1000 * total / plain);
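	/* Fixed point in units of 0.1%: kernel code avoids floating point,
	 * so r is carried in per mille and printed below as r/10 "." r%10. */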
3435 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3436 "total %u; compression: %u.%u%%\n",
3438 c->bytes[1], c->packets[1],
3439 c->bytes[0], c->packets[0],
3440 total, r/10, r % 10);
3443 /* Since we are processing the bitfield from lower addresses to higher,
3444    it does not matter whether we process it in 32 bit or 64 bit chunks,
3445    as long as it is little endian. (Understand it as a byte stream,
3446    beginning with the lowest byte...) If we used big endian,
3447    we would need to process it from the highest address to the lowest
3448    in order to be agnostic to the 32 vs 64 bit issue.
3450    returns 0 on failure, 1 if we successfully received it. */
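
/* Illustration of the endianness argument above: the byte sequence
 * 11 22 33 44 55 66 77 88 is the same bit stream whether read as two
 * little endian 32 bit words (0x44332211, 0x88776655) or as one 64 bit
 * word (0x8877665544332211): bit 0 is always the lowest bit of the
 * lowest-addressed byte. */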
3451 static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3453 struct bm_xfer_ctx c;
3455 enum receive_bitmap_ret ret;
3457 struct p_header80 *h = &mdev->data.rbuf.header.h80;
3459 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3461 drbd_bm_lock(mdev, "receive bitmap");
3463 /* maybe we should use some per thread scratch page,
3464 * and allocate that during initial device creation? */
3465 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3467 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3471 c = (struct bm_xfer_ctx) {
3472 .bm_bits = drbd_bm_bits(mdev),
3473 .bm_words = drbd_bm_words(mdev),
3477 if (cmd == P_BITMAP) {
3478 ret = receive_bitmap_plain(mdev, data_size, buffer, &c);
3479 } else if (cmd == P_COMPRESSED_BITMAP) {
3480 /* MAYBE: sanity check that we speak proto >= 90,
3481 * and the feature is enabled! */
3482 struct p_compressed_bm *p;
3484 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
3485 dev_err(DEV, "ReportCBitmap packet too large\n");
3488 /* use the page buffer */
3490 memcpy(p, h, sizeof(*h));
3491 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
3493 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3494 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
3497 ret = decode_bitmap_c(mdev, p, &c);
3499 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
3503 c.packets[cmd == P_BITMAP]++;
3504 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
3509 if (!drbd_recv_header(mdev, &cmd, &data_size))
3511 } while (ret == OK);
3515 INFO_bm_xfer_stats(mdev, "receive", &c);
3517 if (mdev->state.conn == C_WF_BITMAP_T) {
3518 ok = !drbd_send_bitmap(mdev);
3521 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3522 ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3523 D_ASSERT(ok == SS_SUCCESS);
3524 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3525 /* admin may have requested C_DISCONNECTING,
3526 * other threads may have noticed network errors */
3527 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3528 drbd_conn_str(mdev->state.conn));
3533 drbd_bm_unlock(mdev);
3534 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3535 drbd_start_resync(mdev, C_SYNC_SOURCE);
3536 free_page((unsigned long) buffer);
3540 static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3542 /* TODO zero copy sink :) */
3543 static char sink[128];
3546 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3551 want = min_t(int, size, sizeof(sink));
3552 r = drbd_recv(mdev, sink, want);
3553 ERR_IF(r <= 0) break;
3559 static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3561 if (mdev->state.disk >= D_INCONSISTENT)
3564 /* Make sure we've acked all the TCP data associated
3565 * with the data requests being unplugged */
3566 drbd_tcp_quickack(mdev->data.socket);
3571 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
3576 drbd_cmd_handler_f function;
3579 static struct data_cmd drbd_cmd_handler[] = {
3580 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3581 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3582 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3583 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3584 [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3585 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3586 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3587 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3588 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3589 [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
3590 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
3591 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3592 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3593 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3594 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3595 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3596 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3597 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3598 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3599 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3600 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
3601 /* anything missing from this table is in
3602 * the asender_tbl, see get_asender_cmd */
3603 [P_MAX_CMD] = { 0, 0, NULL },
3606 /* All handler functions that expect a sub-header get that sub-header in
3607    mdev->data.rbuf.header.head.payload.
3609    Usually the callback can find the usual p_header in
3610    mdev->data.rbuf.header.head, but it may not rely on that, since there is also p_header95.
3613 static void drbdd(struct drbd_conf *mdev)
3615 union p_header *header = &mdev->data.rbuf.header;
3616 unsigned int packet_size;
3617 enum drbd_packets cmd;
3618 size_t shs; /* sub header size */
3621 while (get_t_state(&mdev->receiver) == Running) {
3622 drbd_thread_current_set_cpu(mdev);
3623 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3626 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3627 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3631 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
3632 rv = drbd_recv(mdev, &header->h80.payload, shs);
3633 if (unlikely(rv != shs)) {
3634 dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
3638 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3639 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3643 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3645 if (unlikely(!rv)) {
3646 dev_err(DEV, "error receiving %s, l: %d!\n",
3647 cmdname(cmd), packet_size);
3654 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3656 /* If we leave here, we probably want to update at least the
3657 * "Connected" indicator on stable storage. Do so explicitly here. */
3661 void drbd_flush_workqueue(struct drbd_conf *mdev)
3663 struct drbd_wq_barrier barr;
3665 barr.w.cb = w_prev_work_done;
3666 init_completion(&barr.done);
3667 drbd_queue_work(&mdev->data.work, &barr.w);
3668 wait_for_completion(&barr.done);
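	/* The barrier pattern, spelled out: w_prev_work_done only completes
	 * barr.done, and the worker drains its queue in order, so once the
	 * completion fires, all work queued before the barrier has run. */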
3671 void drbd_free_tl_hash(struct drbd_conf *mdev)
3673 struct hlist_head *h;
3675 spin_lock_irq(&mdev->req_lock);
3677 if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
3678 spin_unlock_irq(&mdev->req_lock);
3682 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3684 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3685 (int)(h - mdev->ee_hash), h->first);
3686 kfree(mdev->ee_hash);
3687 mdev->ee_hash = NULL;
3688 mdev->ee_hash_s = 0;
3691 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3693 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3694 (int)(h - mdev->tl_hash), h->first);
3695 kfree(mdev->tl_hash);
3696 mdev->tl_hash = NULL;
3697 mdev->tl_hash_s = 0;
3698 spin_unlock_irq(&mdev->req_lock);
3701 static void drbd_disconnect(struct drbd_conf *mdev)
3703 enum drbd_fencing_p fp;
3704 union drbd_state os, ns;
3705 int rv = SS_UNKNOWN_ERROR;
3708 if (mdev->state.conn == C_STANDALONE)
3710 if (mdev->state.conn >= C_WF_CONNECTION)
3711 dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
3712 drbd_conn_str(mdev->state.conn));
3714 /* asender does not clean up anything. it must not interfere, either */
3715 drbd_thread_stop(&mdev->asender);
3716 drbd_free_sock(mdev);
3718 /* wait for current activity to cease. */
3719 spin_lock_irq(&mdev->req_lock);
3720 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3721 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3722 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3723 spin_unlock_irq(&mdev->req_lock);
3725 /* We do not have data structures that would allow us to
3726 * get the rs_pending_cnt down to 0 again.
3727 * * On C_SYNC_TARGET we do not have any data structures describing
3728 * the pending RSDataRequest's we have sent.
3729 * * On C_SYNC_SOURCE there is no data structure that tracks
3730 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3731 * And no, it is not the sum of the reference counts in the
3732 * resync_LRU. The resync_LRU tracks the whole operation including
3733 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3735 drbd_rs_cancel_all(mdev);
3737 mdev->rs_failed = 0;
3738 atomic_set(&mdev->rs_pending_cnt, 0);
3739 wake_up(&mdev->misc_wait);
3741 /* make sure syncer is stopped and w_resume_next_sg queued */
3742 del_timer_sync(&mdev->resync_timer);
3743 resync_timer_fn((unsigned long)mdev);
3745 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3746 * w_make_resync_request etc. which may still be on the worker queue
3747 * to be "canceled" */
3748 drbd_flush_workqueue(mdev);
3750 /* This also does reclaim_net_ee(). If we do this too early, we might
3751 * miss some resync ee and pages.*/
3752 drbd_process_done_ee(mdev);
3754 kfree(mdev->p_uuid);
3755 mdev->p_uuid = NULL;
3757 if (!is_susp(mdev->state))
3760 dev_info(DEV, "Connection closed\n");
3765 if (get_ldev(mdev)) {
3766 fp = mdev->ldev->dc.fencing;
3770 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3771 drbd_try_outdate_peer_async(mdev);
3773 spin_lock_irq(&mdev->req_lock);
3775 if (os.conn >= C_UNCONNECTED) {
3776 /* Do not restart in case we are C_DISCONNECTING */
3778 ns.conn = C_UNCONNECTED;
3779 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3781 spin_unlock_irq(&mdev->req_lock);
3783 if (os.conn == C_DISCONNECTING) {
3784 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
3786 if (!is_susp(mdev->state)) {
3787 /* we must not free the tl_hash
3788 * while application io is still on the fly */
3789 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3790 drbd_free_tl_hash(mdev);
3793 crypto_free_hash(mdev->cram_hmac_tfm);
3794 mdev->cram_hmac_tfm = NULL;
3796 kfree(mdev->net_conf);
3797 mdev->net_conf = NULL;
3798 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3801 /* tcp_close and release of sendpage pages can be deferred. I don't
3802 * want to use SO_LINGER, because apparently it can be deferred for
3803 * more than 20 seconds (longest time I checked).
3805 * Actually we don't care for exactly when the network stack does its
3806 * put_page(), but release our reference on these pages right here.
3808 i = drbd_release_ee(mdev, &mdev->net_ee);
3810 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
3811 i = atomic_read(&mdev->pp_in_use_by_net);
3813 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
3814 i = atomic_read(&mdev->pp_in_use);
3816 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
3818 D_ASSERT(list_empty(&mdev->read_ee));
3819 D_ASSERT(list_empty(&mdev->active_ee));
3820 D_ASSERT(list_empty(&mdev->sync_ee));
3821 D_ASSERT(list_empty(&mdev->done_ee));
3823 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3824 atomic_set(&mdev->current_epoch->epoch_size, 0);
3825 D_ASSERT(list_empty(&mdev->current_epoch->list));
3829 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3830 * we can agree on is stored in agreed_pro_version.
3832 * feature flags and the reserved array should be enough room for future
3833 * enhancements of the handshake protocol, and possible plugins...
3835 * for now, they are expected to be zero, but ignored.
3837 static int drbd_send_handshake(struct drbd_conf *mdev)
3839 /* ASSERT current == mdev->receiver ... */
3840 struct p_handshake *p = &mdev->data.sbuf.handshake;
3843 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3844 dev_err(DEV, "interrupted during initial handshake\n");
3845 return 0; /* interrupted. not ok. */
3848 if (mdev->data.socket == NULL) {
3849 mutex_unlock(&mdev->data.mutex);
3853 memset(p, 0, sizeof(*p));
3854 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3855 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3856 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_HAND_SHAKE,
3857 		     (struct p_header80 *)p, sizeof(*p), 0);
3858 mutex_unlock(&mdev->data.mutex);
3864 * 1 yes, we have a valid connection
3865 * 0 oops, did not work out, please try again
3866 * -1 peer talks different language,
3867 * no point in trying again, please go standalone.
3869 static int drbd_do_handshake(struct drbd_conf *mdev)
3871 /* ASSERT current == mdev->receiver ... */
3872 struct p_handshake *p = &mdev->data.rbuf.handshake;
3873 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
3874 unsigned int length;
3875 enum drbd_packets cmd;
3878 rv = drbd_send_handshake(mdev);
3882 rv = drbd_recv_header(mdev, &cmd, &length);
3886 if (cmd != P_HAND_SHAKE) {
3887 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
3892 if (length != expect) {
3893 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
3898 rv = drbd_recv(mdev, &p->head.payload, expect);
3901 dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
3905 p->protocol_min = be32_to_cpu(p->protocol_min);
3906 p->protocol_max = be32_to_cpu(p->protocol_max);
3907 if (p->protocol_max == 0)
3908 p->protocol_max = p->protocol_min;
3910 if (PRO_VERSION_MAX < p->protocol_min ||
3911 PRO_VERSION_MIN > p->protocol_max)
3914 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3916 dev_info(DEV, "Handshake successful: "
3917 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3922 dev_err(DEV, "incompatible DRBD dialects: "
3923 "I support %d-%d, peer supports %d-%d\n",
3924 PRO_VERSION_MIN, PRO_VERSION_MAX,
3925 p->protocol_min, p->protocol_max);
3929 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
3930 static int drbd_do_auth(struct drbd_conf *mdev)
3932 dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
3933 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
3937 #define CHALLENGE_LEN 64
3941 0 - failed, try again (network error),
3942 -1 - auth failed, don't try again.
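
   Flow of the challenge-response exchange below, as a descriptive summary:
     1. send our random challenge             (P_AUTH_CHALLENGE)
     2. receive the peer's challenge          (P_AUTH_CHALLENGE)
     3. send HMAC(secret, peer's challenge)   (P_AUTH_RESPONSE)
     4. receive the peer's response and compare it against
        HMAC(secret, my_challenge) computed locally.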
3945 static int drbd_do_auth(struct drbd_conf *mdev)
3947 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
3948 struct scatterlist sg;
3949 char *response = NULL;
3950 char *right_response = NULL;
3951 char *peers_ch = NULL;
3952 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
3953 unsigned int resp_size;
3954 struct hash_desc desc;
3955 enum drbd_packets cmd;
3956 unsigned int length;
3959 desc.tfm = mdev->cram_hmac_tfm;
3962 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
3963 (u8 *)mdev->net_conf->shared_secret, key_len);
3965 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
3970 get_random_bytes(my_challenge, CHALLENGE_LEN);
3972 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
3976 rv = drbd_recv_header(mdev, &cmd, &length);
3980 if (cmd != P_AUTH_CHALLENGE) {
3981 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
3987 if (length > CHALLENGE_LEN * 2) {
3988 dev_err(DEV, "expected AuthChallenge payload too big.\n");
3993 peers_ch = kmalloc(length, GFP_NOIO);
3994 if (peers_ch == NULL) {
3995 dev_err(DEV, "kmalloc of peers_ch failed\n");
4000 rv = drbd_recv(mdev, peers_ch, length);
4003 dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
4008 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4009 response = kmalloc(resp_size, GFP_NOIO);
4010 if (response == NULL) {
4011 dev_err(DEV, "kmalloc of response failed\n");
4016 sg_init_table(&sg, 1);
4017 sg_set_buf(&sg, peers_ch, length);
4019 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4021 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4026 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4030 rv = drbd_recv_header(mdev, &cmd, &length);
4034 if (cmd != P_AUTH_RESPONSE) {
4035 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
4041 if (length != resp_size) {
4042 dev_err(DEV, "expected AuthResponse payload of wrong size\n");
4047 rv = drbd_recv(mdev, response, resp_size);
4049 if (rv != resp_size) {
4050 dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
4055 right_response = kmalloc(resp_size, GFP_NOIO);
4056 if (right_response == NULL) {
4057 dev_err(DEV, "kmalloc of right_response failed\n");
4062 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4064 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4066 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4071 rv = !memcmp(response, right_response, resp_size);
4074 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4075 resp_size, mdev->net_conf->cram_hmac_alg);
4082 kfree(right_response);
4088 int drbdd_init(struct drbd_thread *thi)
4090 struct drbd_conf *mdev = thi->mdev;
4091 unsigned int minor = mdev_to_minor(mdev);
4094 sprintf(current->comm, "drbd%d_receiver", minor);
4096 dev_info(DEV, "receiver (re)started\n");
4099 h = drbd_connect(mdev);
4101 drbd_disconnect(mdev);
4102 __set_current_state(TASK_INTERRUPTIBLE);
4103 schedule_timeout(HZ);
4106 dev_warn(DEV, "Discarding network configuration.\n");
4107 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4112 if (get_net_conf(mdev)) {
4118 drbd_disconnect(mdev);
4120 dev_info(DEV, "receiver terminated\n");
4124 /* ********* acknowledge sender ******** */
4126 static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
4128 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4130 int retcode = be32_to_cpu(p->retcode);
4132 if (retcode >= SS_SUCCESS) {
4133 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4135 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4136 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4137 drbd_set_st_err_str(retcode), retcode);
4139 wake_up(&mdev->state_wait);
4144 static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
4146 return drbd_send_ping_ack(mdev);
4150 static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
4152 /* restore idle timeout */
4153 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4154 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4155 wake_up(&mdev->misc_wait);
4160 static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
4162 struct p_block_ack *p = (struct p_block_ack *)h;
4163 sector_t sector = be64_to_cpu(p->sector);
4164 int blksize = be32_to_cpu(p->blksize);
4166 D_ASSERT(mdev->agreed_pro_version >= 89);
4168 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4170 if (get_ldev(mdev)) {
4171 drbd_rs_complete_io(mdev, sector);
4172 drbd_set_in_sync(mdev, sector, blksize);
4173 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4174 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4177 dec_rs_pending(mdev);
4178 atomic_add(blksize >> 9, &mdev->rs_sect_in);
4183 /* when we receive the ACK for a write request,
4184 * verify that we actually know about it */
4185 static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4186 u64 id, sector_t sector)
4188 struct hlist_head *slot = tl_hash_slot(mdev, sector);
4189 struct hlist_node *n;
4190 struct drbd_request *req;
4192 hlist_for_each_entry(req, n, slot, colision) {
4193 if ((unsigned long)req == (unsigned long)id) {
4194 if (req->sector != sector) {
4195 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4196 "wrong sector (%llus versus %llus)\n", req,
4197 (unsigned long long)req->sector,
4198 (unsigned long long)sector);
4204 dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
4205 (void *)(unsigned long)id, (unsigned long long)sector);
4209 typedef struct drbd_request *(req_validator_fn)
4210 (struct drbd_conf *mdev, u64 id, sector_t sector);
4212 static int validate_req_change_req_state(struct drbd_conf *mdev,
4213 u64 id, sector_t sector, req_validator_fn validator,
4214 const char *func, enum drbd_req_event what)
4216 struct drbd_request *req;
4217 struct bio_and_error m;
4219 spin_lock_irq(&mdev->req_lock);
4220 req = validator(mdev, id, sector);
4221 if (unlikely(!req)) {
4222 spin_unlock_irq(&mdev->req_lock);
4223 dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
4226 __req_mod(req, what, &m);
4227 spin_unlock_irq(&mdev->req_lock);
4230 complete_master_bio(mdev, &m);
4234 static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
4236 struct p_block_ack *p = (struct p_block_ack *)h;
4237 sector_t sector = be64_to_cpu(p->sector);
4238 int blksize = be32_to_cpu(p->blksize);
4239 enum drbd_req_event what;
4241 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4243 if (is_syncer_block_id(p->block_id)) {
4244 drbd_set_in_sync(mdev, sector, blksize);
4245 dec_rs_pending(mdev);
4248 switch (be16_to_cpu(h->command)) {
4249 case P_RS_WRITE_ACK:
4250 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4251 what = write_acked_by_peer_and_sis;
4254 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4255 what = write_acked_by_peer;
4258 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4259 what = recv_acked_by_peer;
4262 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4263 what = conflict_discarded_by_peer;
4270 return validate_req_change_req_state(mdev, p->block_id, sector,
4271 _ack_id_to_req, __func__, what);
4274 static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
4276 struct p_block_ack *p = (struct p_block_ack *)h;
4277 sector_t sector = be64_to_cpu(p->sector);
4279 if (__ratelimit(&drbd_ratelimit_state))
4280 dev_warn(DEV, "Got NegAck packet. Peer is in troubles?\n");
4282 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4284 if (is_syncer_block_id(p->block_id)) {
4285 int size = be32_to_cpu(p->blksize);
4286 dec_rs_pending(mdev);
4287 drbd_rs_failed_io(mdev, sector, size);
4290 return validate_req_change_req_state(mdev, p->block_id, sector,
4291 _ack_id_to_req, __func__, neg_acked);
4294 static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
4296 struct p_block_ack *p = (struct p_block_ack *)h;
4297 sector_t sector = be64_to_cpu(p->sector);
4299 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4300 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4301 (unsigned long long)sector, be32_to_cpu(p->blksize));
4303 return validate_req_change_req_state(mdev, p->block_id, sector,
4304 _ar_id_to_req, __func__, neg_acked);
4307 static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
4311 struct p_block_ack *p = (struct p_block_ack *)h;
4313 sector = be64_to_cpu(p->sector);
4314 size = be32_to_cpu(p->blksize);
4316 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4318 dec_rs_pending(mdev);
4320 if (get_ldev_if_state(mdev, D_FAILED)) {
4321 drbd_rs_complete_io(mdev, sector);
4322 drbd_rs_failed_io(mdev, sector, size);
4329 static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
4331 struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4333 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4338 static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
4340 struct p_block_ack *p = (struct p_block_ack *)h;
4341 struct drbd_work *w;
4345 sector = be64_to_cpu(p->sector);
4346 size = be32_to_cpu(p->blksize);
4348 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4350 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4351 drbd_ov_oos_found(mdev, sector, size);
4355 if (!get_ldev(mdev))
4358 drbd_rs_complete_io(mdev, sector);
4359 dec_rs_pending(mdev);
4361 if (--mdev->ov_left == 0) {
4362 w = kmalloc(sizeof(*w), GFP_NOIO);
4364 w->cb = w_ov_finished;
4365 drbd_queue_work_front(&mdev->data.work, w);
4367 dev_err(DEV, "kmalloc(w) failed.");
4369 drbd_resync_finished(mdev);
4376 static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
4381 struct asender_cmd {
4383 int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
4386 static struct asender_cmd *get_asender_cmd(int cmd)
4388 static struct asender_cmd asender_tbl[] = {
4389 /* anything missing from this table is in
4390 * the drbd_cmd_handler (drbd_default_handler) table,
4391 * see the beginning of drbdd() */
4392 [P_PING] = { sizeof(struct p_header80), got_Ping },
4393 [P_PING_ACK] = { sizeof(struct p_header80), got_PingAck },
4394 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4395 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4396 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4397 [P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4398 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
4399 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
4400 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply},
4401 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
4402 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
4403 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
4404 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
4405 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
4406 [P_MAX_CMD] = { 0, NULL },
4408 if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
4410 return &asender_tbl[cmd];
4413 int drbd_asender(struct drbd_thread *thi)
4415 struct drbd_conf *mdev = thi->mdev;
4416 struct p_header80 *h = &mdev->meta.rbuf.header.h80;
4417 struct asender_cmd *cmd = NULL;
4422 int expect = sizeof(struct p_header80);
4425 sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
4427 current->policy = SCHED_RR; /* Make this a realtime task! */
4428 current->rt_priority = 2; /* more important than all other tasks */
4430 while (get_t_state(thi) == Running) {
4431 drbd_thread_current_set_cpu(mdev);
4432 if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
4433 ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
4434 mdev->meta.socket->sk->sk_rcvtimeo =
4435 mdev->net_conf->ping_timeo*HZ/10;
4438 /* conditionally cork;
4439 * it may hurt latency if we cork without much to send */
4440 if (!mdev->net_conf->no_cork &&
4441 3 < atomic_read(&mdev->unacked_cnt))
4442 drbd_tcp_cork(mdev->meta.socket);
4444 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4445 flush_signals(current);
4446 if (!drbd_process_done_ee(mdev))
4448 /* to avoid race with newly queued ACKs */
4449 set_bit(SIGNAL_ASENDER, &mdev->flags);
4450 spin_lock_irq(&mdev->req_lock);
4451 empty = list_empty(&mdev->done_ee);
4452 spin_unlock_irq(&mdev->req_lock);
4453 /* new ack may have been queued right here,
4454 * but then there is also a signal pending,
4455 * and we start over... */
4459 /* but unconditionally uncork unless disabled */
4460 if (!mdev->net_conf->no_cork)
4461 drbd_tcp_uncork(mdev->meta.socket);
4463 /* short circuit, recv_msg would return EINTR anyways. */
4464 if (signal_pending(current))
4467 rv = drbd_recv_short(mdev, mdev->meta.socket,
4468 buf, expect-received, 0);
4469 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4471 flush_signals(current);
4474 * -EINTR (on meta) we got a signal
4475 * -EAGAIN (on meta) rcvtimeo expired
4476 * -ECONNRESET other side closed the connection
4477 * -ERESTARTSYS (on data) we got a signal
4478 * rv < 0 other than above: unexpected error!
4479 * rv == expected: full header or command
4480 * rv < expected: "woken" by signal during receive
4481 * rv == 0 : "connection shut down by peer"
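		 *
		 * In short, the receive state machine below: accumulate a
		 * full p_header80, look up the command to learn the total
		 * packet size ("expect"), keep receiving until received ==
		 * expect, dispatch cmd->process(), then reset for the next
		 * header.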
4483 if (likely(rv > 0)) {
4486 } else if (rv == 0) {
4487 dev_err(DEV, "meta connection shut down by peer.\n");
4489 } else if (rv == -EAGAIN) {
4490 if (mdev->meta.socket->sk->sk_rcvtimeo ==
4491 mdev->net_conf->ping_timeo*HZ/10) {
4492 dev_err(DEV, "PingAck did not arrive in time.\n");
4495 set_bit(SEND_PING, &mdev->flags);
4497 } else if (rv == -EINTR) {
4500 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
4504 if (received == expect && cmd == NULL) {
4505 if (unlikely(h->magic != BE_DRBD_MAGIC)) {
4506 dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
4507 be32_to_cpu(h->magic),
4508 be16_to_cpu(h->command),
4509 be16_to_cpu(h->length));
4512 cmd = get_asender_cmd(be16_to_cpu(h->command));
4513 len = be16_to_cpu(h->length);
4514 if (unlikely(cmd == NULL)) {
4515 dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
4516 be32_to_cpu(h->magic),
4517 be16_to_cpu(h->command),
4518 be16_to_cpu(h->length));
4521 expect = cmd->pkt_size;
4522 ERR_IF(len != expect-sizeof(struct p_header80))
4525 if (received == expect) {
4526 D_ASSERT(cmd != NULL);
4527 if (!cmd->process(mdev, h))
4532 expect = sizeof(struct p_header80);
4539 drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
4544 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4547 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4549 D_ASSERT(mdev->state.conn < C_CONNECTED);
4550 dev_info(DEV, "asender terminated\n");