4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/module.h>
28 #include <asm/uaccess.h>
31 #include <linux/drbd.h>
33 #include <linux/file.h>
36 #include <linux/memcontrol.h>
37 #include <linux/mm_inline.h>
38 #include <linux/slab.h>
39 #include <linux/smp_lock.h>
40 #include <linux/pkt_sched.h>
41 #define __KERNEL_SYSCALLS__
42 #include <linux/unistd.h>
43 #include <linux/vmalloc.h>
44 #include <linux/random.h>
45 #include <linux/string.h>
46 #include <linux/scatterlist.h>
54 struct drbd_epoch *epoch;
63 static int drbd_do_handshake(struct drbd_conf *mdev);
64 static int drbd_do_auth(struct drbd_conf *mdev);
66 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
67 static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
69 static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
71 struct drbd_epoch *prev;
72 spin_lock(&mdev->epoch_lock);
73 prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
74 if (prev == epoch || prev == mdev->current_epoch)
76 spin_unlock(&mdev->epoch_lock);
80 #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
83 * some helper functions to deal with singly linked page lists,
84 * page->private being our "next" pointer.
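 *
 * A minimal sketch of the accessors these helpers rely on (the real
 * definitions live elsewhere in DRBD and may differ in detail):
 *
 *	#define page_chain_next(page)	((struct page *)page_private(page))
 *	#define page_chain_for_each(page) \
 *		for (; page; page = page_chain_next(page))
 */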
87 /* If at least n pages are linked at head, get n pages off.
88 * Otherwise, don't modify head, and return NULL.
89 * Locking is the responsibility of the caller.
91 static struct page *page_chain_del(struct page **head, int n)
105 tmp = page_chain_next(page);
107 break; /* found sufficient pages */
109 /* insufficient pages, don't use any of them. */
114 /* add end of list marker for the returned list */
115 set_page_private(page, 0);
116 /* actual return value, and adjustment of head */
122 /* may be used outside of locks to find the tail of a (usually short)
123 * "private" page chain, before adding it back to a global chain head
124 * with page_chain_add() under a spinlock. */
125 static struct page *page_chain_tail(struct page *page, int *len)
129 while ((tmp = page_chain_next(page)))
136 static int page_chain_free(struct page *page)
140 page_chain_for_each_safe(page, tmp) {
147 static void page_chain_add(struct page **head,
148 struct page *chain_first, struct page *chain_last)
152 tmp = page_chain_tail(chain_first, NULL);
153 BUG_ON(tmp != chain_last);
156 /* add chain to head */
157 set_page_private(chain_last, (unsigned long)*head);
161 static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
163 struct page *page = NULL;
164 struct page *tmp = NULL;
167 /* Yes, testing drbd_pp_vacant outside the lock is racy.
168 * So what. It saves a spin_lock. */
169 if (drbd_pp_vacant >= number) {
170 spin_lock(&drbd_pp_lock);
171 page = page_chain_del(&drbd_pp_pool, number);
173 drbd_pp_vacant -= number;
174 spin_unlock(&drbd_pp_lock);
179 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
180 * "criss-cross" setup, that might cause write-out on some other DRBD,
181 * which in turn might block on the other node at this very place. */
182 for (i = 0; i < number; i++) {
183 tmp = alloc_page(GFP_TRY);
186 set_page_private(tmp, (unsigned long)page);
193 /* Not enough pages immediately available this time.
194 * No need to jump around here, drbd_pp_alloc will retry this
195 * function "soon". */
197 tmp = page_chain_tail(page, NULL);
198 spin_lock(&drbd_pp_lock);
199 page_chain_add(&drbd_pp_pool, page, tmp);
201 spin_unlock(&drbd_pp_lock);
206 /* kick lower level device, if we have more than (arbitrary number)
207 * reference counts on it, which typically are locally submitted io
208 * requests. don't use unacked_cnt, so we speed up proto A and B, too. */
209 static void maybe_kick_lo(struct drbd_conf *mdev)
211 if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
215 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
217 struct drbd_epoch_entry *e;
218 struct list_head *le, *tle;
220 /* The EEs are always appended to the end of the list. Since
221 they are sent in order over the wire, they have to finish
222 in order. As soon as we see the first one that has not finished, we can
223 stop examining the list... */
225 list_for_each_safe(le, tle, &mdev->net_ee) {
226 e = list_entry(le, struct drbd_epoch_entry, w.list);
227 if (drbd_ee_has_active_page(e))
229 list_move(le, to_be_freed);
233 static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
235 LIST_HEAD(reclaimed);
236 struct drbd_epoch_entry *e, *t;
239 spin_lock_irq(&mdev->req_lock);
240 reclaim_net_ee(mdev, &reclaimed);
241 spin_unlock_irq(&mdev->req_lock);
243 list_for_each_entry_safe(e, t, &reclaimed, w.list)
244 drbd_free_net_ee(mdev, e);
248 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
249 * @mdev: DRBD device.
250 * @number: number of pages requested
251 * @retry: whether to retry, if not enough pages are available right now
253 * Tries to allocate number pages, first from our own page pool, then from
254 * the kernel, unless this allocation would exceed the max_buffers setting.
255 * Possibly retry until DRBD frees sufficient pages somewhere else.
257 * Returns a page chain linked via page->private.
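 *
 * An illustrative pairing with drbd_pp_free(); this is only a sketch,
 * drbd_drain_block() further down is a real caller doing just this:
 *
 *	page = drbd_pp_alloc(mdev, nr_pages, true);
 *	...
 *	drbd_pp_free(mdev, page, 0);
 */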
259 static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
261 struct page *page = NULL;
264 /* Yes, we may run up to @number over max_buffers. If we
265 * follow it strictly, the admin will get it wrong anyways. */
266 if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
267 page = drbd_pp_first_pages_or_try_alloc(mdev, number);
269 while (page == NULL) {
270 prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
272 drbd_kick_lo_and_reclaim_net(mdev);
274 if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
275 page = drbd_pp_first_pages_or_try_alloc(mdev, number);
283 if (signal_pending(current)) {
284 dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
290 finish_wait(&drbd_pp_wait, &wait);
293 atomic_add(number, &mdev->pp_in_use);
297 /* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
298 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
299 * Either links the page chain back to the global pool,
300 * or returns all pages to the system. */
301 static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
303 atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
306 if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
307 i = page_chain_free(page);
310 tmp = page_chain_tail(page, &i);
311 spin_lock(&drbd_pp_lock);
312 page_chain_add(&drbd_pp_pool, page, tmp);
314 spin_unlock(&drbd_pp_lock);
316 i = atomic_sub_return(i, a);
318 dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
319 is_net ? "pp_in_use_by_net" : "pp_in_use", i);
320 wake_up(&drbd_pp_wait);
324 You need to hold the req_lock:
325 _drbd_wait_ee_list_empty()
327 You must not have the req_lock:
333 drbd_process_done_ee()
335 drbd_wait_ee_list_empty()
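
A caller-side sketch, for illustration only (drbd_wait_ee_list_empty()
further down is exactly this wrapper):

	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
*/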
338 struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
341 unsigned int data_size,
342 gfp_t gfp_mask) __must_hold(local)
344 struct drbd_epoch_entry *e;
346 unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
348 if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
351 e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
353 if (!(gfp_mask & __GFP_NOWARN))
354 dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
358 page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
362 INIT_HLIST_NODE(&e->colision);
366 atomic_set(&e->pending_bios, 0);
375 mempool_free(e, drbd_ee_mempool);
379 void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
381 if (e->flags & EE_HAS_DIGEST)
383 drbd_pp_free(mdev, e->pages, is_net);
384 D_ASSERT(atomic_read(&e->pending_bios) == 0);
385 D_ASSERT(hlist_unhashed(&e->colision));
386 mempool_free(e, drbd_ee_mempool);
389 int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
391 LIST_HEAD(work_list);
392 struct drbd_epoch_entry *e, *t;
394 int is_net = list == &mdev->net_ee;
396 spin_lock_irq(&mdev->req_lock);
397 list_splice_init(list, &work_list);
398 spin_unlock_irq(&mdev->req_lock);
400 list_for_each_entry_safe(e, t, &work_list, w.list) {
401 drbd_free_some_ee(mdev, e, is_net);
409 * This function is called from _asender only_
410 * but see also comments in _req_mod(,barrier_acked)
411 * and receive_Barrier.
413 * Move entries from net_ee to done_ee, if ready.
414 * Grab done_ee, call all callbacks, free the entries.
415 * The callbacks typically send out ACKs.
417 static int drbd_process_done_ee(struct drbd_conf *mdev)
419 LIST_HEAD(work_list);
420 LIST_HEAD(reclaimed);
421 struct drbd_epoch_entry *e, *t;
422 int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
424 spin_lock_irq(&mdev->req_lock);
425 reclaim_net_ee(mdev, &reclaimed);
426 list_splice_init(&mdev->done_ee, &work_list);
427 spin_unlock_irq(&mdev->req_lock);
429 list_for_each_entry_safe(e, t, &reclaimed, w.list)
430 drbd_free_net_ee(mdev, e);
432 /* possible callbacks here:
433 * e_end_block, and e_end_resync_block, e_send_discard_ack.
434 * all ignore the last argument.
436 list_for_each_entry_safe(e, t, &work_list, w.list) {
437 /* list_del not necessary, next/prev members not touched */
438 ok = e->w.cb(mdev, &e->w, !ok) && ok;
439 drbd_free_ee(mdev, e);
441 wake_up(&mdev->ee_wait);
446 void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
450 /* avoids spin_lock/unlock
451 * and calling prepare_to_wait in the fast path */
452 while (!list_empty(head)) {
453 prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
454 spin_unlock_irq(&mdev->req_lock);
457 finish_wait(&mdev->ee_wait, &wait);
458 spin_lock_irq(&mdev->req_lock);
462 void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
464 spin_lock_irq(&mdev->req_lock);
465 _drbd_wait_ee_list_empty(mdev, head);
466 spin_unlock_irq(&mdev->req_lock);
469 /* see also kernel_accept(), which is only present since 2.6.18;
470 * we also want to log exactly which part of it failed */
471 static int drbd_accept(struct drbd_conf *mdev, const char **what,
472 struct socket *sock, struct socket **newsock)
474 struct sock *sk = sock->sk;
478 err = sock->ops->listen(sock, 5);
482 *what = "sock_create_lite";
483 err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
489 err = sock->ops->accept(sock, *newsock, 0);
491 sock_release(*newsock);
495 (*newsock)->ops = sock->ops;
501 static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
502 void *buf, size_t size, int flags)
509 struct msghdr msg = {
511 .msg_iov = (struct iovec *)&iov,
512 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
518 rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
524 static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
531 struct msghdr msg = {
533 .msg_iov = (struct iovec *)&iov,
534 .msg_flags = MSG_WAITALL | MSG_NOSIGNAL
542 rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
547 * ECONNRESET other side closed the connection
548 * ERESTARTSYS (on sock) we got a signal
552 if (rv == -ECONNRESET)
553 dev_info(DEV, "sock was reset by peer\n");
554 else if (rv != -ERESTARTSYS)
555 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
557 } else if (rv == 0) {
558 dev_info(DEV, "sock was shut down by peer\n");
561 /* signal came in, or peer/link went down,
562 * after we read a partial message
564 /* D_ASSERT(signal_pending(current)); */
572 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
578 * On individual connections, the socket buffer size must be set prior to the
579 * listen(2) or connect(2) calls in order to have it take effect.
580 * This is our wrapper to do so.
582 static void drbd_setbufsize(struct socket *sock, unsigned int snd,
585 /* open coded SO_SNDBUF, SO_RCVBUF */
587 sock->sk->sk_sndbuf = snd;
588 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
591 sock->sk->sk_rcvbuf = rcv;
592 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
596 static struct socket *drbd_try_connect(struct drbd_conf *mdev)
600 struct sockaddr_in6 src_in6;
602 int disconnect_on_error = 1;
604 if (!get_net_conf(mdev))
607 what = "sock_create_kern";
608 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
609 SOCK_STREAM, IPPROTO_TCP, &sock);
615 sock->sk->sk_rcvtimeo =
616 sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
617 drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
618 mdev->net_conf->rcvbuf_size);
620 /* explicitly bind to the configured IP as source IP
621 * for the outgoing connections.
622 * This is needed for multihomed hosts and to be
623 * able to use lo: interfaces for drbd.
624 * Make sure to use 0 as port number, so linux selects
625 * a free one dynamically.
627 memcpy(&src_in6, mdev->net_conf->my_addr,
628 min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
629 if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
630 src_in6.sin6_port = 0;
632 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
634 what = "bind before connect";
635 err = sock->ops->bind(sock,
636 (struct sockaddr *) &src_in6,
637 mdev->net_conf->my_addr_len);
641 /* connect may fail, peer not yet available.
642 * stay C_WF_CONNECTION, don't go Disconnecting! */
643 disconnect_on_error = 0;
645 err = sock->ops->connect(sock,
646 (struct sockaddr *)mdev->net_conf->peer_addr,
647 mdev->net_conf->peer_addr_len, 0);
656 /* timeout, busy, signal pending */
657 case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
658 case EINTR: case ERESTARTSYS:
659 /* peer not (yet) available, network problem */
660 case ECONNREFUSED: case ENETUNREACH:
661 case EHOSTDOWN: case EHOSTUNREACH:
662 disconnect_on_error = 0;
665 dev_err(DEV, "%s failed, err = %d\n", what, err);
667 if (disconnect_on_error)
668 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
674 static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
677 struct socket *s_estab = NULL, *s_listen;
680 if (!get_net_conf(mdev))
683 what = "sock_create_kern";
684 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
685 SOCK_STREAM, IPPROTO_TCP, &s_listen);
691 timeo = mdev->net_conf->try_connect_int * HZ;
692 timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
694 s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
695 s_listen->sk->sk_rcvtimeo = timeo;
696 s_listen->sk->sk_sndtimeo = timeo;
697 drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
698 mdev->net_conf->rcvbuf_size);
700 what = "bind before listen";
701 err = s_listen->ops->bind(s_listen,
702 (struct sockaddr *) mdev->net_conf->my_addr,
703 mdev->net_conf->my_addr_len);
707 err = drbd_accept(mdev, &what, s_listen, &s_estab);
711 sock_release(s_listen);
713 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
714 dev_err(DEV, "%s failed, err = %d\n", what, err);
715 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
723 static int drbd_send_fp(struct drbd_conf *mdev,
724 struct socket *sock, enum drbd_packets cmd)
726 struct p_header80 *h = &mdev->data.sbuf.header.h80;
728 return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
731 static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
733 struct p_header80 *h = &mdev->data.rbuf.header.h80;
736 rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
738 if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
739 return be16_to_cpu(h->command);
745 * drbd_socket_okay() - Free the socket if its connection is not okay
746 * @mdev: DRBD device.
747 * @sock: pointer to the pointer to the socket.
749 static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
757 rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
759 if (rr > 0 || rr == -EAGAIN) {
770 * 1 yes, we have a valid connection
771 * 0 oops, did not work out, please try again
772 * -1 peer talks different language,
773 * no point in trying again, please go standalone.
774 * -2 We do not have a network config...
776 static int drbd_connect(struct drbd_conf *mdev)
778 struct socket *s, *sock, *msock;
781 D_ASSERT(!mdev->data.socket);
783 if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
786 clear_bit(DISCARD_CONCURRENT, &mdev->flags);
793 /* 3 tries, this should take less than a second! */
794 s = drbd_try_connect(mdev);
797 /* give the other side time to call bind() & listen() */
798 __set_current_state(TASK_INTERRUPTIBLE);
799 schedule_timeout(HZ / 10);
804 drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
808 drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
812 dev_err(DEV, "Logic error in drbd_connect()\n");
813 goto out_release_sockets;
818 __set_current_state(TASK_INTERRUPTIBLE);
819 schedule_timeout(HZ / 10);
820 ok = drbd_socket_okay(mdev, &sock);
821 ok = drbd_socket_okay(mdev, &msock) && ok;
827 s = drbd_wait_for_connect(mdev);
829 try = drbd_recv_fp(mdev, s);
830 drbd_socket_okay(mdev, &sock);
831 drbd_socket_okay(mdev, &msock);
835 dev_warn(DEV, "initial packet S crossed\n");
842 dev_warn(DEV, "initial packet M crossed\n");
846 set_bit(DISCARD_CONCURRENT, &mdev->flags);
849 dev_warn(DEV, "Error receiving initial packet\n");
856 if (mdev->state.conn <= C_DISCONNECTING)
857 goto out_release_sockets;
858 if (signal_pending(current)) {
859 flush_signals(current);
861 if (get_t_state(&mdev->receiver) == Exiting)
862 goto out_release_sockets;
866 ok = drbd_socket_okay(mdev, &sock);
867 ok = drbd_socket_okay(mdev, &msock) && ok;
873 msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
874 sock->sk->sk_reuse = 1; /* SO_REUSEADDR */
876 sock->sk->sk_allocation = GFP_NOIO;
877 msock->sk->sk_allocation = GFP_NOIO;
879 sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
880 msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
883 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
884 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
885 * first set it to the P_HAND_SHAKE timeout,
886 * which we set to 4x the configured ping_timeout. */
887 sock->sk->sk_sndtimeo =
888 sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;
890 msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
891 msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
893 /* we don't want delays.
894 * we use TCP_CORK where appropriate, though */
895 drbd_tcp_nodelay(sock);
896 drbd_tcp_nodelay(msock);
898 mdev->data.socket = sock;
899 mdev->meta.socket = msock;
900 mdev->last_received = jiffies;
902 D_ASSERT(mdev->asender.task == NULL);
904 h = drbd_do_handshake(mdev);
908 if (mdev->cram_hmac_tfm) {
909 /* drbd_request_state(mdev, NS(conn, WFAuth)); */
910 switch (drbd_do_auth(mdev)) {
912 dev_err(DEV, "Authentication of peer failed\n");
915 dev_err(DEV, "Authentication of peer failed, trying again.\n");
920 if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
923 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
924 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
926 atomic_set(&mdev->packet_seq, 0);
929 drbd_thread_start(&mdev->asender);
931 if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
932 drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
936 if (!drbd_send_protocol(mdev))
938 drbd_send_sync_param(mdev, &mdev->sync_conf);
939 drbd_send_sizes(mdev, 0, 0);
940 drbd_send_uuids(mdev);
941 drbd_send_state(mdev);
942 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
943 clear_bit(RESIZE_PENDING, &mdev->flags);
955 static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
957 union p_header *h = &mdev->data.rbuf.header;
960 r = drbd_recv(mdev, h, sizeof(*h));
961 if (unlikely(r != sizeof(*h))) {
962 dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
966 if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
967 *cmd = be16_to_cpu(h->h80.command);
968 *packet_size = be16_to_cpu(h->h80.length);
969 } else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
970 *cmd = be16_to_cpu(h->h95.command);
971 *packet_size = be32_to_cpu(h->h95.length);
973 dev_err(DEV, "magic?? on data m: 0x%lx c: %d l: %d\n",
974 (long)be32_to_cpu(h->h80.magic),
975 h->h80.command, h->h80.length);
978 mdev->last_received = jiffies;
983 static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
987 if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
988 rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
989 NULL, BLKDEV_IFL_WAIT);
991 dev_err(DEV, "local disk flush failed with status %d\n", rv);
992 /* would rather check on EOPNOTSUPP, but that is not reliable.
993 * don't try again for ANY return value != 0
994 * if (rv == -EOPNOTSUPP) */
995 drbd_bump_write_ordering(mdev, WO_drain_io);
1000 return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
1003 static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1005 struct flush_work *fw = (struct flush_work *)w;
1006 struct drbd_epoch *epoch = fw->epoch;
1010 if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
1011 drbd_flush_after_epoch(mdev, epoch);
1013 drbd_may_finish_epoch(mdev, epoch, EV_PUT |
1014 (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
1020 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishing it.
1021 * @mdev: DRBD device.
1022 * @epoch: Epoch object.
1025 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1026 struct drbd_epoch *epoch,
1027 enum epoch_event ev)
1029 int finish, epoch_size;
1030 struct drbd_epoch *next_epoch;
1031 int schedule_flush = 0;
1032 enum finish_epoch rv = FE_STILL_LIVE;
1034 spin_lock(&mdev->epoch_lock);
1039 epoch_size = atomic_read(&epoch->epoch_size);
1041 switch (ev & ~EV_CLEANUP) {
1043 atomic_dec(&epoch->active);
1045 case EV_GOT_BARRIER_NR:
1046 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
1048 /* Special case: If we just switched from WO_bio_barrier to
1049 WO_bdev_flush we should not finish the current epoch */
1050 if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
1051 mdev->write_ordering != WO_bio_barrier &&
1052 epoch == mdev->current_epoch)
1053 clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
1055 case EV_BARRIER_DONE:
1056 set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
1058 case EV_BECAME_LAST:
1063 if (epoch_size != 0 &&
1064 atomic_read(&epoch->active) == 0 &&
1065 test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
1066 epoch->list.prev == &mdev->current_epoch->list &&
1067 !test_bit(DE_IS_FINISHING, &epoch->flags)) {
1068 /* Nearly all conditions are met to finish that epoch... */
1069 if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
1070 mdev->write_ordering == WO_none ||
1071 (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
1074 set_bit(DE_IS_FINISHING, &epoch->flags);
1075 } else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
1076 mdev->write_ordering == WO_bio_barrier) {
1077 atomic_inc(&epoch->active);
1082 if (!(ev & EV_CLEANUP)) {
1083 spin_unlock(&mdev->epoch_lock);
1084 drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
1085 spin_lock(&mdev->epoch_lock);
1089 if (mdev->current_epoch != epoch) {
1090 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1091 list_del(&epoch->list);
1092 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
1096 if (rv == FE_STILL_LIVE)
1100 atomic_set(&epoch->epoch_size, 0);
1101 /* atomic_set(&epoch->active, 0); is already zero */
1102 if (rv == FE_STILL_LIVE)
1113 spin_unlock(&mdev->epoch_lock);
1115 if (schedule_flush) {
1116 struct flush_work *fw;
1117 fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
1121 drbd_queue_work(&mdev->data.work, &fw->w);
1123 dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
1124 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1125 /* That is not a recursion, only one level */
1126 drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
1127 drbd_may_finish_epoch(mdev, epoch, EV_PUT);
1135 * drbd_bump_write_ordering() - Fall back to another write ordering method
1136 * @mdev: DRBD device.
1137 * @wo: Write ordering method to try.
1139 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
1141 enum write_ordering_e pwo;
1142 static char *write_ordering_str[] = {
1144 [WO_drain_io] = "drain",
1145 [WO_bdev_flush] = "flush",
1146 [WO_bio_barrier] = "barrier",
1149 pwo = mdev->write_ordering;
1151 if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
1153 if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
1155 if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
1157 mdev->write_ordering = wo;
1158 if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
1159 dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
1164 * @mdev: DRBD device.
1166 * @rw: flag field, see bio->bi_rw
1168 /* TODO allocate from our own bio_set. */
1169 int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
1170 const unsigned rw, const int fault_type)
1172 struct bio *bios = NULL;
1174 struct page *page = e->pages;
1175 sector_t sector = e->sector;
1176 unsigned ds = e->size;
1177 unsigned n_bios = 0;
1178 unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
1180 /* In most cases, we will only need one bio. But in case the lower
1181 * level restrictions happen to be different at this offset on this
1182 * side than those of the sending peer, we may need to submit the
1183 * request in more than one bio. */
1185 bio = bio_alloc(GFP_NOIO, nr_pages);
1187 dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
1190 /* > e->sector, unless this is the first bio */
1191 bio->bi_sector = sector;
1192 bio->bi_bdev = mdev->ldev->backing_bdev;
1193 /* we special case some flags in the multi-bio case, see below
1194 * (REQ_UNPLUG, REQ_HARDBARRIER) */
1196 bio->bi_private = e;
1197 bio->bi_end_io = drbd_endio_sec;
1199 bio->bi_next = bios;
1203 page_chain_for_each(page) {
1204 unsigned len = min_t(unsigned, ds, PAGE_SIZE);
1205 if (!bio_add_page(bio, page, len, 0)) {
1206 /* a single page must always be possible! */
1207 BUG_ON(bio->bi_vcnt == 0);
1214 D_ASSERT(page == NULL);
1217 atomic_set(&e->pending_bios, n_bios);
1220 bios = bios->bi_next;
1221 bio->bi_next = NULL;
1223 /* strip off REQ_UNPLUG unless it is the last bio */
1225 bio->bi_rw &= ~REQ_UNPLUG;
1227 drbd_generic_make_request(mdev, fault_type, bio);
1229 /* strip off REQ_HARDBARRIER,
1230 * unless it is the first or last bio */
1231 if (bios && bios->bi_next)
1232 bios->bi_rw &= ~REQ_HARDBARRIER;
1234 maybe_kick_lo(mdev);
1240 bios = bios->bi_next;
1247 * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
1248 * @mdev: DRBD device.
1250 * @cancel: The connection will be closed anyways (unused in this callback)
1252 int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
1254 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1255 /* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
1256 (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
1257 so that we can finish that epoch in drbd_may_finish_epoch().
1258 That is necessary if we already have a long chain of Epochs, before
1259 we realize that REQ_HARDBARRIER is actually not supported */
1261 /* As long as the -ENOTSUPP on the barrier is reported immediately
1262 that will never trigger. If it is reported late, we will just
1263 print that warning and continue correctly for all future requests
1264 with WO_bdev_flush */
1265 if (previous_epoch(mdev, e->epoch))
1266 dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
1268 /* we still have a local reference,
1269 * get_ldev was done in receive_Data. */
1271 e->w.cb = e_end_block;
1272 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
1273 /* drbd_submit_ee fails for one reason only:
1274 * if it was not able to allocate sufficient bios.
1275 * requeue, try again later. */
1276 e->w.cb = w_e_reissue;
1277 drbd_queue_work(&mdev->data.work, &e->w);
1282 static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1284 int rv, issue_flush;
1285 struct p_barrier *p = &mdev->data.rbuf.barrier;
1286 struct drbd_epoch *epoch;
1290 if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
1293 mdev->current_epoch->barrier_nr = p->barrier;
1294 rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
1296 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1297 * the activity log, which means it would not be resynced in case the
1298 * R_PRIMARY crashes now.
1299 * Therefore we must send the barrier_ack after the barrier request was
1301 switch (mdev->write_ordering) {
1302 case WO_bio_barrier:
1304 if (rv == FE_RECYCLED)
1310 if (rv == FE_STILL_LIVE) {
1311 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1312 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1313 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1315 if (rv == FE_RECYCLED)
1318 /* The asender will send all the ACKs and barrier ACKs out, since
1319 all EEs moved from the active_ee to the done_ee. We need to
1320 provide a new epoch object for the EEs that come in soon */
1324 /* receiver context, in the writeout path of the other node.
1325 * avoid potential distributed deadlock */
1326 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1328 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1329 issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1330 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1332 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1333 if (rv == FE_RECYCLED)
1337 drbd_wait_ee_list_empty(mdev, &mdev->done_ee);
1343 atomic_set(&epoch->epoch_size, 0);
1344 atomic_set(&epoch->active, 0);
1346 spin_lock(&mdev->epoch_lock);
1347 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1348 list_add(&epoch->list, &mdev->current_epoch->list);
1349 mdev->current_epoch = epoch;
1352 /* The current_epoch got recycled while we allocated this one... */
1355 spin_unlock(&mdev->epoch_lock);
1360 /* used from receive_RSDataReply (recv_resync_read)
1361 * and from receive_Data */
1362 static struct drbd_epoch_entry *
1363 read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
1365 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1366 struct drbd_epoch_entry *e;
1369 void *dig_in = mdev->int_dig_in;
1370 void *dig_vv = mdev->int_dig_vv;
1371 unsigned long *data;
1373 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1374 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1377 rr = drbd_recv(mdev, dig_in, dgs);
1379 dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
1387 ERR_IF(data_size & 0x1ff) return NULL;
1388 ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;
1390 /* even though we trust our peer,
1391 * we sometimes have to double check. */
1392 if (sector + (data_size>>9) > capacity) {
1393 dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
1394 (unsigned long long)capacity,
1395 (unsigned long long)sector, data_size);
1399 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1400 * "criss-cross" setup, that might cause write-out on some other DRBD,
1401 * which in turn might block on the other node at this very place. */
1402 e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
1408 page_chain_for_each(page) {
1409 unsigned len = min_t(int, ds, PAGE_SIZE);
1411 rr = drbd_recv(mdev, data, len);
1412 if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
1413 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1414 data[0] = data[0] ^ (unsigned long)-1;
1418 drbd_free_ee(mdev, e);
1419 dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1427 drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
1428 if (memcmp(dig_in, dig_vv, dgs)) {
1429 dev_err(DEV, "Digest integrity check FAILED.\n");
1430 drbd_bcast_ee(mdev, "digest failed",
1431 dgs, dig_in, dig_vv, e);
1432 drbd_free_ee(mdev, e);
1436 mdev->recv_cnt += data_size>>9;
1440 /* drbd_drain_block() just takes a data block
1441 * out of the socket input buffer, and discards it.
1443 static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1452 page = drbd_pp_alloc(mdev, 1, 1);
1456 rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
1457 if (rr != min_t(int, data_size, PAGE_SIZE)) {
1459 dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1460 rr, min_t(int, data_size, PAGE_SIZE));
1466 drbd_pp_free(mdev, page, 0);
1470 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1471 sector_t sector, int data_size)
1473 struct bio_vec *bvec;
1475 int dgs, rr, i, expect;
1476 void *dig_in = mdev->int_dig_in;
1477 void *dig_vv = mdev->int_dig_vv;
1479 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1480 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1483 rr = drbd_recv(mdev, dig_in, dgs);
1485 dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
1493 /* optimistically update recv_cnt. if receiving fails below,
1494 * we disconnect anyways, and counters will be reset. */
1495 mdev->recv_cnt += data_size>>9;
1497 bio = req->master_bio;
1498 D_ASSERT(sector == bio->bi_sector);
1500 bio_for_each_segment(bvec, bio, i) {
1501 expect = min_t(int, data_size, bvec->bv_len);
1502 rr = drbd_recv(mdev,
1503 kmap(bvec->bv_page)+bvec->bv_offset,
1505 kunmap(bvec->bv_page);
1507 dev_warn(DEV, "short read receiving data reply: "
1508 "read %d expected %d\n",
1516 drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
1517 if (memcmp(dig_in, dig_vv, dgs)) {
1518 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1523 D_ASSERT(data_size == 0);
1527 /* e_end_resync_block() is called via
1528 * drbd_process_done_ee() by asender only */
1529 static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1531 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1532 sector_t sector = e->sector;
1535 D_ASSERT(hlist_unhashed(&e->colision));
1537 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1538 drbd_set_in_sync(mdev, sector, e->size);
1539 ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
1541 /* Record failure to sync */
1542 drbd_rs_failed_io(mdev, sector, e->size);
1544 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1551 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1553 struct drbd_epoch_entry *e;
1555 e = read_in_block(mdev, ID_SYNCER, sector, data_size);
1559 dec_rs_pending(mdev);
1562 /* corresponding dec_unacked() in e_end_resync_block()
1563 * or in _drbd_clear_done_ee, respectively */
1565 e->w.cb = e_end_resync_block;
1567 spin_lock_irq(&mdev->req_lock);
1568 list_add(&e->w.list, &mdev->sync_ee);
1569 spin_unlock_irq(&mdev->req_lock);
1571 atomic_add(data_size >> 9, &mdev->rs_sect_ev);
1572 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
1575 drbd_free_ee(mdev, e);
1581 static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1583 struct drbd_request *req;
1586 struct p_data *p = &mdev->data.rbuf.data;
1588 sector = be64_to_cpu(p->sector);
1590 spin_lock_irq(&mdev->req_lock);
1591 req = _ar_id_to_req(mdev, p->block_id, sector);
1592 spin_unlock_irq(&mdev->req_lock);
1593 if (unlikely(!req)) {
1594 dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
1598 /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
1599 * special casing it there for the various failure cases.
1600 * still no race with drbd_fail_pending_reads */
1601 ok = recv_dless_read(mdev, req, sector, data_size);
1604 req_mod(req, data_received);
1605 /* else: nothing. handled from drbd_disconnect...
1606 * I don't think we may complete this just yet
1607 * in case we are "on-disconnect: freeze" */
1612 static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1616 struct p_data *p = &mdev->data.rbuf.data;
1618 sector = be64_to_cpu(p->sector);
1619 D_ASSERT(p->block_id == ID_SYNCER);
1621 if (get_ldev(mdev)) {
1622 /* data is submitted to disk within recv_resync_read.
1623 * corresponding put_ldev done below on error,
1624 * or in drbd_endio_write_sec. */
1625 ok = recv_resync_read(mdev, sector, data_size);
1627 if (__ratelimit(&drbd_ratelimit_state))
1628 dev_err(DEV, "Can not write resync data to local disk.\n");
1630 ok = drbd_drain_block(mdev, data_size);
1632 drbd_send_ack_dp(mdev, P_NEG_ACK, p);
1635 atomic_add(data_size >> 9, &mdev->rs_sect_in);
1640 /* e_end_block() is called via drbd_process_done_ee().
1641 * this means this function only runs in the asender thread
1643 static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1645 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1646 sector_t sector = e->sector;
1647 struct drbd_epoch *epoch;
1650 if (e->flags & EE_IS_BARRIER) {
1651 epoch = previous_epoch(mdev, e->epoch);
1653 drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
1656 if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
1657 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1658 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1659 mdev->state.conn <= C_PAUSED_SYNC_T &&
1660 e->flags & EE_MAY_SET_IN_SYNC) ?
1661 P_RS_WRITE_ACK : P_WRITE_ACK;
1662 ok &= drbd_send_ack(mdev, pcmd, e);
1663 if (pcmd == P_RS_WRITE_ACK)
1664 drbd_set_in_sync(mdev, sector, e->size);
1666 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1667 /* we expect it to be marked out of sync anyways...
1668 * maybe assert this? */
1672 /* we delete from the conflict detection hash _after_ we sent out the
1673 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
1674 if (mdev->net_conf->two_primaries) {
1675 spin_lock_irq(&mdev->req_lock);
1676 D_ASSERT(!hlist_unhashed(&e->colision));
1677 hlist_del_init(&e->colision);
1678 spin_unlock_irq(&mdev->req_lock);
1680 D_ASSERT(hlist_unhashed(&e->colision));
1683 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1688 static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1690 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1693 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1694 ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1696 spin_lock_irq(&mdev->req_lock);
1697 D_ASSERT(!hlist_unhashed(&e->colision));
1698 hlist_del_init(&e->colision);
1699 spin_unlock_irq(&mdev->req_lock);
1706 /* Called from receive_Data.
1707 * Synchronize packets on sock with packets on msock.
1709 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1710 * packet traveling on msock, they are still processed in the order they have
1713 * Note: we don't care for Ack packets overtaking P_DATA packets.
1715 * In case packet_seq is larger than mdev->peer_seq number, there are
1716 * outstanding packets on the msock. We wait for them to arrive.
1717 * In case we are the logically next packet, we update mdev->peer_seq
1718 * ourselves. Correctly handles 32bit wrap around.
1720 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1721 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1722 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1723 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1725 * returns 0 if we may process the packet,
1726 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1727 static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1733 spin_lock(&mdev->peer_seq_lock);
1735 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1736 if (seq_le(packet_seq, mdev->peer_seq+1))
1738 if (signal_pending(current)) {
1742 p_seq = mdev->peer_seq;
1743 spin_unlock(&mdev->peer_seq_lock);
1744 timeout = schedule_timeout(30*HZ);
1745 spin_lock(&mdev->peer_seq_lock);
1746 if (timeout == 0 && p_seq == mdev->peer_seq) {
1748 dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1752 finish_wait(&mdev->seq_wait, &wait);
1753 if (mdev->peer_seq+1 == packet_seq)
1755 spin_unlock(&mdev->peer_seq_lock);
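
/* The wrap-safe sequence comparison assumed above is, in sketch form (the
 * actual seq_le() helper is defined elsewhere in this file and may differ
 * in detail):
 *
 *	static inline int seq_le(u32 a, u32 b)
 *	{
 *		return (s32)(a - b) <= 0;
 *	}
 */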
1759 static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
1761 if (mdev->agreed_pro_version >= 95)
1762 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1763 (dpf & DP_UNPLUG ? REQ_UNPLUG : 0) |
1764 (dpf & DP_FUA ? REQ_FUA : 0) |
1765 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
1766 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
1768 return dpf & DP_RW_SYNC ? (REQ_SYNC | REQ_UNPLUG) : 0;
1771 /* mirrored write */
1772 static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1775 struct drbd_epoch_entry *e;
1776 struct p_data *p = &mdev->data.rbuf.data;
1780 if (!get_ldev(mdev)) {
1781 if (__ratelimit(&drbd_ratelimit_state))
1782 dev_err(DEV, "Can not write mirrored data block "
1783 "to local disk.\n");
1784 spin_lock(&mdev->peer_seq_lock);
1785 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1787 spin_unlock(&mdev->peer_seq_lock);
1789 drbd_send_ack_dp(mdev, P_NEG_ACK, p);
1790 atomic_inc(&mdev->current_epoch->epoch_size);
1791 return drbd_drain_block(mdev, data_size);
1794 /* get_ldev(mdev) successful.
1795 * Corresponding put_ldev done either below (on various errors),
1796 * or in drbd_endio_write_sec, if we successfully submit the data at
1797 * the end of this function. */
1799 sector = be64_to_cpu(p->sector);
1800 e = read_in_block(mdev, p->block_id, sector, data_size);
1806 e->w.cb = e_end_block;
1808 spin_lock(&mdev->epoch_lock);
1809 e->epoch = mdev->current_epoch;
1810 atomic_inc(&e->epoch->epoch_size);
1811 atomic_inc(&e->epoch->active);
1813 if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
1814 struct drbd_epoch *epoch;
1815 /* Issue a barrier if we start a new epoch, and the previous epoch
1816 was not an epoch containing a single request which already was
1818 epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
1819 if (epoch == e->epoch) {
1820 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1821 rw |= REQ_HARDBARRIER;
1822 e->flags |= EE_IS_BARRIER;
1824 if (atomic_read(&epoch->epoch_size) > 1 ||
1825 !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
1826 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1827 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1828 rw |= REQ_HARDBARRIER;
1829 e->flags |= EE_IS_BARRIER;
1833 spin_unlock(&mdev->epoch_lock);
1835 dp_flags = be32_to_cpu(p->dp_flags);
1836 rw |= write_flags_to_bio(mdev, dp_flags);
1838 if (dp_flags & DP_MAY_SET_IN_SYNC)
1839 e->flags |= EE_MAY_SET_IN_SYNC;
1841 /* I'm the receiver, I do hold a net_cnt reference. */
1842 if (!mdev->net_conf->two_primaries) {
1843 spin_lock_irq(&mdev->req_lock);
1845 /* don't get the req_lock yet,
1846 * we may sleep in drbd_wait_peer_seq */
1847 const int size = e->size;
1848 const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1850 struct drbd_request *i;
1851 struct hlist_node *n;
1852 struct hlist_head *slot;
1855 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1856 BUG_ON(mdev->ee_hash == NULL);
1857 BUG_ON(mdev->tl_hash == NULL);
1859 /* conflict detection and handling:
1860 * 1. wait on the sequence number,
1861 * in case this data packet overtook ACK packets.
1862 * 2. check our hash tables for conflicting requests.
1863 * we only need to walk the tl_hash, since an ee cannot
1864 * have a conflict with another ee: on the submitting
1865 * node, the corresponding req had already been conflicting,
1866 * and a conflicting req is never sent.
1868 * Note: for two_primaries, we are protocol C,
1869 * so there cannot be any request that is DONE
1870 * but still on the transfer log.
1872 * unconditionally add to the ee_hash.
1874 * if no conflicting request is found:
1877 * if any conflicting request is found
1878 * that has not yet been acked,
1879 * AND I have the "discard concurrent writes" flag:
1880 * queue (via done_ee) the P_DISCARD_ACK; OUT.
1882 * if any conflicting request is found:
1883 * block the receiver, waiting on misc_wait
1884 * until no more conflicting requests are there,
1885 * or we get interrupted (disconnect).
1887 * we do not just write after local io completion of those
1888 * requests, but only after req is done completely, i.e.
1889 * we wait for the P_DISCARD_ACK to arrive!
1891 * then proceed normally, i.e. submit.
1893 if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1894 goto out_interrupted;
1896 spin_lock_irq(&mdev->req_lock);
1898 hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));
1900 #define OVERLAPS overlaps(i->sector, i->size, sector, size)
1901 slot = tl_hash_slot(mdev, sector);
1904 int have_unacked = 0;
1905 int have_conflict = 0;
1906 prepare_to_wait(&mdev->misc_wait, &wait,
1907 TASK_INTERRUPTIBLE);
1908 hlist_for_each_entry(i, n, slot, colision) {
1910 /* only ALERT on first iteration,
1911 * we may be woken up early... */
1913 dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1914 " new: %llus +%u; pending: %llus +%u\n",
1915 current->comm, current->pid,
1916 (unsigned long long)sector, size,
1917 (unsigned long long)i->sector, i->size);
1918 if (i->rq_state & RQ_NET_PENDING)
1927 /* Discard Ack only for the _first_ iteration */
1928 if (first && discard && have_unacked) {
1929 dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1930 (unsigned long long)sector);
1932 e->w.cb = e_send_discard_ack;
1933 list_add_tail(&e->w.list, &mdev->done_ee);
1935 spin_unlock_irq(&mdev->req_lock);
1937 /* we could probably send that P_DISCARD_ACK ourselves,
1938 * but I don't like the receiver using the msock */
1942 finish_wait(&mdev->misc_wait, &wait);
1946 if (signal_pending(current)) {
1947 hlist_del_init(&e->colision);
1949 spin_unlock_irq(&mdev->req_lock);
1951 finish_wait(&mdev->misc_wait, &wait);
1952 goto out_interrupted;
1955 spin_unlock_irq(&mdev->req_lock);
1958 dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1959 "sec=%llus\n", (unsigned long long)sector);
1960 } else if (discard) {
1961 /* we had none on the first iteration.
1962 * there must be none now. */
1963 D_ASSERT(have_unacked == 0);
1966 spin_lock_irq(&mdev->req_lock);
1968 finish_wait(&mdev->misc_wait, &wait);
1971 list_add(&e->w.list, &mdev->active_ee);
1972 spin_unlock_irq(&mdev->req_lock);
1974 switch (mdev->net_conf->wire_protocol) {
1977 /* corresponding dec_unacked() in e_end_block()
1978 * or in _drbd_clear_done_ee, respectively */
1981 /* I really don't like it that the receiver thread
1982 * sends on the msock, but anyways */
1983 drbd_send_ack(mdev, P_RECV_ACK, e);
1990 if (mdev->state.pdsk == D_DISKLESS) {
1991 /* In case we have the only disk of the cluster, */
1992 drbd_set_out_of_sync(mdev, e->sector, e->size);
1993 e->flags |= EE_CALL_AL_COMPLETE_IO;
1994 drbd_al_begin_io(mdev, e->sector);
1997 if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
2001 /* yes, the epoch_size now is imbalanced.
2002 * but we drop the connection anyways, so we don't have a chance to
2003 * receive a barrier... atomic_inc(&mdev->epoch_size); */
2005 drbd_free_ee(mdev, e);
2009 /* We may throttle resync, if the lower device seems to be busy,
2010 * and current sync rate is above c_min_rate.
2012 * To decide whether or not the lower device is busy, we use a scheme similar
2013 * to MD RAID is_mddev_idle(): if the partition stats reveal a "significant"
2014 * amount (more than 64 sectors) of activity we cannot account for with our own resync
2015 * activity, it obviously is "busy".
2017 * The current sync rate used here uses only the most recent two step marks,
2018 * to have a short time average so we can react faster.
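 *
 * A worked example with made-up numbers: if the two most recent step marks
 * are dt = 6 seconds apart and db = 30720 bitmap bits (4 KiB each) were
 * cleared in between, the recent rate is Bit2KB(db/dt) = 20480 KiB/s;
 * the resync is only considered for throttling once that rate already
 * exceeds the configured c_min_rate.
 */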
2020 int drbd_rs_should_slow_down(struct drbd_conf *mdev)
2022 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2023 unsigned long db, dt, dbdt;
2027 /* feature disabled? */
2028 if (mdev->sync_conf.c_min_rate == 0)
2031 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2032 (int)part_stat_read(&disk->part0, sectors[1]) -
2033 atomic_read(&mdev->rs_sect_ev);
2034 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2035 unsigned long rs_left;
2038 mdev->rs_last_events = curr_events;
2040 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2042 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-2) % DRBD_SYNC_MARKS;
2043 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
2045 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2048 db = mdev->rs_mark_left[i] - rs_left;
2049 dbdt = Bit2KB(db/dt);
2051 if (dbdt > mdev->sync_conf.c_min_rate)
2058 static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
2061 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
2062 struct drbd_epoch_entry *e;
2063 struct digest_info *di = NULL;
2065 unsigned int fault_type;
2066 struct p_block_req *p = &mdev->data.rbuf.block_req;
2068 sector = be64_to_cpu(p->sector);
2069 size = be32_to_cpu(p->blksize);
2071 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
2072 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2073 (unsigned long long)sector, size);
2076 if (sector + (size>>9) > capacity) {
2077 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2078 (unsigned long long)sector, size);
2082 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
2083 if (__ratelimit(&drbd_ratelimit_state))
2084 dev_err(DEV, "Can not satisfy peer's read request, "
2085 "no local data.\n");
2086 drbd_send_ack_rp(mdev, cmd == P_DATA_REQUEST ? P_NEG_DREPLY :
2087 P_NEG_RS_DREPLY , p);
2088 /* drain the payload, if any */
2089 return drbd_drain_block(mdev, digest_size);
2092 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2093 * "criss-cross" setup, that might cause write-out on some other DRBD,
2094 * which in turn might block on the other node at this very place. */
2095 e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
2102 case P_DATA_REQUEST:
2103 e->w.cb = w_e_end_data_req;
2104 fault_type = DRBD_FAULT_DT_RD;
2105 /* application IO, don't drbd_rs_begin_io */
2108 case P_RS_DATA_REQUEST:
2109 e->w.cb = w_e_end_rsdata_req;
2110 fault_type = DRBD_FAULT_RS_RD;
2114 case P_CSUM_RS_REQUEST:
2115 fault_type = DRBD_FAULT_RS_RD;
2116 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2120 di->digest_size = digest_size;
2121 di->digest = (((char *)di)+sizeof(struct digest_info));
2124 e->flags |= EE_HAS_DIGEST;
2126 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2129 if (cmd == P_CSUM_RS_REQUEST) {
2130 D_ASSERT(mdev->agreed_pro_version >= 89);
2131 e->w.cb = w_e_end_csum_rs_req;
2132 } else if (cmd == P_OV_REPLY) {
2133 e->w.cb = w_e_end_ov_reply;
2134 dec_rs_pending(mdev);
2135 /* drbd_rs_begin_io done when we sent this request,
2136 * but accounting still needs to be done. */
2137 goto submit_for_resync;
2142 if (mdev->state.conn >= C_CONNECTED &&
2143 mdev->state.conn != C_VERIFY_T)
2144 dev_warn(DEV, "ASSERT FAILED: got P_OV_REQUEST while being %s\n",
2145 drbd_conn_str(mdev->state.conn));
2146 if (mdev->ov_start_sector == ~(sector_t)0 &&
2147 mdev->agreed_pro_version >= 90) {
2148 mdev->ov_start_sector = sector;
2149 mdev->ov_position = sector;
2150 mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector);
2151 dev_info(DEV, "Online Verify start sector: %llu\n",
2152 (unsigned long long)sector);
2154 e->w.cb = w_e_end_ov_req;
2155 fault_type = DRBD_FAULT_RS_RD;
2159 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2161 fault_type = DRBD_FAULT_MAX;
2165 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2166 * wrt the receiver, but it is not as straightforward as it may seem.
2167 * Various places in the resync start and stop logic assume resync
2168 * requests are processed in order, requeuing this on the worker thread
2169 * introduces a bunch of new code for synchronization between threads.
2171 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2172 * "forever", throttling after drbd_rs_begin_io will lock that extent
2173 * for application writes for the same time. For now, just throttle
2174 * here, where the rest of the code expects the receiver to sleep for
2178 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2179 * this defers syncer requests for some time, before letting at least
2180 * one request through. The resync controller on the receiving side
2181 * will adapt to the incoming rate accordingly.
2183 * We cannot throttle here if remote is Primary/SyncTarget:
2184 * we would also throttle its application reads.
2185 * In that case, throttling is done on the SyncTarget only.
2187 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev))
2189 if (drbd_rs_begin_io(mdev, e->sector))
2193 atomic_add(size >> 9, &mdev->rs_sect_ev);
2197 spin_lock_irq(&mdev->req_lock);
2198 list_add_tail(&e->w.list, &mdev->read_ee);
2199 spin_unlock_irq(&mdev->req_lock);
2201 if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
2206 drbd_free_ee(mdev, e);
2210 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2212 int self, peer, rv = -100;
2213 unsigned long ch_self, ch_peer;
2215 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2216 peer = mdev->p_uuid[UI_BITMAP] & 1;
2218 ch_peer = mdev->p_uuid[UI_SIZE];
2219 ch_self = mdev->comm_bm_set;
2221 switch (mdev->net_conf->after_sb_0p) {
2223 case ASB_DISCARD_SECONDARY:
2224 case ASB_CALL_HELPER:
2225 dev_err(DEV, "Configuration error.\n");
2227 case ASB_DISCONNECT:
2229 case ASB_DISCARD_YOUNGER_PRI:
2230 if (self == 0 && peer == 1) {
2234 if (self == 1 && peer == 0) {
2238 /* Else fall through to one of the other strategies... */
2239 case ASB_DISCARD_OLDER_PRI:
2240 if (self == 0 && peer == 1) {
2244 if (self == 1 && peer == 0) {
2248 /* Else fall through to one of the other strategies... */
2249 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2250 "Using discard-least-changes instead\n");
2251 case ASB_DISCARD_ZERO_CHG:
2252 if (ch_peer == 0 && ch_self == 0) {
2253 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2257 if (ch_peer == 0) { rv = 1; break; }
2258 if (ch_self == 0) { rv = -1; break; }
2260 if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2262 case ASB_DISCARD_LEAST_CHG:
2263 if (ch_self < ch_peer)
2265 else if (ch_self > ch_peer)
2267 else /* ( ch_self == ch_peer ) */
2268 /* Well, then use something else. */
2269 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2272 case ASB_DISCARD_LOCAL:
2275 case ASB_DISCARD_REMOTE:
2282 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2284 int self, peer, hg, rv = -100;
2286 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2287 peer = mdev->p_uuid[UI_BITMAP] & 1;
2289 switch (mdev->net_conf->after_sb_1p) {
2290 case ASB_DISCARD_YOUNGER_PRI:
2291 case ASB_DISCARD_OLDER_PRI:
2292 case ASB_DISCARD_LEAST_CHG:
2293 case ASB_DISCARD_LOCAL:
2294 case ASB_DISCARD_REMOTE:
2295 dev_err(DEV, "Configuration error.\n");
2297 case ASB_DISCONNECT:
2300 hg = drbd_asb_recover_0p(mdev);
2301 if (hg == -1 && mdev->state.role == R_SECONDARY)
2303 if (hg == 1 && mdev->state.role == R_PRIMARY)
2307 rv = drbd_asb_recover_0p(mdev);
2309 case ASB_DISCARD_SECONDARY:
2310 return mdev->state.role == R_PRIMARY ? 1 : -1;
2311 case ASB_CALL_HELPER:
2312 hg = drbd_asb_recover_0p(mdev);
2313 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2314 self = drbd_set_role(mdev, R_SECONDARY, 0);
2315 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2316 * we might be here in C_WF_REPORT_PARAMS which is transient.
2317 * we do not need to wait for the after state change work either. */
2318 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2319 if (self != SS_SUCCESS) {
2320 drbd_khelper(mdev, "pri-lost-after-sb");
2322 dev_warn(DEV, "Successfully gave up primary role.\n");
2332 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2334 int self, peer, hg, rv = -100;
2336 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2337 peer = mdev->p_uuid[UI_BITMAP] & 1;
2339 switch (mdev->net_conf->after_sb_2p) {
2340 case ASB_DISCARD_YOUNGER_PRI:
2341 case ASB_DISCARD_OLDER_PRI:
2342 case ASB_DISCARD_LEAST_CHG:
2343 case ASB_DISCARD_LOCAL:
2344 case ASB_DISCARD_REMOTE:
2346 case ASB_DISCARD_SECONDARY:
2347 dev_err(DEV, "Configuration error.\n");
2350 rv = drbd_asb_recover_0p(mdev);
2352 case ASB_DISCONNECT:
2354 case ASB_CALL_HELPER:
2355 hg = drbd_asb_recover_0p(mdev);
2357 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2358 * we might be here in C_WF_REPORT_PARAMS which is transient.
2359 * we do not need to wait for the after state change work either. */
2360 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2361 if (self != SS_SUCCESS) {
2362 drbd_khelper(mdev, "pri-lost-after-sb");
2364 dev_warn(DEV, "Successfully gave up primary role.\n");
2374 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2375 u64 bits, u64 flags)
2378 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2381 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2383 (unsigned long long)uuid[UI_CURRENT],
2384 (unsigned long long)uuid[UI_BITMAP],
2385 (unsigned long long)uuid[UI_HISTORY_START],
2386 (unsigned long long)uuid[UI_HISTORY_END],
2387 (unsigned long long)bits,
2388 (unsigned long long)flags);
2392 100 after split brain try auto recover
2393 2 C_SYNC_SOURCE set BitMap
2394 1 C_SYNC_SOURCE use BitMap
2396 -1 C_SYNC_TARGET use BitMap
2397 -2 C_SYNC_TARGET set BitMap
2398 -100 after split brain, disconnect
2399 -1000 unrelated data
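  The sign of the return value gives the resync direction (positive: this
  node becomes C_SYNC_SOURCE, negative: C_SYNC_TARGET).  "use BitMap" means
  a bitmap based resync is sufficient, while "set BitMap" (+-2) requests a
  full sync, as evaluated in drbd_sync_handshake() below.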
2401 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2406 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2407 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2410 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2414 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2415 peer != UUID_JUST_CREATED)
2419 if (self != UUID_JUST_CREATED &&
2420 (peer == UUID_JUST_CREATED || peer == (u64)0))
2424 int rct, dc; /* roles at crash time */
2426 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2428 if (mdev->agreed_pro_version < 91)
2431 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2432 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2433 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2434 drbd_uuid_set_bm(mdev, 0UL);
2436 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2437 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2440 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2447 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2449 if (mdev->agreed_pro_version < 91)
2452 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2453 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2454 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2456 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2457 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2458 mdev->p_uuid[UI_BITMAP] = 0UL;
2460 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2463 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2470 /* Common power [off|failure] */
2471 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2472 (mdev->p_uuid[UI_FLAGS] & 2);
2473 /* lowest bit is set when we were primary,
2474 * next bit (weight 2) is set when peer was primary */
2478 case 0: /* !self_pri && !peer_pri */ return 0;
2479 case 1: /* self_pri && !peer_pri */ return 1;
2480 case 2: /* !self_pri && peer_pri */ return -1;
2481 case 3: /* self_pri && peer_pri */
2482 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2488 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2493 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2495 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2496 peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
2498 /* The last P_SYNC_UUID did not get through. Undo the modifications of
2499 the peer's UUIDs from the last start of resync as sync source. */
2501 if (mdev->agreed_pro_version < 91)
2504 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2505 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2511 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2512 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2513 peer = mdev->p_uuid[i] & ~((u64)1);
2519 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2520 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2525 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2527 self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
2528 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2530 /* The last P_SYNC_UUID did not get through. Undo the modifications of
2531 our UUIDs from the last start of resync as sync source. */
2533 if (mdev->agreed_pro_version < 91)
2536 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2537 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2539 dev_info(DEV, "Undid last start of resync:\n");
2541 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2542 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2550 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2551 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2552 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2558 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2559 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2560 if (self == peer && self != ((u64)0))
2564 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2565 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2566 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2567 peer = mdev->p_uuid[j] & ~((u64)1);
2576 /* drbd_sync_handshake() returns the new conn state on success, or
2577 C_MASK (-1) on failure.
2579 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2580 enum drbd_disk_state peer_disk) __must_hold(local)
2583 enum drbd_conns rv = C_MASK;
2584 enum drbd_disk_state mydisk;
2586 mydisk = mdev->state.disk;
2587 if (mydisk == D_NEGOTIATING)
2588 mydisk = mdev->new_state_tmp.disk;
2590 dev_info(DEV, "drbd_sync_handshake:\n");
2591 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2592 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2593 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2595 hg = drbd_uuid_compare(mdev, &rule_nr);
2597 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2600 dev_alert(DEV, "Unrelated data, aborting!\n");
2604 dev_alert(DEV, "To resolve this both sides have to support at least protocol\n");
2608 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2609 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2610 int f = (hg == -100) || abs(hg) == 2;
2611 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2614 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2615 hg > 0 ? "source" : "target");
2619 drbd_khelper(mdev, "initial-split-brain");
2621 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2622 int pcount = (mdev->state.role == R_PRIMARY)
2623 + (peer_role == R_PRIMARY);
2624 int forced = (hg == -100);
2628 hg = drbd_asb_recover_0p(mdev);
2631 hg = drbd_asb_recover_1p(mdev);
2634 hg = drbd_asb_recover_2p(mdev);
2637 if (abs(hg) < 100) {
2638 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2639 "automatically solved. Sync from %s node\n",
2640 pcount, (hg < 0) ? "peer" : "this");
2642 dev_warn(DEV, "Doing a full sync, since"
2643 " UUIDs where ambiguous.\n");
2650 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2652 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2656 dev_warn(DEV, "Split-Brain detected, manually solved. "
2657 "Sync from %s node\n",
2658 (hg < 0) ? "peer" : "this");
2662 /* FIXME this log message is not correct if we end up here
2663 * after an attempted attach on a diskless node.
2664 * We just refuse to attach -- well, we drop the "connection"
2665 * to that disk, in a way... */
2666 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
2667 drbd_khelper(mdev, "split-brain");
2671 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2672 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2676 if (hg < 0 && /* by intention we do not use mydisk here. */
2677 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2678 switch (mdev->net_conf->rr_conflict) {
2679 case ASB_CALL_HELPER:
2680 drbd_khelper(mdev, "pri-lost");
2682 case ASB_DISCONNECT:
2683 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2686 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2691 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2693 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2695 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2696 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2697 abs(hg) >= 2 ? "full" : "bit-map based");
2702 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2703 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
2707 if (hg > 0) { /* become sync source. */
2709 } else if (hg < 0) { /* become sync target */
2713 if (drbd_bm_total_weight(mdev)) {
2714 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2715 drbd_bm_total_weight(mdev));
2722 /* returns 1 if invalid */
2723 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2725 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2726 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2727 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2730 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2731 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2732 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2735 /* everything else is valid if they are equal on both sides. */
2739 /* everything else is invalid. */
2743 static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2745 struct p_protocol *p = &mdev->data.rbuf.protocol;
2746 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2747 int p_want_lose, p_two_primaries, cf;
2748 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2750 p_proto = be32_to_cpu(p->protocol);
2751 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2752 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2753 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
2754 p_two_primaries = be32_to_cpu(p->two_primaries);
2755 cf = be32_to_cpu(p->conn_flags);
2756 p_want_lose = cf & CF_WANT_LOSE;
2758 clear_bit(CONN_DRY_RUN, &mdev->flags);
2760 if (cf & CF_DRY_RUN)
2761 set_bit(CONN_DRY_RUN, &mdev->flags);
2763 if (p_proto != mdev->net_conf->wire_protocol) {
2764 dev_err(DEV, "incompatible communication protocols\n");
2768 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2769 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2773 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2774 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2778 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2779 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2783 if (p_want_lose && mdev->net_conf->want_lose) {
2784 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2788 if (p_two_primaries != mdev->net_conf->two_primaries) {
2789 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2793 if (mdev->agreed_pro_version >= 87) {
2794 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2796 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2799 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2800 if (strcmp(p_integrity_alg, my_alg)) {
2801 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2804 dev_info(DEV, "data-integrity-alg: %s\n",
2805 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2811 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2816 * input: alg name, feature name
2817 * return: NULL (alg name was "")
2818 * ERR_PTR(error) if something goes wrong
2819 * or the crypto hash ptr, if it worked out ok. */
2820 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2821 const char *alg, const char *name)
2823 struct crypto_hash *tfm;
2828 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2830 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2831 alg, name, PTR_ERR(tfm));
2834 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2835 crypto_free_hash(tfm);
2836 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2837 return ERR_PTR(-EINVAL);
2842 static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
2845 struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
2846 unsigned int header_size, data_size, exp_max_sz;
2847 struct crypto_hash *verify_tfm = NULL;
2848 struct crypto_hash *csums_tfm = NULL;
2849 const int apv = mdev->agreed_pro_version;
2850 int *rs_plan_s = NULL;
2853 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2854 : apv == 88 ? sizeof(struct p_rs_param)
2856 : apv <= 94 ? sizeof(struct p_rs_param_89)
2857 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
2859 if (packet_size > exp_max_sz) {
2860 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2861 packet_size, exp_max_sz);
2866 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2867 data_size = packet_size - header_size;
2868 } else if (apv <= 94) {
2869 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2870 data_size = packet_size - header_size;
2871 D_ASSERT(data_size == 0);
2873 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2874 data_size = packet_size - header_size;
2875 D_ASSERT(data_size == 0);
2878 /* initialize verify_alg and csums_alg */
2879 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
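/* (verify_alg and csums_alg are adjacent SHARED_SECRET_MAX sized arrays
 * in the packet, so this single memset clears both of them.) */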
2881 if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
2884 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2888 if (data_size > SHARED_SECRET_MAX) {
2889 dev_err(DEV, "verify-alg too long, "
2890 "peer wants %u, accepting only %u byte\n",
2891 data_size, SHARED_SECRET_MAX);
2895 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2898 /* we expect NUL terminated string */
2899 /* but just in case someone tries to be evil */
2900 D_ASSERT(p->verify_alg[data_size-1] == 0);
2901 p->verify_alg[data_size-1] = 0;
2903 } else /* apv >= 89 */ {
2904 /* we still expect NUL terminated strings */
2905 /* but just in case someone tries to be evil */
2906 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2907 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2908 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2909 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2912 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2913 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2914 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2915 mdev->sync_conf.verify_alg, p->verify_alg);
2918 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2919 p->verify_alg, "verify-alg");
2920 if (IS_ERR(verify_tfm)) {
2926 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2927 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2928 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2929 mdev->sync_conf.csums_alg, p->csums_alg);
2932 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2933 p->csums_alg, "csums-alg");
2934 if (IS_ERR(csums_tfm)) {
2941 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2942 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2943 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2944 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2945 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
2947 fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
2948 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2949 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
2951 dev_err(DEV, "kmalloc of fifo_buffer failed");
2957 spin_lock(&mdev->peer_seq_lock);
2958 /* lock against drbd_nl_syncer_conf() */
2960 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2961 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2962 crypto_free_hash(mdev->verify_tfm);
2963 mdev->verify_tfm = verify_tfm;
2964 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2967 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2968 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2969 crypto_free_hash(mdev->csums_tfm);
2970 mdev->csums_tfm = csums_tfm;
2971 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2973 if (fifo_size != mdev->rs_plan_s.size) {
2974 kfree(mdev->rs_plan_s.values);
2975 mdev->rs_plan_s.values = rs_plan_s;
2976 mdev->rs_plan_s.size = fifo_size;
2977 mdev->rs_planed = 0;
2979 spin_unlock(&mdev->peer_seq_lock);
2984 /* just for completeness: actually not needed,
2985 * as this is not reached if csums_tfm was ok. */
2986 crypto_free_hash(csums_tfm);
2987 /* but free the verify_tfm again, if csums_tfm did not work out */
2988 crypto_free_hash(verify_tfm);
2989 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2993 static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
2995 /* sorry, we currently have no working implementation
2996 * of distributed TCQ */
2999 /* warn if the arguments differ by more than 12.5% */
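/* ("more than 12.5%" because d > a>>3 compares the difference against one
 * eighth of a; e.g. a = 1000 and b = 860 sectors give d = 140 > 125 and
 * trigger the warning, while a = 1000, b = 900 do not.) */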
3000 static void warn_if_differ_considerably(struct drbd_conf *mdev,
3001 const char *s, sector_t a, sector_t b)
3004 if (a == 0 || b == 0)
3006 d = (a > b) ? (a - b) : (b - a);
3007 if (d > (a>>3) || d > (b>>3))
3008 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3009 (unsigned long long)a, (unsigned long long)b);
3012 static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3014 struct p_sizes *p = &mdev->data.rbuf.sizes;
3015 enum determine_dev_size dd = unchanged;
3016 unsigned int max_seg_s;
3017 sector_t p_size, p_usize, my_usize;
3018 int ldsc = 0; /* local disk size changed */
3019 enum dds_flags ddsf;
3021 p_size = be64_to_cpu(p->d_size);
3022 p_usize = be64_to_cpu(p->u_size);
3024 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
3025 dev_err(DEV, "some backing storage is needed\n");
3026 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3030 /* just store the peer's disk size for now.
3031 * we still need to figure out whether we accept that. */
3032 mdev->p_size = p_size;
3034 #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
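/* min_not_zero() treats 0 as "no limit": min_not_zero(0, 5) == 5,
 * min_not_zero(3, 0) == 3, min_not_zero(3, 5) == 3.  It is used below to
 * combine our configured disk_size with the size requested by the peer. */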
3035 if (get_ldev(mdev)) {
3036 warn_if_differ_considerably(mdev, "lower level device sizes",
3037 p_size, drbd_get_max_capacity(mdev->ldev));
3038 warn_if_differ_considerably(mdev, "user requested size",
3039 p_usize, mdev->ldev->dc.disk_size);
3041 /* if this is the first connect, or an otherwise expected
3042 * param exchange, choose the minimum */
3043 if (mdev->state.conn == C_WF_REPORT_PARAMS)
3044 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
3047 my_usize = mdev->ldev->dc.disk_size;
3049 if (mdev->ldev->dc.disk_size != p_usize) {
3050 mdev->ldev->dc.disk_size = p_usize;
3051 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3052 (unsigned long)mdev->ldev->dc.disk_size);
3055 /* Never shrink a device with usable data during connect.
3056 But allow online shrinking if we are connected. */
3057 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
3058 drbd_get_capacity(mdev->this_bdev) &&
3059 mdev->state.disk >= D_OUTDATED &&
3060 mdev->state.conn < C_CONNECTED) {
3061 dev_err(DEV, "The peer's disk size is too small!\n");
3062 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3063 mdev->ldev->dc.disk_size = my_usize;
3071 ddsf = be16_to_cpu(p->dds_flags);
3072 if (get_ldev(mdev)) {
3073 dd = drbd_determin_dev_size(mdev, ddsf);
3075 if (dd == dev_size_error)
3079 /* I am diskless, need to accept the peer's size. */
3080 drbd_set_my_capacity(mdev, p_size);
3083 if (get_ldev(mdev)) {
3084 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3085 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3089 if (mdev->agreed_pro_version < 94)
3090 max_seg_s = be32_to_cpu(p->max_segment_size);
3091 else /* drbd 8.3.8 onwards */
3092 max_seg_s = DRBD_MAX_SEGMENT_SIZE;
3094 if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
3095 drbd_setup_queue_param(mdev, max_seg_s);
3097 drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
3101 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3102 if (be64_to_cpu(p->c_size) !=
3103 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3104 /* we have different sizes, probably peer
3105 * needs to know my new size... */
3106 drbd_send_sizes(mdev, 0, ddsf);
3108 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3109 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3110 if (mdev->state.pdsk >= D_INCONSISTENT &&
3111 mdev->state.disk >= D_INCONSISTENT) {
3112 if (ddsf & DDSF_NO_RESYNC)
3113 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3115 resync_after_online_grow(mdev);
3117 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3124 static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3126 struct p_uuids *p = &mdev->data.rbuf.uuids;
3130 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3132 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3133 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3135 kfree(mdev->p_uuid);
3136 mdev->p_uuid = p_uuid;
3138 if (mdev->state.conn < C_CONNECTED &&
3139 mdev->state.disk < D_INCONSISTENT &&
3140 mdev->state.role == R_PRIMARY &&
3141 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3142 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3143 (unsigned long long)mdev->ed_uuid);
3144 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3148 if (get_ldev(mdev)) {
3149 int skip_initial_sync =
3150 mdev->state.conn == C_CONNECTED &&
3151 mdev->agreed_pro_version >= 90 &&
3152 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3153 (p_uuid[UI_FLAGS] & 8);
3154 if (skip_initial_sync) {
3155 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3156 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3157 "clear_n_write from receive_uuids");
3158 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3159 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3160 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3165 } else if (mdev->state.disk < D_INCONSISTENT &&
3166 mdev->state.role == R_PRIMARY) {
3167 /* I am a diskless primary, the peer just created a new current UUID
3169 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3172 /* Before we test the disk state, we should wait until any ongoing
3173 cluster-wide state change is finished. That is important if
3174 we are primary and are detaching from our disk. We need to see the
3175 new disk state... */
3176 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3177 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3178 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3184 * convert_state() - Converts the peer's view of the cluster state to our point of view
3185 * @ps: The state as seen by the peer.
3187 static union drbd_state convert_state(union drbd_state ps)
3189 union drbd_state ms;
3191 static enum drbd_conns c_tab[] = {
3192 [C_CONNECTED] = C_CONNECTED,
3194 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3195 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3196 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3197 [C_VERIFY_S] = C_VERIFY_T,
3203 ms.conn = c_tab[ps.conn];
3208 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
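/* Roughly: role/peer and disk/pdsk swap places, and asymmetric connection
 * states are mirrored via c_tab above; a peer that reports itself as
 * Primary SyncSource is, from our point of view, a Primary peer with us
 * as the SyncTarget. */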
3213 static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3215 struct p_req_state *p = &mdev->data.rbuf.req_state;
3216 union drbd_state mask, val;
3219 mask.i = be32_to_cpu(p->mask);
3220 val.i = be32_to_cpu(p->val);
3222 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3223 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3224 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3228 mask = convert_state(mask);
3229 val = convert_state(val);
3231 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3233 drbd_send_sr_reply(mdev, rv);
3239 static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3241 struct p_state *p = &mdev->data.rbuf.state;
3242 enum drbd_conns nconn, oconn;
3243 union drbd_state ns, peer_state;
3244 enum drbd_disk_state real_peer_disk;
3245 enum chg_state_flags cs_flags;
3248 peer_state.i = be32_to_cpu(p->state);
3250 real_peer_disk = peer_state.disk;
3251 if (peer_state.disk == D_NEGOTIATING) {
3252 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3253 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3256 spin_lock_irq(&mdev->req_lock);
3258 oconn = nconn = mdev->state.conn;
3259 spin_unlock_irq(&mdev->req_lock);
3261 if (nconn == C_WF_REPORT_PARAMS)
3262 nconn = C_CONNECTED;
3264 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3265 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3266 int cr; /* consider resync */
3268 /* if we established a new connection */
3269 cr = (oconn < C_CONNECTED);
3270 /* if we had an established connection
3271 * and one of the nodes newly attaches a disk */
3272 cr |= (oconn == C_CONNECTED &&
3273 (peer_state.disk == D_NEGOTIATING ||
3274 mdev->state.disk == D_NEGOTIATING));
3275 /* if we have both been inconsistent, and the peer has been
3276 * forced to be UpToDate with --overwrite-data */
3277 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3278 /* if we had been plain connected, and the admin requested to
3279 * start a sync by "invalidate" or "invalidate-remote" */
3280 cr |= (oconn == C_CONNECTED &&
3281 (peer_state.conn >= C_STARTING_SYNC_S &&
3282 peer_state.conn <= C_WF_BITMAP_T));
3285 nconn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3288 if (nconn == C_MASK) {
3289 nconn = C_CONNECTED;
3290 if (mdev->state.disk == D_NEGOTIATING) {
3291 drbd_force_state(mdev, NS(disk, D_DISKLESS));
3292 } else if (peer_state.disk == D_NEGOTIATING) {
3293 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3294 peer_state.disk = D_DISKLESS;
3295 real_peer_disk = D_DISKLESS;
3297 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
3299 D_ASSERT(oconn == C_WF_REPORT_PARAMS);
3300 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3306 spin_lock_irq(&mdev->req_lock);
3307 if (mdev->state.conn != oconn)
3309 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3310 ns.i = mdev->state.i;
3312 ns.peer = peer_state.role;
3313 ns.pdsk = real_peer_disk;
3314 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3315 if ((nconn == C_CONNECTED || nconn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3316 ns.disk = mdev->new_state_tmp.disk;
3317 cs_flags = CS_VERBOSE + (oconn < C_CONNECTED && nconn >= C_CONNECTED ? 0 : CS_HARD);
3318 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && nconn == C_CONNECTED && oconn < C_CONNECTED &&
3319 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3320 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3321 for temporary network outages! */
3322 spin_unlock_irq(&mdev->req_lock);
3323 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3325 drbd_uuid_new_current(mdev);
3326 clear_bit(NEW_CUR_UUID, &mdev->flags);
3327 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
3330 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3332 spin_unlock_irq(&mdev->req_lock);
3334 if (rv < SS_SUCCESS) {
3335 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3339 if (oconn > C_WF_REPORT_PARAMS) {
3340 if (nconn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3341 peer_state.disk != D_NEGOTIATING ) {
3342 /* we want resync, peer has not yet decided to sync... */
3343 /* Nowadays only used when forcing a node into primary role and
3344 setting its disk to UpToDate with that */
3345 drbd_send_uuids(mdev);
3346 drbd_send_state(mdev);
3350 mdev->net_conf->want_lose = 0;
3352 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3357 static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3359 struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
3361 wait_event(mdev->misc_wait,
3362 mdev->state.conn == C_WF_SYNC_UUID ||
3363 mdev->state.conn < C_CONNECTED ||
3364 mdev->state.disk < D_NEGOTIATING);
3366 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3368 /* Here the _drbd_uuid_ functions are right, current should
3369 _not_ be rotated into the history */
3370 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3371 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3372 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3374 drbd_start_resync(mdev, C_SYNC_TARGET);
3378 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3383 enum receive_bitmap_ret { OK, DONE, FAILED };
3385 static enum receive_bitmap_ret
3386 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3387 unsigned long *buffer, struct bm_xfer_ctx *c)
3389 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3390 unsigned want = num_words * sizeof(long);
3392 if (want != data_size) {
3393 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
3398 if (drbd_recv(mdev, buffer, want) != want)
3401 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3403 c->word_offset += num_words;
3404 c->bit_offset = c->word_offset * BITS_PER_LONG;
3405 if (c->bit_offset > c->bm_bits)
3406 c->bit_offset = c->bm_bits;
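/* The compressed variant encodes the bitmap as alternating runs of set and
 * cleared bits.  DCBP_get_start() says whether the first run consists of set
 * bits; after that the meaning simply toggles.  Run lengths are stored as
 * variable length integers (vli), so e.g. start=0 with run lengths 5,2,9
 * describes 5 cleared, 2 set and another 9 cleared bits. */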
3411 static enum receive_bitmap_ret
3412 recv_bm_rle_bits(struct drbd_conf *mdev,
3413 struct p_compressed_bm *p,
3414 struct bm_xfer_ctx *c)
3416 struct bitstream bs;
3420 unsigned long s = c->bit_offset;
3422 int len = p->head.length - (sizeof(*p) - sizeof(p->head));
3423 int toggle = DCBP_get_start(p);
3427 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3429 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3433 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3434 bits = vli_decode_bits(&rl, look_ahead);
3440 if (e >= c->bm_bits) {
3441 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3444 _drbd_bm_set_bits(mdev, s, e);
3448 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3449 have, bits, look_ahead,
3450 (unsigned int)(bs.cur.b - p->code),
3451 (unsigned int)bs.buf_len);
3454 look_ahead >>= bits;
3457 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3460 look_ahead |= tmp << have;
3465 bm_xfer_ctx_bit_to_word_offset(c);
3467 return (s == c->bm_bits) ? DONE : OK;
3470 static enum receive_bitmap_ret
3471 decode_bitmap_c(struct drbd_conf *mdev,
3472 struct p_compressed_bm *p,
3473 struct bm_xfer_ctx *c)
3475 if (DCBP_get_code(p) == RLE_VLI_Bits)
3476 return recv_bm_rle_bits(mdev, p, c);
3478 /* other variants had been implemented for evaluation,
3479 * but have been dropped as this one turned out to be "best"
3480 * during all our tests. */
3482 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3483 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3487 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3488 const char *direction, struct bm_xfer_ctx *c)
3490 /* what would it take to transfer it "plaintext" */
3491 unsigned plain = sizeof(struct p_header80) *
3492 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3493 + c->bm_words * sizeof(long);
3494 unsigned total = c->bytes[0] + c->bytes[1];
3497 /* total cannot be zero, but just in case: */
3501 /* don't report if not compressed */
3505 /* total < plain. check for overflow, still */
3506 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3507 : (1000 * total / plain);
3513 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3514 "total %u; compression: %u.%u%%\n",
3516 c->bytes[1], c->packets[1],
3517 c->bytes[0], c->packets[0],
3518 total, r/10, r % 10);
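/* (r holds the compression figure in per-mille at this point, so r/10 and
 * r%10 print it with one decimal place: r == 988 is reported as 98.8%.) */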
3521 /* Since we are processing the bitfield from lower addresses to higher,
3522 it does not matter whether we process it in 32 bit or 64 bit chunks,
3523 as long as it is little endian. (Understand it as a byte stream,
3524 beginning with the lowest byte...) If we used big endian
3525 we would need to process it from the highest address to the lowest,
3526 in order to be agnostic to the 32 vs 64 bit issue.
3528 returns 0 on failure, 1 if we successfully received it. */
3529 static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3531 struct bm_xfer_ctx c;
3533 enum receive_bitmap_ret ret;
3535 struct p_header80 *h = &mdev->data.rbuf.header.h80;
3537 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3539 drbd_bm_lock(mdev, "receive bitmap");
3541 /* maybe we should use some per thread scratch page,
3542 * and allocate that during initial device creation? */
3543 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3545 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3549 c = (struct bm_xfer_ctx) {
3550 .bm_bits = drbd_bm_bits(mdev),
3551 .bm_words = drbd_bm_words(mdev),
3555 if (cmd == P_BITMAP) {
3556 ret = receive_bitmap_plain(mdev, data_size, buffer, &c);
3557 } else if (cmd == P_COMPRESSED_BITMAP) {
3558 /* MAYBE: sanity check that we speak proto >= 90,
3559 * and the feature is enabled! */
3560 struct p_compressed_bm *p;
3562 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
3563 dev_err(DEV, "ReportCBitmap packet too large\n");
3566 /* use the page buff */
3568 memcpy(p, h, sizeof(*h));
3569 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
3571 if (p->head.length <= (sizeof(*p) - sizeof(p->head))) {
3572 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", p->head.length);
3575 ret = decode_bitmap_c(mdev, p, &c);
3577 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
3581 c.packets[cmd == P_BITMAP]++;
3582 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
3587 if (!drbd_recv_header(mdev, &cmd, &data_size))
3589 } while (ret == OK);
3593 INFO_bm_xfer_stats(mdev, "receive", &c);
3595 if (mdev->state.conn == C_WF_BITMAP_T) {
3596 ok = !drbd_send_bitmap(mdev);
3599 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3600 ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3601 D_ASSERT(ok == SS_SUCCESS);
3602 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3603 /* admin may have requested C_DISCONNECTING,
3604 * other threads may have noticed network errors */
3605 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3606 drbd_conn_str(mdev->state.conn));
3611 drbd_bm_unlock(mdev);
3612 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3613 drbd_start_resync(mdev, C_SYNC_SOURCE);
3614 free_page((unsigned long) buffer);
3618 static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3620 /* TODO zero copy sink :) */
3621 static char sink[128];
3624 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3629 want = min_t(int, size, sizeof(sink));
3630 r = drbd_recv(mdev, sink, want);
3631 ERR_IF(r <= 0) break;
3637 static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3639 if (mdev->state.disk >= D_INCONSISTENT)
3642 /* Make sure we've acked all the TCP data associated
3643 * with the data requests being unplugged */
3644 drbd_tcp_quickack(mdev->data.socket);
3649 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
3654 drbd_cmd_handler_f function;
3657 static struct data_cmd drbd_cmd_handler[] = {
3658 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3659 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3660 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3661 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3662 [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3663 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3664 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3665 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3666 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3667 [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
3668 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
3669 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3670 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3671 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3672 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3673 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3674 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3675 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3676 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3677 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3678 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
3679 /* anything missing from this table is in
3680 * the asender_tbl, see get_asender_cmd */
3681 [P_MAX_CMD] = { 0, 0, NULL },
3684 /* All handler functions that expect a sub-header get that sub-header in
3685 mdev->data.rbuf.header.head.payload.
3687 Usually the callback can also find the usual p_header in
3688 mdev->data.rbuf.header.head, but it must not rely on that, since there is also p_header95.
3691 static void drbdd(struct drbd_conf *mdev)
3693 union p_header *header = &mdev->data.rbuf.header;
3694 unsigned int packet_size;
3695 enum drbd_packets cmd;
3696 size_t shs; /* sub header size */
3699 while (get_t_state(&mdev->receiver) == Running) {
3700 drbd_thread_current_set_cpu(mdev);
3701 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3704 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3705 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3709 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
3710 rv = drbd_recv(mdev, &header->h80.payload, shs);
3711 if (unlikely(rv != shs)) {
3712 dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
3716 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3717 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3721 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3723 if (unlikely(!rv)) {
3724 dev_err(DEV, "error receiving %s, l: %d!\n",
3725 cmdname(cmd), packet_size);
3732 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3736 void drbd_flush_workqueue(struct drbd_conf *mdev)
3738 struct drbd_wq_barrier barr;
3740 barr.w.cb = w_prev_work_done;
3741 init_completion(&barr.done);
3742 drbd_queue_work(&mdev->data.work, &barr.w);
3743 wait_for_completion(&barr.done);
3746 void drbd_free_tl_hash(struct drbd_conf *mdev)
3748 struct hlist_head *h;
3750 spin_lock_irq(&mdev->req_lock);
3752 if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
3753 spin_unlock_irq(&mdev->req_lock);
3757 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3759 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3760 (int)(h - mdev->ee_hash), h->first);
3761 kfree(mdev->ee_hash);
3762 mdev->ee_hash = NULL;
3763 mdev->ee_hash_s = 0;
3766 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3768 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3769 (int)(h - mdev->tl_hash), h->first);
3770 kfree(mdev->tl_hash);
3771 mdev->tl_hash = NULL;
3772 mdev->tl_hash_s = 0;
3773 spin_unlock_irq(&mdev->req_lock);
3776 static void drbd_disconnect(struct drbd_conf *mdev)
3778 enum drbd_fencing_p fp;
3779 union drbd_state os, ns;
3780 int rv = SS_UNKNOWN_ERROR;
3783 if (mdev->state.conn == C_STANDALONE)
3785 if (mdev->state.conn >= C_WF_CONNECTION)
3786 dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
3787 drbd_conn_str(mdev->state.conn));
3789 /* asender does not clean up anything. it must not interfere, either */
3790 drbd_thread_stop(&mdev->asender);
3791 drbd_free_sock(mdev);
3793 /* wait for current activity to cease. */
3794 spin_lock_irq(&mdev->req_lock);
3795 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3796 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3797 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3798 spin_unlock_irq(&mdev->req_lock);
3800 /* We do not have data structures that would allow us to
3801 * get the rs_pending_cnt down to 0 again.
3802 * * On C_SYNC_TARGET we do not have any data structures describing
3803 * the pending RSDataRequest's we have sent.
3804 * * On C_SYNC_SOURCE there is no data structure that tracks
3805 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3806 * And no, it is not the sum of the reference counts in the
3807 * resync_LRU. The resync_LRU tracks the whole operation including
3808 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3810 drbd_rs_cancel_all(mdev);
3812 mdev->rs_failed = 0;
3813 atomic_set(&mdev->rs_pending_cnt, 0);
3814 wake_up(&mdev->misc_wait);
3816 /* make sure syncer is stopped and w_resume_next_sg queued */
3817 del_timer_sync(&mdev->resync_timer);
3818 resync_timer_fn((unsigned long)mdev);
3820 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3821 * w_make_resync_request etc. which may still be on the worker queue
3822 * to be "canceled" */
3823 drbd_flush_workqueue(mdev);
3825 /* This also does reclaim_net_ee(). If we do this too early, we might
3826 * miss some resync ee and pages.*/
3827 drbd_process_done_ee(mdev);
3829 kfree(mdev->p_uuid);
3830 mdev->p_uuid = NULL;
3832 if (!is_susp(mdev->state))
3835 dev_info(DEV, "Connection closed\n");
3840 if (get_ldev(mdev)) {
3841 fp = mdev->ldev->dc.fencing;
3845 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3846 drbd_try_outdate_peer_async(mdev);
3848 spin_lock_irq(&mdev->req_lock);
3850 if (os.conn >= C_UNCONNECTED) {
3851 /* Do not restart in case we are C_DISCONNECTING */
3853 ns.conn = C_UNCONNECTED;
3854 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3856 spin_unlock_irq(&mdev->req_lock);
3858 if (os.conn == C_DISCONNECTING) {
3859 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
3861 if (!is_susp(mdev->state)) {
3862 /* we must not free the tl_hash
3863 * while application io is still on the fly */
3864 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3865 drbd_free_tl_hash(mdev);
3868 crypto_free_hash(mdev->cram_hmac_tfm);
3869 mdev->cram_hmac_tfm = NULL;
3871 kfree(mdev->net_conf);
3872 mdev->net_conf = NULL;
3873 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3876 /* tcp_close and release of sendpage pages can be deferred. I don't
3877 * want to use SO_LINGER, because apparently it can be deferred for
3878 * more than 20 seconds (longest time I checked).
3880 * Actually we don't care exactly when the network stack does its
3881 * put_page(), but release our reference on these pages right here.
3883 i = drbd_release_ee(mdev, &mdev->net_ee);
3885 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
3886 i = atomic_read(&mdev->pp_in_use_by_net);
3888 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
3889 i = atomic_read(&mdev->pp_in_use);
3891 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
3893 D_ASSERT(list_empty(&mdev->read_ee));
3894 D_ASSERT(list_empty(&mdev->active_ee));
3895 D_ASSERT(list_empty(&mdev->sync_ee));
3896 D_ASSERT(list_empty(&mdev->done_ee));
3898 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3899 atomic_set(&mdev->current_epoch->epoch_size, 0);
3900 D_ASSERT(list_empty(&mdev->current_epoch->list));
3904 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3905 * we can agree on is stored in agreed_pro_version.
3907 * feature flags and the reserved array should be enough room for future
3908 * enhancements of the handshake protocol, and possible plugins...
3910 * for now, they are expected to be zero, but ignored.
3912 static int drbd_send_handshake(struct drbd_conf *mdev)
3914 /* ASSERT current == mdev->receiver ... */
3915 struct p_handshake *p = &mdev->data.sbuf.handshake;
3918 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3919 dev_err(DEV, "interrupted during initial handshake\n");
3920 return 0; /* interrupted. not ok. */
3923 if (mdev->data.socket == NULL) {
3924 mutex_unlock(&mdev->data.mutex);
3928 memset(p, 0, sizeof(*p));
3929 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3930 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3931 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
3932 (struct p_header80 *)p, sizeof(*p), 0 );
3933 mutex_unlock(&mdev->data.mutex);
3939 * 1 yes, we have a valid connection
3940 * 0 oops, did not work out, please try again
3941 * -1 peer talks different language,
3942 * no point in trying again, please go standalone.
3944 static int drbd_do_handshake(struct drbd_conf *mdev)
3946 /* ASSERT current == mdev->receiver ... */
3947 struct p_handshake *p = &mdev->data.rbuf.handshake;
3948 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
3949 unsigned int length;
3950 enum drbd_packets cmd;
3953 rv = drbd_send_handshake(mdev);
3957 rv = drbd_recv_header(mdev, &cmd, &length);
3961 if (cmd != P_HAND_SHAKE) {
3962 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
3967 if (length != expect) {
3968 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
3973 rv = drbd_recv(mdev, &p->head.payload, expect);
3976 dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
3980 p->protocol_min = be32_to_cpu(p->protocol_min);
3981 p->protocol_max = be32_to_cpu(p->protocol_max);
3982 if (p->protocol_max == 0)
3983 p->protocol_max = p->protocol_min;
3985 if (PRO_VERSION_MAX < p->protocol_min ||
3986 PRO_VERSION_MIN > p->protocol_max)
3989 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3991 dev_info(DEV, "Handshake successful: "
3992 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3997 dev_err(DEV, "incompatible DRBD dialects: "
3998 "I support %d-%d, peer supports %d-%d\n",
3999 PRO_VERSION_MIN, PRO_VERSION_MAX,
4000 p->protocol_min, p->protocol_max);
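/* Example: if, say, we support protocol versions 86..95 and the peer
 * advertises 90..100, the ranges overlap and agreed_pro_version becomes 95;
 * a peer advertising only 96..100 would end up in the "incompatible
 * dialects" branch above. */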
4004 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4005 static int drbd_do_auth(struct drbd_conf *mdev)
4007 dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
4008 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
4012 #define CHALLENGE_LEN 64
4016 0 - failed, try again (network error),
4017 -1 - auth failed, don't try again.
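   The exchange is a mutual challenge-response: each side sends a random
   challenge, answers the peer's challenge with an HMAC keyed by the shared
   secret, and finally compares the peer's response against the HMAC it
   computed over its own challenge.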
4020 static int drbd_do_auth(struct drbd_conf *mdev)
4022 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4023 struct scatterlist sg;
4024 char *response = NULL;
4025 char *right_response = NULL;
4026 char *peers_ch = NULL;
4027 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
4028 unsigned int resp_size;
4029 struct hash_desc desc;
4030 enum drbd_packets cmd;
4031 unsigned int length;
4034 desc.tfm = mdev->cram_hmac_tfm;
4037 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
4038 (u8 *)mdev->net_conf->shared_secret, key_len);
4040 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
4045 get_random_bytes(my_challenge, CHALLENGE_LEN);
4047 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4051 rv = drbd_recv_header(mdev, &cmd, &length);
4055 if (cmd != P_AUTH_CHALLENGE) {
4056 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
4062 if (length > CHALLENGE_LEN * 2) {
4063 dev_err(DEV, "expected AuthChallenge payload too big.\n");
4068 peers_ch = kmalloc(length, GFP_NOIO);
4069 if (peers_ch == NULL) {
4070 dev_err(DEV, "kmalloc of peers_ch failed\n");
4075 rv = drbd_recv(mdev, peers_ch, length);
4078 dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
4083 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4084 response = kmalloc(resp_size, GFP_NOIO);
4085 if (response == NULL) {
4086 dev_err(DEV, "kmalloc of response failed\n");
4091 sg_init_table(&sg, 1);
4092 sg_set_buf(&sg, peers_ch, length);
4094 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4096 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4101 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4105 rv = drbd_recv_header(mdev, &cmd, &length);
4109 if (cmd != P_AUTH_RESPONSE) {
4110 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
4116 if (length != resp_size) {
4117 dev_err(DEV, "expected AuthResponse payload of wrong size\n");
4122 rv = drbd_recv(mdev, response , resp_size);
4124 if (rv != resp_size) {
4125 dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
4130 right_response = kmalloc(resp_size, GFP_NOIO);
4131 if (right_response == NULL) {
4132 dev_err(DEV, "kmalloc of right_response failed\n");
4137 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4139 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4141 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4146 rv = !memcmp(response, right_response, resp_size);
4149 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4150 resp_size, mdev->net_conf->cram_hmac_alg);
4157 kfree(right_response);
4163 int drbdd_init(struct drbd_thread *thi)
4165 struct drbd_conf *mdev = thi->mdev;
4166 unsigned int minor = mdev_to_minor(mdev);
4169 sprintf(current->comm, "drbd%d_receiver", minor);
4171 dev_info(DEV, "receiver (re)started\n");
4174 h = drbd_connect(mdev);
4176 drbd_disconnect(mdev);
4177 __set_current_state(TASK_INTERRUPTIBLE);
4178 schedule_timeout(HZ);
4181 dev_warn(DEV, "Discarding network configuration.\n");
4182 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4187 if (get_net_conf(mdev)) {
4193 drbd_disconnect(mdev);
4195 dev_info(DEV, "receiver terminated\n");
4199 /* ********* acknowledge sender ******** */
4201 static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
4203 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4205 int retcode = be32_to_cpu(p->retcode);
4207 if (retcode >= SS_SUCCESS) {
4208 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4210 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4211 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4212 drbd_set_st_err_str(retcode), retcode);
4214 wake_up(&mdev->state_wait);
4219 static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
4221 return drbd_send_ping_ack(mdev);
4225 static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
4227 /* restore idle timeout */
4228 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4229 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4230 wake_up(&mdev->misc_wait);
4235 static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
4237 struct p_block_ack *p = (struct p_block_ack *)h;
4238 sector_t sector = be64_to_cpu(p->sector);
4239 int blksize = be32_to_cpu(p->blksize);
4241 D_ASSERT(mdev->agreed_pro_version >= 89);
4243 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4245 if (get_ldev(mdev)) {
4246 drbd_rs_complete_io(mdev, sector);
4247 drbd_set_in_sync(mdev, sector, blksize);
4248 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4249 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4252 dec_rs_pending(mdev);
4253 atomic_add(blksize >> 9, &mdev->rs_sect_in);
4258 /* when we receive the ACK for a write request,
4259 * verify that we actually know about it */
4260 static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4261 u64 id, sector_t sector)
4263 struct hlist_head *slot = tl_hash_slot(mdev, sector);
4264 struct hlist_node *n;
4265 struct drbd_request *req;
4267 hlist_for_each_entry(req, n, slot, colision) {
4268 if ((unsigned long)req == (unsigned long)id) {
4269 if (req->sector != sector) {
4270 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4271 "wrong sector (%llus versus %llus)\n", req,
4272 (unsigned long long)req->sector,
4273 (unsigned long long)sector);
4279 dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
4280 (void *)(unsigned long)id, (unsigned long long)sector);
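/* For regular writes the block_id on the wire is simply the request pointer,
 * which is why the lookup above can compare it directly.  Resync traffic uses
 * a magic syncer block_id instead and is filtered out via
 * is_syncer_block_id() in the Ack handlers below. */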
4284 typedef struct drbd_request *(req_validator_fn)
4285 (struct drbd_conf *mdev, u64 id, sector_t sector);
4287 static int validate_req_change_req_state(struct drbd_conf *mdev,
4288 u64 id, sector_t sector, req_validator_fn validator,
4289 const char *func, enum drbd_req_event what)
4291 struct drbd_request *req;
4292 struct bio_and_error m;
4294 spin_lock_irq(&mdev->req_lock);
4295 req = validator(mdev, id, sector);
4296 if (unlikely(!req)) {
4297 spin_unlock_irq(&mdev->req_lock);
4298 dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
4301 __req_mod(req, what, &m);
4302 spin_unlock_irq(&mdev->req_lock);
4305 complete_master_bio(mdev, &m);
4309 static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
4311 struct p_block_ack *p = (struct p_block_ack *)h;
4312 sector_t sector = be64_to_cpu(p->sector);
4313 int blksize = be32_to_cpu(p->blksize);
4314 enum drbd_req_event what;
4316 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4318 if (is_syncer_block_id(p->block_id)) {
4319 drbd_set_in_sync(mdev, sector, blksize);
4320 dec_rs_pending(mdev);
4323 switch (be16_to_cpu(h->command)) {
4324 case P_RS_WRITE_ACK:
4325 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4326 what = write_acked_by_peer_and_sis;
4329 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4330 what = write_acked_by_peer;
4333 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4334 what = recv_acked_by_peer;
4337 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4338 what = conflict_discarded_by_peer;
4345 return validate_req_change_req_state(mdev, p->block_id, sector,
4346 _ack_id_to_req, __func__ , what);
4349 static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
4351 struct p_block_ack *p = (struct p_block_ack *)h;
4352 sector_t sector = be64_to_cpu(p->sector);
4354 if (__ratelimit(&drbd_ratelimit_state))
4355 dev_warn(DEV, "Got NegAck packet. Peer is in troubles?\n");
4357 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4359 if (is_syncer_block_id(p->block_id)) {
4360 int size = be32_to_cpu(p->blksize);
4361 dec_rs_pending(mdev);
4362 drbd_rs_failed_io(mdev, sector, size);
4365 return validate_req_change_req_state(mdev, p->block_id, sector,
4366 _ack_id_to_req, __func__ , neg_acked);
4369 static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
4371 struct p_block_ack *p = (struct p_block_ack *)h;
4372 sector_t sector = be64_to_cpu(p->sector);
4374 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4375 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4376 (unsigned long long)sector, be32_to_cpu(p->blksize));
4378 return validate_req_change_req_state(mdev, p->block_id, sector,
4379 _ar_id_to_req, __func__ , neg_acked);
4382 static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
4386 struct p_block_ack *p = (struct p_block_ack *)h;
4388 sector = be64_to_cpu(p->sector);
4389 size = be32_to_cpu(p->blksize);
4391 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4393 dec_rs_pending(mdev);
4395 if (get_ldev_if_state(mdev, D_FAILED)) {
4396 drbd_rs_complete_io(mdev, sector);
4397 drbd_rs_failed_io(mdev, sector, size);
4404 static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
4406 struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4408 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));

static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
{
        struct p_block_ack *p = (struct p_block_ack *)h;
        struct drbd_work *w;
        sector_t sector;
        int size;

        sector = be64_to_cpu(p->sector);
        size = be32_to_cpu(p->blksize);

        update_peer_seq(mdev, be32_to_cpu(p->seq_num));

        if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
                drbd_ov_oos_found(mdev, sector, size);
        else
                ov_oos_print(mdev);

        if (!get_ldev(mdev))
                return TRUE;

        drbd_rs_complete_io(mdev, sector);
        dec_rs_pending(mdev);

        if (--mdev->ov_left == 0) {
                w = kmalloc(sizeof(*w), GFP_NOIO);
                if (w) {
                        w->cb = w_ov_finished;
                        drbd_queue_work_front(&mdev->data.work, w);
                } else {
                        dev_err(DEV, "kmalloc(w) failed.");
                        ov_oos_print(mdev);
                        drbd_resync_finished(mdev);
                }
        }
        put_ldev(mdev);
        return TRUE;
}
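
/*
 * Online verify accounting: ov_left tracks how much of the verify run is
 * still outstanding; every P_OV_RESULT decrements it, and the reply that
 * brings it to zero queues w_ov_finished at the front of the worker queue so
 * the run is wrapped up promptly.  If even that small work item cannot be
 * allocated, we fall back to finishing the resync state machine directly
 * from this context.
 */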

static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
        return TRUE;
}

struct asender_cmd {
        size_t pkt_size;
        int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
};

static struct asender_cmd *get_asender_cmd(int cmd)
{
        static struct asender_cmd asender_tbl[] = {
                /* anything missing from this table is in
                 * the drbd_cmd_handler (drbd_default_handler) table,
                 * see the beginning of drbdd() */
        [P_PING]            = { sizeof(struct p_header80), got_Ping },
        [P_PING_ACK]        = { sizeof(struct p_header80), got_PingAck },
        [P_RECV_ACK]        = { sizeof(struct p_block_ack), got_BlockAck },
        [P_WRITE_ACK]       = { sizeof(struct p_block_ack), got_BlockAck },
        [P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
        [P_DISCARD_ACK]     = { sizeof(struct p_block_ack), got_BlockAck },
        [P_NEG_ACK]         = { sizeof(struct p_block_ack), got_NegAck },
        [P_NEG_DREPLY]      = { sizeof(struct p_block_ack), got_NegDReply },
        [P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
        [P_OV_RESULT]       = { sizeof(struct p_block_ack), got_OVResult },
        [P_BARRIER_ACK]     = { sizeof(struct p_barrier_ack), got_BarrierAck },
        [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
        [P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
        [P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
        [P_MAX_CMD]         = { 0, NULL },
        };
        if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
                return NULL;
        return &asender_tbl[cmd];
}
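
/*
 * A sketch of what a new meta-socket command would need, assuming a
 * hypothetical packet type P_EXAMPLE_ACK carrying a p_block_ack payload
 * (neither the packet type nor the handler below exist in DRBD; they are
 * illustration only):
 *
 *      static int got_ExampleAck(struct drbd_conf *mdev, struct p_header80 *h)
 *      {
 *              struct p_block_ack *p = (struct p_block_ack *)h;
 *              update_peer_seq(mdev, be32_to_cpu(p->seq_num));
 *              return TRUE;
 *      }
 *
 * (returning FALSE from a handler makes drbd_asender() go through reconnect)
 * plus a table entry giving the full on-the-wire packet size, so that
 * drbd_asender() knows how many bytes to receive before dispatching:
 *
 *      [P_EXAMPLE_ACK] = { sizeof(struct p_block_ack), got_ExampleAck },
 */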

int drbd_asender(struct drbd_thread *thi)
{
        struct drbd_conf *mdev = thi->mdev;
        struct p_header80 *h = &mdev->meta.rbuf.header.h80;
        struct asender_cmd *cmd = NULL;

        int rv, len;
        void *buf    = h;
        int received = 0;
        int expect   = sizeof(struct p_header80);
        int empty;

        sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

        current->policy = SCHED_RR; /* Make this a realtime task! */
        current->rt_priority = 2; /* more important than all other tasks */

        while (get_t_state(thi) == Running) {
                drbd_thread_current_set_cpu(mdev);
                if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
                        ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
                        mdev->meta.socket->sk->sk_rcvtimeo =
                                mdev->net_conf->ping_timeo*HZ/10;
                }

                /* conditionally cork;
                 * it may hurt latency if we cork without much to send */
                if (!mdev->net_conf->no_cork &&
                        3 < atomic_read(&mdev->unacked_cnt))
                        drbd_tcp_cork(mdev->meta.socket);
                while (1) {
                        clear_bit(SIGNAL_ASENDER, &mdev->flags);
                        flush_signals(current);
                        if (!drbd_process_done_ee(mdev)) {
                                dev_err(DEV, "process_done_ee() = NOT_OK\n");
                                goto reconnect;
                        }
                        /* to avoid race with newly queued ACKs */
                        set_bit(SIGNAL_ASENDER, &mdev->flags);
                        spin_lock_irq(&mdev->req_lock);
                        empty = list_empty(&mdev->done_ee);
                        spin_unlock_irq(&mdev->req_lock);
                        /* new ack may have been queued right here,
                         * but then there is also a signal pending,
                         * and we start over... */
                        if (empty)
                                break;
                }
                /* but unconditionally uncork unless disabled */
                if (!mdev->net_conf->no_cork)
                        drbd_tcp_uncork(mdev->meta.socket);
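
                /*
                 * The cork/uncork pair above batches ACK traffic: while more
                 * than a handful of requests are still unacknowledged we set
                 * TCP_CORK, let drbd_process_done_ee() emit its (typically
                 * small) ACK packets, and then uncork so the whole batch
                 * leaves in as few TCP segments as possible instead of one
                 * segment per ACK.
                 */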

                /* short circuit, recv_msg would return EINTR anyways. */
                if (signal_pending(current))
                        continue;

                rv = drbd_recv_short(mdev, mdev->meta.socket,
                                     buf, expect-received, 0);
                clear_bit(SIGNAL_ASENDER, &mdev->flags);

                flush_signals(current);

                /* Note:
                 * -EINTR        (on meta) we got a signal
                 * -EAGAIN       (on meta) rcvtimeo expired
                 * -ECONNRESET   other side closed the connection
                 * -ERESTARTSYS  (on data) we got a signal
                 * rv <  0       other than above: unexpected error!
                 * rv == expected: full header or command
                 * rv  < expected: "woken" by signal during receive
                 * rv == 0       : "connection shut down by peer"
                 */
                if (likely(rv > 0)) {
                        received += rv;
                        buf      += rv;
                } else if (rv == 0) {
                        dev_err(DEV, "meta connection shut down by peer.\n");
                        goto reconnect;
                } else if (rv == -EAGAIN) {
                        if (mdev->meta.socket->sk->sk_rcvtimeo ==
                            mdev->net_conf->ping_timeo*HZ/10) {
                                dev_err(DEV, "PingAck did not arrive in time.\n");
                                goto reconnect;
                        }
                        set_bit(SEND_PING, &mdev->flags);
                        continue;
                } else if (rv == -EINTR) {
                        continue;
                } else {
                        dev_err(DEV, "sock_recvmsg returned %d\n", rv);
                        goto reconnect;
                }

                if (received == expect && cmd == NULL) {
                        if (unlikely(h->magic != BE_DRBD_MAGIC)) {
                                dev_err(DEV, "magic?? on meta m: 0x%lx c: %d l: %d\n",
                                    (long)be32_to_cpu(h->magic),
                                    h->command, h->length);
                                goto reconnect;
                        }
                        cmd = get_asender_cmd(be16_to_cpu(h->command));
                        len = be16_to_cpu(h->length);
                        if (unlikely(cmd == NULL)) {
                                dev_err(DEV, "unknown command?? on meta m: 0x%lx c: %d l: %d\n",
                                    (long)be32_to_cpu(h->magic),
                                    h->command, h->length);
                                goto disconnect;
                        }
                        expect = cmd->pkt_size;
                        ERR_IF(len != expect-sizeof(struct p_header80))
                                goto reconnect;
                }
                if (received == expect) {
                        D_ASSERT(cmd != NULL);
                        if (!cmd->process(mdev, h))
                                goto reconnect;

                        buf      = h;
                        received = 0;
                        expect   = sizeof(struct p_header80);
                        cmd      = NULL;
                }
        }

        if (0) {
reconnect:
                drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
        }
        if (0) {
disconnect:
                drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
        }
        clear_bit(SIGNAL_ASENDER, &mdev->flags);

        D_ASSERT(mdev->state.conn < C_CONNECTED);
        dev_info(DEV, "asender terminated\n");