4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/module.h>
28 #include <asm/uaccess.h>
31 #include <linux/drbd.h>
33 #include <linux/file.h>
36 #include <linux/memcontrol.h>
37 #include <linux/mm_inline.h>
38 #include <linux/slab.h>
39 #include <linux/pkt_sched.h>
40 #define __KERNEL_SYSCALLS__
41 #include <linux/unistd.h>
42 #include <linux/vmalloc.h>
43 #include <linux/random.h>
44 #include <linux/string.h>
45 #include <linux/scatterlist.h>
57 static int drbd_do_handshake(struct drbd_conf *mdev);
58 static int drbd_do_auth(struct drbd_conf *mdev);
60 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
61 static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
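/* Note: GFP_TRY deliberately lacks __GFP_WAIT (and thus __GFP_IO/__GFP_FS):
 * such an allocation fails fast instead of sleeping in direct reclaim, so it
 * can never trigger the write-out described in the "criss-cross" comments
 * below. */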
64 #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
67 * some helper functions to deal with singly linked page lists,
68 * page->private being our "next" pointer.
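/* For reference: the traversal helpers used below (page_chain_next,
 * page_chain_for_each, page_chain_for_each_safe) are defined in drbd_int.h.
 * A minimal sketch of what they presumably expand to (the real macros also
 * prefetch the next element):
 *
 *	#define page_chain_next(page) \
 *		((struct page *)page_private(page))
 *	#define page_chain_for_each(page) \
 *		for (; page; page = page_chain_next(page))
 *	#define page_chain_for_each_safe(page, n) \
 *		for (; page && ({ n = page_chain_next(page); 1; }); page = n)
 */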
71 /* If at least n pages are linked at head, get n pages off.
72 * Otherwise, don't modify head, and return NULL.
73 * Locking is the responsibility of the caller.
75 static struct page *page_chain_del(struct page **head, int n)
89 tmp = page_chain_next(page);
91 break; /* found sufficient pages */
93 /* insufficient pages, don't use any of them. */
98 /* add end of list marker for the returned list */
99 set_page_private(page, 0);
100 /* actual return value, and adjustment of head */
106 /* may be used outside of locks to find the tail of a (usually short)
107 * "private" page chain, before adding it back to a global chain head
108 * with page_chain_add() under a spinlock. */
109 static struct page *page_chain_tail(struct page *page, int *len)
113 while ((tmp = page_chain_next(page)))
120 static int page_chain_free(struct page *page)
124 page_chain_for_each_safe(page, tmp) {
131 static void page_chain_add(struct page **head,
132 struct page *chain_first, struct page *chain_last)
136 tmp = page_chain_tail(chain_first, NULL);
137 BUG_ON(tmp != chain_last);
140 /* add chain to head */
141 set_page_private(chain_last, (unsigned long)*head);
145 static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
147 struct page *page = NULL;
148 struct page *tmp = NULL;
151 /* Yes, testing drbd_pp_vacant outside the lock is racy.
152 * So what. It saves a spin_lock. */
153 if (drbd_pp_vacant >= number) {
154 spin_lock(&drbd_pp_lock);
155 page = page_chain_del(&drbd_pp_pool, number);
157 drbd_pp_vacant -= number;
158 spin_unlock(&drbd_pp_lock);
163 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
164 * "criss-cross" setup, that might cause write-out on some other DRBD,
165 * which in turn might block on the other node at this very place. */
166 for (i = 0; i < number; i++) {
167 tmp = alloc_page(GFP_TRY);
170 set_page_private(tmp, (unsigned long)page);
177 /* Not enough pages immediately available this time.
178 * No need to jump around here, drbd_pp_alloc will retry this
179 * function "soon". */
181 tmp = page_chain_tail(page, NULL);
182 spin_lock(&drbd_pp_lock);
183 page_chain_add(&drbd_pp_pool, page, tmp);
185 spin_unlock(&drbd_pp_lock);
190 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
192 struct drbd_epoch_entry *e;
193 struct list_head *le, *tle;
195 /* The EEs are always appended to the end of the list. Since
196 they are sent in order over the wire, they have to finish
197 in order. As soon as we see the first unfinished one, we can
198 stop examining the list...
200 list_for_each_safe(le, tle, &mdev->net_ee) {
201 e = list_entry(le, struct drbd_epoch_entry, w.list);
202 if (drbd_ee_has_active_page(e))
204 list_move(le, to_be_freed);
208 static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
210 LIST_HEAD(reclaimed);
211 struct drbd_epoch_entry *e, *t;
213 spin_lock_irq(&mdev->req_lock);
214 reclaim_net_ee(mdev, &reclaimed);
215 spin_unlock_irq(&mdev->req_lock);
217 list_for_each_entry_safe(e, t, &reclaimed, w.list)
218 drbd_free_net_ee(mdev, e);
222 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
223 * @mdev: DRBD device.
224 * @number: number of pages requested
225 * @retry: whether to retry if not enough pages are available right now
227 * Tries to allocate @number pages, first from our own page pool, then from
228 * the kernel, unless this allocation would exceed the max_buffers setting.
229 * Possibly retry until DRBD frees sufficient pages somewhere else.
231 * Returns a page chain linked via page->private.
233 static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
235 struct page *page = NULL;
238 /* Yes, we may run up to @number over max_buffers. If we
239 * follow it strictly, the admin will get it wrong anyways. */
240 if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
241 page = drbd_pp_first_pages_or_try_alloc(mdev, number);
243 while (page == NULL) {
244 prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
246 drbd_kick_lo_and_reclaim_net(mdev);
248 if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
249 page = drbd_pp_first_pages_or_try_alloc(mdev, number);
257 if (signal_pending(current)) {
258 dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
264 finish_wait(&drbd_pp_wait, &wait);
267 atomic_add(number, &mdev->pp_in_use);
271 /* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
272 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
273 * Either links the page chain back to the global pool,
274 * or returns all pages to the system. */
275 static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
277 atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
280 if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
281 i = page_chain_free(page);
284 tmp = page_chain_tail(page, &i);
285 spin_lock(&drbd_pp_lock);
286 page_chain_add(&drbd_pp_pool, page, tmp);
288 spin_unlock(&drbd_pp_lock);
290 i = atomic_sub_return(i, a);
292 dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
293 is_net ? "pp_in_use_by_net" : "pp_in_use", i);
294 wake_up(&drbd_pp_wait);
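/* Worked example for the vacancy check above (illustrative numbers: 4 KiB
 * pages, DRBD_MAX_BIO_SIZE of 128 KiB): the global pool keeps at most
 * 128 KiB / 4 KiB == 32 pages per configured minor; once drbd_pp_vacant
 * exceeds that, returned chains are handed back to the system via
 * page_chain_free() instead of being re-linked onto drbd_pp_pool. */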
298 You need to hold the req_lock:
299 _drbd_wait_ee_list_empty()
301 You must not have the req_lock:
307 drbd_process_done_ee()
309 drbd_wait_ee_list_empty()
312 struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
315 unsigned int data_size,
316 gfp_t gfp_mask) __must_hold(local)
318 struct drbd_epoch_entry *e;
320 unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
322 if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
325 e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
327 if (!(gfp_mask & __GFP_NOWARN))
328 dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
332 page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
336 INIT_HLIST_NODE(&e->colision);
340 atomic_set(&e->pending_bios, 0);
349 mempool_free(e, drbd_ee_mempool);
353 void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
355 if (e->flags & EE_HAS_DIGEST)
357 drbd_pp_free(mdev, e->pages, is_net);
358 D_ASSERT(atomic_read(&e->pending_bios) == 0);
359 D_ASSERT(hlist_unhashed(&e->colision));
360 mempool_free(e, drbd_ee_mempool);
363 int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
365 LIST_HEAD(work_list);
366 struct drbd_epoch_entry *e, *t;
368 int is_net = list == &mdev->net_ee;
370 spin_lock_irq(&mdev->req_lock);
371 list_splice_init(list, &work_list);
372 spin_unlock_irq(&mdev->req_lock);
374 list_for_each_entry_safe(e, t, &work_list, w.list) {
375 drbd_free_some_ee(mdev, e, is_net);
383 * This function is called from _asender only_
384 * but see also comments in _req_mod(,barrier_acked)
385 * and receive_Barrier.
387 * Move entries from net_ee to done_ee, if ready.
388 * Grab done_ee, call all callbacks, free the entries.
389 * The callbacks typically send out ACKs.
391 static int drbd_process_done_ee(struct drbd_conf *mdev)
393 LIST_HEAD(work_list);
394 LIST_HEAD(reclaimed);
395 struct drbd_epoch_entry *e, *t;
396 int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
398 spin_lock_irq(&mdev->req_lock);
399 reclaim_net_ee(mdev, &reclaimed);
400 list_splice_init(&mdev->done_ee, &work_list);
401 spin_unlock_irq(&mdev->req_lock);
403 list_for_each_entry_safe(e, t, &reclaimed, w.list)
404 drbd_free_net_ee(mdev, e);
406 /* possible callbacks here:
407 * e_end_block, and e_end_resync_block, e_send_discard_ack.
408 * all ignore the last argument.
410 list_for_each_entry_safe(e, t, &work_list, w.list) {
411 /* list_del not necessary, next/prev members not touched */
412 ok = e->w.cb(mdev, &e->w, !ok) && ok;
413 drbd_free_ee(mdev, e);
415 wake_up(&mdev->ee_wait);
420 void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
424 /* avoids spin_lock/unlock
425 * and calling prepare_to_wait in the fast path */
426 while (!list_empty(head)) {
427 prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
428 spin_unlock_irq(&mdev->req_lock);
430 finish_wait(&mdev->ee_wait, &wait);
431 spin_lock_irq(&mdev->req_lock);
435 void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
437 spin_lock_irq(&mdev->req_lock);
438 _drbd_wait_ee_list_empty(mdev, head);
439 spin_unlock_irq(&mdev->req_lock);
442 /* see also kernel_accept(), which is only present since 2.6.18.
443 * also, we want to log exactly which part of it failed */
444 static int drbd_accept(struct drbd_conf *mdev, const char **what,
445 struct socket *sock, struct socket **newsock)
447 struct sock *sk = sock->sk;
451 err = sock->ops->listen(sock, 5);
455 *what = "sock_create_lite";
456 err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
462 err = sock->ops->accept(sock, *newsock, 0);
464 sock_release(*newsock);
468 (*newsock)->ops = sock->ops;
474 static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
475 void *buf, size_t size, int flags)
482 struct msghdr msg = {
484 .msg_iov = (struct iovec *)&iov,
485 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
491 rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
497 static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
504 struct msghdr msg = {
506 .msg_iov = (struct iovec *)&iov,
507 .msg_flags = MSG_WAITALL | MSG_NOSIGNAL
515 rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
520 * ECONNRESET other side closed the connection
521 * ERESTARTSYS (on sock) we got a signal
525 if (rv == -ECONNRESET)
526 dev_info(DEV, "sock was reset by peer\n");
527 else if (rv != -ERESTARTSYS)
528 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
530 } else if (rv == 0) {
531 dev_info(DEV, "sock was shut down by peer\n");
534 /* signal came in, or peer/link went down,
535 * after we read a partial message
537 /* D_ASSERT(signal_pending(current)); */
545 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
551 * On individual connections, the socket buffer size must be set prior to the
552 * listen(2) or connect(2) calls in order to have it take effect.
553 * This is our wrapper to do so.
555 static void drbd_setbufsize(struct socket *sock, unsigned int snd,
558 /* open coded SO_SNDBUF, SO_RCVBUF */
560 sock->sk->sk_sndbuf = snd;
561 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
564 sock->sk->sk_rcvbuf = rcv;
565 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
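/* A sketch of the userspace equivalent, for comparison only; note that the
 * in-kernel setsockopt(2) path doubles the requested value, while we assign
 * sk_sndbuf/sk_rcvbuf verbatim:
 *
 *	int sz = snd;
 *	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sz, sizeof(sz));
 *
 * The *_LOCK bits set above keep TCP buffer autotuning from later resizing
 * what we configured explicitly. */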
569 static struct socket *drbd_try_connect(struct drbd_conf *mdev)
573 struct sockaddr_in6 src_in6;
575 int disconnect_on_error = 1;
577 if (!get_net_conf(mdev))
580 what = "sock_create_kern";
581 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
582 SOCK_STREAM, IPPROTO_TCP, &sock);
588 sock->sk->sk_rcvtimeo =
589 sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
590 drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
591 mdev->net_conf->rcvbuf_size);
593 /* explicitly bind to the configured IP as source IP
594 * for the outgoing connections.
595 * This is needed for multihomed hosts and to be
596 * able to use lo: interfaces for drbd.
597 * Make sure to use 0 as port number, so linux selects
598 * a free one dynamically.
600 memcpy(&src_in6, mdev->net_conf->my_addr,
601 min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
602 if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
603 src_in6.sin6_port = 0;
605 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
607 what = "bind before connect";
608 err = sock->ops->bind(sock,
609 (struct sockaddr *) &src_in6,
610 mdev->net_conf->my_addr_len);
614 /* connect may fail, peer not yet available.
615 * stay C_WF_CONNECTION, don't go Disconnecting! */
616 disconnect_on_error = 0;
618 err = sock->ops->connect(sock,
619 (struct sockaddr *)mdev->net_conf->peer_addr,
620 mdev->net_conf->peer_addr_len, 0);
629 /* timeout, busy, signal pending */
630 case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
631 case EINTR: case ERESTARTSYS:
632 /* peer not (yet) available, network problem */
633 case ECONNREFUSED: case ENETUNREACH:
634 case EHOSTDOWN: case EHOSTUNREACH:
635 disconnect_on_error = 0;
638 dev_err(DEV, "%s failed, err = %d\n", what, err);
640 if (disconnect_on_error)
641 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
647 static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
650 struct socket *s_estab = NULL, *s_listen;
653 if (!get_net_conf(mdev))
656 what = "sock_create_kern";
657 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
658 SOCK_STREAM, IPPROTO_TCP, &s_listen);
664 timeo = mdev->net_conf->try_connect_int * HZ;
665 timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* ±14.3% random jitter (28.5% spread) */
667 s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
668 s_listen->sk->sk_rcvtimeo = timeo;
669 s_listen->sk->sk_sndtimeo = timeo;
670 drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
671 mdev->net_conf->rcvbuf_size);
673 what = "bind before listen";
674 err = s_listen->ops->bind(s_listen,
675 (struct sockaddr *) mdev->net_conf->my_addr,
676 mdev->net_conf->my_addr_len);
680 err = drbd_accept(mdev, &what, s_listen, &s_estab);
684 sock_release(s_listen);
686 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
687 dev_err(DEV, "%s failed, err = %d\n", what, err);
688 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
696 static int drbd_send_fp(struct drbd_conf *mdev,
697 struct socket *sock, enum drbd_packets cmd)
699 struct p_header80 *h = &mdev->data.sbuf.header.h80;
701 return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
704 static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
706 struct p_header80 *h = &mdev->data.rbuf.header.h80;
709 rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
711 if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
712 return be16_to_cpu(h->command);
718 * drbd_socket_okay() - Free the socket if its connection is not okay
719 * @mdev: DRBD device.
720 * @sock: pointer to the pointer to the socket.
722 static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
730 rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
732 if (rr > 0 || rr == -EAGAIN) {
743 * 1 yes, we have a valid connection
744 * 0 oops, did not work out, please try again
745 * -1 peer talks different language,
746 * no point in trying again, please go standalone.
747 * -2 We do not have a network config...
749 static int drbd_connect(struct drbd_conf *mdev)
751 struct socket *s, *sock, *msock;
754 D_ASSERT(!mdev->data.socket);
756 if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
759 clear_bit(DISCARD_CONCURRENT, &mdev->flags);
766 /* 3 tries, this should take less than a second! */
767 s = drbd_try_connect(mdev);
770 /* give the other side time to call bind() & listen() */
771 schedule_timeout_interruptible(HZ / 10);
776 drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
780 drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
784 dev_err(DEV, "Logic error in drbd_connect()\n");
785 goto out_release_sockets;
790 schedule_timeout_interruptible(HZ / 10);
791 ok = drbd_socket_okay(mdev, &sock);
792 ok = drbd_socket_okay(mdev, &msock) && ok;
798 s = drbd_wait_for_connect(mdev);
800 try = drbd_recv_fp(mdev, s);
801 drbd_socket_okay(mdev, &sock);
802 drbd_socket_okay(mdev, &msock);
806 dev_warn(DEV, "initial packet S crossed\n");
813 dev_warn(DEV, "initial packet M crossed\n");
817 set_bit(DISCARD_CONCURRENT, &mdev->flags);
820 dev_warn(DEV, "Error receiving initial packet\n");
827 if (mdev->state.conn <= C_DISCONNECTING)
828 goto out_release_sockets;
829 if (signal_pending(current)) {
830 flush_signals(current);
832 if (get_t_state(&mdev->receiver) == Exiting)
833 goto out_release_sockets;
837 ok = drbd_socket_okay(mdev, &sock);
838 ok = drbd_socket_okay(mdev, &msock) && ok;
844 msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
845 sock->sk->sk_reuse = 1; /* SO_REUSEADDR */
847 sock->sk->sk_allocation = GFP_NOIO;
848 msock->sk->sk_allocation = GFP_NOIO;
850 sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
851 msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
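/* Two TCP connections per peer pair: "sock" carries the bulk data stream,
 * "msock" carries small latency-sensitive packets (pings, ACKs). The
 * differing sk_priority values above reflect exactly that split. */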
854 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
855 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
856 * first set it to the P_HAND_SHAKE timeout,
857 * which we set to 4x the configured ping_timeout. */
858 sock->sk->sk_sndtimeo =
859 sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;
861 msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
862 msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
864 /* we don't want delays.
865 * we use TCP_CORK where appropriate, though */
866 drbd_tcp_nodelay(sock);
867 drbd_tcp_nodelay(msock);
869 mdev->data.socket = sock;
870 mdev->meta.socket = msock;
871 mdev->last_received = jiffies;
873 D_ASSERT(mdev->asender.task == NULL);
875 h = drbd_do_handshake(mdev);
879 if (mdev->cram_hmac_tfm) {
880 /* drbd_request_state(mdev, NS(conn, WFAuth)); */
881 switch (drbd_do_auth(mdev)) {
883 dev_err(DEV, "Authentication of peer failed\n");
886 dev_err(DEV, "Authentication of peer failed, trying again.\n");
891 if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
894 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
895 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
897 atomic_set(&mdev->packet_seq, 0);
900 drbd_thread_start(&mdev->asender);
902 if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
903 drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
907 if (drbd_send_protocol(mdev) == -1)
909 drbd_send_sync_param(mdev, &mdev->sync_conf);
910 drbd_send_sizes(mdev, 0, 0);
911 drbd_send_uuids(mdev);
912 drbd_send_state(mdev);
913 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
914 clear_bit(RESIZE_PENDING, &mdev->flags);
926 static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
928 union p_header *h = &mdev->data.rbuf.header;
931 r = drbd_recv(mdev, h, sizeof(*h));
932 if (unlikely(r != sizeof(*h))) {
933 dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
937 if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
938 *cmd = be16_to_cpu(h->h80.command);
939 *packet_size = be16_to_cpu(h->h80.length);
940 } else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
941 *cmd = be16_to_cpu(h->h95.command);
942 *packet_size = be32_to_cpu(h->h95.length);
944 dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
945 be32_to_cpu(h->h80.magic),
946 be16_to_cpu(h->h80.command),
947 be16_to_cpu(h->h80.length));
950 mdev->last_received = jiffies;
955 static void drbd_flush(struct drbd_conf *mdev)
959 if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
960 rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
963 dev_err(DEV, "local disk flush failed with status %d\n", rv);
964 /* would rather check for EOPNOTSUPP, but that is not reliable.
965 * don't try again for ANY return value != 0
966 * if (rv == -EOPNOTSUPP) */
967 drbd_bump_write_ordering(mdev, WO_drain_io);
974 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, and possibly finishes it.
975 * @mdev: DRBD device.
976 * @epoch: Epoch object.
979 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
980 struct drbd_epoch *epoch,
984 struct drbd_epoch *next_epoch;
985 enum finish_epoch rv = FE_STILL_LIVE;
987 spin_lock(&mdev->epoch_lock);
991 epoch_size = atomic_read(&epoch->epoch_size);
993 switch (ev & ~EV_CLEANUP) {
995 atomic_dec(&epoch->active);
997 case EV_GOT_BARRIER_NR:
998 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
1000 case EV_BECAME_LAST:
1005 if (epoch_size != 0 &&
1006 atomic_read(&epoch->active) == 0 &&
1007 test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
1008 if (!(ev & EV_CLEANUP)) {
1009 spin_unlock(&mdev->epoch_lock);
1010 drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
1011 spin_lock(&mdev->epoch_lock);
1015 if (mdev->current_epoch != epoch) {
1016 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1017 list_del(&epoch->list);
1018 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
1022 if (rv == FE_STILL_LIVE)
1026 atomic_set(&epoch->epoch_size, 0);
1027 /* atomic_set(&epoch->active, 0); is already zero */
1028 if (rv == FE_STILL_LIVE)
1030 wake_up(&mdev->ee_wait);
1040 spin_unlock(&mdev->epoch_lock);
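/* Illustrative event trace for a single epoch (a sketch, not extra state):
 * three P_DATA writes arrive -> epoch_size == 3, active == 3; P_BARRIER
 * arrives -> EV_GOT_BARRIER_NR sets DE_HAVE_BARRIER_NUMBER; each local write
 * completion fires EV_PUT and decrements active; once active reaches 0 with
 * the barrier number known, the P_BARRIER_ACK above is sent and the epoch is
 * recycled, or freed if it is no longer mdev->current_epoch. */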
1046 * drbd_bump_write_ordering() - Fall back to another write ordering method
1047 * @mdev: DRBD device.
1048 * @wo: Write ordering method to try.
1050 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
1052 enum write_ordering_e pwo;
1053 static char *write_ordering_str[] = {
1055 [WO_drain_io] = "drain",
1056 [WO_bdev_flush] = "flush",
1059 pwo = mdev->write_ordering;
1061 if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
1063 if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
1065 mdev->write_ordering = wo;
1066 if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
1067 dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
1072 * @mdev: DRBD device.
1074 * @rw: flag field, see bio->bi_rw
1076 /* TODO allocate from our own bio_set. */
1077 int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
1078 const unsigned rw, const int fault_type)
1080 struct bio *bios = NULL;
1082 struct page *page = e->pages;
1083 sector_t sector = e->sector;
1084 unsigned ds = e->size;
1085 unsigned n_bios = 0;
1086 unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;
1088 /* In most cases, we will only need one bio. But in case the lower
1089 * level restrictions happen to be different at this offset on this
1090 * side than those of the sending peer, we may need to submit the
1091 * request in more than one bio. */
1093 bio = bio_alloc(GFP_NOIO, nr_pages);
1095 dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
1098 /* > e->sector, unless this is the first bio */
1099 bio->bi_sector = sector;
1100 bio->bi_bdev = mdev->ldev->backing_bdev;
1102 bio->bi_private = e;
1103 bio->bi_end_io = drbd_endio_sec;
1105 bio->bi_next = bios;
1109 page_chain_for_each(page) {
1110 unsigned len = min_t(unsigned, ds, PAGE_SIZE);
1111 if (!bio_add_page(bio, page, len, 0)) {
1112 /* a single page must always be possible! */
1113 BUG_ON(bio->bi_vcnt == 0);
1120 D_ASSERT(page == NULL);
1123 atomic_set(&e->pending_bios, n_bios);
1126 bios = bios->bi_next;
1127 bio->bi_next = NULL;
1129 drbd_generic_make_request(mdev, fault_type, bio);
1136 bios = bios->bi_next;
1142 static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1145 struct p_barrier *p = &mdev->data.rbuf.barrier;
1146 struct drbd_epoch *epoch;
1150 mdev->current_epoch->barrier_nr = p->barrier;
1151 rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
1153 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1154 * the activity log, which means it would not be resynced in case the
1155 * R_PRIMARY crashes now.
1156 * Therefore we must send the barrier_ack after the barrier request was
1158 switch (mdev->write_ordering) {
1160 if (rv == FE_RECYCLED)
1163 /* receiver context, in the writeout path of the other node.
1164 * avoid potential distributed deadlock */
1165 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1169 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1174 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1177 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1178 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1183 epoch = mdev->current_epoch;
1184 wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
1186 D_ASSERT(atomic_read(&epoch->active) == 0);
1187 D_ASSERT(epoch->flags == 0);
1191 dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
1196 atomic_set(&epoch->epoch_size, 0);
1197 atomic_set(&epoch->active, 0);
1199 spin_lock(&mdev->epoch_lock);
1200 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1201 list_add(&epoch->list, &mdev->current_epoch->list);
1202 mdev->current_epoch = epoch;
1205 /* The current_epoch got recycled while we allocated this one... */
1208 spin_unlock(&mdev->epoch_lock);
1213 /* used from receive_RSDataReply (recv_resync_read)
1214 * and from receive_Data */
1215 static struct drbd_epoch_entry *
1216 read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
1218 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1219 struct drbd_epoch_entry *e;
1222 void *dig_in = mdev->int_dig_in;
1223 void *dig_vv = mdev->int_dig_vv;
1224 unsigned long *data;
1226 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1227 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1230 rr = drbd_recv(mdev, dig_in, dgs);
1232 dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
1240 ERR_IF(data_size == 0) return NULL;
1241 ERR_IF(data_size & 0x1ff) return NULL;
1242 ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;
1244 /* even though we trust our peer,
1245 * we sometimes have to double check. */
1246 if (sector + (data_size>>9) > capacity) {
1247 dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
1248 (unsigned long long)capacity,
1249 (unsigned long long)sector, data_size);
1253 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1254 * "criss-cross" setup, that might cause write-out on some other DRBD,
1255 * which in turn might block on the other node at this very place. */
1256 e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
1262 page_chain_for_each(page) {
1263 unsigned len = min_t(int, ds, PAGE_SIZE);
1265 rr = drbd_recv(mdev, data, len);
1266 if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
1267 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1268 data[0] = data[0] ^ (unsigned long)-1;
1272 drbd_free_ee(mdev, e);
1273 dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1281 drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
1282 if (memcmp(dig_in, dig_vv, dgs)) {
1283 dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1284 (unsigned long long)sector, data_size);
1285 drbd_bcast_ee(mdev, "digest failed",
1286 dgs, dig_in, dig_vv, e);
1287 drbd_free_ee(mdev, e);
1291 mdev->recv_cnt += data_size>>9;
1295 /* drbd_drain_block() just takes a data block
1296 * out of the socket input buffer, and discards it.
1298 static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1307 page = drbd_pp_alloc(mdev, 1, 1);
1311 rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
1312 if (rr != min_t(int, data_size, PAGE_SIZE)) {
1314 dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1315 rr, min_t(int, data_size, PAGE_SIZE));
1321 drbd_pp_free(mdev, page, 0);
1325 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1326 sector_t sector, int data_size)
1328 struct bio_vec *bvec;
1330 int dgs, rr, i, expect;
1331 void *dig_in = mdev->int_dig_in;
1332 void *dig_vv = mdev->int_dig_vv;
1334 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1335 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1338 rr = drbd_recv(mdev, dig_in, dgs);
1340 dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
1348 /* optimistically update recv_cnt. if receiving fails below,
1349 * we disconnect anyways, and counters will be reset. */
1350 mdev->recv_cnt += data_size>>9;
1352 bio = req->master_bio;
1353 D_ASSERT(sector == bio->bi_sector);
1355 bio_for_each_segment(bvec, bio, i) {
1356 expect = min_t(int, data_size, bvec->bv_len);
1357 rr = drbd_recv(mdev,
1358 kmap(bvec->bv_page)+bvec->bv_offset,
1360 kunmap(bvec->bv_page);
1362 dev_warn(DEV, "short read receiving data reply: "
1363 "read %d expected %d\n",
1371 drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
1372 if (memcmp(dig_in, dig_vv, dgs)) {
1373 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1378 D_ASSERT(data_size == 0);
1382 /* e_end_resync_block() is called via
1383 * drbd_process_done_ee() by asender only */
1384 static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1386 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1387 sector_t sector = e->sector;
1390 D_ASSERT(hlist_unhashed(&e->colision));
1392 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1393 drbd_set_in_sync(mdev, sector, e->size);
1394 ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
1396 /* Record failure to sync */
1397 drbd_rs_failed_io(mdev, sector, e->size);
1399 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1406 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1408 struct drbd_epoch_entry *e;
1410 e = read_in_block(mdev, ID_SYNCER, sector, data_size);
1414 dec_rs_pending(mdev);
1417 /* corresponding dec_unacked() in e_end_resync_block()
1418 * respective _drbd_clear_done_ee */
1420 e->w.cb = e_end_resync_block;
1422 spin_lock_irq(&mdev->req_lock);
1423 list_add(&e->w.list, &mdev->sync_ee);
1424 spin_unlock_irq(&mdev->req_lock);
1426 atomic_add(data_size >> 9, &mdev->rs_sect_ev);
1427 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
1430 /* drbd_submit_ee currently fails for one reason only:
1431 * not being able to allocate enough bios.
1432 * Is dropping the connection going to help? */
1433 spin_lock_irq(&mdev->req_lock);
1434 list_del(&e->w.list);
1435 spin_unlock_irq(&mdev->req_lock);
1437 drbd_free_ee(mdev, e);
1443 static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1445 struct drbd_request *req;
1448 struct p_data *p = &mdev->data.rbuf.data;
1450 sector = be64_to_cpu(p->sector);
1452 spin_lock_irq(&mdev->req_lock);
1453 req = _ar_id_to_req(mdev, p->block_id, sector);
1454 spin_unlock_irq(&mdev->req_lock);
1455 if (unlikely(!req)) {
1456 dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
1460 /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
1461 * special casing it there for the various failure cases.
1462 * still no race with drbd_fail_pending_reads */
1463 ok = recv_dless_read(mdev, req, sector, data_size);
1466 req_mod(req, data_received);
1467 /* else: nothing. handled from drbd_disconnect...
1468 * I don't think we may complete this just yet
1469 * in case we are "on-disconnect: freeze" */
1474 static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1478 struct p_data *p = &mdev->data.rbuf.data;
1480 sector = be64_to_cpu(p->sector);
1481 D_ASSERT(p->block_id == ID_SYNCER);
1483 if (get_ldev(mdev)) {
1484 /* data is submitted to disk within recv_resync_read.
1485 * corresponding put_ldev done below on error,
1486 * or in drbd_endio_write_sec. */
1487 ok = recv_resync_read(mdev, sector, data_size);
1489 if (__ratelimit(&drbd_ratelimit_state))
1490 dev_err(DEV, "Can not write resync data to local disk.\n");
1492 ok = drbd_drain_block(mdev, data_size);
1494 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
1497 atomic_add(data_size >> 9, &mdev->rs_sect_in);
1502 /* e_end_block() is called via drbd_process_done_ee().
1503 * this means this function only runs in the asender thread
1505 static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1507 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1508 sector_t sector = e->sector;
1511 if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
1512 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1513 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1514 mdev->state.conn <= C_PAUSED_SYNC_T &&
1515 e->flags & EE_MAY_SET_IN_SYNC) ?
1516 P_RS_WRITE_ACK : P_WRITE_ACK;
1517 ok &= drbd_send_ack(mdev, pcmd, e);
1518 if (pcmd == P_RS_WRITE_ACK)
1519 drbd_set_in_sync(mdev, sector, e->size);
1521 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1522 /* we expect it to be marked out of sync anyways...
1523 * maybe assert this? */
1527 /* we delete from the conflict detection hash _after_ we sent out the
1528 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
1529 if (mdev->net_conf->two_primaries) {
1530 spin_lock_irq(&mdev->req_lock);
1531 D_ASSERT(!hlist_unhashed(&e->colision));
1532 hlist_del_init(&e->colision);
1533 spin_unlock_irq(&mdev->req_lock);
1535 D_ASSERT(hlist_unhashed(&e->colision));
1538 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1543 static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1545 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1548 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1549 ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1551 spin_lock_irq(&mdev->req_lock);
1552 D_ASSERT(!hlist_unhashed(&e->colision));
1553 hlist_del_init(&e->colision);
1554 spin_unlock_irq(&mdev->req_lock);
1561 /* Called from receive_Data.
1562 * Synchronize packets on sock with packets on msock.
1564 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1565 * packet traveling on msock, they are still processed in the order they have
1568 * Note: we don't care for Ack packets overtaking P_DATA packets.
1570 * In case packet_seq is larger than mdev->peer_seq number, there are
1571 * outstanding packets on the msock. We wait for them to arrive.
1572 * In case we are the logically next packet, we update mdev->peer_seq
1573 * ourselves. Correctly handles 32bit wrap around.
1575 * Assume we have a 10 GBit connection, that is about 1<<30 bytes per second,
1576 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1577 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1578 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1580 * returns 0 if we may process the packet,
1581 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1582 static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1588 spin_lock(&mdev->peer_seq_lock);
1590 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1591 if (seq_le(packet_seq, mdev->peer_seq+1))
1593 if (signal_pending(current)) {
1597 p_seq = mdev->peer_seq;
1598 spin_unlock(&mdev->peer_seq_lock);
1599 timeout = schedule_timeout(30*HZ);
1600 spin_lock(&mdev->peer_seq_lock);
1601 if (timeout == 0 && p_seq == mdev->peer_seq) {
1603 dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1607 finish_wait(&mdev->seq_wait, &wait);
1608 if (mdev->peer_seq+1 == packet_seq)
1610 spin_unlock(&mdev->peer_seq_lock);
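/* seq_le() used above is the usual serial number comparison (cf. RFC 1982);
 * a standalone sketch of how it presumably handles the 32bit wrap:
 *
 *	static inline int example_seq_le(u32 a, u32 b)
 *	{
 *		return (s32)(a - b) <= 0;
 *	}
 *
 * e.g. example_seq_le(0xfffffffe, 2) is true: sequence number 2 is logically
 * newer than 0xfffffffe, even though it is numerically smaller. */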
1614 /* see also bio_flags_to_wire()
1615 * DRBD_REQ_*, because we need to semantically map the flags to data packet
1616 * flags and back. We may replicate to other kernel versions. */
1617 static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
1619 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1620 (dpf & DP_FUA ? REQ_FUA : 0) |
1621 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
1622 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
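/* The sending side applies the inverse mapping in bio_flags_to_wire()
 * (drbd_main.c); a sketch, assuming agreed_pro_version >= 95, which
 * introduced the FUA/FLUSH/DISCARD flags on the wire:
 *
 *	static u32 example_bio_flags_to_wire(unsigned long bi_rw)
 *	{
 *		return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
 *		       (bi_rw & REQ_FUA ? DP_FUA : 0) |
 *		       (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
 *		       (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
 *	}
 */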
1625 /* mirrored write */
1626 static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1629 struct drbd_epoch_entry *e;
1630 struct p_data *p = &mdev->data.rbuf.data;
1634 if (!get_ldev(mdev)) {
1635 spin_lock(&mdev->peer_seq_lock);
1636 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1638 spin_unlock(&mdev->peer_seq_lock);
1640 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
1641 atomic_inc(&mdev->current_epoch->epoch_size);
1642 return drbd_drain_block(mdev, data_size);
1645 /* get_ldev(mdev) successful.
1646 * Corresponding put_ldev done either below (on various errors),
1647 * or in drbd_endio_write_sec, if we successfully submit the data at
1648 * the end of this function. */
1650 sector = be64_to_cpu(p->sector);
1651 e = read_in_block(mdev, p->block_id, sector, data_size);
1657 e->w.cb = e_end_block;
1659 dp_flags = be32_to_cpu(p->dp_flags);
1660 rw |= wire_flags_to_bio(mdev, dp_flags);
1662 if (dp_flags & DP_MAY_SET_IN_SYNC)
1663 e->flags |= EE_MAY_SET_IN_SYNC;
1665 spin_lock(&mdev->epoch_lock);
1666 e->epoch = mdev->current_epoch;
1667 atomic_inc(&e->epoch->epoch_size);
1668 atomic_inc(&e->epoch->active);
1669 spin_unlock(&mdev->epoch_lock);
1671 /* I'm the receiver, I do hold a net_cnt reference. */
1672 if (!mdev->net_conf->two_primaries) {
1673 spin_lock_irq(&mdev->req_lock);
1675 /* don't get the req_lock yet,
1676 * we may sleep in drbd_wait_peer_seq */
1677 const int size = e->size;
1678 const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1680 struct drbd_request *i;
1681 struct hlist_node *n;
1682 struct hlist_head *slot;
1685 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1686 BUG_ON(mdev->ee_hash == NULL);
1687 BUG_ON(mdev->tl_hash == NULL);
1689 /* conflict detection and handling:
1690 * 1. wait on the sequence number,
1691 * in case this data packet overtook ACK packets.
1692 * 2. check our hash tables for conflicting requests.
1693 * we only need to walk the tl_hash, since an ee cannot
1694 * have a conflict with another ee: on the submitting
1695 * node, the corresponding req had already been conflicting,
1696 * and a conflicting req is never sent.
1698 * Note: for two_primaries, we are protocol C,
1699 * so there cannot be any request that is DONE
1700 * but still on the transfer log.
1702 * unconditionally add to the ee_hash.
1704 * if no conflicting request is found:
1707 * if any conflicting request is found
1708 * that has not yet been acked,
1709 * AND I have the "discard concurrent writes" flag:
1710 * queue (via done_ee) the P_DISCARD_ACK; OUT.
1712 * if any conflicting request is found:
1713 * block the receiver, waiting on misc_wait
1714 * until no more conflicting requests are there,
1715 * or we get interrupted (disconnect).
1717 * we do not just write after local io completion of those
1718 * requests, but only after req is done completely, i.e.
1719 * we wait for the P_DISCARD_ACK to arrive!
1721 * then proceed normally, i.e. submit.
1723 if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1724 goto out_interrupted;
1726 spin_lock_irq(&mdev->req_lock);
1728 hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));
1730 #define OVERLAPS overlaps(i->sector, i->size, sector, size)
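/* overlaps() (not shown here) presumably implements the usual half-open
 * interval intersection test, with sizes given in bytes and sectors of
 * 512 bytes, along the lines of:
 *
 *	static int example_overlaps(sector_t s1, int l1, sector_t s2, int l2)
 *	{
 *		return !(s1 + (l1 >> 9) <= s2 || s1 >= s2 + (l2 >> 9));
 *	}
 */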
1731 slot = tl_hash_slot(mdev, sector);
1734 int have_unacked = 0;
1735 int have_conflict = 0;
1736 prepare_to_wait(&mdev->misc_wait, &wait,
1737 TASK_INTERRUPTIBLE);
1738 hlist_for_each_entry(i, n, slot, colision) {
1740 /* only ALERT on first iteration,
1741 * we may be woken up early... */
1743 dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1744 " new: %llus +%u; pending: %llus +%u\n",
1745 current->comm, current->pid,
1746 (unsigned long long)sector, size,
1747 (unsigned long long)i->sector, i->size);
1748 if (i->rq_state & RQ_NET_PENDING)
1757 /* Discard Ack only for the _first_ iteration */
1758 if (first && discard && have_unacked) {
1759 dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1760 (unsigned long long)sector);
1762 e->w.cb = e_send_discard_ack;
1763 list_add_tail(&e->w.list, &mdev->done_ee);
1765 spin_unlock_irq(&mdev->req_lock);
1767 /* we could probably send that P_DISCARD_ACK ourselves,
1768 * but I don't like the receiver using the msock */
1772 finish_wait(&mdev->misc_wait, &wait);
1776 if (signal_pending(current)) {
1777 hlist_del_init(&e->colision);
1779 spin_unlock_irq(&mdev->req_lock);
1781 finish_wait(&mdev->misc_wait, &wait);
1782 goto out_interrupted;
1785 spin_unlock_irq(&mdev->req_lock);
1788 dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1789 "sec=%llus\n", (unsigned long long)sector);
1790 } else if (discard) {
1791 /* we had none on the first iteration.
1792 * there must be none now. */
1793 D_ASSERT(have_unacked == 0);
1796 spin_lock_irq(&mdev->req_lock);
1798 finish_wait(&mdev->misc_wait, &wait);
1801 list_add(&e->w.list, &mdev->active_ee);
1802 spin_unlock_irq(&mdev->req_lock);
1804 switch (mdev->net_conf->wire_protocol) {
1807 /* corresponding dec_unacked() in e_end_block()
1808 * respective _drbd_clear_done_ee */
1811 /* I really don't like it that the receiver thread
1812 * sends on the msock, but anyways */
1813 drbd_send_ack(mdev, P_RECV_ACK, e);
1820 if (mdev->state.pdsk < D_INCONSISTENT) {
1821 /* In case we have the only disk of the cluster, */
1822 drbd_set_out_of_sync(mdev, e->sector, e->size);
1823 e->flags |= EE_CALL_AL_COMPLETE_IO;
1824 e->flags &= ~EE_MAY_SET_IN_SYNC;
1825 drbd_al_begin_io(mdev, e->sector);
1828 if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
1831 /* drbd_submit_ee currently fails for one reason only:
1832 * not being able to allocate enough bios.
1833 * Is dropping the connection going to help? */
1834 spin_lock_irq(&mdev->req_lock);
1835 list_del(&e->w.list);
1836 hlist_del_init(&e->colision);
1837 spin_unlock_irq(&mdev->req_lock);
1838 if (e->flags & EE_CALL_AL_COMPLETE_IO)
1839 drbd_al_complete_io(mdev, e->sector);
1842 /* yes, the epoch_size now is imbalanced.
1843 * but we drop the connection anyways, so we don't have a chance to
1844 * receive a barrier... atomic_inc(&mdev->epoch_size); */
1846 drbd_free_ee(mdev, e);
1850 /* We may throttle resync, if the lower device seems to be busy,
1851 * and current sync rate is above c_min_rate.
1853 * To decide whether or not the lower device is busy, we use a scheme similar
1854 * to MD RAID is_mddev_idle(): if the partition stats reveal a "significant"
1855 * amount (more than 64 sectors) of activity we cannot account for with our
1856 * own resync activity, it obviously is "busy".
1858 * The current sync rate used here uses only the most recent two step marks,
1859 * to have a short time average so we can react faster.
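/* Worked example with illustrative numbers: if the two most recent marks are
 * 2*DRBD_SYNC_MARK_STEP == 6 seconds apart and rs_mark_left dropped by 1536
 * bitmap bits in that time, then db/dt == 256 bits/s; with the default 4 KiB
 * of data per bitmap bit that is dbdt == Bit2KB(256) == 1024 KiB/s, and we
 * throttle if that exceeds the configured c_min_rate. */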
1861 int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
1863 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
1864 unsigned long db, dt, dbdt;
1865 struct lc_element *tmp;
1869 /* feature disabled? */
1870 if (mdev->sync_conf.c_min_rate == 0)
1873 spin_lock_irq(&mdev->al_lock);
1874 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
1876 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
1877 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
1878 spin_unlock_irq(&mdev->al_lock);
1881 /* Do not slow down if app IO is already waiting for this extent */
1883 spin_unlock_irq(&mdev->al_lock);
1885 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
1886 (int)part_stat_read(&disk->part0, sectors[1]) -
1887 atomic_read(&mdev->rs_sect_ev);
1889 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
1890 unsigned long rs_left;
1893 mdev->rs_last_events = curr_events;
1895 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
1897 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
1899 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
1900 rs_left = mdev->ov_left;
1902 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
1904 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
1907 db = mdev->rs_mark_left[i] - rs_left;
1908 dbdt = Bit2KB(db/dt);
1910 if (dbdt > mdev->sync_conf.c_min_rate)
1917 static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
1920 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1921 struct drbd_epoch_entry *e;
1922 struct digest_info *di = NULL;
1924 unsigned int fault_type;
1925 struct p_block_req *p = &mdev->data.rbuf.block_req;
1927 sector = be64_to_cpu(p->sector);
1928 size = be32_to_cpu(p->blksize);
1930 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
1931 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1932 (unsigned long long)sector, size);
1935 if (sector + (size>>9) > capacity) {
1936 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1937 (unsigned long long)sector, size);
1941 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
1944 case P_DATA_REQUEST:
1945 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
1947 case P_RS_DATA_REQUEST:
1948 case P_CSUM_RS_REQUEST:
1950 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
1954 dec_rs_pending(mdev);
1955 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
1958 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
1961 if (verb && __ratelimit(&drbd_ratelimit_state))
1962 dev_err(DEV, "Can not satisfy peer's read request, "
1963 "no local data.\n");
1965 /* drain possible payload */
1966 return drbd_drain_block(mdev, digest_size);
1969 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1970 * "criss-cross" setup, that might cause write-out on some other DRBD,
1971 * which in turn might block on the other node at this very place. */
1972 e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
1979 case P_DATA_REQUEST:
1980 e->w.cb = w_e_end_data_req;
1981 fault_type = DRBD_FAULT_DT_RD;
1982 /* application IO, don't drbd_rs_begin_io */
1985 case P_RS_DATA_REQUEST:
1986 e->w.cb = w_e_end_rsdata_req;
1987 fault_type = DRBD_FAULT_RS_RD;
1988 /* used in the sector offset progress display */
1989 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
1993 case P_CSUM_RS_REQUEST:
1994 fault_type = DRBD_FAULT_RS_RD;
1995 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
1999 di->digest_size = digest_size;
2000 di->digest = (((char *)di)+sizeof(struct digest_info));
2003 e->flags |= EE_HAS_DIGEST;
2005 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2008 if (cmd == P_CSUM_RS_REQUEST) {
2009 D_ASSERT(mdev->agreed_pro_version >= 89);
2010 e->w.cb = w_e_end_csum_rs_req;
2011 /* used in the sector offset progress display */
2012 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
2013 } else if (cmd == P_OV_REPLY) {
2014 /* track progress, we may need to throttle */
2015 atomic_add(size >> 9, &mdev->rs_sect_in);
2016 e->w.cb = w_e_end_ov_reply;
2017 dec_rs_pending(mdev);
2018 /* drbd_rs_begin_io done when we sent this request,
2019 * but accounting still needs to be done. */
2020 goto submit_for_resync;
2025 if (mdev->ov_start_sector == ~(sector_t)0 &&
2026 mdev->agreed_pro_version >= 90) {
2027 unsigned long now = jiffies;
2029 mdev->ov_start_sector = sector;
2030 mdev->ov_position = sector;
2031 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2032 mdev->rs_total = mdev->ov_left;
2033 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2034 mdev->rs_mark_left[i] = mdev->ov_left;
2035 mdev->rs_mark_time[i] = now;
2037 dev_info(DEV, "Online Verify start sector: %llu\n",
2038 (unsigned long long)sector);
2040 e->w.cb = w_e_end_ov_req;
2041 fault_type = DRBD_FAULT_RS_RD;
2045 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2047 fault_type = DRBD_FAULT_MAX;
2051 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2052 * wrt the receiver, but it is not as straightforward as it may seem.
2053 * Various places in the resync start and stop logic assume resync
2054 * requests are processed in order; requeuing this on the worker thread
2055 * would introduce a bunch of new code for synchronization between threads.
2057 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2058 * "forever", throttling after drbd_rs_begin_io will lock that extent
2059 * for application writes for the same time. For now, just throttle
2060 * here, where the rest of the code expects the receiver to sleep for
2064 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2065 * this defers syncer requests for some time, before letting at least
2066 * one request through. The resync controller on the receiving side
2067 * will adapt to the incoming rate accordingly.
2069 * We cannot throttle here if remote is Primary/SyncTarget:
2070 * we would also throttle its application reads.
2071 * In that case, throttling is done on the SyncTarget only.
2073 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2074 schedule_timeout_uninterruptible(HZ/10);
2075 if (drbd_rs_begin_io(mdev, sector))
2079 atomic_add(size >> 9, &mdev->rs_sect_ev);
2083 spin_lock_irq(&mdev->req_lock);
2084 list_add_tail(&e->w.list, &mdev->read_ee);
2085 spin_unlock_irq(&mdev->req_lock);
2087 if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
2090 /* drbd_submit_ee currently fails for one reason only:
2091 * not being able to allocate enough bios.
2092 * Is dropping the connection going to help? */
2093 spin_lock_irq(&mdev->req_lock);
2094 list_del(&e->w.list);
2095 spin_unlock_irq(&mdev->req_lock);
2096 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2100 drbd_free_ee(mdev, e);
2104 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2106 int self, peer, rv = -100;
2107 unsigned long ch_self, ch_peer;
2109 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2110 peer = mdev->p_uuid[UI_BITMAP] & 1;
2112 ch_peer = mdev->p_uuid[UI_SIZE];
2113 ch_self = mdev->comm_bm_set;
2115 switch (mdev->net_conf->after_sb_0p) {
2117 case ASB_DISCARD_SECONDARY:
2118 case ASB_CALL_HELPER:
2119 dev_err(DEV, "Configuration error.\n");
2121 case ASB_DISCONNECT:
2123 case ASB_DISCARD_YOUNGER_PRI:
2124 if (self == 0 && peer == 1) {
2128 if (self == 1 && peer == 0) {
2132 /* Else fall through to one of the other strategies... */
2133 case ASB_DISCARD_OLDER_PRI:
2134 if (self == 0 && peer == 1) {
2138 if (self == 1 && peer == 0) {
2142 /* Else fall through to one of the other strategies... */
2143 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2144 "Using discard-least-changes instead\n");
2145 case ASB_DISCARD_ZERO_CHG:
2146 if (ch_peer == 0 && ch_self == 0) {
2147 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2151 if (ch_peer == 0) { rv = 1; break; }
2152 if (ch_self == 0) { rv = -1; break; }
2154 if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2156 case ASB_DISCARD_LEAST_CHG:
2157 if (ch_self < ch_peer)
2159 else if (ch_self > ch_peer)
2161 else /* ( ch_self == ch_peer ) */
2162 /* Well, then use something else. */
2163 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2166 case ASB_DISCARD_LOCAL:
2169 case ASB_DISCARD_REMOTE:
2176 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2180 switch (mdev->net_conf->after_sb_1p) {
2181 case ASB_DISCARD_YOUNGER_PRI:
2182 case ASB_DISCARD_OLDER_PRI:
2183 case ASB_DISCARD_LEAST_CHG:
2184 case ASB_DISCARD_LOCAL:
2185 case ASB_DISCARD_REMOTE:
2186 dev_err(DEV, "Configuration error.\n");
2188 case ASB_DISCONNECT:
2191 hg = drbd_asb_recover_0p(mdev);
2192 if (hg == -1 && mdev->state.role == R_SECONDARY)
2194 if (hg == 1 && mdev->state.role == R_PRIMARY)
2198 rv = drbd_asb_recover_0p(mdev);
2200 case ASB_DISCARD_SECONDARY:
2201 return mdev->state.role == R_PRIMARY ? 1 : -1;
2202 case ASB_CALL_HELPER:
2203 hg = drbd_asb_recover_0p(mdev);
2204 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2205 enum drbd_state_rv rv2;
2207 drbd_set_role(mdev, R_SECONDARY, 0);
2208 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2209 * we might be here in C_WF_REPORT_PARAMS which is transient.
2210 * we do not need to wait for the after state change work either. */
2211 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2212 if (rv2 != SS_SUCCESS) {
2213 drbd_khelper(mdev, "pri-lost-after-sb");
2215 dev_warn(DEV, "Successfully gave up primary role.\n");
2225 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2229 switch (mdev->net_conf->after_sb_2p) {
2230 case ASB_DISCARD_YOUNGER_PRI:
2231 case ASB_DISCARD_OLDER_PRI:
2232 case ASB_DISCARD_LEAST_CHG:
2233 case ASB_DISCARD_LOCAL:
2234 case ASB_DISCARD_REMOTE:
2236 case ASB_DISCARD_SECONDARY:
2237 dev_err(DEV, "Configuration error.\n");
2240 rv = drbd_asb_recover_0p(mdev);
2242 case ASB_DISCONNECT:
2244 case ASB_CALL_HELPER:
2245 hg = drbd_asb_recover_0p(mdev);
2247 enum drbd_state_rv rv2;
2249 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2250 * we might be here in C_WF_REPORT_PARAMS which is transient.
2251 * we do not need to wait for the after state change work either. */
2252 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2253 if (rv2 != SS_SUCCESS) {
2254 drbd_khelper(mdev, "pri-lost-after-sb");
2256 dev_warn(DEV, "Successfully gave up primary role.\n");
2266 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2267 u64 bits, u64 flags)
2270 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2273 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2275 (unsigned long long)uuid[UI_CURRENT],
2276 (unsigned long long)uuid[UI_BITMAP],
2277 (unsigned long long)uuid[UI_HISTORY_START],
2278 (unsigned long long)uuid[UI_HISTORY_END],
2279 (unsigned long long)bits,
2280 (unsigned long long)flags);
2284 100 after split brain try auto recover
2285 2 C_SYNC_SOURCE set BitMap
2286 1 C_SYNC_SOURCE use BitMap
2288 -1 C_SYNC_TARGET use BitMap
2289 -2 C_SYNC_TARGET set BitMap
2290 -100 after split brain, disconnect
2291 -1000 unrelated data
2292 -1091 requires proto 91
2293 -1096 requires proto 96
2295 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2300 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2301 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2304 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2308 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2309 peer != UUID_JUST_CREATED)
2313 if (self != UUID_JUST_CREATED &&
2314 (peer == UUID_JUST_CREATED || peer == (u64)0))
2318 int rct, dc; /* roles at crash time */
2320 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2322 if (mdev->agreed_pro_version < 91)
2325 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2326 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2327 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2328 drbd_uuid_set_bm(mdev, 0UL);
2330 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2331 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2334 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2341 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2343 if (mdev->agreed_pro_version < 91)
2346 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2347 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2348 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2350 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2351 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2352 mdev->p_uuid[UI_BITMAP] = 0UL;
2354 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2357 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2364 /* Common power [off|failure] */
2365 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2366 (mdev->p_uuid[UI_FLAGS] & 2);
2367 /* lowest bit is set when we were primary,
2368 * next bit (weight 2) is set when peer was primary */
2372 case 0: /* !self_pri && !peer_pri */ return 0;
2373 case 1: /* self_pri && !peer_pri */ return 1;
2374 case 2: /* !self_pri && peer_pri */ return -1;
2375 case 3: /* self_pri && peer_pri */
2376 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2382 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2387 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2389 if (mdev->agreed_pro_version < 96 ?
2390 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2391 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2392 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
2393 /* The last P_SYNC_UUID did not get through. Undo the modifications
2394 of the peer's UUIDs made at the last start of a resync as sync source. */
2396 if (mdev->agreed_pro_version < 91)
2399 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2400 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2402 dev_info(DEV, "Did not get last syncUUID packet, corrected:\n");
2403 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2410 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2411 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2412 peer = mdev->p_uuid[i] & ~((u64)1);
2418 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2419 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2424 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2426 if (mdev->agreed_pro_version < 96 ?
2427 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2428 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2429 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
2430 /* The last P_SYNC_UUID did not get through. Undo the modifications
2431 of our UUIDs made at the last start of a resync as sync source. */
2433 if (mdev->agreed_pro_version < 91)
2436 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2437 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2439 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
2440 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2441 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2449 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2450 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2451 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2457 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2458 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2459 if (self == peer && self != ((u64)0))
2463 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2464 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2465 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2466 peer = mdev->p_uuid[j] & ~((u64)1);
2475 /* drbd_sync_handshake() returns the new conn state on success, or
2476 CONN_MASK (-1) on failure.
2478 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2479 enum drbd_disk_state peer_disk) __must_hold(local)
2482 enum drbd_conns rv = C_MASK;
2483 enum drbd_disk_state mydisk;
2485 mydisk = mdev->state.disk;
2486 if (mydisk == D_NEGOTIATING)
2487 mydisk = mdev->new_state_tmp.disk;
2489 dev_info(DEV, "drbd_sync_handshake:\n");
2490 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2491 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2492 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2494 hg = drbd_uuid_compare(mdev, &rule_nr);
2496 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2499 dev_alert(DEV, "Unrelated data, aborting!\n");
2503 dev_alert(DEV, "To resolve this, both sides have to support at least protocol %d\n", -hg - 1000);
2507 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2508 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2509 int f = (hg == -100) || abs(hg) == 2;
2510 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2513 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2514 hg > 0 ? "source" : "target");
2518 drbd_khelper(mdev, "initial-split-brain");
2520 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2521 int pcount = (mdev->state.role == R_PRIMARY)
2522 + (peer_role == R_PRIMARY);
2523 int forced = (hg == -100);
2527 hg = drbd_asb_recover_0p(mdev);
2530 hg = drbd_asb_recover_1p(mdev);
2533 hg = drbd_asb_recover_2p(mdev);
2536 if (abs(hg) < 100) {
2537 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2538 "automatically solved. Sync from %s node\n",
2539 pcount, (hg < 0) ? "peer" : "this");
2541 dev_warn(DEV, "Doing a full sync, since"
2542 " UUIDs were ambiguous.\n");
2549 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2551 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2555 dev_warn(DEV, "Split-Brain detected, manually solved. "
2556 "Sync from %s node\n",
2557 (hg < 0) ? "peer" : "this");
2561 /* FIXME this log message is not correct if we end up here
2562 * after an attempted attach on a diskless node.
2563 * We just refuse to attach -- well, we drop the "connection"
2564 * to that disk, in a way... */
2565 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
2566 drbd_khelper(mdev, "split-brain");
2570 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2571 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2575 if (hg < 0 && /* by intention we do not use mydisk here. */
2576 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2577 switch (mdev->net_conf->rr_conflict) {
2578 case ASB_CALL_HELPER:
2579 drbd_khelper(mdev, "pri-lost");
2581 case ASB_DISCONNECT:
2582 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2585 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2590 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2592 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2594 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.\n",
2595 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2596 abs(hg) >= 2 ? "full" : "bit-map based");
2601 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2602 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
2606 if (hg > 0) { /* become sync source. */
2608 } else if (hg < 0) { /* become sync target */
2612 if (drbd_bm_total_weight(mdev)) {
2613 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2614 drbd_bm_total_weight(mdev));
2621 /* returns 1 if invalid */
2622 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2624 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2625 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2626 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2629 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2630 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2631 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2634 /* everything else is valid if they are equal on both sides. */
2638 /* everything else is invalid. */
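/* Example: self = ASB_DISCARD_LOCAL with peer = ASB_DISCARD_REMOTE is
 * consistent -- both sides agree to discard this node's data. If both
 * sides were set to ASB_DISCARD_REMOTE, each would discard the other's
 * data, which is why such combinations are rejected above. */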
2642 static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2644 struct p_protocol *p = &mdev->data.rbuf.protocol;
2645 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2646 int p_want_lose, p_two_primaries, cf;
2647 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2649 p_proto = be32_to_cpu(p->protocol);
2650 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2651 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2652 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
2653 p_two_primaries = be32_to_cpu(p->two_primaries);
2654 cf = be32_to_cpu(p->conn_flags);
2655 p_want_lose = cf & CF_WANT_LOSE;
2657 clear_bit(CONN_DRY_RUN, &mdev->flags);
2659 if (cf & CF_DRY_RUN)
2660 set_bit(CONN_DRY_RUN, &mdev->flags);
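/* Note: conn_flags is a bit field; CF_WANT_LOSE and CF_DRY_RUN are the
 * only flags evaluated here, other bits are simply not looked at. */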
2662 if (p_proto != mdev->net_conf->wire_protocol) {
2663 dev_err(DEV, "incompatible communication protocols\n");
2667 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2668 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2672 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2673 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2677 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2678 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2682 if (p_want_lose && mdev->net_conf->want_lose) {
2683 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2687 if (p_two_primaries != mdev->net_conf->two_primaries) {
2688 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2692 if (mdev->agreed_pro_version >= 87) {
2693 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2695 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2698 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2699 if (strcmp(p_integrity_alg, my_alg)) {
2700 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2703 dev_info(DEV, "data-integrity-alg: %s\n",
2704 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2710 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2715 * input: alg name, feature name
2716 * return: NULL (alg name was "")
2717 * ERR_PTR(error) if something goes wrong
2718 * or the crypto hash ptr, if it worked out ok. */
2719 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2720 const char *alg, const char *name)
2722 struct crypto_hash *tfm;
2727 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2729 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2730 alg, name, PTR_ERR(tfm));
2733 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2734 crypto_free_hash(tfm);
2735 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2736 return ERR_PTR(-EINVAL);
2741 static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
2744 struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
2745 unsigned int header_size, data_size, exp_max_sz;
2746 struct crypto_hash *verify_tfm = NULL;
2747 struct crypto_hash *csums_tfm = NULL;
2748 const int apv = mdev->agreed_pro_version;
2749 int *rs_plan_s = NULL;
2752 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2753 : apv == 88 ? sizeof(struct p_rs_param)
2755 : apv <= 94 ? sizeof(struct p_rs_param_89)
2756 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
2758 if (packet_size > exp_max_sz) {
2759 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2760 packet_size, exp_max_sz);
2765 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2766 data_size = packet_size - header_size;
2767 } else if (apv <= 94) {
2768 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2769 data_size = packet_size - header_size;
2770 D_ASSERT(data_size == 0);
2772 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2773 data_size = packet_size - header_size;
2774 D_ASSERT(data_size == 0);
2777 /* initialize verify_alg and csums_alg */
2778 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2780 if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
2783 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2787 if (data_size > SHARED_SECRET_MAX) {
2788 dev_err(DEV, "verify-alg too long, "
2789 "peer wants %u, accepting only %u bytes\n",
2790 data_size, SHARED_SECRET_MAX);
2794 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2797 /* we expect NUL terminated string */
2798 /* but just in case someone tries to be evil */
2799 D_ASSERT(p->verify_alg[data_size-1] == 0);
2800 p->verify_alg[data_size-1] = 0;
2802 } else /* apv >= 89 */ {
2803 /* we still expect NUL terminated strings */
2804 /* but just in case someone tries to be evil */
2805 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2806 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2807 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2808 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2811 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2812 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2813 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2814 mdev->sync_conf.verify_alg, p->verify_alg);
2817 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2818 p->verify_alg, "verify-alg");
2819 if (IS_ERR(verify_tfm)) {
2825 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2826 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2827 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2828 mdev->sync_conf.csums_alg, p->csums_alg);
2831 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2832 p->csums_alg, "csums-alg");
2833 if (IS_ERR(csums_tfm)) {
2840 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2841 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2842 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2843 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2844 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
2846 fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
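/* Illustration only, assuming SLEEP_TIME is HZ/10 and c_plan_ahead is
 * configured in units of 0.1 seconds: the expression then reduces to
 * fifo_size == c_plan_ahead, i.e. one fifo slot per planner step. */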
2847 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2848 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
2850 dev_err(DEV, "kzalloc of fifo_buffer failed\n");
2856 spin_lock(&mdev->peer_seq_lock);
2857 /* lock against drbd_nl_syncer_conf() */
2859 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2860 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2861 crypto_free_hash(mdev->verify_tfm);
2862 mdev->verify_tfm = verify_tfm;
2863 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2866 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2867 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2868 crypto_free_hash(mdev->csums_tfm);
2869 mdev->csums_tfm = csums_tfm;
2870 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2872 if (fifo_size != mdev->rs_plan_s.size) {
2873 kfree(mdev->rs_plan_s.values);
2874 mdev->rs_plan_s.values = rs_plan_s;
2875 mdev->rs_plan_s.size = fifo_size;
2876 mdev->rs_planed = 0;
2878 spin_unlock(&mdev->peer_seq_lock);
2883 /* just for completeness: actually not needed,
2884 * as this is not reached if csums_tfm was ok. */
2885 crypto_free_hash(csums_tfm);
2886 /* but free the verify_tfm again, if csums_tfm did not work out */
2887 crypto_free_hash(verify_tfm);
2888 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2892 static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
2894 /* sorry, we currently have no working implementation
2895 * of distributed TCQ */
2898 /* warn if the arguments differ by more than 12.5% */
2899 static void warn_if_differ_considerably(struct drbd_conf *mdev,
2900 const char *s, sector_t a, sector_t b)
2903 if (a == 0 || b == 0)
2905 d = (a > b) ? (a - b) : (b - a);
2906 if (d > (a>>3) || d > (b>>3))
2907 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2908 (unsigned long long)a, (unsigned long long)b);
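/* Example: a = 1000, b = 990 gives d = 10, below both a>>3 = 125 and
 * b>>3 = 123, so no warning; a = 1000, b = 800 gives d = 200 > 125,
 * which triggers the warning. */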
2911 static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2913 struct p_sizes *p = &mdev->data.rbuf.sizes;
2914 enum determine_dev_size dd = unchanged;
2915 unsigned int max_bio_size;
2916 sector_t p_size, p_usize, my_usize;
2917 int ldsc = 0; /* local disk size changed */
2918 enum dds_flags ddsf;
2920 p_size = be64_to_cpu(p->d_size);
2921 p_usize = be64_to_cpu(p->u_size);
2923 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2924 dev_err(DEV, "some backing storage is needed\n");
2925 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2929 /* just store the peer's disk size for now.
2930 * we still need to figure out whether we accept that. */
2931 mdev->p_size = p_size;
2933 if (get_ldev(mdev)) {
2934 warn_if_differ_considerably(mdev, "lower level device sizes",
2935 p_size, drbd_get_max_capacity(mdev->ldev));
2936 warn_if_differ_considerably(mdev, "user requested size",
2937 p_usize, mdev->ldev->dc.disk_size);
2939 /* if this is the first connect, or an otherwise expected
2940 * param exchange, choose the minimum */
2941 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2942 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2945 my_usize = mdev->ldev->dc.disk_size;
2947 if (mdev->ldev->dc.disk_size != p_usize) {
2948 mdev->ldev->dc.disk_size = p_usize;
2949 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
2950 (unsigned long)mdev->ldev->dc.disk_size);
2953 /* Never shrink a device with usable data during connect.
2954 But allow online shrinking if we are connected. */
2955 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
2956 drbd_get_capacity(mdev->this_bdev) &&
2957 mdev->state.disk >= D_OUTDATED &&
2958 mdev->state.conn < C_CONNECTED) {
2959 dev_err(DEV, "The peer's disk size is too small!\n");
2960 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2961 mdev->ldev->dc.disk_size = my_usize;
2968 ddsf = be16_to_cpu(p->dds_flags);
2969 if (get_ldev(mdev)) {
2970 dd = drbd_determin_dev_size(mdev, ddsf);
2972 if (dd == dev_size_error)
2976 /* I am diskless, need to accept the peer's size. */
2977 drbd_set_my_capacity(mdev, p_size);
2980 if (get_ldev(mdev)) {
2981 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
2982 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
2986 if (mdev->agreed_pro_version < 94)
2987 max_bio_size = be32_to_cpu(p->max_bio_size);
2988 else if (mdev->agreed_pro_version == 94)
2989 max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
2990 else /* drbd 8.3.8 onwards */
2991 max_bio_size = DRBD_MAX_BIO_SIZE;
2993 if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9)
2994 drbd_setup_queue_param(mdev, max_bio_size);
2996 drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
3000 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3001 if (be64_to_cpu(p->c_size) !=
3002 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3003 /* we have different sizes, probably peer
3004 * needs to know my new size... */
3005 drbd_send_sizes(mdev, 0, ddsf);
3007 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3008 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3009 if (mdev->state.pdsk >= D_INCONSISTENT &&
3010 mdev->state.disk >= D_INCONSISTENT) {
3011 if (ddsf & DDSF_NO_RESYNC)
3012 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3014 resync_after_online_grow(mdev);
3016 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3023 static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3025 struct p_uuids *p = &mdev->data.rbuf.uuids;
3029 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3031 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3032 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3034 kfree(mdev->p_uuid);
3035 mdev->p_uuid = p_uuid;
3037 if (mdev->state.conn < C_CONNECTED &&
3038 mdev->state.disk < D_INCONSISTENT &&
3039 mdev->state.role == R_PRIMARY &&
3040 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3041 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3042 (unsigned long long)mdev->ed_uuid);
3043 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3047 if (get_ldev(mdev)) {
3048 int skip_initial_sync =
3049 mdev->state.conn == C_CONNECTED &&
3050 mdev->agreed_pro_version >= 90 &&
3051 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3052 (p_uuid[UI_FLAGS] & 8);
3053 if (skip_initial_sync) {
3054 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3055 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3056 "clear_n_write from receive_uuids");
3057 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3058 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3059 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3064 } else if (mdev->state.disk < D_INCONSISTENT &&
3065 mdev->state.role == R_PRIMARY) {
3066 /* I am a diskless primary, the peer just created a new current UUID for me. */
3068 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3071 /* Before we test the disk state, we should wait until a possibly
3072 ongoing cluster wide state change is finished. That is important if
3073 we are primary and are detaching from our disk. We need to see the
3074 new disk state... */
3075 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3076 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3077 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3083 * convert_state() - Converts the peer's view of the cluster state to our point of view
3084 * @ps: The state as seen by the peer.
3086 static union drbd_state convert_state(union drbd_state ps)
3088 union drbd_state ms;
3090 static enum drbd_conns c_tab[] = {
3091 [C_CONNECTED] = C_CONNECTED,
3093 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3094 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3095 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3096 [C_VERIFY_S] = C_VERIFY_T,
3102 ms.conn = c_tab[ps.conn];
3107 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
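/* Example: if the peer reports C_VERIFY_S (it is the verify source), then
 * seen from this node the connection is C_VERIFY_T; the table above
 * mirrors all such asymmetric states. */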
3112 static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3114 struct p_req_state *p = &mdev->data.rbuf.req_state;
3115 union drbd_state mask, val;
3116 enum drbd_state_rv rv;
3118 mask.i = be32_to_cpu(p->mask);
3119 val.i = be32_to_cpu(p->val);
3121 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3122 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3123 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3127 mask = convert_state(mask);
3128 val = convert_state(val);
3130 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3132 drbd_send_sr_reply(mdev, rv);
3138 static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3140 struct p_state *p = &mdev->data.rbuf.state;
3141 union drbd_state os, ns, peer_state;
3142 enum drbd_disk_state real_peer_disk;
3143 enum chg_state_flags cs_flags;
3146 peer_state.i = be32_to_cpu(p->state);
3148 real_peer_disk = peer_state.disk;
3149 if (peer_state.disk == D_NEGOTIATING) {
3150 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3151 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3154 spin_lock_irq(&mdev->req_lock);
3156 os = ns = mdev->state;
3157 spin_unlock_irq(&mdev->req_lock);
3159 /* peer says his disk is uptodate, while we think it is inconsistent,
3160 * and this happens while we think we have a sync going on. */
3161 if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
3162 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3163 /* If we are (becoming) SyncSource, but peer is still in sync
3164 * preparation, ignore its uptodate-ness to avoid flapping, it
3165 will change to inconsistent once the peer reaches active syncing states.
3167 * It may have changed syncer-paused flags, however, so we
3168 * cannot ignore this completely. */
3169 if (peer_state.conn > C_CONNECTED &&
3170 peer_state.conn < C_SYNC_SOURCE)
3171 real_peer_disk = D_INCONSISTENT;
3173 /* if peer_state changes to connected at the same time,
3174 * it explicitly notifies us that it finished resync.
3175 * Maybe we should finish it up, too? */
3176 else if (os.conn >= C_SYNC_SOURCE &&
3177 peer_state.conn == C_CONNECTED) {
3178 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3179 drbd_resync_finished(mdev);
3184 /* peer says his disk is inconsistent, while we think it is uptodate,
3185 * and this happens while the peer still thinks we have a sync going on,
3186 * but we think we are already done with the sync.
3187 * We ignore this to avoid flapping pdsk.
3188 * This should not happen, if the peer is a recent version of drbd. */
3189 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3190 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3191 real_peer_disk = D_UP_TO_DATE;
3193 if (ns.conn == C_WF_REPORT_PARAMS)
3194 ns.conn = C_CONNECTED;
3196 if (peer_state.conn == C_AHEAD)
3199 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3200 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3201 int cr; /* consider resync */
3203 /* if we established a new connection */
3204 cr = (os.conn < C_CONNECTED);
3205 /* if we had an established connection
3206 * and one of the nodes newly attaches a disk */
3207 cr |= (os.conn == C_CONNECTED &&
3208 (peer_state.disk == D_NEGOTIATING ||
3209 os.disk == D_NEGOTIATING));
3210 /* if we have both been inconsistent, and the peer has been
3211 * forced to be UpToDate with --overwrite-data */
3212 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3213 /* if we had been plain connected, and the admin requested to
3214 * start a sync by "invalidate" or "invalidate-remote" */
3215 cr |= (os.conn == C_CONNECTED &&
3216 (peer_state.conn >= C_STARTING_SYNC_S &&
3217 peer_state.conn <= C_WF_BITMAP_T));
3220 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3223 if (ns.conn == C_MASK) {
3224 ns.conn = C_CONNECTED;
3225 if (mdev->state.disk == D_NEGOTIATING) {
3226 drbd_force_state(mdev, NS(disk, D_FAILED));
3227 } else if (peer_state.disk == D_NEGOTIATING) {
3228 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3229 peer_state.disk = D_DISKLESS;
3230 real_peer_disk = D_DISKLESS;
3232 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
3234 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3235 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3241 spin_lock_irq(&mdev->req_lock);
3242 if (mdev->state.i != os.i)
3244 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3245 ns.peer = peer_state.role;
3246 ns.pdsk = real_peer_disk;
3247 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3248 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3249 ns.disk = mdev->new_state_tmp.disk;
3250 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3251 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3252 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3253 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3254 for temporary network outages! */
3255 spin_unlock_irq(&mdev->req_lock);
3256 dev_err(DEV, "Aborting Connect, cannot thaw IO with a peer that is only Consistent\n");
3258 drbd_uuid_new_current(mdev);
3259 clear_bit(NEW_CUR_UUID, &mdev->flags);
3260 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
3263 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3265 spin_unlock_irq(&mdev->req_lock);
3267 if (rv < SS_SUCCESS) {
3268 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3272 if (os.conn > C_WF_REPORT_PARAMS) {
3273 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3274 peer_state.disk != D_NEGOTIATING) {
3275 /* we want resync, peer has not yet decided to sync... */
3276 /* Nowadays only used when forcing a node into primary role and
3277 setting its disk to UpToDate with that */
3278 drbd_send_uuids(mdev);
3279 drbd_send_state(mdev);
3283 mdev->net_conf->want_lose = 0;
3285 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3290 static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3292 struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
3294 wait_event(mdev->misc_wait,
3295 mdev->state.conn == C_WF_SYNC_UUID ||
3296 mdev->state.conn == C_BEHIND ||
3297 mdev->state.conn < C_CONNECTED ||
3298 mdev->state.disk < D_NEGOTIATING);
3300 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3302 /* Here the _drbd_uuid_ functions are right, current should
3303 _not_ be rotated into the history */
3304 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3305 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3306 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3308 drbd_start_resync(mdev, C_SYNC_TARGET);
3312 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3318 * receive_bitmap_plain
3320 * Return 0 when done, 1 when another iteration is needed, and a negative error
3321 * code upon failure.
3324 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3325 unsigned long *buffer, struct bm_xfer_ctx *c)
3327 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3328 unsigned want = num_words * sizeof(long);
3331 if (want != data_size) {
3332 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
3337 err = drbd_recv(mdev, buffer, want);
3344 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3346 c->word_offset += num_words;
3347 c->bit_offset = c->word_offset * BITS_PER_LONG;
3348 if (c->bit_offset > c->bm_bits)
3349 c->bit_offset = c->bm_bits;
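/* Note: receiving num_words words advances bit_offset by
 * num_words * BITS_PER_LONG; the clamping above only matters for the
 * last, partial packet of the bitmap. */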
3357 * Return 0 when done, 1 when another iteration is needed, and a negative error
3358 * code upon failure.
3361 recv_bm_rle_bits(struct drbd_conf *mdev,
3362 struct p_compressed_bm *p,
3363 struct bm_xfer_ctx *c)
3365 struct bitstream bs;
3369 unsigned long s = c->bit_offset;
3371 int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
3372 int toggle = DCBP_get_start(p);
3376 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3378 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3382 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3383 bits = vli_decode_bits(&rl, look_ahead);
3389 if (e >= c->bm_bits) {
3390 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3393 _drbd_bm_set_bits(mdev, s, e);
3397 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3398 have, bits, look_ahead,
3399 (unsigned int)(bs.cur.b - p->code),
3400 (unsigned int)bs.buf_len);
3403 look_ahead >>= bits;
3406 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3409 look_ahead |= tmp << have;
3414 bm_xfer_ctx_bit_to_word_offset(c);
3416 return (s != c->bm_bits);
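/* Worked example for the decode loop above (illustration only): with a
 * start toggle of 0 and decoded run lengths 5, 3, 7, bits 0..4 stay
 * clear, bits 5..7 get set, bits 8..14 stay clear; s advances by each
 * run length, and toggle flips after every run. */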
3422 * Return 0 when done, 1 when another iteration is needed, and a negative error
3423 * code upon failure.
3426 decode_bitmap_c(struct drbd_conf *mdev,
3427 struct p_compressed_bm *p,
3428 struct bm_xfer_ctx *c)
3430 if (DCBP_get_code(p) == RLE_VLI_Bits)
3431 return recv_bm_rle_bits(mdev, p, c);
3433 /* other variants had been implemented for evaluation,
3434 * but have been dropped as this one turned out to be "best"
3435 * during all our tests. */
3437 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3438 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3442 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3443 const char *direction, struct bm_xfer_ctx *c)
3445 /* what would it take to transfer it "plaintext" */
3446 unsigned plain = sizeof(struct p_header80) *
3447 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3448 + c->bm_words * sizeof(long);
3449 unsigned total = c->bytes[0] + c->bytes[1];
3452 /* total cannot be zero. But just in case: */
3456 /* don't report if not compressed */
3460 /* total < plain. check for overflow, still */
3461 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3462 : (1000 * total / plain);
3468 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3469 "total %u; compression: %u.%u%%\n",
3471 c->bytes[1], c->packets[1],
3472 c->bytes[0], c->packets[0],
3473 total, r/10, r % 10);
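/* Example: with plain = 1000000 bytes and total = 25000 bytes actually
 * transferred, the RLE transfer needed 25000/1000000 = 2.5% of the
 * plain-text size. */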
3476 /* Since we are processing the bitfield from lower addresses to higher,
3477 it does not matter whether we process it in 32 bit or 64 bit
3478 chunks, as long as it is little endian. (Understand it as a byte stream,
3479 beginning with the lowest byte...) If we used big endian,
3480 we would need to process it from the highest address to the lowest
3481 in order to be agnostic to the 32 vs 64 bit issue.
3483 returns 0 on failure, 1 if we successfully received it. */
3484 static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3486 struct bm_xfer_ctx c;
3490 struct p_header80 *h = &mdev->data.rbuf.header.h80;
3492 /* drbd_bm_lock(mdev, "receive bitmap"); By intention no bm_lock */
3494 /* maybe we should use some per thread scratch page,
3495 * and allocate that during initial device creation? */
3496 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3498 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3502 c = (struct bm_xfer_ctx) {
3503 .bm_bits = drbd_bm_bits(mdev),
3504 .bm_words = drbd_bm_words(mdev),
3508 if (cmd == P_BITMAP) {
3509 err = receive_bitmap_plain(mdev, data_size, buffer, &c);
3510 } else if (cmd == P_COMPRESSED_BITMAP) {
3511 /* MAYBE: sanity check that we speak proto >= 90,
3512 * and the feature is enabled! */
3513 struct p_compressed_bm *p;
3515 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
3516 dev_err(DEV, "ReportCBitmap packet too large\n");
3519 /* use the page buff */
3521 memcpy(p, h, sizeof(*h));
3522 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
3524 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3525 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
3528 err = decode_bitmap_c(mdev, p, &c);
3530 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
3534 c.packets[cmd == P_BITMAP]++;
3535 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
3542 if (!drbd_recv_header(mdev, &cmd, &data_size))
3546 INFO_bm_xfer_stats(mdev, "receive", &c);
3548 if (mdev->state.conn == C_WF_BITMAP_T) {
3549 enum drbd_state_rv rv;
3551 ok = !drbd_send_bitmap(mdev);
3554 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3555 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3556 D_ASSERT(rv == SS_SUCCESS);
3557 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3558 /* admin may have requested C_DISCONNECTING,
3559 * other threads may have noticed network errors */
3560 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3561 drbd_conn_str(mdev->state.conn));
3566 /* drbd_bm_unlock(mdev); by intention no lock */
3567 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3568 drbd_start_resync(mdev, C_SYNC_SOURCE);
3569 free_page((unsigned long) buffer);
3573 static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3575 /* TODO zero copy sink :) */
3576 static char sink[128];
3579 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3584 want = min_t(int, size, sizeof(sink));
3585 r = drbd_recv(mdev, sink, want);
3586 ERR_IF(r <= 0) break;
3592 static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3594 /* Make sure we've acked all the TCP data associated
3595 * with the data requests being unplugged */
3596 drbd_tcp_quickack(mdev->data.socket);
3601 static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3603 struct p_block_desc *p = &mdev->data.rbuf.block_desc;
3605 switch (mdev->state.conn) {
3606 case C_WF_SYNC_UUID:
3611 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
3612 drbd_conn_str(mdev->state.conn));
3615 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
3620 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
3625 drbd_cmd_handler_f function;
3628 static struct data_cmd drbd_cmd_handler[] = {
3629 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3630 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3631 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3632 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3633 [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3634 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3635 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3636 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3637 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3638 [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
3639 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
3640 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3641 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3642 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3643 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3644 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3645 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3646 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3647 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3648 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3649 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
3650 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
3651 /* anything missing from this table is in
3652 * the asender_tbl, see get_asender_cmd */
3653 [P_MAX_CMD] = { 0, 0, NULL },
3656 /* All handler functions that expect a sub-header get that sub-header in
3657 mdev->data.rbuf.header.head.payload.
3659 Usually the callback can find the usual p_header in
3660 mdev->data.rbuf.header.head, but it may not rely on that, since there is also p_header95.
3663 static void drbdd(struct drbd_conf *mdev)
3665 union p_header *header = &mdev->data.rbuf.header;
3666 unsigned int packet_size;
3667 enum drbd_packets cmd;
3668 size_t shs; /* sub header size */
3671 while (get_t_state(&mdev->receiver) == Running) {
3672 drbd_thread_current_set_cpu(mdev);
3673 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3676 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3677 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3681 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
3682 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3683 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3688 rv = drbd_recv(mdev, &header->h80.payload, shs);
3689 if (unlikely(rv != shs)) {
3690 dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
3695 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3697 if (unlikely(!rv)) {
3698 dev_err(DEV, "error receiving %s, l: %d!\n",
3699 cmdname(cmd), packet_size);
3706 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3708 /* If we leave here, we probably want to update at least the
3709 * "Connected" indicator on stable storage. Do so explicitly here. */
3713 void drbd_flush_workqueue(struct drbd_conf *mdev)
3715 struct drbd_wq_barrier barr;
3717 barr.w.cb = w_prev_work_done;
3718 init_completion(&barr.done);
3719 drbd_queue_work(&mdev->data.work, &barr.w);
3720 wait_for_completion(&barr.done);
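/* This is the usual flush-by-barrier pattern: w_prev_work_done merely
 * completes barr.done, so once the wait returns, every work item queued
 * before the barrier has been processed by the worker. */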
3723 void drbd_free_tl_hash(struct drbd_conf *mdev)
3725 struct hlist_head *h;
3727 spin_lock_irq(&mdev->req_lock);
3729 if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
3730 spin_unlock_irq(&mdev->req_lock);
3734 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3736 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3737 (int)(h - mdev->ee_hash), h->first);
3738 kfree(mdev->ee_hash);
3739 mdev->ee_hash = NULL;
3740 mdev->ee_hash_s = 0;
3743 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3745 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3746 (int)(h - mdev->tl_hash), h->first);
3747 kfree(mdev->tl_hash);
3748 mdev->tl_hash = NULL;
3749 mdev->tl_hash_s = 0;
3750 spin_unlock_irq(&mdev->req_lock);
3753 static void drbd_disconnect(struct drbd_conf *mdev)
3755 enum drbd_fencing_p fp;
3756 union drbd_state os, ns;
3757 int rv = SS_UNKNOWN_ERROR;
3760 if (mdev->state.conn == C_STANDALONE)
3763 /* asender does not clean up anything. it must not interfere, either */
3764 drbd_thread_stop(&mdev->asender);
3765 drbd_free_sock(mdev);
3767 /* wait for current activity to cease. */
3768 spin_lock_irq(&mdev->req_lock);
3769 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3770 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3771 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3772 spin_unlock_irq(&mdev->req_lock);
3774 /* We do not have data structures that would allow us to
3775 * get the rs_pending_cnt down to 0 again.
3776 * * On C_SYNC_TARGET we do not have any data structures describing
3777 * the pending RSDataRequest's we have sent.
3778 * * On C_SYNC_SOURCE there is no data structure that tracks
3779 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3780 * And no, it is not the sum of the reference counts in the
3781 * resync_LRU. The resync_LRU tracks the whole operation including
3782 the disk-IO, while the rs_pending_cnt only tracks the blocks on the fly. */
3784 drbd_rs_cancel_all(mdev);
3786 mdev->rs_failed = 0;
3787 atomic_set(&mdev->rs_pending_cnt, 0);
3788 wake_up(&mdev->misc_wait);
3790 /* make sure syncer is stopped and w_resume_next_sg queued */
3791 del_timer_sync(&mdev->resync_timer);
3792 resync_timer_fn((unsigned long)mdev);
3794 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3795 * w_make_resync_request etc. which may still be on the worker queue
3796 * to be "canceled" */
3797 drbd_flush_workqueue(mdev);
3799 /* This also does reclaim_net_ee(). If we do this too early, we might
3800 * miss some resync ee and pages.*/
3801 drbd_process_done_ee(mdev);
3803 kfree(mdev->p_uuid);
3804 mdev->p_uuid = NULL;
3806 if (!is_susp(mdev->state))
3809 dev_info(DEV, "Connection closed\n");
3814 if (get_ldev(mdev)) {
3815 drbd_bitmap_io(mdev, &drbd_bm_write, "write from disconnect");
3816 fp = mdev->ldev->dc.fencing;
3820 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3821 drbd_try_outdate_peer_async(mdev);
3823 spin_lock_irq(&mdev->req_lock);
3825 if (os.conn >= C_UNCONNECTED) {
3826 /* Do not restart in case we are C_DISCONNECTING */
3828 ns.conn = C_UNCONNECTED;
3829 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3831 spin_unlock_irq(&mdev->req_lock);
3833 if (os.conn == C_DISCONNECTING) {
3834 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
3836 crypto_free_hash(mdev->cram_hmac_tfm);
3837 mdev->cram_hmac_tfm = NULL;
3839 kfree(mdev->net_conf);
3840 mdev->net_conf = NULL;
3841 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3844 /* tcp_close and release of sendpage pages can be deferred. I don't
3845 * want to use SO_LINGER, because apparently it can be deferred for
3846 * more than 20 seconds (longest time I checked).
3848 * Actually we don't care exactly when the network stack does its
3849 * put_page(); we release our reference on these pages right here.
3851 i = drbd_release_ee(mdev, &mdev->net_ee);
3853 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
3854 i = atomic_read(&mdev->pp_in_use_by_net);
3856 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
3857 i = atomic_read(&mdev->pp_in_use);
3859 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
3861 D_ASSERT(list_empty(&mdev->read_ee));
3862 D_ASSERT(list_empty(&mdev->active_ee));
3863 D_ASSERT(list_empty(&mdev->sync_ee));
3864 D_ASSERT(list_empty(&mdev->done_ee));
3866 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3867 atomic_set(&mdev->current_epoch->epoch_size, 0);
3868 D_ASSERT(list_empty(&mdev->current_epoch->list));
3872 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3873 * we can agree on is stored in agreed_pro_version.
3875 * feature flags and the reserved array should be enough room for future
3876 * enhancements of the handshake protocol, and possible plugins...
3878 * for now, they are expected to be zero, but ignored.
3880 static int drbd_send_handshake(struct drbd_conf *mdev)
3882 /* ASSERT current == mdev->receiver ... */
3883 struct p_handshake *p = &mdev->data.sbuf.handshake;
3886 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3887 dev_err(DEV, "interrupted during initial handshake\n");
3888 return 0; /* interrupted. not ok. */
3891 if (mdev->data.socket == NULL) {
3892 mutex_unlock(&mdev->data.mutex);
3896 memset(p, 0, sizeof(*p));
3897 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3898 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3899 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
3900 (struct p_header80 *)p, sizeof(*p), 0 );
3901 mutex_unlock(&mdev->data.mutex);
3907 * 1 yes, we have a valid connection
3908 * 0 oops, did not work out, please try again
3909 * -1 peer talks different language,
3910 * no point in trying again, please go standalone.
3912 static int drbd_do_handshake(struct drbd_conf *mdev)
3914 /* ASSERT current == mdev->receiver ... */
3915 struct p_handshake *p = &mdev->data.rbuf.handshake;
3916 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
3917 unsigned int length;
3918 enum drbd_packets cmd;
3921 rv = drbd_send_handshake(mdev);
3925 rv = drbd_recv_header(mdev, &cmd, &length);
3929 if (cmd != P_HAND_SHAKE) {
3930 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
3935 if (length != expect) {
3936 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
3941 rv = drbd_recv(mdev, &p->head.payload, expect);
3944 dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
3948 p->protocol_min = be32_to_cpu(p->protocol_min);
3949 p->protocol_max = be32_to_cpu(p->protocol_max);
3950 if (p->protocol_max == 0)
3951 p->protocol_max = p->protocol_min;
3953 if (PRO_VERSION_MAX < p->protocol_min ||
3954 PRO_VERSION_MIN > p->protocol_max)
3957 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3959 dev_info(DEV, "Handshake successful: "
3960 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3965 dev_err(DEV, "incompatible DRBD dialects: "
3966 "I support %d-%d, peer supports %d-%d\n",
3967 PRO_VERSION_MIN, PRO_VERSION_MAX,
3968 p->protocol_min, p->protocol_max);
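/* Example, with hypothetical version numbers: if we support 86..96 and
 * the peer announces 90..100, the ranges overlap and we agree on
 * min(96, 100) = 96; had the peer announced 97..100, the range check
 * above would have rejected the connection. */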
3972 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
3973 static int drbd_do_auth(struct drbd_conf *mdev)
3975 dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
3976 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
3980 #define CHALLENGE_LEN 64
3984 0 - failed, try again (network error),
3985 -1 - auth failed, don't try again.
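/* The exchange implemented below, schematically -- both sides run it
 * symmetrically:
 *
 *	send P_AUTH_CHALLENGE  (CHALLENGE_LEN random bytes)
 *	recv P_AUTH_CHALLENGE  (the peer's challenge)
 *	send P_AUTH_RESPONSE   (HMAC(shared_secret, peer's challenge))
 *	recv P_AUTH_RESPONSE   (the peer's HMAC over our challenge)
 *	compare it against the locally computed HMAC over my_challenge
 */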
3988 static int drbd_do_auth(struct drbd_conf *mdev)
3990 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
3991 struct scatterlist sg;
3992 char *response = NULL;
3993 char *right_response = NULL;
3994 char *peers_ch = NULL;
3995 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
3996 unsigned int resp_size;
3997 struct hash_desc desc;
3998 enum drbd_packets cmd;
3999 unsigned int length;
4002 desc.tfm = mdev->cram_hmac_tfm;
4005 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
4006 (u8 *)mdev->net_conf->shared_secret, key_len);
4008 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
4013 get_random_bytes(my_challenge, CHALLENGE_LEN);
4015 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4019 rv = drbd_recv_header(mdev, &cmd, &length);
4023 if (cmd != P_AUTH_CHALLENGE) {
4024 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
4030 if (length > CHALLENGE_LEN * 2) {
4031 dev_err(DEV, "AuthChallenge payload too big.\n");
4036 peers_ch = kmalloc(length, GFP_NOIO);
4037 if (peers_ch == NULL) {
4038 dev_err(DEV, "kmalloc of peers_ch failed\n");
4043 rv = drbd_recv(mdev, peers_ch, length);
4046 dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
4051 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4052 response = kmalloc(resp_size, GFP_NOIO);
4053 if (response == NULL) {
4054 dev_err(DEV, "kmalloc of response failed\n");
4059 sg_init_table(&sg, 1);
4060 sg_set_buf(&sg, peers_ch, length);
4062 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4064 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4069 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4073 rv = drbd_recv_header(mdev, &cmd, &length);
4077 if (cmd != P_AUTH_RESPONSE) {
4078 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
4084 if (length != resp_size) {
4085 dev_err(DEV, "AuthResponse payload of wrong size\n");
4090 rv = drbd_recv(mdev, response, resp_size);
4092 if (rv != resp_size) {
4093 dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
4098 right_response = kmalloc(resp_size, GFP_NOIO);
4099 if (right_response == NULL) {
4100 dev_err(DEV, "kmalloc of right_response failed\n");
4105 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4107 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4109 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4114 rv = !memcmp(response, right_response, resp_size);
4117 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4118 resp_size, mdev->net_conf->cram_hmac_alg);
4125 kfree(right_response);
4131 int drbdd_init(struct drbd_thread *thi)
4133 struct drbd_conf *mdev = thi->mdev;
4134 unsigned int minor = mdev_to_minor(mdev);
4137 sprintf(current->comm, "drbd%d_receiver", minor);
4139 dev_info(DEV, "receiver (re)started\n");
4142 h = drbd_connect(mdev);
4144 drbd_disconnect(mdev);
4145 schedule_timeout_interruptible(HZ);
4148 dev_warn(DEV, "Discarding network configuration.\n");
4149 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4154 if (get_net_conf(mdev)) {
4160 drbd_disconnect(mdev);
4162 dev_info(DEV, "receiver terminated\n");
4166 /* ********* acknowledge sender ******** */
4168 static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
4170 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4172 int retcode = be32_to_cpu(p->retcode);
4174 if (retcode >= SS_SUCCESS) {
4175 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4177 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4178 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4179 drbd_set_st_err_str(retcode), retcode);
4181 wake_up(&mdev->state_wait);
4186 static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
4188 return drbd_send_ping_ack(mdev);
4192 static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
4194 /* restore idle timeout */
4195 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4196 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4197 wake_up(&mdev->misc_wait);
4202 static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
4204 struct p_block_ack *p = (struct p_block_ack *)h;
4205 sector_t sector = be64_to_cpu(p->sector);
4206 int blksize = be32_to_cpu(p->blksize);
4208 D_ASSERT(mdev->agreed_pro_version >= 89);
4210 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4212 if (get_ldev(mdev)) {
4213 drbd_rs_complete_io(mdev, sector);
4214 drbd_set_in_sync(mdev, sector, blksize);
4215 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4216 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4219 dec_rs_pending(mdev);
4220 atomic_add(blksize >> 9, &mdev->rs_sect_in);
4225 /* when we receive the ACK for a write request,
4226 * verify that we actually know about it */
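/* (For application writes, the block_id on the wire is simply the address
 * of the drbd_request cast to u64 -- hence the plain pointer comparison
 * below.) */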
4227 static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4228 u64 id, sector_t sector)
4230 struct hlist_head *slot = tl_hash_slot(mdev, sector);
4231 struct hlist_node *n;
4232 struct drbd_request *req;
4234 hlist_for_each_entry(req, n, slot, colision) {
4235 if ((unsigned long)req == (unsigned long)id) {
4236 if (req->sector != sector) {
4237 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4238 "wrong sector (%llus versus %llus)\n", req,
4239 (unsigned long long)req->sector,
4240 (unsigned long long)sector);
4249 typedef struct drbd_request *(req_validator_fn)
4250 (struct drbd_conf *mdev, u64 id, sector_t sector);
4252 static int validate_req_change_req_state(struct drbd_conf *mdev,
4253 u64 id, sector_t sector, req_validator_fn validator,
4254 const char *func, enum drbd_req_event what)
4256 struct drbd_request *req;
4257 struct bio_and_error m;
4259 spin_lock_irq(&mdev->req_lock);
4260 req = validator(mdev, id, sector);
4261 if (unlikely(!req)) {
4262 spin_unlock_irq(&mdev->req_lock);
4264 dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
4265 (void *)(unsigned long)id, (unsigned long long)sector);
4268 __req_mod(req, what, &m);
4269 spin_unlock_irq(&mdev->req_lock);
4272 complete_master_bio(mdev, &m);
4276 static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
4278 struct p_block_ack *p = (struct p_block_ack *)h;
4279 sector_t sector = be64_to_cpu(p->sector);
4280 int blksize = be32_to_cpu(p->blksize);
4281 enum drbd_req_event what;
4283 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4285 if (is_syncer_block_id(p->block_id)) {
4286 drbd_set_in_sync(mdev, sector, blksize);
4287 dec_rs_pending(mdev);
4290 switch (be16_to_cpu(h->command)) {
4291 case P_RS_WRITE_ACK:
4292 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4293 what = write_acked_by_peer_and_sis;
4296 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4297 what = write_acked_by_peer;
4300 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4301 what = recv_acked_by_peer;
4304 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4305 what = conflict_discarded_by_peer;
4312 return validate_req_change_req_state(mdev, p->block_id, sector,
4313 _ack_id_to_req, __func__, what);
4316 static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
4318 struct p_block_ack *p = (struct p_block_ack *)h;
4319 sector_t sector = be64_to_cpu(p->sector);
4320 int size = be32_to_cpu(p->blksize);
4321 struct drbd_request *req;
4322 struct bio_and_error m;
4324 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4326 if (is_syncer_block_id(p->block_id)) {
4327 dec_rs_pending(mdev);
4328 drbd_rs_failed_io(mdev, sector, size);
4332 spin_lock_irq(&mdev->req_lock);
4333 req = _ack_id_to_req(mdev, p->block_id, sector);
4335 spin_unlock_irq(&mdev->req_lock);
4336 if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
4337 mdev->net_conf->wire_protocol == DRBD_PROT_B) {
4338 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
4339 The master bio might already be completed, therefore the
4340 request is no longer in the collision hash.
4341 => Do not try to validate block_id as request. */
4342 /* In Protocol B we might already have got a P_RECV_ACK
4343 but then get a P_NEG_ACK afterwards. */
4344 drbd_set_out_of_sync(mdev, sector, size);
4347 dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
4348 (void *)(unsigned long)p->block_id, (unsigned long long)sector);
4352 __req_mod(req, neg_acked, &m);
4353 spin_unlock_irq(&mdev->req_lock);
4356 complete_master_bio(mdev, &m);
4360 static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
4362 struct p_block_ack *p = (struct p_block_ack *)h;
4363 sector_t sector = be64_to_cpu(p->sector);
4365 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4366 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4367 (unsigned long long)sector, be32_to_cpu(p->blksize));
4369 return validate_req_change_req_state(mdev, p->block_id, sector,
4370 _ar_id_to_req, __func__, neg_acked);
4373 static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
4377 struct p_block_ack *p = (struct p_block_ack *)h;
4379 sector = be64_to_cpu(p->sector);
4380 size = be32_to_cpu(p->blksize);
4382 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4384 dec_rs_pending(mdev);
4386 if (get_ldev_if_state(mdev, D_FAILED)) {
4387 drbd_rs_complete_io(mdev, sector);
4388 switch (be16_to_cpu(h->command)) {
4389 case P_NEG_RS_DREPLY:
4390 drbd_rs_failed_io(mdev, sector, size);
4404 static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
4406 struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4408 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4410 if (mdev->state.conn == C_AHEAD &&
4411 atomic_read(&mdev->ap_in_flight) == 0 &&
4412 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
4413 mdev->start_resync_timer.expires = jiffies + HZ;
4414 add_timer(&mdev->start_resync_timer);
4420 static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
4422 struct p_block_ack *p = (struct p_block_ack *)h;
4423 struct drbd_work *w;
4427 sector = be64_to_cpu(p->sector);
4428 size = be32_to_cpu(p->blksize);
4430 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4432 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4433 drbd_ov_oos_found(mdev, sector, size);
4437 if (!get_ldev(mdev))
4440 drbd_rs_complete_io(mdev, sector);
4441 dec_rs_pending(mdev);
4445 /* let's advance progress step marks only for every other megabyte */
4446 if ((mdev->ov_left & 0x200) == 0x200)
4447 drbd_advance_rs_marks(mdev, mdev->ov_left);
4449 if (mdev->ov_left == 0) {
4450 w = kmalloc(sizeof(*w), GFP_NOIO);
4452 w->cb = w_ov_finished;
4453 drbd_queue_work_front(&mdev->data.work, w);
4455 dev_err(DEV, "kmalloc(w) failed.\n");
4457 drbd_resync_finished(mdev);
4464 static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
4469 struct asender_cmd {
4471 int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header80), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header80), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]	    = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}

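/* The asender thread services the meta-data socket: it sends pings and
 * the ACKs for completed peer requests (done_ee), and receives and
 * dispatches the peer's ACK-like packets via the table above.  A late
 * PingAck or any receive error forces C_NETWORK_FAILURE. */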
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf    = h;
	int received = 0;
	int expect   = sizeof(struct p_header80);
	int empty;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

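	/* Main loop: flush our own pending ACKs (done_ee), then receive and
	 * dispatch the peer's packets, keeping the connection ping-timed. */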
	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			/* expect the PingAck within ping_timeo (deciseconds) */
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
		    3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);

		/* short circuit, recv_msg would return EINTR anyway. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv  < expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* the receive timeout is still the short ping timeout:
			 * the PingAck we asked for above never arrived */
			if (mdev->meta.socket->sk->sk_rcvtimeo ==
			    mdev->net_conf->ping_timeo*HZ/10) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			/* idle timeout expired: time for a keep-alive ping */
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

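		/* Received (part of) a packet.  Once the full p_header80 is
		 * in, validate the magic and look up the command; then keep
		 * receiving until all cmd->pkt_size bytes have arrived. */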
		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
		if (received == expect) {
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header80);
			cmd	 = NULL;
		}
	}

	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}