drbd: Track the reasons to suspend IO in dedicated state bits
drivers/block/drbd/drbd_receiver.c
/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

struct flush_work {
	struct drbd_work w;
	struct drbd_epoch *epoch;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);

static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
{
	struct drbd_epoch *prev;
	spin_lock(&mdev->epoch_lock);
	prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
	if (prev == epoch || prev == mdev->current_epoch)
		prev = NULL;
	spin_unlock(&mdev->epoch_lock);
	return prev;
}

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}
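
/*
 * Illustrative sketch, not part of the driver: how the page chain
 * helpers above fit together.  Guarded out with #if 0 so it is never
 * compiled; drbd_pp_vacant accounting is elided for brevity.
 */
#if 0
static void page_chain_example(void)
{
	struct page *head = NULL;
	struct page *p, *tail, *taken;
	int i, len;

	/* build a three page chain; the newest page becomes the head */
	for (i = 0; i < 3; i++) {
		p = alloc_page(GFP_TRY);
		if (!p) {
			page_chain_free(head);
			return;
		}
		set_page_private(p, (unsigned long)head);
		head = p;
	}

	/* find the tail outside the lock, then splice the whole chain
	 * onto the global pool under the lock */
	tail = page_chain_tail(head, &len);
	spin_lock(&drbd_pp_lock);
	page_chain_add(&drbd_pp_pool, head, tail);
	spin_unlock(&drbd_pp_lock);

	/* take two pages back off; the pool head is adjusted, or NULL
	 * is returned if fewer than two pages are linked */
	spin_lock(&drbd_pp_lock);
	taken = page_chain_del(&drbd_pp_pool, 2);
	spin_unlock(&drbd_pp_lock);
	if (taken)
		page_chain_free(taken);
}
#endif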

static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place.  */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

/* kick lower level device, if we have more than (arbitrary number)
 * reference counts on it, which typically are locally submitted io
 * requests.  don't use unacked_cnt, so we speed up proto A and B, too. */
static void maybe_kick_lo(struct drbd_conf *mdev)
{
	if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
		drbd_kick_lo(mdev);
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they also finish in
	   order. As soon as we see the first unfinished entry, we can
	   stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	maybe_kick_lo(mdev);
	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyway. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * It is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}
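
/*
 * Illustrative sketch, not part of the driver: the typical pairing of
 * drbd_pp_alloc() and drbd_pp_free().  Guarded out with #if 0; error
 * handling reduced to the bare minimum.
 */
#if 0
static void pp_alloc_example(struct drbd_conf *mdev)
{
	/* may block until pages become available, or a signal arrives */
	struct page *chain = drbd_pp_alloc(mdev, 4, true);

	if (!chain)
		return; /* interrupted by a signal */

	/* ... fill the pages via kmap()/kunmap() ... */

	/* give them back; accounted against pp_in_use (is_net == 0) */
	drbd_pp_free(mdev, chain, 0);
}
#endif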

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	INIT_HLIST_NODE(&e->colision);
	e->epoch = NULL;
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->size = data_size;
	e->flags = 0;
	e->sector = sector;
	e->block_id = id;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}

void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
	if (e->flags & EE_HAS_DIGEST)
		kfree(e->digest);
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->colision));
	mempool_free(e, drbd_ee_mempool);
}

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}
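
/*
 * Illustrative sketch, not part of the driver: the life cycle of a
 * single epoch entry outside of any list.  Guarded out with #if 0;
 * drbd_free_ee() is assumed to be the drbd_int.h wrapper around
 * drbd_free_some_ee(..., 0), as used by drbd_process_done_ee() below.
 */
#if 0
static void ee_example(struct drbd_conf *mdev, sector_t sector)
{
	struct drbd_epoch_entry *e;

	/* must not hold req_lock here, see the comment block above */
	e = drbd_alloc_ee(mdev, ID_SYNCER, sector, 4096, GFP_NOIO);
	if (!e)
		return;

	/* ... receive data into e->pages, submit, wait for completion ... */

	drbd_free_ee(mdev, e);
}
#endif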


/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		drbd_kick_lo(mdev);
		schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}
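
/*
 * Illustrative sketch, not part of the driver: the req_lock convention
 * documented above drbd_alloc_ee().  _drbd_wait_ee_list_empty() expects
 * the lock held; drbd_wait_ee_list_empty() takes it itself.  Guarded
 * out with #if 0.
 */
#if 0
static void ee_wait_example(struct drbd_conf *mdev)
{
	/* variant 1: the caller already holds req_lock for other work */
	spin_lock_irq(&mdev->req_lock);
	/* ... manipulate mdev->active_ee ... */
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	spin_unlock_irq(&mdev->req_lock);

	/* variant 2: convenience wrapper, must NOT hold req_lock */
	drbd_wait_ee_list_empty(mdev, &mdev->done_ee);
}
#endif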

/* See also kernel_accept(), which is only available since 2.6.18.
 * We also want to log exactly which part of it failed. */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops  = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	}

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
		unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}
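
/*
 * Illustrative sketch, not part of the driver: per the tcp(7) quote
 * above, buffer sizes must be set before connect()/listen(), which is
 * exactly the order drbd_try_connect() and drbd_wait_for_connect()
 * use below.  Guarded out with #if 0.
 */
#if 0
static void setbufsize_example(struct socket *sock, struct net_conf *nc,
			       struct sockaddr *peer, int peer_len)
{
	/* 1. size the socket buffers first ... */
	drbd_setbufsize(sock, nc->sndbuf_size, nc->rcvbuf_size);
	/* 2. ... only then establish the connection */
	sock->ops->connect(sock, peer, peer_len, 0);
}
#endif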

static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}

static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header80 *h = &mdev->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return FALSE;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return TRUE;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return FALSE;
	}
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	drbd_thread_start(&mdev->asender);

	if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
		drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
		put_ldev(mdev);
	}

	if (!drbd_send_protocol(mdev))
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
	union p_header *h = &mdev->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
		return FALSE;
	}

	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%lx c: %d l: %d\n",
		    (long)be32_to_cpu(h->h80.magic),
		    h->h80.command, h->h80.length);
		return FALSE;
	}
	mdev->last_received = jiffies;

	return TRUE;
}
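
/*
 * Illustrative sketch, not part of the driver: how a receive loop
 * consumes headers.  h80 is the original header with a 16 bit length,
 * h95 the protocol 95 variant with a 32 bit length; drbd_recv_header()
 * hides the difference.  Guarded out with #if 0.
 */
#if 0
static void recv_loop_example(struct drbd_conf *mdev)
{
	enum drbd_packets cmd;
	unsigned int packet_size;

	while (drbd_recv_header(mdev, &cmd, &packet_size)) {
		/* dispatch on cmd; packet_size bytes of payload follow
		 * on mdev->data.socket */
	}
}
#endif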

static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL, BLKDEV_IFL_WAIT);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}

	return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
}

static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct flush_work *fw = (struct flush_work *)w;
	struct drbd_epoch *epoch = fw->epoch;

	kfree(w);

	if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
		drbd_flush_after_epoch(mdev, epoch);

	drbd_may_finish_epoch(mdev, epoch, EV_PUT |
			      (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));

	return 1;
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int finish, epoch_size;
	struct drbd_epoch *next_epoch;
	int schedule_flush = 0;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;
		finish = 0;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);

			/* Special case: If we just switched from WO_bio_barrier to
			   WO_bdev_flush we should not finish the current epoch */
			if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
			    mdev->write_ordering != WO_bio_barrier &&
			    epoch == mdev->current_epoch)
				clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
			break;
		case EV_BARRIER_DONE:
			set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do */
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
		    epoch->list.prev == &mdev->current_epoch->list &&
		    !test_bit(DE_IS_FINISHING, &epoch->flags)) {
			/* Nearly all conditions are met to finish that epoch... */
			if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
			    mdev->write_ordering == WO_none ||
			    (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
			    ev & EV_CLEANUP) {
				finish = 1;
				set_bit(DE_IS_FINISHING, &epoch->flags);
			} else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
				 mdev->write_ordering == WO_bio_barrier) {
				atomic_inc(&epoch->active);
				schedule_flush = 1;
			}
		}
		if (finish) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	if (schedule_flush) {
		struct flush_work *fw;
		fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
		if (fw) {
			fw->w.cb = w_flush;
			fw->epoch = epoch;
			drbd_queue_work(&mdev->data.work, &fw->w);
		} else {
			dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
			/* That is not a recursion, only one level */
			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
			drbd_may_finish_epoch(mdev, epoch, EV_PUT);
		}
	}

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
		[WO_bio_barrier] = "barrier",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
		wo = WO_bdev_flush;
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}
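
/*
 * Illustrative sketch, not part of the driver: the ordering method only
 * ever degrades.  Asking for "barrier" on a disk configured with
 * no_disk_barrier immediately falls back to "flush" (or further), and a
 * later failure can demote it again, never promote it.  Guarded out
 * with #if 0; assumes the caller holds a local disk reference
 * (get_ldev), as required by __must_hold(local).
 */
#if 0
static void write_ordering_example(struct drbd_conf *mdev)
{
	/* start out as permissive as the disk configuration allows */
	drbd_bump_write_ordering(mdev, WO_bio_barrier);

	/* a failed flush later demotes us to drain, never back up,
	 * exactly as drbd_flush_after_epoch() does above */
	drbd_bump_write_ordering(mdev, WO_drain_io);
}
#endif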

/**
 * drbd_submit_ee() - Submit an epoch entry to the local disk as one or more bios
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:		flag field, see bio->bi_rw
 * @fault_type:	fault injection category, e.g. DRBD_FAULT_DT_WR
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	/* we special case some flags in the multi-bio case, see below
	 * (REQ_UNPLUG, REQ_HARDBARRIER) */
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* a single page must always be possible! */
			BUG_ON(bio->bi_vcnt == 0);
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&e->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		/* strip off REQ_UNPLUG unless it is the last bio */
		if (bios)
			bio->bi_rw &= ~REQ_UNPLUG;

		drbd_generic_make_request(mdev, fault_type, bio);

		/* strip off REQ_HARDBARRIER,
		 * unless it is the first or last bio */
		if (bios && bios->bi_next)
			bios->bi_rw &= ~REQ_HARDBARRIER;
	} while (bios);
	maybe_kick_lo(mdev);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return -ENOMEM;
}
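
/*
 * Illustrative sketch, not part of the driver: submitting an epoch
 * entry for local write-out, mirroring the retry pattern used by
 * w_e_reissue() below.  On -ENOMEM the entry is still owned by the
 * caller and may simply be requeued.  Guarded out with #if 0.
 */
#if 0
static void submit_ee_example(struct drbd_conf *mdev,
			      struct drbd_epoch_entry *e)
{
	e->w.cb = e_end_block;
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
		/* could not allocate enough bios; retry via the worker */
		e->w.cb = w_e_reissue;
		drbd_queue_work(&mdev->data.work, &e->w);
	}
}
#endif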

/**
 * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyway (unused in this callback)
 */
int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	/* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
	   (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
	   so that we can finish that epoch in drbd_may_finish_epoch().
	   That is necessary if we already have a long chain of Epochs, before
	   we realize that REQ_HARDBARRIER is actually not supported */

	/* As long as the -ENOTSUPP on the barrier is reported immediately
	   that will never trigger. If it is reported late, we will just
	   print that warning and continue correctly for all future requests
	   with WO_bdev_flush */
	if (previous_epoch(mdev, e->epoch))
		dev_warn(DEV, "Write ordering was not enforced (one time event)\n");

	/* we still have a local reference,
	 * get_ldev was done in receive_Data. */

	e->w.cb = e_end_block;
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
		/* drbd_submit_ee fails for one reason only:
		 * it was not able to allocate enough bios.
		 * Requeue, and try again later. */
		e->w.cb = w_e_reissue;
		drbd_queue_work(&mdev->data.work, &e->w);
	}
	return 1;
}

static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	int rv, issue_flush;
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
		drbd_kick_lo(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_bio_barrier:
	case WO_none:
		if (rv == FE_RECYCLED)
			return TRUE;
		break;

	case WO_bdev_flush:
	case WO_drain_io:
		if (rv == FE_STILL_LIVE) {
			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
			drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
		}
		if (rv == FE_RECYCLED)
			return TRUE;

		/* The asender will send all the ACKs and barrier ACKs out, since
		   all EEs moved from the active_ee to the done_ee. We need to
		   provide a new epoch object for the EEs that come in soon */
		break;
	}

	/* receiver context, in the writeout path of the other node.
	 * avoid potential distributed deadlock */
	epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
	if (!epoch) {
		dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
		issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		if (issue_flush) {
			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
			if (rv == FE_RECYCLED)
				return TRUE;
		}

		drbd_wait_ee_list_empty(mdev, &mdev->done_ee);

		return TRUE;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return TRUE;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct page *page;
	int dgs, ds, rr;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
			     rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	ERR_IF(data_size &  0x1ff) return NULL;
	ERR_IF(data_size >  DRBD_MAX_SEGMENT_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place.  */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!e)
		return NULL;

	ds = data_size;
	page = e->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		rr = drbd_recv(mdev, data, len);
		if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (rr != len) {
			drbd_free_ee(mdev, e);
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
			     rr, len);
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED.\n");
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return e;
}

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	if (!data_size)
		return TRUE;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
			     rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return rv;
}
1469
1470 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1471                            sector_t sector, int data_size)
1472 {
1473         struct bio_vec *bvec;
1474         struct bio *bio;
1475         int dgs, rr, i, expect;
1476         void *dig_in = mdev->int_dig_in;
1477         void *dig_vv = mdev->int_dig_vv;
1478
1479         dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1480                 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1481
1482         if (dgs) {
1483                 rr = drbd_recv(mdev, dig_in, dgs);
1484                 if (rr != dgs) {
1485                         dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
1486                              rr, dgs);
1487                         return 0;
1488                 }
1489         }
1490
1491         data_size -= dgs;
1492
1493         /* optimistically update recv_cnt.  if receiving fails below,
1494          * we disconnect anyways, and counters will be reset. */
1495         mdev->recv_cnt += data_size>>9;
1496
1497         bio = req->master_bio;
1498         D_ASSERT(sector == bio->bi_sector);
1499
1500         bio_for_each_segment(bvec, bio, i) {
1501                 expect = min_t(int, data_size, bvec->bv_len);
1502                 rr = drbd_recv(mdev,
1503                              kmap(bvec->bv_page)+bvec->bv_offset,
1504                              expect);
1505                 kunmap(bvec->bv_page);
1506                 if (rr != expect) {
1507                         dev_warn(DEV, "short read receiving data reply: "
1508                              "read %d expected %d\n",
1509                              rr, expect);
1510                         return 0;
1511                 }
1512                 data_size -= rr;
1513         }
1514
1515         if (dgs) {
1516                 drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
1517                 if (memcmp(dig_in, dig_vv, dgs)) {
1518                         dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1519                         return 0;
1520                 }
1521         }
1522
1523         D_ASSERT(data_size == 0);
1524         return 1;
1525 }
1526
1527 /* e_end_resync_block() is called via
1528  * drbd_process_done_ee() by asender only */
1529 static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1530 {
1531         struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1532         sector_t sector = e->sector;
1533         int ok;
1534
1535         D_ASSERT(hlist_unhashed(&e->colision));
1536
1537         if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1538                 drbd_set_in_sync(mdev, sector, e->size);
1539                 ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
1540         } else {
1541                 /* Record failure to sync */
1542                 drbd_rs_failed_io(mdev, sector, e->size);
1543
1544                 ok  = drbd_send_ack(mdev, P_NEG_ACK, e);
1545         }
1546         dec_unacked(mdev);
1547
1548         return ok;
1549 }
1550
1551 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1552 {
1553         struct drbd_epoch_entry *e;
1554
1555         e = read_in_block(mdev, ID_SYNCER, sector, data_size);
1556         if (!e)
1557                 goto fail;
1558
1559         dec_rs_pending(mdev);
1560
1561         inc_unacked(mdev);
1562         /* corresponding dec_unacked() in e_end_resync_block()
1563          * or in _drbd_clear_done_ee, respectively */
1564
1565         e->w.cb = e_end_resync_block;
1566
1567         spin_lock_irq(&mdev->req_lock);
1568         list_add(&e->w.list, &mdev->sync_ee);
1569         spin_unlock_irq(&mdev->req_lock);
1570
1571         atomic_add(data_size >> 9, &mdev->rs_sect_ev);
1572         if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
1573                 return TRUE;
1574
1575         drbd_free_ee(mdev, e);
1576 fail:
1577         put_ldev(mdev);
1578         return FALSE;
1579 }
1580
1581 static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1582 {
1583         struct drbd_request *req;
1584         sector_t sector;
1585         int ok;
1586         struct p_data *p = &mdev->data.rbuf.data;
1587
1588         sector = be64_to_cpu(p->sector);
1589
1590         spin_lock_irq(&mdev->req_lock);
1591         req = _ar_id_to_req(mdev, p->block_id, sector);
1592         spin_unlock_irq(&mdev->req_lock);
1593         if (unlikely(!req)) {
1594                 dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
1595                 return FALSE;
1596         }
1597
1598         /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
1599          * special casing it there for the various failure cases.
1600          * still no race with drbd_fail_pending_reads */
1601         ok = recv_dless_read(mdev, req, sector, data_size);
1602
1603         if (ok)
1604                 req_mod(req, data_received);
1605         /* else: nothing. handled from drbd_disconnect...
1606          * I don't think we may complete this just yet
1607          * in case we are "on-disconnect: freeze" */
1608
1609         return ok;
1610 }
1611
1612 static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1613 {
1614         sector_t sector;
1615         int ok;
1616         struct p_data *p = &mdev->data.rbuf.data;
1617
1618         sector = be64_to_cpu(p->sector);
1619         D_ASSERT(p->block_id == ID_SYNCER);
1620
1621         if (get_ldev(mdev)) {
1622                 /* data is submitted to disk within recv_resync_read.
1623                  * corresponding put_ldev done below on error,
1624                  * or in drbd_endio_write_sec. */
1625                 ok = recv_resync_read(mdev, sector, data_size);
1626         } else {
1627                 if (__ratelimit(&drbd_ratelimit_state))
1628                         dev_err(DEV, "Can not write resync data to local disk.\n");
1629
1630                 ok = drbd_drain_block(mdev, data_size);
1631
1632                 drbd_send_ack_dp(mdev, P_NEG_ACK, p);
1633         }
1634
1635         atomic_add(data_size >> 9, &mdev->rs_sect_in);
1636
1637         return ok;
1638 }
1639
1640 /* e_end_block() is called via drbd_process_done_ee().
1641  * this means this function only runs in the asender thread
1642  */
1643 static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1644 {
1645         struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1646         sector_t sector = e->sector;
1647         struct drbd_epoch *epoch;
1648         int ok = 1, pcmd;
1649
1650         if (e->flags & EE_IS_BARRIER) {
1651                 epoch = previous_epoch(mdev, e->epoch);
1652                 if (epoch)
1653                         drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
1654         }
1655
1656         if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
1657                 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1658                         pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1659                                 mdev->state.conn <= C_PAUSED_SYNC_T &&
1660                                 e->flags & EE_MAY_SET_IN_SYNC) ?
1661                                 P_RS_WRITE_ACK : P_WRITE_ACK;
1662                         ok &= drbd_send_ack(mdev, pcmd, e);
1663                         if (pcmd == P_RS_WRITE_ACK)
1664                                 drbd_set_in_sync(mdev, sector, e->size);
1665                 } else {
1666                         ok  = drbd_send_ack(mdev, P_NEG_ACK, e);
1667                         /* we expect it to be marked out of sync anyways...
1668                          * maybe assert this?  */
1669                 }
1670                 dec_unacked(mdev);
1671         }
1672         /* we delete from the conflict detection hash _after_ we sent out the
1673          * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right.  */
1674         if (mdev->net_conf->two_primaries) {
1675                 spin_lock_irq(&mdev->req_lock);
1676                 D_ASSERT(!hlist_unhashed(&e->colision));
1677                 hlist_del_init(&e->colision);
1678                 spin_unlock_irq(&mdev->req_lock);
1679         } else {
1680                 D_ASSERT(hlist_unhashed(&e->colision));
1681         }
1682
1683         drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1684
1685         return ok;
1686 }
1687
1688 static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1689 {
1690         struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1691         int ok = 1;
1692
1693         D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1694         ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1695
1696         spin_lock_irq(&mdev->req_lock);
1697         D_ASSERT(!hlist_unhashed(&e->colision));
1698         hlist_del_init(&e->colision);
1699         spin_unlock_irq(&mdev->req_lock);
1700
1701         dec_unacked(mdev);
1702
1703         return ok;
1704 }
1705
1706 /* Called from receive_Data.
1707  * Synchronize packets on sock with packets on msock.
1708  *
1709  * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1710  * packet traveling on msock, they are still processed in the order they have
1711  * been sent.
1712  *
1713  * Note: we don't care for Ack packets overtaking P_DATA packets.
1714  *
1715  * In case packet_seq is larger than mdev->peer_seq number, there are
1716  * outstanding packets on the msock. We wait for them to arrive.
1717  * In case we are the logically next packet, we update mdev->peer_seq
1718  * ourselves. Correctly handles 32bit wrap around.
1719  *
1720  * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1721  * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1722  * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1723  * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1724  *
1725  * returns 0 if we may process the packet,
1726  * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1727 static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1728 {
1729         DEFINE_WAIT(wait);
1730         unsigned int p_seq;
1731         long timeout;
1732         int ret = 0;
1733         spin_lock(&mdev->peer_seq_lock);
1734         for (;;) {
1735                 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1736                 if (seq_le(packet_seq, mdev->peer_seq+1))
1737                         break;
1738                 if (signal_pending(current)) {
1739                         ret = -ERESTARTSYS;
1740                         break;
1741                 }
1742                 p_seq = mdev->peer_seq;
1743                 spin_unlock(&mdev->peer_seq_lock);
1744                 timeout = schedule_timeout(30*HZ);
1745                 spin_lock(&mdev->peer_seq_lock);
1746                 if (timeout == 0 && p_seq == mdev->peer_seq) {
1747                         ret = -ETIMEDOUT;
1748                         dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1749                         break;
1750                 }
1751         }
1752         finish_wait(&mdev->seq_wait, &wait);
1753         if (mdev->peer_seq+1 == packet_seq)
1754                 mdev->peer_seq++;
1755         spin_unlock(&mdev->peer_seq_lock);
1756         return ret;
1757 }
1758
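/*
 * Illustrative sketch, not used by the driver: a wrap-around safe
 * "less or equal" on 32bit sequence numbers, as the seq_le() used
 * above presumably performs it.  Comparing via the signed difference
 * makes seq_le_sketch(0xffffffffU, 1) hold across the wrap, as long
 * as the two sequence numbers are less than 1<<31 apart.  The exact
 * in-tree helper may differ in name and form.
 */
#if 0
static inline int seq_le_sketch(u32 a, u32 b)
{
	return (s32)a - (s32)b <= 0;
}
#endif
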
1759 static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
1760 {
1761         if (mdev->agreed_pro_version >= 95)
1762                 return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1763                         (dpf & DP_UNPLUG ? REQ_UNPLUG : 0) |
1764                         (dpf & DP_FUA ? REQ_FUA : 0) |
1765                         (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
1766                         (dpf & DP_DISCARD ? REQ_DISCARD : 0);
1767         else
1768                 return dpf & DP_RW_SYNC ? (REQ_SYNC | REQ_UNPLUG) : 0;
1769 }
1770
1771 /* mirrored write */
1772 static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1773 {
1774         sector_t sector;
1775         struct drbd_epoch_entry *e;
1776         struct p_data *p = &mdev->data.rbuf.data;
1777         int rw = WRITE;
1778         u32 dp_flags;
1779
1780         if (!get_ldev(mdev)) {
1781                 if (__ratelimit(&drbd_ratelimit_state))
1782                         dev_err(DEV, "Can not write mirrored data block "
1783                             "to local disk.\n");
1784                 spin_lock(&mdev->peer_seq_lock);
1785                 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1786                         mdev->peer_seq++;
1787                 spin_unlock(&mdev->peer_seq_lock);
1788
1789                 drbd_send_ack_dp(mdev, P_NEG_ACK, p);
1790                 atomic_inc(&mdev->current_epoch->epoch_size);
1791                 return drbd_drain_block(mdev, data_size);
1792         }
1793
1794         /* get_ldev(mdev) successful.
1795          * Corresponding put_ldev done either below (on various errors),
1796          * or in drbd_endio_write_sec, if we successfully submit the data at
1797          * the end of this function. */
1798
1799         sector = be64_to_cpu(p->sector);
1800         e = read_in_block(mdev, p->block_id, sector, data_size);
1801         if (!e) {
1802                 put_ldev(mdev);
1803                 return FALSE;
1804         }
1805
1806         e->w.cb = e_end_block;
1807
1808         spin_lock(&mdev->epoch_lock);
1809         e->epoch = mdev->current_epoch;
1810         atomic_inc(&e->epoch->epoch_size);
1811         atomic_inc(&e->epoch->active);
1812
1813         if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
1814                 struct drbd_epoch *epoch;
1815                 /* Issue a barrier if we start a new epoch, and the previous epoch
1816                    was not an epoch containing a single request which already was
1817                    a Barrier. */
1818                 epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
1819                 if (epoch == e->epoch) {
1820                         set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1821                         rw |= REQ_HARDBARRIER;
1822                         e->flags |= EE_IS_BARRIER;
1823                 } else {
1824                         if (atomic_read(&epoch->epoch_size) > 1 ||
1825                             !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
1826                                 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1827                                 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1828                                 rw |= REQ_HARDBARRIER;
1829                                 e->flags |= EE_IS_BARRIER;
1830                         }
1831                 }
1832         }
1833         spin_unlock(&mdev->epoch_lock);
1834
1835         dp_flags = be32_to_cpu(p->dp_flags);
1836         rw |= write_flags_to_bio(mdev, dp_flags);
1837
1838         if (dp_flags & DP_MAY_SET_IN_SYNC)
1839                 e->flags |= EE_MAY_SET_IN_SYNC;
1840
1841         /* I'm the receiver, I do hold a net_cnt reference. */
1842         if (!mdev->net_conf->two_primaries) {
1843                 spin_lock_irq(&mdev->req_lock);
1844         } else {
1845                 /* don't get the req_lock yet,
1846                  * we may sleep in drbd_wait_peer_seq */
1847                 const int size = e->size;
1848                 const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1849                 DEFINE_WAIT(wait);
1850                 struct drbd_request *i;
1851                 struct hlist_node *n;
1852                 struct hlist_head *slot;
1853                 int first;
1854
1855                 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1856                 BUG_ON(mdev->ee_hash == NULL);
1857                 BUG_ON(mdev->tl_hash == NULL);
1858
1859                 /* conflict detection and handling:
1860                  * 1. wait on the sequence number,
1861                  *    in case this data packet overtook ACK packets.
1862                  * 2. check our hash tables for conflicting requests.
1863                  *    we only need to walk the tl_hash, since an ee cannot
1864                  *    have a conflict with another ee: on the submitting
1865                  *    node, the corresponding req had already been conflicting,
1866                  *    and a conflicting req is never sent.
1867                  *
1868                  * Note: for two_primaries, we are protocol C,
1869                  * so there cannot be any request that is DONE
1870                  * but still on the transfer log.
1871                  *
1872                  * unconditionally add to the ee_hash.
1873                  *
1874                  * if no conflicting request is found:
1875                  *    submit.
1876                  *
1877                  * if any conflicting request is found
1878                  * that has not yet been acked,
1879                  * AND I have the "discard concurrent writes" flag:
1880                  *       queue (via done_ee) the P_DISCARD_ACK; OUT.
1881                  *
1882                  * if any conflicting request is found:
1883                  *       block the receiver, waiting on misc_wait
1884                  *       until no more conflicting requests are there,
1885                  *       or we get interrupted (disconnect).
1886                  *
1887                  *       we do not just write after local io completion of those
1888                  *       requests, but only after req is done completely, i.e.
1889                  *       we wait for the P_DISCARD_ACK to arrive!
1890                  *
1891                  *       then proceed normally, i.e. submit.
1892                  */
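                /*
                 * Sketch of the interval test presumably behind the
                 * OVERLAPS macro below (illustrative only; the in-tree
                 * overlaps() helper may differ): sizes are in bytes and
                 * sectors are 512 bytes, so two requests conflict iff
                 *
                 *      s1 < s2 + (l2 >> 9)  &&  s2 < s1 + (l1 >> 9)
                 */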
1893                 if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1894                         goto out_interrupted;
1895
1896                 spin_lock_irq(&mdev->req_lock);
1897
1898                 hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));
1899
1900 #define OVERLAPS overlaps(i->sector, i->size, sector, size)
1901                 slot = tl_hash_slot(mdev, sector);
1902                 first = 1;
1903                 for (;;) {
1904                         int have_unacked = 0;
1905                         int have_conflict = 0;
1906                         prepare_to_wait(&mdev->misc_wait, &wait,
1907                                 TASK_INTERRUPTIBLE);
1908                         hlist_for_each_entry(i, n, slot, colision) {
1909                                 if (OVERLAPS) {
1910                                         /* only ALERT on first iteration,
1911                                          * we may be woken up early... */
1912                                         if (first)
1913                                                 dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1914                                                       " new: %llus +%u; pending: %llus +%u\n",
1915                                                       current->comm, current->pid,
1916                                                       (unsigned long long)sector, size,
1917                                                       (unsigned long long)i->sector, i->size);
1918                                         if (i->rq_state & RQ_NET_PENDING)
1919                                                 ++have_unacked;
1920                                         ++have_conflict;
1921                                 }
1922                         }
1923 #undef OVERLAPS
1924                         if (!have_conflict)
1925                                 break;
1926
1927                         /* Discard Ack only for the _first_ iteration */
1928                         if (first && discard && have_unacked) {
1929                                 dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1930                                      (unsigned long long)sector);
1931                                 inc_unacked(mdev);
1932                                 e->w.cb = e_send_discard_ack;
1933                                 list_add_tail(&e->w.list, &mdev->done_ee);
1934
1935                                 spin_unlock_irq(&mdev->req_lock);
1936
1937                                 /* we could probably send that P_DISCARD_ACK ourselves,
1938                                  * but I don't like the receiver using the msock */
1939
1940                                 put_ldev(mdev);
1941                                 wake_asender(mdev);
1942                                 finish_wait(&mdev->misc_wait, &wait);
1943                                 return TRUE;
1944                         }
1945
1946                         if (signal_pending(current)) {
1947                                 hlist_del_init(&e->colision);
1948
1949                                 spin_unlock_irq(&mdev->req_lock);
1950
1951                                 finish_wait(&mdev->misc_wait, &wait);
1952                                 goto out_interrupted;
1953                         }
1954
1955                         spin_unlock_irq(&mdev->req_lock);
1956                         if (first) {
1957                                 first = 0;
1958                                 dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1959                                      "sec=%llus\n", (unsigned long long)sector);
1960                         } else if (discard) {
1961                                 /* we had none on the first iteration.
1962                                  * there must be none now. */
1963                                 D_ASSERT(have_unacked == 0);
1964                         }
1965                         schedule();
1966                         spin_lock_irq(&mdev->req_lock);
1967                 }
1968                 finish_wait(&mdev->misc_wait, &wait);
1969         }
1970
1971         list_add(&e->w.list, &mdev->active_ee);
1972         spin_unlock_irq(&mdev->req_lock);
1973
1974         switch (mdev->net_conf->wire_protocol) {
1975         case DRBD_PROT_C:
1976                 inc_unacked(mdev);
1977                 /* corresponding dec_unacked() in e_end_block()
1978                  * or in _drbd_clear_done_ee, respectively */
1979                 break;
1980         case DRBD_PROT_B:
1981                 /* I really don't like it that the receiver thread
1982                  * sends on the msock, but anyways */
1983                 drbd_send_ack(mdev, P_RECV_ACK, e);
1984                 break;
1985         case DRBD_PROT_A:
1986                 /* nothing to do */
1987                 break;
1988         }
1989
1990         if (mdev->state.pdsk == D_DISKLESS) {
1991                 /* In case we have the only disk of the cluster, */
1992                 drbd_set_out_of_sync(mdev, e->sector, e->size);
1993                 e->flags |= EE_CALL_AL_COMPLETE_IO;
1994                 drbd_al_begin_io(mdev, e->sector);
1995         }
1996
1997         if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
1998                 return TRUE;
1999
2000 out_interrupted:
2001         /* yes, the epoch_size now is imbalanced.
2002          * but we drop the connection anyways, so we don't have a chance to
2003          * receive a barrier... atomic_inc(&mdev->epoch_size); */
2004         put_ldev(mdev);
2005         drbd_free_ee(mdev, e);
2006         return FALSE;
2007 }
2008
2009 /* We may throttle resync, if the lower device seems to be busy,
2010  * and current sync rate is above c_min_rate.
2011  *
2012  * To decide whether or not the lower device is busy, we use a scheme similar
2013  * To decide whether or not the lower device is busy, we use a scheme similar
2014  * to MD RAID's is_mddev_idle(): if the partition stats reveal "significant"
2015  * activity (more than 64 sectors) that we cannot account for with our own
2016  * resync activity, the device obviously is "busy".
2017  *
2018  * The current sync rate used here is computed from only the two most recent
2019  * step marks, to get a short-time average so we can react faster.
2020 int drbd_rs_should_slow_down(struct drbd_conf *mdev)
2021 {
2022         struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2023         unsigned long db, dt, dbdt;
2024         int curr_events;
2025         int throttle = 0;
2026
2027         /* feature disabled? */
2028         if (mdev->sync_conf.c_min_rate == 0)
2029                 return 0;
2030
2031         curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2032                       (int)part_stat_read(&disk->part0, sectors[1]) -
2033                         atomic_read(&mdev->rs_sect_ev);
2034         if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2035                 unsigned long rs_left;
2036                 int i;
2037
2038                 mdev->rs_last_events = curr_events;
2039
2040                 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2041                  * approx. */
2042                 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-2) % DRBD_SYNC_MARKS;
2043                 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
2044
2045                 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2046                 if (!dt)
2047                         dt++;
2048                 db = mdev->rs_mark_left[i] - rs_left;
2049                 dbdt = Bit2KB(db/dt);
2050
2051                 if (dbdt > mdev->sync_conf.c_min_rate)
2052                         throttle = 1;
2053         }
2054         return throttle;
2055 }
2056
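/*
 * Worked example for the short-time average above (a sketch; it assumes
 * the usual 4 KiB of device data per bitmap bit, i.e. Bit2KB(x) == x << 2):
 * if the two most recent sync marks are dt = 3 seconds apart and db = 6144
 * bitmap bits were cleaned in between, then dbdt = Bit2KB(6144 / 3) =
 * 8192 KiB/s.  With c_min_rate = 4000 (KiB/s), dbdt > c_min_rate, so we
 * report "throttle" while the lower device looks busy.
 */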
2057
2058 static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
2059 {
2060         sector_t sector;
2061         const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
2062         struct drbd_epoch_entry *e;
2063         struct digest_info *di = NULL;
2064         int size;
2065         unsigned int fault_type;
2066         struct p_block_req *p = &mdev->data.rbuf.block_req;
2067
2068         sector = be64_to_cpu(p->sector);
2069         size   = be32_to_cpu(p->blksize);
2070
2071         if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
2072                 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2073                                 (unsigned long long)sector, size);
2074                 return FALSE;
2075         }
2076         if (sector + (size>>9) > capacity) {
2077                 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2078                                 (unsigned long long)sector, size);
2079                 return FALSE;
2080         }
2081
2082         if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
2083                 if (__ratelimit(&drbd_ratelimit_state))
2084                         dev_err(DEV, "Can not satisfy peer's read request, "
2085                             "no local data.\n");
2086                 drbd_send_ack_rp(mdev, cmd == P_DATA_REQUEST ? P_NEG_DREPLY :
2087                                  P_NEG_RS_DREPLY , p);
2088                 /* drain the payload, if any */
2089                 return drbd_drain_block(mdev, digest_size);
2090         }
2091
2092         /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2093          * "criss-cross" setup, that might cause write-out on some other DRBD,
2094          * which in turn might block on the other node at this very place.  */
2095         e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
2096         if (!e) {
2097                 put_ldev(mdev);
2098                 return FALSE;
2099         }
2100
2101         switch (cmd) {
2102         case P_DATA_REQUEST:
2103                 e->w.cb = w_e_end_data_req;
2104                 fault_type = DRBD_FAULT_DT_RD;
2105                 /* application IO, don't drbd_rs_begin_io */
2106                 goto submit;
2107
2108         case P_RS_DATA_REQUEST:
2109                 e->w.cb = w_e_end_rsdata_req;
2110                 fault_type = DRBD_FAULT_RS_RD;
2111                 break;
2112
2113         case P_OV_REPLY:
2114         case P_CSUM_RS_REQUEST:
2115                 fault_type = DRBD_FAULT_RS_RD;
2116                 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2117                 if (!di)
2118                         goto out_free_e;
2119
2120                 di->digest_size = digest_size;
2121                 di->digest = (((char *)di)+sizeof(struct digest_info));
2122
2123                 e->digest = di;
2124                 e->flags |= EE_HAS_DIGEST;
2125
2126                 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2127                         goto out_free_e;
2128
2129                 if (cmd == P_CSUM_RS_REQUEST) {
2130                         D_ASSERT(mdev->agreed_pro_version >= 89);
2131                         e->w.cb = w_e_end_csum_rs_req;
2132                 } else if (cmd == P_OV_REPLY) {
2133                         e->w.cb = w_e_end_ov_reply;
2134                         dec_rs_pending(mdev);
2135                         /* drbd_rs_begin_io done when we sent this request,
2136                          * but accounting still needs to be done. */
2137                         goto submit_for_resync;
2138                 }
2139                 break;
2140
2141         case P_OV_REQUEST:
2142                 if (mdev->state.conn >= C_CONNECTED &&
2143                     mdev->state.conn != C_VERIFY_T)
2144                         dev_warn(DEV, "ASSERT FAILED: got P_OV_REQUEST while being %s\n",
2145                                 drbd_conn_str(mdev->state.conn));
2146                 if (mdev->ov_start_sector == ~(sector_t)0 &&
2147                     mdev->agreed_pro_version >= 90) {
2148                         mdev->ov_start_sector = sector;
2149                         mdev->ov_position = sector;
2150                         mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector);
2151                         dev_info(DEV, "Online Verify start sector: %llu\n",
2152                                         (unsigned long long)sector);
2153                 }
2154                 e->w.cb = w_e_end_ov_req;
2155                 fault_type = DRBD_FAULT_RS_RD;
2156                 break;
2157
2158         default:
2159                 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2160                     cmdname(cmd));
2161                 fault_type = DRBD_FAULT_MAX;
2162                 goto out_free_e;
2163         }
2164
2165         /* Throttling, drbd_rs_begin_io and the submit should become asynchronous
2166          * wrt the receiver, but that is not as straightforward as it may seem:
2167          * various places in the resync start and stop logic assume resync
2168          * requests are processed in order, so requeuing this on the worker thread
2169          * would introduce a bunch of new code for synchronization between threads.
2170          *
2171          * Unlimited throttling before drbd_rs_begin_io may stall the resync
2172          * "forever", throttling after drbd_rs_begin_io will lock that extent
2173          * for application writes for the same time.  For now, just throttle
2174          * here, where the rest of the code expects the receiver to sleep for
2175          * a while, anyways.
2176          */
2177
2178         /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2179          * this defers syncer requests for some time, before letting at least
2180          * one request through.  The resync controller on the receiving side
2181          * will adapt to the incoming rate accordingly.
2182          *
2183          * We cannot throttle here if remote is Primary/SyncTarget:
2184          * we would also throttle its application reads.
2185          * In that case, throttling is done on the SyncTarget only.
2186          */
2187         if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev))
2188                 msleep(100);
2189         if (drbd_rs_begin_io(mdev, e->sector))
2190                 goto out_free_e;
2191
2192 submit_for_resync:
2193         atomic_add(size >> 9, &mdev->rs_sect_ev);
2194
2195 submit:
2196         inc_unacked(mdev);
2197         spin_lock_irq(&mdev->req_lock);
2198         list_add_tail(&e->w.list, &mdev->read_ee);
2199         spin_unlock_irq(&mdev->req_lock);
2200
2201         if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
2202                 return TRUE;
2203
2204 out_free_e:
2205         put_ldev(mdev);
2206         drbd_free_ee(mdev, e);
2207         return FALSE;
2208 }
2209
2210 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2211 {
2212         int self, peer, rv = -100;
2213         unsigned long ch_self, ch_peer;
2214
2215         self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2216         peer = mdev->p_uuid[UI_BITMAP] & 1;
2217
2218         ch_peer = mdev->p_uuid[UI_SIZE];
2219         ch_self = mdev->comm_bm_set;
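
        /* Return convention, as interpreted by drbd_sync_handshake():
         *   1  -> the peer's data is discarded (we become sync source),
         *  -1  -> our data is discarded (we become sync target),
         * -100 -> no automatic decision could be reached. */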
2220
2221         switch (mdev->net_conf->after_sb_0p) {
2222         case ASB_CONSENSUS:
2223         case ASB_DISCARD_SECONDARY:
2224         case ASB_CALL_HELPER:
2225                 dev_err(DEV, "Configuration error.\n");
2226                 break;
2227         case ASB_DISCONNECT:
2228                 break;
2229         case ASB_DISCARD_YOUNGER_PRI:
2230                 if (self == 0 && peer == 1) {
2231                         rv = -1;
2232                         break;
2233                 }
2234                 if (self == 1 && peer == 0) {
2235                         rv =  1;
2236                         break;
2237                 }
2238                 /* Else fall through to one of the other strategies... */
2239         case ASB_DISCARD_OLDER_PRI:
2240                 if (self == 0 && peer == 1) {
2241                         rv = 1;
2242                         break;
2243                 }
2244                 if (self == 1 && peer == 0) {
2245                         rv = -1;
2246                         break;
2247                 }
2248                 /* Else fall through to one of the other strategies... */
2249                 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2250                      "Using discard-least-changes instead\n");
2251         case ASB_DISCARD_ZERO_CHG:
2252                 if (ch_peer == 0 && ch_self == 0) {
2253                         rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2254                                 ? -1 : 1;
2255                         break;
2256                 } else {
2257                         if (ch_peer == 0) { rv =  1; break; }
2258                         if (ch_self == 0) { rv = -1; break; }
2259                 }
2260                 if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2261                         break;
2262         case ASB_DISCARD_LEAST_CHG:
2263                 if      (ch_self < ch_peer)
2264                         rv = -1;
2265                 else if (ch_self > ch_peer)
2266                         rv =  1;
2267                 else /* ( ch_self == ch_peer ) */
2268                      /* Well, then use something else. */
2269                         rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2270                                 ? -1 : 1;
2271                 break;
2272         case ASB_DISCARD_LOCAL:
2273                 rv = -1;
2274                 break;
2275         case ASB_DISCARD_REMOTE:
2276                 rv =  1;
2277         }
2278
2279         return rv;
2280 }
2281
2282 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2283 {
2284         int self, peer, hg, rv = -100;
2285
2286         self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2287         peer = mdev->p_uuid[UI_BITMAP] & 1;
2288
2289         switch (mdev->net_conf->after_sb_1p) {
2290         case ASB_DISCARD_YOUNGER_PRI:
2291         case ASB_DISCARD_OLDER_PRI:
2292         case ASB_DISCARD_LEAST_CHG:
2293         case ASB_DISCARD_LOCAL:
2294         case ASB_DISCARD_REMOTE:
2295                 dev_err(DEV, "Configuration error.\n");
2296                 break;
2297         case ASB_DISCONNECT:
2298                 break;
2299         case ASB_CONSENSUS:
2300                 hg = drbd_asb_recover_0p(mdev);
2301                 if (hg == -1 && mdev->state.role == R_SECONDARY)
2302                         rv = hg;
2303                 if (hg == 1  && mdev->state.role == R_PRIMARY)
2304                         rv = hg;
2305                 break;
2306         case ASB_VIOLENTLY:
2307                 rv = drbd_asb_recover_0p(mdev);
2308                 break;
2309         case ASB_DISCARD_SECONDARY:
2310                 return mdev->state.role == R_PRIMARY ? 1 : -1;
2311         case ASB_CALL_HELPER:
2312                 hg = drbd_asb_recover_0p(mdev);
2313                 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2314                         self = drbd_set_role(mdev, R_SECONDARY, 0);
2315                          /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2316                           * we might be here in C_WF_REPORT_PARAMS which is transient.
2317                           * we do not need to wait for the after state change work either. */
2318                         self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2319                         if (self != SS_SUCCESS) {
2320                                 drbd_khelper(mdev, "pri-lost-after-sb");
2321                         } else {
2322                                 dev_warn(DEV, "Successfully gave up primary role.\n");
2323                                 rv = hg;
2324                         }
2325                 } else
2326                         rv = hg;
2327         }
2328
2329         return rv;
2330 }
2331
2332 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2333 {
2334         int self, peer, hg, rv = -100;
2335
2336         self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2337         peer = mdev->p_uuid[UI_BITMAP] & 1;
2338
2339         switch (mdev->net_conf->after_sb_2p) {
2340         case ASB_DISCARD_YOUNGER_PRI:
2341         case ASB_DISCARD_OLDER_PRI:
2342         case ASB_DISCARD_LEAST_CHG:
2343         case ASB_DISCARD_LOCAL:
2344         case ASB_DISCARD_REMOTE:
2345         case ASB_CONSENSUS:
2346         case ASB_DISCARD_SECONDARY:
2347                 dev_err(DEV, "Configuration error.\n");
2348                 break;
2349         case ASB_VIOLENTLY:
2350                 rv = drbd_asb_recover_0p(mdev);
2351                 break;
2352         case ASB_DISCONNECT:
2353                 break;
2354         case ASB_CALL_HELPER:
2355                 hg = drbd_asb_recover_0p(mdev);
2356                 if (hg == -1) {
2357                          /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2358                           * we might be here in C_WF_REPORT_PARAMS which is transient.
2359                           * we do not need to wait for the after state change work either. */
2360                         self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2361                         if (self != SS_SUCCESS) {
2362                                 drbd_khelper(mdev, "pri-lost-after-sb");
2363                         } else {
2364                                 dev_warn(DEV, "Successfully gave up primary role.\n");
2365                                 rv = hg;
2366                         }
2367                 } else
2368                         rv = hg;
2369         }
2370
2371         return rv;
2372 }
2373
2374 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2375                            u64 bits, u64 flags)
2376 {
2377         if (!uuid) {
2378                 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2379                 return;
2380         }
2381         dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2382              text,
2383              (unsigned long long)uuid[UI_CURRENT],
2384              (unsigned long long)uuid[UI_BITMAP],
2385              (unsigned long long)uuid[UI_HISTORY_START],
2386              (unsigned long long)uuid[UI_HISTORY_END],
2387              (unsigned long long)bits,
2388              (unsigned long long)flags);
2389 }
2390
2391 /*
2392   100   after split brain try auto recover
2393     2   C_SYNC_SOURCE set BitMap
2394     1   C_SYNC_SOURCE use BitMap
2395     0   no Sync
2396    -1   C_SYNC_TARGET use BitMap
2397    -2   C_SYNC_TARGET set BitMap
2398  -100   after split brain, disconnect
2399 -1000   unrelated data
2400  */
2401 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2402 {
2403         u64 self, peer;
2404         int i, j;
2405
2406         self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
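        /* Bit 0 of each UUID is (ab)used as a flag and is not part of the
         * generation identifier proper, hence the & ~((u64)1) masking
         * before any comparison below. */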
2407         peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2408
2409         *rule_nr = 10;
2410         if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2411                 return 0;
2412
2413         *rule_nr = 20;
2414         if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2415              peer != UUID_JUST_CREATED)
2416                 return -2;
2417
2418         *rule_nr = 30;
2419         if (self != UUID_JUST_CREATED &&
2420             (peer == UUID_JUST_CREATED || peer == (u64)0))
2421                 return 2;
2422
2423         if (self == peer) {
2424                 int rct, dc; /* roles at crash time */
2425
2426                 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2427
2428                         if (mdev->agreed_pro_version < 91)
2429                                 return -1001;
2430
2431                         if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2432                             (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2433                                 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2434                                 drbd_uuid_set_bm(mdev, 0UL);
2435
2436                                 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2437                                                mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2438                                 *rule_nr = 34;
2439                         } else {
2440                                 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2441                                 *rule_nr = 36;
2442                         }
2443
2444                         return 1;
2445                 }
2446
2447                 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2448
2449                         if (mdev->agreed_pro_version < 91)
2450                                 return -1001;
2451
2452                         if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2453                             (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2454                                 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2455
2456                                 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2457                                 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2458                                 mdev->p_uuid[UI_BITMAP] = 0UL;
2459
2460                                 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2461                                 *rule_nr = 35;
2462                         } else {
2463                                 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2464                                 *rule_nr = 37;
2465                         }
2466
2467                         return -1;
2468                 }
2469
2470                 /* Common power [off|failure] */
2471                 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2472                         (mdev->p_uuid[UI_FLAGS] & 2);
2473                 /* lowest bit is set when we were primary,
2474                  * next bit (weight 2) is set when peer was primary */
2475                 *rule_nr = 40;
2476
2477                 switch (rct) {
2478                 case 0: /* !self_pri && !peer_pri */ return 0;
2479                 case 1: /*  self_pri && !peer_pri */ return 1;
2480                 case 2: /* !self_pri &&  peer_pri */ return -1;
2481                 case 3: /*  self_pri &&  peer_pri */
2482                         dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2483                         return dc ? -1 : 1;
2484                 }
2485         }
2486
2487         *rule_nr = 50;
2488         peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2489         if (self == peer)
2490                 return -1;
2491
2492         *rule_nr = 51;
2493         peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2494         if (self == peer) {
2495                 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2496                 peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
2497                 if (self == peer) {
2498                         /* The last P_SYNC_UUID did not get through. Undo the UUID
2499                            modifications the peer made when it last started a resync as sync source. */
2500
2501                         if (mdev->agreed_pro_version < 91)
2502                                 return -1001;
2503
2504                         mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2505                         mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2506                         return -1;
2507                 }
2508         }
2509
2510         *rule_nr = 60;
2511         self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2512         for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2513                 peer = mdev->p_uuid[i] & ~((u64)1);
2514                 if (self == peer)
2515                         return -2;
2516         }
2517
2518         *rule_nr = 70;
2519         self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2520         peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2521         if (self == peer)
2522                 return 1;
2523
2524         *rule_nr = 71;
2525         self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2526         if (self == peer) {
2527                 self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
2528                 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2529                 if (self == peer) {
2530                         /* The last P_SYNC_UUID did not get through. Undo the UUID
2531                            modifications we made when we last started a resync as sync source. */
2532
2533                         if (mdev->agreed_pro_version < 91)
2534                                 return -1001;
2535
2536                         _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2537                         _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2538
2539                         dev_info(DEV, "Undid last start of resync:\n");
2540
2541                         drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2542                                        mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2543
2544                         return 1;
2545                 }
2546         }
2547
2548
2549         *rule_nr = 80;
2550         peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2551         for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2552                 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2553                 if (self == peer)
2554                         return 2;
2555         }
2556
2557         *rule_nr = 90;
2558         self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2559         peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2560         if (self == peer && self != ((u64)0))
2561                 return 100;
2562
2563         *rule_nr = 100;
2564         for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2565                 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2566                 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2567                         peer = mdev->p_uuid[j] & ~((u64)1);
2568                         if (self == peer)
2569                                 return -100;
2570                 }
2571         }
2572
2573         return -1000;
2574 }
2575
2576 /* drbd_sync_handshake() returns the new conn state on success, or
2577    C_MASK (-1) on failure.
2578  */
2579 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2580                                            enum drbd_disk_state peer_disk) __must_hold(local)
2581 {
2582         int hg, rule_nr;
2583         enum drbd_conns rv = C_MASK;
2584         enum drbd_disk_state mydisk;
2585
2586         mydisk = mdev->state.disk;
2587         if (mydisk == D_NEGOTIATING)
2588                 mydisk = mdev->new_state_tmp.disk;
2589
2590         dev_info(DEV, "drbd_sync_handshake:\n");
2591         drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2592         drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2593                        mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2594
2595         hg = drbd_uuid_compare(mdev, &rule_nr);
2596
2597         dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2598
2599         if (hg == -1000) {
2600                 dev_alert(DEV, "Unrelated data, aborting!\n");
2601                 return C_MASK;
2602         }
2603         if (hg == -1001) {
2604                 dev_alert(DEV, "To resolve this both sides have to support at least protocol 91\n");
2605                 return C_MASK;
2606         }
2607
2608         if    ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2609             (peer_disk == D_INCONSISTENT && mydisk    > D_INCONSISTENT)) {
2610                 int f = (hg == -100) || abs(hg) == 2;
2611                 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2612                 if (f)
2613                         hg = hg*2;
2614                 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2615                      hg > 0 ? "source" : "target");
2616         }
2617
2618         if (abs(hg) == 100)
2619                 drbd_khelper(mdev, "initial-split-brain");
2620
2621         if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2622                 int pcount = (mdev->state.role == R_PRIMARY)
2623                            + (peer_role == R_PRIMARY);
2624                 int forced = (hg == -100);
2625
2626                 switch (pcount) {
2627                 case 0:
2628                         hg = drbd_asb_recover_0p(mdev);
2629                         break;
2630                 case 1:
2631                         hg = drbd_asb_recover_1p(mdev);
2632                         break;
2633                 case 2:
2634                         hg = drbd_asb_recover_2p(mdev);
2635                         break;
2636                 }
2637                 if (abs(hg) < 100) {
2638                         dev_warn(DEV, "Split-Brain detected, %d primaries, "
2639                              "automatically solved. Sync from %s node\n",
2640                              pcount, (hg < 0) ? "peer" : "this");
2641                         if (forced) {
2642                                 dev_warn(DEV, "Doing a full sync, since"
2643                                      " UUIDs were ambiguous.\n");
2644                                 hg = hg*2;
2645                         }
2646                 }
2647         }
2648
2649         if (hg == -100) {
2650                 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2651                         hg = -1;
2652                 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2653                         hg = 1;
2654
2655                 if (abs(hg) < 100)
2656                         dev_warn(DEV, "Split-Brain detected, manually solved. "
2657                              "Sync from %s node\n",
2658                              (hg < 0) ? "peer" : "this");
2659         }
2660
2661         if (hg == -100) {
2662                 /* FIXME this log message is not correct if we end up here
2663                  * after an attempted attach on a diskless node.
2664                  * We just refuse to attach -- well, we drop the "connection"
2665                  * to that disk, in a way... */
2666                 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
2667                 drbd_khelper(mdev, "split-brain");
2668                 return C_MASK;
2669         }
2670
2671         if (hg > 0 && mydisk <= D_INCONSISTENT) {
2672                 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2673                 return C_MASK;
2674         }
2675
2676         if (hg < 0 && /* by intention we do not use mydisk here. */
2677             mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2678                 switch (mdev->net_conf->rr_conflict) {
2679                 case ASB_CALL_HELPER:
2680                         drbd_khelper(mdev, "pri-lost");
2681                         /* fall through */
2682                 case ASB_DISCONNECT:
2683                         dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2684                         return C_MASK;
2685                 case ASB_VIOLENTLY:
2686                         dev_warn(DEV, "Becoming SyncTarget, violating the stable-data "
2687                              "assumption\n");
2688                 }
2689         }
2690
2691         if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2692                 if (hg == 0)
2693                         dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2694                 else
2695                         dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.\n",
2696                                  drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2697                                  abs(hg) >= 2 ? "full" : "bit-map based");
2698                 return C_MASK;
2699         }
2700
2701         if (abs(hg) >= 2) {
2702                 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2703                 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
2704                         return C_MASK;
2705         }
2706
2707         if (hg > 0) { /* become sync source. */
2708                 rv = C_WF_BITMAP_S;
2709         } else if (hg < 0) { /* become sync target */
2710                 rv = C_WF_BITMAP_T;
2711         } else {
2712                 rv = C_CONNECTED;
2713                 if (drbd_bm_total_weight(mdev)) {
2714                         dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2715                              drbd_bm_total_weight(mdev));
2716                 }
2717         }
2718
2719         return rv;
2720 }
2721
2722 /* returns 1 if invalid */
2723 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2724 {
2725         /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2726         if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2727             (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2728                 return 0;
2729
2730         /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2731         if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2732             self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2733                 return 1;
2734
2735         /* everything else is valid if they are equal on both sides. */
2736         if (peer == self)
2737                 return 0;
2738
2739         /* everything else is invalid. */
2740         return 1;
2741 }
2742
2743 static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2744 {
2745         struct p_protocol *p = &mdev->data.rbuf.protocol;
2746         int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2747         int p_want_lose, p_two_primaries, cf;
2748         char p_integrity_alg[SHARED_SECRET_MAX] = "";
2749
2750         p_proto         = be32_to_cpu(p->protocol);
2751         p_after_sb_0p   = be32_to_cpu(p->after_sb_0p);
2752         p_after_sb_1p   = be32_to_cpu(p->after_sb_1p);
2753         p_after_sb_2p   = be32_to_cpu(p->after_sb_2p);
2754         p_two_primaries = be32_to_cpu(p->two_primaries);
2755         cf              = be32_to_cpu(p->conn_flags);
2756         p_want_lose = cf & CF_WANT_LOSE;
2757
2758         clear_bit(CONN_DRY_RUN, &mdev->flags);
2759
2760         if (cf & CF_DRY_RUN)
2761                 set_bit(CONN_DRY_RUN, &mdev->flags);
2762
2763         if (p_proto != mdev->net_conf->wire_protocol) {
2764                 dev_err(DEV, "incompatible communication protocols\n");
2765                 goto disconnect;
2766         }
2767
2768         if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2769                 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2770                 goto disconnect;
2771         }
2772
2773         if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2774                 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2775                 goto disconnect;
2776         }
2777
2778         if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2779                 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2780                 goto disconnect;
2781         }
2782
2783         if (p_want_lose && mdev->net_conf->want_lose) {
2784                 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2785                 goto disconnect;
2786         }
2787
2788         if (p_two_primaries != mdev->net_conf->two_primaries) {
2789                 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2790                 goto disconnect;
2791         }
2792
2793         if (mdev->agreed_pro_version >= 87) {
2794                 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2795
2796                 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2797                         return FALSE;
2798
2799                 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2800                 if (strcmp(p_integrity_alg, my_alg)) {
2801                         dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2802                         goto disconnect;
2803                 }
2804                 dev_info(DEV, "data-integrity-alg: %s\n",
2805                      my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2806         }
2807
2808         return TRUE;
2809
2810 disconnect:
2811         drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2812         return FALSE;
2813 }
2814
2815 /* helper function
2816  * input: alg name, feature name
2817  * return: NULL (alg name was "")
2818  *         ERR_PTR(error) if something goes wrong
2819  *         or the crypto hash ptr, if it worked out ok. */
2820 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2821                 const char *alg, const char *name)
2822 {
2823         struct crypto_hash *tfm;
2824
2825         if (!alg[0])
2826                 return NULL;
2827
2828         tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2829         if (IS_ERR(tfm)) {
2830                 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2831                         alg, name, PTR_ERR(tfm));
2832                 return tfm;
2833         }
2834         if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2835                 crypto_free_hash(tfm);
2836                 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2837                 return ERR_PTR(-EINVAL);
2838         }
2839         return tfm;
2840 }
2841
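     /*
      * Sketch of the three-way return convention above, matching how
      * receive_SyncParam() below consumes it:
      *
      *    tfm = drbd_crypto_alloc_digest_safe(mdev, alg, "verify-alg");
      *    if (IS_ERR(tfm))
      *            goto disconnect;    <- allocation or type check failed
      *    if (tfm == NULL)
      *            ...                 <- alg was "", feature stays unused
      */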
2842 static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
2843 {
2844         int ok = TRUE;
2845         struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
2846         unsigned int header_size, data_size, exp_max_sz;
2847         struct crypto_hash *verify_tfm = NULL;
2848         struct crypto_hash *csums_tfm = NULL;
2849         const int apv = mdev->agreed_pro_version;
2850         int *rs_plan_s = NULL;
2851         int fifo_size = 0;
2852
2853         exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
2854                     : apv == 88 ? sizeof(struct p_rs_param)
2855                                         + SHARED_SECRET_MAX
2856                     : apv <= 94 ? sizeof(struct p_rs_param_89)
2857                     : /* apv >= 95 */ sizeof(struct p_rs_param_95);
2858
2859         if (packet_size > exp_max_sz) {
2860                 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2861                     packet_size, exp_max_sz);
2862                 return FALSE;
2863         }
2864
2865         if (apv <= 88) {
2866                 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2867                 data_size   = packet_size  - header_size;
2868         } else if (apv <= 94) {
2869                 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2870                 data_size   = packet_size  - header_size;
2871                 D_ASSERT(data_size == 0);
2872         } else {
2873                 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2874                 data_size   = packet_size  - header_size;
2875                 D_ASSERT(data_size == 0);
2876         }
2877
2878         /* initialize verify_alg and csums_alg */
2879         memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
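             /* this one memset also covers csums_alg, relying on verify_alg
              * and csums_alg being adjacent in struct p_rs_param_89 */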
2880
2881         if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
2882                 return FALSE;
2883
2884         mdev->sync_conf.rate      = be32_to_cpu(p->rate);
2885
2886         if (apv >= 88) {
2887                 if (apv == 88) {
2888                         if (data_size > SHARED_SECRET_MAX) {
2889                                 dev_err(DEV, "verify-alg too long, "
2890                                     "peer wants %u, accepting only %u byte\n",
2891                                                 data_size, SHARED_SECRET_MAX);
2892                                 return FALSE;
2893                         }
2894
2895                         if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2896                                 return FALSE;
2897
2898                         /* we expect NUL terminated string */
2899                         /* but just in case someone tries to be evil */
2900                         D_ASSERT(p->verify_alg[data_size-1] == 0);
2901                         p->verify_alg[data_size-1] = 0;
2902
2903                 } else /* apv >= 89 */ {
2904                         /* we still expect NUL terminated strings */
2905                         /* but just in case someone tries to be evil */
2906                         D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2907                         D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2908                         p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2909                         p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2910                 }
2911
2912                 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2913                         if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2914                                 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2915                                     mdev->sync_conf.verify_alg, p->verify_alg);
2916                                 goto disconnect;
2917                         }
2918                         verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2919                                         p->verify_alg, "verify-alg");
2920                         if (IS_ERR(verify_tfm)) {
2921                                 verify_tfm = NULL;
2922                                 goto disconnect;
2923                         }
2924                 }
2925
2926                 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2927                         if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2928                                 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2929                                     mdev->sync_conf.csums_alg, p->csums_alg);
2930                                 goto disconnect;
2931                         }
2932                         csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2933                                         p->csums_alg, "csums-alg");
2934                         if (IS_ERR(csums_tfm)) {
2935                                 csums_tfm = NULL;
2936                                 goto disconnect;
2937                         }
2938                 }
2939
2940                 if (apv > 94) {
2941                         mdev->sync_conf.rate      = be32_to_cpu(p->rate);
2942                         mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2943                         mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2944                         mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2945                         mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
2946
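                             /* one fifo slot per controller tick; with
                              * SLEEP_TIME defined as HZ/10 in drbd_int.h
                              * this works out to fifo_size == c_plan_ahead */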
2947                         fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
2948                         if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2949                                 rs_plan_s   = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
2950                                 if (!rs_plan_s) {
2951                                         dev_err(DEV, "kzalloc of fifo_buffer failed\n");
2952                                         goto disconnect;
2953                                 }
2954                         }
2955                 }
2956
2957                 spin_lock(&mdev->peer_seq_lock);
2958                 /* lock against drbd_nl_syncer_conf() */
2959                 if (verify_tfm) {
2960                         strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2961                         mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2962                         crypto_free_hash(mdev->verify_tfm);
2963                         mdev->verify_tfm = verify_tfm;
2964                         dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2965                 }
2966                 if (csums_tfm) {
2967                         strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2968                         mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2969                         crypto_free_hash(mdev->csums_tfm);
2970                         mdev->csums_tfm = csums_tfm;
2971                         dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2972                 }
2973                 if (fifo_size != mdev->rs_plan_s.size) {
2974                         kfree(mdev->rs_plan_s.values);
2975                         mdev->rs_plan_s.values = rs_plan_s;
2976                         mdev->rs_plan_s.size   = fifo_size;
2977                         mdev->rs_planed = 0;
2978                 }
2979                 spin_unlock(&mdev->peer_seq_lock);
2980         }
2981
2982         return ok;
2983 disconnect:
2984         /* just for completeness: actually not needed,
2985          * as this is not reached if csums_tfm was ok. */
2986         crypto_free_hash(csums_tfm);
2987         /* but free the verify_tfm again, if csums_tfm did not work out */
2988         crypto_free_hash(verify_tfm);
2989         drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2990         return FALSE;
2991 }
2992
2993 static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
2994 {
2995         /* sorry, we currently have no working implementation
2996          * of distributed TCQ */
2997 }
2998
2999 /* warn if the arguments differ by more than 12.5% */
3000 static void warn_if_differ_considerably(struct drbd_conf *mdev,
3001         const char *s, sector_t a, sector_t b)
3002 {
3003         sector_t d;
3004         if (a == 0 || b == 0)
3005                 return;
3006         d = (a > b) ? (a - b) : (b - a);
3007         if (d > (a>>3) || d > (b>>3))
3008                 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3009                      (unsigned long long)a, (unsigned long long)b);
3010 }
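     /*
      * Worked example: a = 1000, b = 800 sectors gives d = 200; the 12.5%
      * thresholds are a>>3 = 125 and b>>3 = 100, so the warning fires
      * (the difference is 25% of the smaller value).
      */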
3011
3012 static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3013 {
3014         struct p_sizes *p = &mdev->data.rbuf.sizes;
3015         enum determine_dev_size dd = unchanged;
3016         unsigned int max_seg_s;
3017         sector_t p_size, p_usize, my_usize;
3018         int ldsc = 0; /* local disk size changed */
3019         enum dds_flags ddsf;
3020
3021         p_size = be64_to_cpu(p->d_size);
3022         p_usize = be64_to_cpu(p->u_size);
3023
3024         if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
3025                 dev_err(DEV, "some backing storage is needed\n");
3026                 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3027                 return FALSE;
3028         }
3029
3030         /* just store the peer's disk size for now.
3031          * we still need to figure out whether we accept that. */
3032         mdev->p_size = p_size;
3033
3034 #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
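     /* e.g. min_not_zero(0, 5) == 5, min_not_zero(3, 0) == 3,
      * min_not_zero(3, 5) == 3 -- a size of 0 means "not configured" */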
3035         if (get_ldev(mdev)) {
3036                 warn_if_differ_considerably(mdev, "lower level device sizes",
3037                            p_size, drbd_get_max_capacity(mdev->ldev));
3038                 warn_if_differ_considerably(mdev, "user requested size",
3039                                             p_usize, mdev->ldev->dc.disk_size);
3040
3041                 /* if this is the first connect, or an otherwise expected
3042                  * param exchange, choose the minimum */
3043                 if (mdev->state.conn == C_WF_REPORT_PARAMS)
3044                         p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
3045                                              p_usize);
3046
3047                 my_usize = mdev->ldev->dc.disk_size;
3048
3049                 if (mdev->ldev->dc.disk_size != p_usize) {
3050                         mdev->ldev->dc.disk_size = p_usize;
3051                         dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3052                              (unsigned long)mdev->ldev->dc.disk_size);
3053                 }
3054
3055                 /* Never shrink a device with usable data during connect.
3056                    But allow online shrinking if we are connected. */
3057                 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
3058                    drbd_get_capacity(mdev->this_bdev) &&
3059                    mdev->state.disk >= D_OUTDATED &&
3060                    mdev->state.conn < C_CONNECTED) {
3061                         dev_err(DEV, "The peer's disk size is too small!\n");
3062                         drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3063                         mdev->ldev->dc.disk_size = my_usize;
3064                         put_ldev(mdev);
3065                         return FALSE;
3066                 }
3067                 put_ldev(mdev);
3068         }
3069 #undef min_not_zero
3070
3071         ddsf = be16_to_cpu(p->dds_flags);
3072         if (get_ldev(mdev)) {
3073                 dd = drbd_determin_dev_size(mdev, ddsf);
3074                 put_ldev(mdev);
3075                 if (dd == dev_size_error)
3076                         return FALSE;
3077                 drbd_md_sync(mdev);
3078         } else {
3079                 /* I am diskless, need to accept the peer's size. */
3080                 drbd_set_my_capacity(mdev, p_size);
3081         }
3082
3083         if (get_ldev(mdev)) {
3084                 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3085                         mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3086                         ldsc = 1;
3087                 }
3088
3089                 if (mdev->agreed_pro_version < 94)
3090                         max_seg_s = be32_to_cpu(p->max_segment_size);
3091                 else /* drbd 8.3.8 onwards */
3092                         max_seg_s = DRBD_MAX_SEGMENT_SIZE;
3093
3094                 if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
3095                         drbd_setup_queue_param(mdev, max_seg_s);
3096
3097                 drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
3098                 put_ldev(mdev);
3099         }
3100
3101         if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3102                 if (be64_to_cpu(p->c_size) !=
3103                     drbd_get_capacity(mdev->this_bdev) || ldsc) {
3104                         /* we have different sizes, probably peer
3105                          * needs to know my new size... */
3106                         drbd_send_sizes(mdev, 0, ddsf);
3107                 }
3108                 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3109                     (dd == grew && mdev->state.conn == C_CONNECTED)) {
3110                         if (mdev->state.pdsk >= D_INCONSISTENT &&
3111                             mdev->state.disk >= D_INCONSISTENT) {
3112                                 if (ddsf & DDSF_NO_RESYNC)
3113                                         dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3114                                 else
3115                                         resync_after_online_grow(mdev);
3116                         } else
3117                                 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3118                 }
3119         }
3120
3121         return TRUE;
3122 }
3123
3124 static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3125 {
3126         struct p_uuids *p = &mdev->data.rbuf.uuids;
3127         u64 *p_uuid;
3128         int i;
3129
3130         p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
             if (!p_uuid) {
                     dev_err(DEV, "kmalloc of p_uuid failed\n");
                     return FALSE;
             }
3131
3132         for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3133                 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3134
3135         kfree(mdev->p_uuid);
3136         mdev->p_uuid = p_uuid;
3137
3138         if (mdev->state.conn < C_CONNECTED &&
3139             mdev->state.disk < D_INCONSISTENT &&
3140             mdev->state.role == R_PRIMARY &&
3141             (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3142                 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3143                     (unsigned long long)mdev->ed_uuid);
3144                 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3145                 return FALSE;
3146         }
3147
3148         if (get_ldev(mdev)) {
3149                 int skip_initial_sync =
3150                         mdev->state.conn == C_CONNECTED &&
3151                         mdev->agreed_pro_version >= 90 &&
3152                         mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3153                         (p_uuid[UI_FLAGS] & 8);
3154                 if (skip_initial_sync) {
3155                         dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3156                         drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3157                                         "clear_n_write from receive_uuids");
3158                         _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3159                         _drbd_uuid_set(mdev, UI_BITMAP, 0);
3160                         _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3161                                         CS_VERBOSE, NULL);
3162                         drbd_md_sync(mdev);
3163                 }
3164                 put_ldev(mdev);
3165         } else if (mdev->state.disk < D_INCONSISTENT &&
3166                    mdev->state.role == R_PRIMARY) {
3167                 /* I am a diskless primary, the peer just created a new current UUID
3168                    for me. */
3169                 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3170         }
3171
3172         /* Before we test for the disk state, we should wait until a possibly
3173            ongoing cluster wide state change has finished. That is important if
3174            we are primary and are detaching from our disk. We need to see the
3175            new disk state... */
3176         wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3177         if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3178                 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3179
3180         return TRUE;
3181 }
3182
3183 /**
3184  * convert_state() - Converts the peer's view of the cluster state to our point of view
3185  * @ps:         The state as seen by the peer.
3186  */
3187 static union drbd_state convert_state(union drbd_state ps)
3188 {
3189         union drbd_state ms;
3190
3191         static enum drbd_conns c_tab[] = {
3192                 [C_CONNECTED] = C_CONNECTED,
3193
3194                 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3195                 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3196                 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3197                 [C_VERIFY_S]       = C_VERIFY_T,
3198                 [C_MASK]   = C_MASK,
3199         };
3200
3201         ms.i = ps.i;
3202
3203         ms.conn = c_tab[ps.conn];
3204         ms.peer = ps.role;
3205         ms.role = ps.peer;
3206         ms.pdsk = ps.disk;
3207         ms.disk = ps.pdsk;
3208         ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3209
3210         return ms;
3211 }
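     /*
      * Example: the peer reports { role=Primary, peer=Secondary,
      * disk=UpToDate, pdsk=Inconsistent }; seen from this node that is
      * { role=Secondary, peer=Primary, disk=Inconsistent, pdsk=UpToDate }.
      * The role/peer and disk/pdsk pairs are simply swapped, conn is
      * mapped through c_tab.
      */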
3212
3213 static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3214 {
3215         struct p_req_state *p = &mdev->data.rbuf.req_state;
3216         union drbd_state mask, val;
3217         int rv;
3218
3219         mask.i = be32_to_cpu(p->mask);
3220         val.i = be32_to_cpu(p->val);
3221
3222         if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3223             test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3224                 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3225                 return TRUE;
3226         }
3227
3228         mask = convert_state(mask);
3229         val = convert_state(val);
3230
3231         rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3232
3233         drbd_send_sr_reply(mdev, rv);
3234         drbd_md_sync(mdev);
3235
3236         return TRUE;
3237 }
3238
3239 static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3240 {
3241         struct p_state *p = &mdev->data.rbuf.state;
3242         enum drbd_conns nconn, oconn;
3243         union drbd_state ns, peer_state;
3244         enum drbd_disk_state real_peer_disk;
3245         enum chg_state_flags cs_flags;
3246         int rv;
3247
3248         peer_state.i = be32_to_cpu(p->state);
3249
3250         real_peer_disk = peer_state.disk;
3251         if (peer_state.disk == D_NEGOTIATING) {
3252                 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3253                 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3254         }
3255
3256         spin_lock_irq(&mdev->req_lock);
3257  retry:
3258         oconn = nconn = mdev->state.conn;
3259         spin_unlock_irq(&mdev->req_lock);
3260
3261         if (nconn == C_WF_REPORT_PARAMS)
3262                 nconn = C_CONNECTED;
3263
3264         if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3265             get_ldev_if_state(mdev, D_NEGOTIATING)) {
3266                 int cr; /* consider resync */
3267
3268                 /* if we established a new connection */
3269                 cr  = (oconn < C_CONNECTED);
3270                 /* if we had an established connection
3271                  * and one of the nodes newly attaches a disk */
3272                 cr |= (oconn == C_CONNECTED &&
3273                        (peer_state.disk == D_NEGOTIATING ||
3274                         mdev->state.disk == D_NEGOTIATING));
3275                 /* if we have both been inconsistent, and the peer has been
3276                  * forced to be UpToDate with --overwrite-data */
3277                 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3278                 /* if we had been plain connected, and the admin requested to
3279                  * start a sync by "invalidate" or "invalidate-remote" */
3280                 cr |= (oconn == C_CONNECTED &&
3281                                 (peer_state.conn >= C_STARTING_SYNC_S &&
3282                                  peer_state.conn <= C_WF_BITMAP_T));
3283
3284                 if (cr)
3285                         nconn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3286
3287                 put_ldev(mdev);
3288                 if (nconn == C_MASK) {
3289                         nconn = C_CONNECTED;
3290                         if (mdev->state.disk == D_NEGOTIATING) {
3291                                 drbd_force_state(mdev, NS(disk, D_DISKLESS));
3292                         } else if (peer_state.disk == D_NEGOTIATING) {
3293                                 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3294                                 peer_state.disk = D_DISKLESS;
3295                                 real_peer_disk = D_DISKLESS;
3296                         } else {
3297                                 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
3298                                         return FALSE;
3299                                 D_ASSERT(oconn == C_WF_REPORT_PARAMS);
3300                                 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3301                                 return FALSE;
3302                         }
3303                 }
3304         }
3305
3306         spin_lock_irq(&mdev->req_lock);
3307         if (mdev->state.conn != oconn)
3308                 goto retry;
3309         clear_bit(CONSIDER_RESYNC, &mdev->flags);
3310         ns.i = mdev->state.i;
3311         ns.conn = nconn;
3312         ns.peer = peer_state.role;
3313         ns.pdsk = real_peer_disk;
3314         ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3315         if ((nconn == C_CONNECTED || nconn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3316                 ns.disk = mdev->new_state_tmp.disk;
3317         cs_flags = CS_VERBOSE + (oconn < C_CONNECTED && nconn >= C_CONNECTED ? 0 : CS_HARD);
3318         if (ns.pdsk == D_CONSISTENT && is_susp(ns) && nconn == C_CONNECTED && oconn < C_CONNECTED &&
3319             test_bit(NEW_CUR_UUID, &mdev->flags)) {
3320                 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3321                    for temporary network outages! */
3322                 spin_unlock_irq(&mdev->req_lock);
3323                 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3324                 tl_clear(mdev);
3325                 drbd_uuid_new_current(mdev);
3326                 clear_bit(NEW_CUR_UUID, &mdev->flags);
3327                 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
3328                 return FALSE;
3329         }
3330         rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3331         ns = mdev->state;
3332         spin_unlock_irq(&mdev->req_lock);
3333
3334         if (rv < SS_SUCCESS) {
3335                 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3336                 return FALSE;
3337         }
3338
3339         if (oconn > C_WF_REPORT_PARAMS) {
3340                 if (nconn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3341                     peer_state.disk != D_NEGOTIATING ) {
3342                         /* we want resync, peer has not yet decided to sync... */
3343                         /* Nowadays only used when forcing a node into primary role and
3344                            setting its disk to UpToDate with that */
3345                         drbd_send_uuids(mdev);
3346                         drbd_send_state(mdev);
3347                 }
3348         }
3349
3350         mdev->net_conf->want_lose = 0;
3351
3352         drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3353
3354         return TRUE;
3355 }
3356
3357 static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3358 {
3359         struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
3360
3361         wait_event(mdev->misc_wait,
3362                    mdev->state.conn == C_WF_SYNC_UUID ||
3363                    mdev->state.conn < C_CONNECTED ||
3364                    mdev->state.disk < D_NEGOTIATING);
3365
3366         /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3367
3368         /* Here the _drbd_uuid_ functions are right, current should
3369            _not_ be rotated into the history */
3370         if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3371                 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3372                 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3373
3374                 drbd_start_resync(mdev, C_SYNC_TARGET);
3375
3376                 put_ldev(mdev);
3377         } else
3378                 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3379
3380         return TRUE;
3381 }
3382
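     /* OK: chunk consumed, more to come; DONE: complete bitmap received;
      * FAILED: protocol or decoding error, abort the transfer. */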
3383 enum receive_bitmap_ret { OK, DONE, FAILED };
3384
3385 static enum receive_bitmap_ret
3386 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3387                      unsigned long *buffer, struct bm_xfer_ctx *c)
3388 {
3389         unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3390         unsigned want = num_words * sizeof(long);
3391
3392         if (want != data_size) {
3393                 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
3394                 return FAILED;
3395         }
3396         if (want == 0)
3397                 return DONE;
3398         if (drbd_recv(mdev, buffer, want) != want)
3399                 return FAILED;
3400
3401         drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3402
3403         c->word_offset += num_words;
3404         c->bit_offset = c->word_offset * BITS_PER_LONG;
3405         if (c->bit_offset > c->bm_bits)
3406                 c->bit_offset = c->bm_bits;
3407
3408         return OK;
3409 }
3410
3411 static enum receive_bitmap_ret
3412 recv_bm_rle_bits(struct drbd_conf *mdev,
3413                 struct p_compressed_bm *p,
3414                 struct bm_xfer_ctx *c)
3415 {
3416         struct bitstream bs;
3417         u64 look_ahead;
3418         u64 rl;
3419         u64 tmp;
3420         unsigned long s = c->bit_offset;
3421         unsigned long e;
3422         int len = p->head.length - (sizeof(*p) - sizeof(p->head));
3423         int toggle = DCBP_get_start(p);
3424         int have;
3425         int bits;
3426
3427         bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3428
3429         bits = bitstream_get_bits(&bs, &look_ahead, 64);
3430         if (bits < 0)
3431                 return FAILED;
3432
3433         for (have = bits; have > 0; s += rl, toggle = !toggle) {
3434                 bits = vli_decode_bits(&rl, look_ahead);
3435                 if (bits <= 0)
3436                         return FAILED;
3437
3438                 if (toggle) {
3439                         e = s + rl -1;
3440                         if (e >= c->bm_bits) {
3441                                 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3442                                 return FAILED;
3443                         }
3444                         _drbd_bm_set_bits(mdev, s, e);
3445                 }
3446
3447                 if (have < bits) {
3448                         dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3449                                 have, bits, look_ahead,
3450                                 (unsigned int)(bs.cur.b - p->code),
3451                                 (unsigned int)bs.buf_len);
3452                         return FAILED;
3453                 }
3454                 look_ahead >>= bits;
3455                 have -= bits;
3456
3457                 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3458                 if (bits < 0)
3459                         return FAILED;
3460                 look_ahead |= tmp << have;
3461                 have += bits;
3462         }
3463
3464         c->bit_offset = s;
3465         bm_xfer_ctx_bit_to_word_offset(c);
3466
3467         return (s == c->bm_bits) ? DONE : OK;
3468 }
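     /*
      * Decoding example for the scheme above: with DCBP_get_start(p) == 0
      * and decoded run lengths 5, 3, 2, bits 0..4 stay clear, bits 5..7
      * are set via _drbd_bm_set_bits(), and bits 8..9 stay clear again --
      * runs alternate between clear and set, beginning with the start
      * toggle.
      */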
3469
3470 static enum receive_bitmap_ret
3471 decode_bitmap_c(struct drbd_conf *mdev,
3472                 struct p_compressed_bm *p,
3473                 struct bm_xfer_ctx *c)
3474 {
3475         if (DCBP_get_code(p) == RLE_VLI_Bits)
3476                 return recv_bm_rle_bits(mdev, p, c);
3477
3478         /* other variants had been implemented for evaluation,
3479          * but have been dropped as this one turned out to be "best"
3480          * during all our tests. */
3481
3482         dev_err(DEV, "decode_bitmap_c: unknown encoding %u\n", p->encoding);
3483         drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3484         return FAILED;
3485 }
3486
3487 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3488                 const char *direction, struct bm_xfer_ctx *c)
3489 {
3490         /* what would it take to transfer it "plaintext" */
3491         unsigned plain = sizeof(struct p_header80) *
3492                 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3493                 + c->bm_words * sizeof(long);
3494         unsigned total = c->bytes[0] + c->bytes[1];
3495         unsigned r;
3496
3497         /* total can not be zero. but just in case: */
3498         if (total == 0)
3499                 return;
3500
3501         /* don't report if not compressed */
3502         if (total >= plain)
3503                 return;
3504
3505         /* total < plain. check for overflow, still */
3506         r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3507                                     : (1000 * total / plain);
3508
3509         if (r > 1000)
3510                 r = 1000;
3511
3512         r = 1000 - r;
3513         dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3514              "total %u; compression: %u.%u%%\n",
3515                         direction,
3516                         c->bytes[1], c->packets[1],
3517                         c->bytes[0], c->packets[0],
3518                         total, r/10, r % 10);
3519 }
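     /*
      * Example: plain = 1200 bytes, total = 300 bytes actually received:
      * r = 1000 * 300 / 1200 = 250, then r = 1000 - 250 = 750, reported
      * as "compression: 75.0%".
      */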
3520
3521 /* Since we are processing the bitfield from lower addresses to higher,
3522    it does not matter if we process it in 32 bit chunks or 64 bit
3523    chunks, as long as it is little endian. (Understand it as a byte stream,
3524    beginning with the lowest byte...) If we used big endian,
3525    we would need to process it from the highest address to the lowest,
3526    in order to be agnostic to the 32 vs 64 bits issue.
3527
3528    returns 0 on failure, 1 if we successfully received it. */
3529 static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3530 {
3531         struct bm_xfer_ctx c;
3532         void *buffer;
3533         enum receive_bitmap_ret ret;
3534         int ok = FALSE;
3535         struct p_header80 *h = &mdev->data.rbuf.header.h80;
3536
3537         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3538
3539         drbd_bm_lock(mdev, "receive bitmap");
3540
3541         /* maybe we should use some per thread scratch page,
3542          * and allocate that during initial device creation? */
3543         buffer   = (unsigned long *) __get_free_page(GFP_NOIO);
3544         if (!buffer) {
3545                 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3546                 goto out;
3547         }
3548
3549         c = (struct bm_xfer_ctx) {
3550                 .bm_bits = drbd_bm_bits(mdev),
3551                 .bm_words = drbd_bm_words(mdev),
3552         };
3553
3554         do {
3555                 if (cmd == P_BITMAP) {
3556                         ret = receive_bitmap_plain(mdev, data_size, buffer, &c);
3557                 } else if (cmd == P_COMPRESSED_BITMAP) {
3558                         /* MAYBE: sanity check that we speak proto >= 90,
3559                          * and the feature is enabled! */
3560                         struct p_compressed_bm *p;
3561
3562                         if (data_size > BM_PACKET_PAYLOAD_BYTES) {
3563                                 dev_err(DEV, "ReportCBitmap packet too large\n");
3564                                 goto out;
3565                         }
3566                         /* use the page buff */
3567                         p = buffer;
3568                         memcpy(p, h, sizeof(*h));
3569                         if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
3570                                 goto out;
3571                         if (p->head.length <= (sizeof(*p) - sizeof(p->head))) {
3572                                 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", p->head.length);
3573                                 goto out; /* a plain return here would leak the page and the bm lock */
3574                         }
3575                         ret = decode_bitmap_c(mdev, p, &c);
3576                 } else {
3577                         dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", cmd);
3578                         goto out;
3579                 }
3580
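                     /* index 1 counts plain P_BITMAP packets, index 0 the
                      * compressed ones; INFO_bm_xfer_stats() reports them
                      * as "plain" and "RLE" respectively */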
3581                 c.packets[cmd == P_BITMAP]++;
3582                 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
3583
3584                 if (ret != OK)
3585                         break;
3586
3587                 if (!drbd_recv_header(mdev, &cmd, &data_size))
3588                         goto out;
3589         } while (ret == OK);
3590         if (ret == FAILED)
3591                 goto out;
3592
3593         INFO_bm_xfer_stats(mdev, "receive", &c);
3594
3595         if (mdev->state.conn == C_WF_BITMAP_T) {
3596                 ok = !drbd_send_bitmap(mdev);
3597                 if (!ok)
3598                         goto out;
3599                 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3600                 ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3601                 D_ASSERT(ok == SS_SUCCESS);
3602         } else if (mdev->state.conn != C_WF_BITMAP_S) {
3603                 /* admin may have requested C_DISCONNECTING,
3604                  * other threads may have noticed network errors */
3605                 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3606                     drbd_conn_str(mdev->state.conn));
3607         }
3608
3609         ok = TRUE;
3610  out:
3611         drbd_bm_unlock(mdev);
3612         if (ok && mdev->state.conn == C_WF_BITMAP_S)
3613                 drbd_start_resync(mdev, C_SYNC_SOURCE);
3614         free_page((unsigned long) buffer);
3615         return ok;
3616 }
3617
3618 static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3619 {
3620         /* TODO zero copy sink :) */
3621         static char sink[128];
3622         int size, want, r;
3623
3624         dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3625                  cmd, data_size);
3626
3627         size = data_size;
3628         while (size > 0) {
3629                 want = min_t(int, size, sizeof(sink));
3630                 r = drbd_recv(mdev, sink, want);
3631                 ERR_IF(r <= 0) break;
3632                 size -= r;
3633         }
3634         return size == 0;
3635 }
3636
3637 static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3638 {
3639         if (mdev->state.disk >= D_INCONSISTENT)
3640                 drbd_kick_lo(mdev);
3641
3642         /* Make sure we've acked all the TCP data associated
3643          * with the data requests being unplugged */
3644         drbd_tcp_quickack(mdev->data.socket);
3645
3646         return TRUE;
3647 }
3648
3649 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
3650
3651 struct data_cmd {
3652         int expect_payload;
3653         size_t pkt_size;
3654         drbd_cmd_handler_f function;
3655 };
3656
3657 static struct data_cmd drbd_cmd_handler[] = {
3658         [P_DATA]            = { 1, sizeof(struct p_data), receive_Data },
3659         [P_DATA_REPLY]      = { 1, sizeof(struct p_data), receive_DataReply },
3660         [P_RS_DATA_REPLY]   = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3661         [P_BARRIER]         = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3662         [P_BITMAP]          = { 1, sizeof(struct p_header80), receive_bitmap } ,
3663         [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3664         [P_UNPLUG_REMOTE]   = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3665         [P_DATA_REQUEST]    = { 0, sizeof(struct p_block_req), receive_DataRequest },
3666         [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3667         [P_SYNC_PARAM]      = { 1, sizeof(struct p_header80), receive_SyncParam },
3668         [P_SYNC_PARAM89]    = { 1, sizeof(struct p_header80), receive_SyncParam },
3669         [P_PROTOCOL]        = { 1, sizeof(struct p_protocol), receive_protocol },
3670         [P_UUIDS]           = { 0, sizeof(struct p_uuids), receive_uuids },
3671         [P_SIZES]           = { 0, sizeof(struct p_sizes), receive_sizes },
3672         [P_STATE]           = { 0, sizeof(struct p_state), receive_state },
3673         [P_STATE_CHG_REQ]   = { 0, sizeof(struct p_req_state), receive_req_state },
3674         [P_SYNC_UUID]       = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3675         [P_OV_REQUEST]      = { 0, sizeof(struct p_block_req), receive_DataRequest },
3676         [P_OV_REPLY]        = { 1, sizeof(struct p_block_req), receive_DataRequest },
3677         [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3678         [P_DELAY_PROBE]     = { 0, sizeof(struct p_delay_probe93), receive_skip },
3679         /* anything missing from this table is in
3680          * the asender_tbl, see get_asender_cmd */
3681         [P_MAX_CMD]         = { 0, 0, NULL },
3682 };
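     /* expect_payload == 1 allows payload bytes beyond the fixed sub-header;
      * drbdd() below treats unexpected trailing payload as a protocol error. */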
3683
3684 /* All handler functions that expect a sub-header get that sub-header in
3685    mdev->data.rbuf.header.head.payload.
3686
3687    Usually in mdev->data.rbuf.header.head the callback can find the usual
3688    p_header, but it may not rely on that, since there is also p_header95!
3689  */
3690
3691 static void drbdd(struct drbd_conf *mdev)
3692 {
3693         union p_header *header = &mdev->data.rbuf.header;
3694         unsigned int packet_size;
3695         enum drbd_packets cmd;
3696         size_t shs; /* sub header size */
3697         int rv;
3698
3699         while (get_t_state(&mdev->receiver) == Running) {
3700                 drbd_thread_current_set_cpu(mdev);
3701                 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3702                         goto err_out;
3703
3704                 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3705                         dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3706                         goto err_out;
3707                 }
3708
3709                 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
3710                 rv = drbd_recv(mdev, &header->h80.payload, shs);
3711                 if (unlikely(rv != shs)) {
3712                         dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
3713                         goto err_out;
3714                 }
3715
3716                 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3717                         dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3718                         goto err_out;
3719                 }
3720
3721                 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3722
3723                 if (unlikely(!rv)) {
3724                         dev_err(DEV, "error receiving %s, l: %d!\n",
3725                             cmdname(cmd), packet_size);
3726                         goto err_out;
3727                 }
3728         }
3729
3730         if (0) {
3731         err_out:
3732                 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3733         }
3734 }
3735
3736 void drbd_flush_workqueue(struct drbd_conf *mdev)
3737 {
3738         struct drbd_wq_barrier barr;
3739
3740         barr.w.cb = w_prev_work_done;
3741         init_completion(&barr.done);
3742         drbd_queue_work(&mdev->data.work, &barr.w);
3743         wait_for_completion(&barr.done);
3744 }
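     /*
      * This relies on the worker processing mdev->data.work in FIFO order:
      * by the time w_prev_work_done completes barr.done, every work item
      * queued before the barrier has been handled.
      */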
3745
3746 void drbd_free_tl_hash(struct drbd_conf *mdev)
3747 {
3748         struct hlist_head *h;
3749
3750         spin_lock_irq(&mdev->req_lock);
3751
3752         if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
3753                 spin_unlock_irq(&mdev->req_lock);
3754                 return;
3755         }
3756         /* paranoia code */
3757         for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3758                 if (h->first)
3759                         dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3760                                 (int)(h - mdev->ee_hash), h->first);
3761         kfree(mdev->ee_hash);
3762         mdev->ee_hash = NULL;
3763         mdev->ee_hash_s = 0;
3764
3765         /* paranoia code */
3766         for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3767                 if (h->first)
3768                         dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3769                                 (int)(h - mdev->tl_hash), h->first);
3770         kfree(mdev->tl_hash);
3771         mdev->tl_hash = NULL;
3772         mdev->tl_hash_s = 0;
3773         spin_unlock_irq(&mdev->req_lock);
3774 }
3775
3776 static void drbd_disconnect(struct drbd_conf *mdev)
3777 {
3778         enum drbd_fencing_p fp;
3779         union drbd_state os, ns;
3780         int rv = SS_UNKNOWN_ERROR;
3781         unsigned int i;
3782
3783         if (mdev->state.conn == C_STANDALONE)
3784                 return;
3785         if (mdev->state.conn >= C_WF_CONNECTION)
3786                 dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
3787                                 drbd_conn_str(mdev->state.conn));
3788
3789         /* asender does not clean up anything. it must not interfere, either */
3790         drbd_thread_stop(&mdev->asender);
3791         drbd_free_sock(mdev);
3792
3793         /* wait for current activity to cease. */
3794         spin_lock_irq(&mdev->req_lock);
3795         _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3796         _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3797         _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3798         spin_unlock_irq(&mdev->req_lock);
3799
3800         /* We do not have data structures that would allow us to
3801          * get the rs_pending_cnt down to 0 again.
3802          *  * On C_SYNC_TARGET we do not have any data structures describing
3803          *    the pending RSDataRequest's we have sent.
3804          *  * On C_SYNC_SOURCE there is no data structure that tracks
3805          *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3806          *  And no, it is not the sum of the reference counts in the
3807          *  resync_LRU. The resync_LRU tracks the whole operation including
3808          *  the disk-IO, while the rs_pending_cnt only tracks the blocks
3809          *  on the fly. */
3810         drbd_rs_cancel_all(mdev);
3811         mdev->rs_total = 0;
3812         mdev->rs_failed = 0;
3813         atomic_set(&mdev->rs_pending_cnt, 0);
3814         wake_up(&mdev->misc_wait);
3815
3816         /* make sure syncer is stopped and w_resume_next_sg queued */
3817         del_timer_sync(&mdev->resync_timer);
3818         resync_timer_fn((unsigned long)mdev);
3819
3820         /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3821          * w_make_resync_request etc. which may still be on the worker queue
3822          * to be "canceled" */
3823         drbd_flush_workqueue(mdev);
3824
3825         /* This also does reclaim_net_ee().  If we do this too early, we might
3826          * miss some resync ee and pages.*/
3827         drbd_process_done_ee(mdev);
3828
3829         kfree(mdev->p_uuid);
3830         mdev->p_uuid = NULL;
3831
3832         if (!is_susp(mdev->state))
3833                 tl_clear(mdev);
3834
3835         dev_info(DEV, "Connection closed\n");
3836
3837         drbd_md_sync(mdev);
3838
3839         fp = FP_DONT_CARE;
3840         if (get_ldev(mdev)) {
3841                 fp = mdev->ldev->dc.fencing;
3842                 put_ldev(mdev);
3843         }
3844
3845         if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3846                 drbd_try_outdate_peer_async(mdev);
3847
3848         spin_lock_irq(&mdev->req_lock);
3849         os = mdev->state;
3850         if (os.conn >= C_UNCONNECTED) {
3851                 /* Do not restart in case we are C_DISCONNECTING */
3852                 ns = os;
3853                 ns.conn = C_UNCONNECTED;
3854                 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3855         }
3856         spin_unlock_irq(&mdev->req_lock);
3857
3858         if (os.conn == C_DISCONNECTING) {
3859                 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
3860
3861                 if (!is_susp(mdev->state)) {
3862                         /* we must not free the tl_hash
3863                          * while application io is still on the fly */
3864                         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3865                         drbd_free_tl_hash(mdev);
3866                 }
3867
3868                 crypto_free_hash(mdev->cram_hmac_tfm);
3869                 mdev->cram_hmac_tfm = NULL;
3870
3871                 kfree(mdev->net_conf);
3872                 mdev->net_conf = NULL;
3873                 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3874         }
3875
3876         /* tcp_close and release of sendpage pages can be deferred.  I don't
3877          * want to use SO_LINGER, because apparently it can be deferred for
3878          * more than 20 seconds (longest time I checked).
3879          *
3880          * Actually we don't care for exactly when the network stack does its
3881          * put_page(), but release our reference on these pages right here.
3882          */
3883         i = drbd_release_ee(mdev, &mdev->net_ee);
3884         if (i)
3885                 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
3886         i = atomic_read(&mdev->pp_in_use_by_net);
3887         if (i)
3888                 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
3889         i = atomic_read(&mdev->pp_in_use);
3890         if (i)
3891                 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
3892
3893         D_ASSERT(list_empty(&mdev->read_ee));
3894         D_ASSERT(list_empty(&mdev->active_ee));
3895         D_ASSERT(list_empty(&mdev->sync_ee));
3896         D_ASSERT(list_empty(&mdev->done_ee));
3897
3898         /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3899         atomic_set(&mdev->current_epoch->epoch_size, 0);
3900         D_ASSERT(list_empty(&mdev->current_epoch->list));
3901 }
3902
3903 /*
3904  * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3905  * we can agree on is stored in agreed_pro_version.
3906  *
3907  * feature flags and the reserved array should be enough room for future
3908  * enhancements of the handshake protocol, and possible plugins...
3909  *
3910  * for now, they are expected to be zero, but ignored.
3911  */
3912 static int drbd_send_handshake(struct drbd_conf *mdev)
3913 {
3914         /* ASSERT current == mdev->receiver ... */
3915         struct p_handshake *p = &mdev->data.sbuf.handshake;
3916         int ok;
3917
3918         if (mutex_lock_interruptible(&mdev->data.mutex)) {
3919                 dev_err(DEV, "interrupted during initial handshake\n");
3920                 return 0; /* interrupted. not ok. */
3921         }
3922
3923         if (mdev->data.socket == NULL) {
3924                 mutex_unlock(&mdev->data.mutex);
3925                 return 0;
3926         }
3927
3928         memset(p, 0, sizeof(*p));
3929         p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3930         p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3931         ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
3932                              (struct p_header80 *)p, sizeof(*p), 0 );
3933         mutex_unlock(&mdev->data.mutex);
3934         return ok;
3935 }
3936
3937 /*
3938  * return values:
3939  *   1 yes, we have a valid connection
3940  *   0 oops, did not work out, please try again
3941  *  -1 peer talks different language,
3942  *     no point in trying again, please go standalone.
3943  */
3944 static int drbd_do_handshake(struct drbd_conf *mdev)
3945 {
3946         /* ASSERT current == mdev->receiver ... */
3947         struct p_handshake *p = &mdev->data.rbuf.handshake;
3948         const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
3949         unsigned int length;
3950         enum drbd_packets cmd;
3951         int rv;
3952
3953         rv = drbd_send_handshake(mdev);
3954         if (!rv)
3955                 return 0;
3956
3957         rv = drbd_recv_header(mdev, &cmd, &length);
3958         if (!rv)
3959                 return 0;
3960
3961         if (cmd != P_HAND_SHAKE) {
3962                 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
3963                      cmdname(cmd), cmd);
3964                 return -1;
3965         }
3966
3967         if (length != expect) {
3968                 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
3969                      expect, length);
3970                 return -1;
3971         }
3972
3973         rv = drbd_recv(mdev, &p->head.payload, expect);
3974
3975         if (rv != expect) {
3976                 dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
3977                 return 0;
3978         }
3979
3980         p->protocol_min = be32_to_cpu(p->protocol_min);
3981         p->protocol_max = be32_to_cpu(p->protocol_max);
3982         if (p->protocol_max == 0)
3983                 p->protocol_max = p->protocol_min;
3984
3985         if (PRO_VERSION_MAX < p->protocol_min ||
3986             PRO_VERSION_MIN > p->protocol_max)
3987                 goto incompat;
3988
3989         mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3990
3991         dev_info(DEV, "Handshake successful: "
3992              "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3993
3994         return 1;
3995
3996  incompat:
3997         dev_err(DEV, "incompatible DRBD dialects: "
3998             "I support %d-%d, peer supports %d-%d\n",
3999             PRO_VERSION_MIN, PRO_VERSION_MAX,
4000             p->protocol_min, p->protocol_max);
4001         return -1;
4002 }
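     /*
      * Negotiation example (illustrative numbers): if we support protocol
      * versions 86..95 and the peer reports min=87, max=91, the ranges
      * overlap and agreed_pro_version = min(95, 91) = 91.  Disjoint
      * ranges take the "incompat" path above.
      */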
4003
4004 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4005 static int drbd_do_auth(struct drbd_conf *mdev)
4006 {
4007         dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4008         dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
4009         return -1;
4010 }
4011 #else
#define CHALLENGE_LEN 64

/* Return value:
        1 - auth succeeded,
        0 - failed, try again (network error),
        -1 - auth failed, don't try again.
*/

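/* Outline of the CRAM-HMAC exchange as implemented below (both peers run
 * the same code symmetrically): key the tfm with the shared secret, send
 * our random challenge, receive the peer's challenge, send back
 * HMAC(secret, peer's challenge), receive the peer's response, and accept
 * only if it equals HMAC(secret, our challenge) computed locally. */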
static int drbd_do_auth(struct drbd_conf *mdev)
{
        char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
        struct scatterlist sg;
        char *response = NULL;
        char *right_response = NULL;
        char *peers_ch = NULL;
        unsigned int key_len = strlen(mdev->net_conf->shared_secret);
        unsigned int resp_size;
        struct hash_desc desc;
        enum drbd_packets cmd;
        unsigned int length;
        int rv;

        desc.tfm = mdev->cram_hmac_tfm;
        desc.flags = 0;

        rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
                                (u8 *)mdev->net_conf->shared_secret, key_len);
        if (rv) {
                dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
                rv = -1;
                goto fail;
        }

        get_random_bytes(my_challenge, CHALLENGE_LEN);

        rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
        if (!rv)
                goto fail;

        rv = drbd_recv_header(mdev, &cmd, &length);
        if (!rv)
                goto fail;

        if (cmd != P_AUTH_CHALLENGE) {
                dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
                    cmdname(cmd), cmd);
                rv = 0;
                goto fail;
        }

        if (length > CHALLENGE_LEN * 2) {
                dev_err(DEV, "AuthChallenge payload too big.\n");
                rv = -1;
                goto fail;
        }

        peers_ch = kmalloc(length, GFP_NOIO);
        if (peers_ch == NULL) {
                dev_err(DEV, "kmalloc of peers_ch failed\n");
                rv = -1;
                goto fail;
        }

        rv = drbd_recv(mdev, peers_ch, length);

        if (rv != length) {
                dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
                rv = 0;
                goto fail;
        }

        resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
        response = kmalloc(resp_size, GFP_NOIO);
        if (response == NULL) {
                dev_err(DEV, "kmalloc of response failed\n");
                rv = -1;
                goto fail;
        }

        sg_init_table(&sg, 1);
        sg_set_buf(&sg, peers_ch, length);

        rv = crypto_hash_digest(&desc, &sg, sg.length, response);
        if (rv) {
                dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
                rv = -1;
                goto fail;
        }

        rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
        if (!rv)
                goto fail;

        rv = drbd_recv_header(mdev, &cmd, &length);
        if (!rv)
                goto fail;

        if (cmd != P_AUTH_RESPONSE) {
                dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
                        cmdname(cmd), cmd);
                rv = 0;
                goto fail;
        }

        if (length != resp_size) {
                dev_err(DEV, "AuthResponse payload of wrong size\n");
                rv = 0;
                goto fail;
        }

        rv = drbd_recv(mdev, response, resp_size);

        if (rv != resp_size) {
                dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
                rv = 0;
                goto fail;
        }

        right_response = kmalloc(resp_size, GFP_NOIO);
        if (right_response == NULL) {
                dev_err(DEV, "kmalloc of right_response failed\n");
                rv = -1;
                goto fail;
        }

        sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

        rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
        if (rv) {
                dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
                rv = -1;
                goto fail;
        }

        rv = !memcmp(response, right_response, resp_size);

        if (rv)
                dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
                     resp_size, mdev->net_conf->cram_hmac_alg);
        else
                rv = -1;

 fail:
        kfree(peers_ch);
        kfree(response);
        kfree(right_response);

        return rv;
}
#endif

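/* Receiver thread main entry.  Note the retry semantics of drbd_connect()
 * below: 0 means "try again" (we tear down and sleep for a second before
 * the next attempt), -1 means "give up and drop the network config", and
 * > 0 means we are connected and enter the main receive loop, drbdd(). */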
int drbdd_init(struct drbd_thread *thi)
{
        struct drbd_conf *mdev = thi->mdev;
        unsigned int minor = mdev_to_minor(mdev);
        int h;

        sprintf(current->comm, "drbd%d_receiver", minor);

        dev_info(DEV, "receiver (re)started\n");

        do {
                h = drbd_connect(mdev);
                if (h == 0) {
                        drbd_disconnect(mdev);
                        __set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(HZ);
                }
                if (h == -1) {
                        dev_warn(DEV, "Discarding network configuration.\n");
                        drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
                }
        } while (h == 0);

        if (h > 0) {
                if (get_net_conf(mdev)) {
                        drbdd(mdev);
                        put_net_conf(mdev);
                }
        }

        drbd_disconnect(mdev);

        dev_info(DEV, "receiver terminated\n");
        return 0;
}

/* ********* acknowledge sender ******** */

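/* The handlers below run in the asender thread; each one processes a
 * single packet received on the meta socket.  They return TRUE to keep
 * the asender loop going and FALSE to force a reconnect, see the
 * cmd->process() call in drbd_asender(). */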
static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
{
        struct p_req_state_reply *p = (struct p_req_state_reply *)h;

        int retcode = be32_to_cpu(p->retcode);

        if (retcode >= SS_SUCCESS) {
                set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
        } else {
                set_bit(CL_ST_CHG_FAIL, &mdev->flags);
                dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
                    drbd_set_st_err_str(retcode), retcode);
        }
        wake_up(&mdev->state_wait);

        return TRUE;
}

static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
{
        return drbd_send_ping_ack(mdev);
}

static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
{
        /* restore idle timeout */
        mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
        if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
                wake_up(&mdev->misc_wait);

        return TRUE;
}
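/* Timeout interplay: while a ping is outstanding, drbd_asender() shrinks
 * sk_rcvtimeo to ping_timeo (configured in tenths of a second); receiving
 * the ack above relaxes it back to the regular ping_int (seconds) idle
 * timeout.  If the short timeout expires without an ack, the asender
 * declares the peer dead and goes through reconnect. */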

static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
{
        struct p_block_ack *p = (struct p_block_ack *)h;
        sector_t sector = be64_to_cpu(p->sector);
        int blksize = be32_to_cpu(p->blksize);

        D_ASSERT(mdev->agreed_pro_version >= 89);

        update_peer_seq(mdev, be32_to_cpu(p->seq_num));

        if (get_ldev(mdev)) {
                drbd_rs_complete_io(mdev, sector);
                drbd_set_in_sync(mdev, sector, blksize);
                /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
                mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
                put_ldev(mdev);
        }
        dec_rs_pending(mdev);
        atomic_add(blksize >> 9, &mdev->rs_sect_in);

        return TRUE;
}
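/* Units, for the record: rs_sect_in counts 512-byte sectors (blksize >> 9),
 * while rs_same_csum counts bitmap blocks.  Assuming the usual 4 KiB
 * BM_BLOCK_SIZE (BM_BLOCK_SHIFT == 12), a 4096-byte ack adds 8 sectors and
 * one bitmap block. */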

/* when we receive the ACK for a write request,
 * verify that we actually know about it */
static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
        u64 id, sector_t sector)
{
        struct hlist_head *slot = tl_hash_slot(mdev, sector);
        struct hlist_node *n;
        struct drbd_request *req;

        hlist_for_each_entry(req, n, slot, colision) {
                if ((unsigned long)req == (unsigned long)id) {
                        if (req->sector != sector) {
                                dev_err(DEV, "_ack_id_to_req: found req %p but it has "
                                    "wrong sector (%llus versus %llus)\n", req,
                                    (unsigned long long)req->sector,
                                    (unsigned long long)sector);
                                break;
                        }
                        return req;
                }
        }
        dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
                (void *)(unsigned long)id, (unsigned long long)sector);
        return NULL;
}
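/* The block_id we put on the wire is simply the kernel address of the
 * drbd_request, echoed back by the peer; that is why the lookup above can
 * compare the id against the request pointer itself, with the sector as a
 * sanity check against stale or corrupted ids. */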

typedef struct drbd_request *(req_validator_fn)
        (struct drbd_conf *mdev, u64 id, sector_t sector);

static int validate_req_change_req_state(struct drbd_conf *mdev,
        u64 id, sector_t sector, req_validator_fn validator,
        const char *func, enum drbd_req_event what)
{
        struct drbd_request *req;
        struct bio_and_error m;

        spin_lock_irq(&mdev->req_lock);
        req = validator(mdev, id, sector);
        if (unlikely(!req)) {
                spin_unlock_irq(&mdev->req_lock);
                dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
                return FALSE;
        }
        __req_mod(req, what, &m);
        spin_unlock_irq(&mdev->req_lock);

        if (m.bio)
                complete_master_bio(mdev, &m);
        return TRUE;
}
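/* Lookup and state transition happen under one req_lock critical section,
 * so the request cannot be completed and freed between validator() and
 * __req_mod().  Completing the master bio, if __req_mod() handed one back,
 * is deliberately done after the lock is dropped. */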

static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
{
        struct p_block_ack *p = (struct p_block_ack *)h;
        sector_t sector = be64_to_cpu(p->sector);
        int blksize = be32_to_cpu(p->blksize);
        enum drbd_req_event what;

        update_peer_seq(mdev, be32_to_cpu(p->seq_num));

        if (is_syncer_block_id(p->block_id)) {
                drbd_set_in_sync(mdev, sector, blksize);
                dec_rs_pending(mdev);
                return TRUE;
        }
        switch (be16_to_cpu(h->command)) {
        case P_RS_WRITE_ACK:
                D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
                what = write_acked_by_peer_and_sis;
                break;
        case P_WRITE_ACK:
                D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
                what = write_acked_by_peer;
                break;
        case P_RECV_ACK:
                D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
                what = recv_acked_by_peer;
                break;
        case P_DISCARD_ACK:
                D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
                what = conflict_discarded_by_peer;
                break;
        default:
                D_ASSERT(0);
                return FALSE;
        }

        return validate_req_change_req_state(mdev, p->block_id, sector,
                _ack_id_to_req, __func__, what);
}

static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
{
        struct p_block_ack *p = (struct p_block_ack *)h;
        sector_t sector = be64_to_cpu(p->sector);

        if (__ratelimit(&drbd_ratelimit_state))
                dev_warn(DEV, "Got NegAck packet. Peer is in trouble?\n");

        update_peer_seq(mdev, be32_to_cpu(p->seq_num));

        if (is_syncer_block_id(p->block_id)) {
                int size = be32_to_cpu(p->blksize);
                dec_rs_pending(mdev);
                drbd_rs_failed_io(mdev, sector, size);
                return TRUE;
        }
        return validate_req_change_req_state(mdev, p->block_id, sector,
                _ack_id_to_req, __func__, neg_acked);
}

static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
        struct p_block_ack *p = (struct p_block_ack *)h;
        sector_t sector = be64_to_cpu(p->sector);

        update_peer_seq(mdev, be32_to_cpu(p->seq_num));
        dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
            (unsigned long long)sector, be32_to_cpu(p->blksize));

        return validate_req_change_req_state(mdev, p->block_id, sector,
                _ar_id_to_req, __func__, neg_acked);
}

static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
        sector_t sector;
        int size;
        struct p_block_ack *p = (struct p_block_ack *)h;

        sector = be64_to_cpu(p->sector);
        size = be32_to_cpu(p->blksize);

        update_peer_seq(mdev, be32_to_cpu(p->seq_num));

        dec_rs_pending(mdev);

        if (get_ldev_if_state(mdev, D_FAILED)) {
                drbd_rs_complete_io(mdev, sector);
                drbd_rs_failed_io(mdev, sector, size);
                put_ldev(mdev);
        }

        return TRUE;
}

static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
{
        struct p_barrier_ack *p = (struct p_barrier_ack *)h;

        tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));

        return TRUE;
}
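/* A barrier ack confirms the peer has finished all writes of the given
 * epoch (barrier number); tl_release() then walks the transfer log and
 * cleans up the requests of that epoch, sanity-checking set_size against
 * the number of requests it finds there. */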

static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
{
        struct p_block_ack *p = (struct p_block_ack *)h;
        struct drbd_work *w;
        sector_t sector;
        int size;

        sector = be64_to_cpu(p->sector);
        size = be32_to_cpu(p->blksize);

        update_peer_seq(mdev, be32_to_cpu(p->seq_num));

        if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
                drbd_ov_oos_found(mdev, sector, size);
        else
                ov_oos_print(mdev);

        if (!get_ldev(mdev))
                return TRUE;

        drbd_rs_complete_io(mdev, sector);
        dec_rs_pending(mdev);

        if (--mdev->ov_left == 0) {
                w = kmalloc(sizeof(*w), GFP_NOIO);
                if (w) {
                        w->cb = w_ov_finished;
                        drbd_queue_work_front(&mdev->data.work, w);
                } else {
                        dev_err(DEV, "kmalloc(w) failed.\n");
                        ov_oos_print(mdev);
                        drbd_resync_finished(mdev);
                }
        }
        put_ldev(mdev);
        return TRUE;
}

static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
        return TRUE;
}

struct asender_cmd {
        size_t pkt_size;
        int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
};

static struct asender_cmd *get_asender_cmd(int cmd)
{
        static struct asender_cmd asender_tbl[] = {
                /* anything missing from this table is in
                 * the drbd_cmd_handler (drbd_default_handler) table,
                 * see the beginning of drbdd() */
        [P_PING]            = { sizeof(struct p_header80), got_Ping },
        [P_PING_ACK]        = { sizeof(struct p_header80), got_PingAck },
        [P_RECV_ACK]        = { sizeof(struct p_block_ack), got_BlockAck },
        [P_WRITE_ACK]       = { sizeof(struct p_block_ack), got_BlockAck },
        [P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
        [P_DISCARD_ACK]     = { sizeof(struct p_block_ack), got_BlockAck },
        [P_NEG_ACK]         = { sizeof(struct p_block_ack), got_NegAck },
        [P_NEG_DREPLY]      = { sizeof(struct p_block_ack), got_NegDReply },
        [P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
        [P_OV_RESULT]       = { sizeof(struct p_block_ack), got_OVResult },
        [P_BARRIER_ACK]     = { sizeof(struct p_barrier_ack), got_BarrierAck },
        [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
        [P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
        [P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
        [P_MAX_CMD]         = { 0, NULL },
        };
        if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
                return NULL;
        return &asender_tbl[cmd];
}
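/* Table-driven dispatch: drbd_asender() first reads a full p_header80,
 * looks the command up here, and uses pkt_size to learn how many more
 * bytes to expect before calling ->process().  E.g. for P_WRITE_ACK it
 * keeps receiving into the same buffer until sizeof(struct p_block_ack)
 * bytes in total have arrived. */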

int drbd_asender(struct drbd_thread *thi)
{
        struct drbd_conf *mdev = thi->mdev;
        struct p_header80 *h = &mdev->meta.rbuf.header.h80;
        struct asender_cmd *cmd = NULL;

        int rv, len;
        void *buf    = h;
        int received = 0;
        int expect   = sizeof(struct p_header80);
        int empty;

        sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

        current->policy = SCHED_RR;  /* Make this a realtime task! */
        current->rt_priority = 2;    /* more important than all other tasks */

        while (get_t_state(thi) == Running) {
                drbd_thread_current_set_cpu(mdev);
                if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
                        ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
                        mdev->meta.socket->sk->sk_rcvtimeo =
                                mdev->net_conf->ping_timeo*HZ/10;
                }

                /* conditionally cork;
                 * it may hurt latency if we cork without much to send */
                if (!mdev->net_conf->no_cork &&
                        3 < atomic_read(&mdev->unacked_cnt))
                        drbd_tcp_cork(mdev->meta.socket);
                while (1) {
                        clear_bit(SIGNAL_ASENDER, &mdev->flags);
                        flush_signals(current);
                        if (!drbd_process_done_ee(mdev)) {
                                dev_err(DEV, "process_done_ee() = NOT_OK\n");
                                goto reconnect;
                        }
                        /* to avoid race with newly queued ACKs */
                        set_bit(SIGNAL_ASENDER, &mdev->flags);
                        spin_lock_irq(&mdev->req_lock);
                        empty = list_empty(&mdev->done_ee);
                        spin_unlock_irq(&mdev->req_lock);
                        /* new ack may have been queued right here,
                         * but then there is also a signal pending,
                         * and we start over... */
                        if (empty)
                                break;
                }
                /* but unconditionally uncork unless disabled */
                if (!mdev->net_conf->no_cork)
                        drbd_tcp_uncork(mdev->meta.socket);

                /* short circuit, recv_msg would return EINTR anyway. */
                if (signal_pending(current))
                        continue;

                rv = drbd_recv_short(mdev, mdev->meta.socket,
                                     buf, expect-received, 0);
                clear_bit(SIGNAL_ASENDER, &mdev->flags);

                flush_signals(current);

                /* Note:
                 * -EINTR        (on meta) we got a signal
                 * -EAGAIN       (on meta) rcvtimeo expired
                 * -ECONNRESET   other side closed the connection
                 * -ERESTARTSYS  (on data) we got a signal
                 * rv <  0       other than above: unexpected error!
                 * rv == expected: full header or command
                 * rv <  expected: "woken" by signal during receive
                 * rv == 0       : "connection shut down by peer"
                 */
                if (likely(rv > 0)) {
                        received += rv;
                        buf      += rv;
                } else if (rv == 0) {
                        dev_err(DEV, "meta connection shut down by peer.\n");
                        goto reconnect;
                } else if (rv == -EAGAIN) {
                        if (mdev->meta.socket->sk->sk_rcvtimeo ==
                            mdev->net_conf->ping_timeo*HZ/10) {
                                dev_err(DEV, "PingAck did not arrive in time.\n");
                                goto reconnect;
                        }
                        set_bit(SEND_PING, &mdev->flags);
                        continue;
                } else if (rv == -EINTR) {
                        continue;
                } else {
                        dev_err(DEV, "sock_recvmsg returned %d\n", rv);
                        goto reconnect;
                }

                if (received == expect && cmd == NULL) {
                        if (unlikely(h->magic != BE_DRBD_MAGIC)) {
                                dev_err(DEV, "magic?? on meta m: 0x%lx c: %d l: %d\n",
                                    (long)be32_to_cpu(h->magic),
                                    h->command, h->length);
                                goto reconnect;
                        }
                        cmd = get_asender_cmd(be16_to_cpu(h->command));
                        len = be16_to_cpu(h->length);
                        if (unlikely(cmd == NULL)) {
                                dev_err(DEV, "unknown command?? on meta m: 0x%lx c: %d l: %d\n",
                                    (long)be32_to_cpu(h->magic),
                                    h->command, h->length);
                                goto disconnect;
                        }
                        expect = cmd->pkt_size;
                        ERR_IF(len != expect-sizeof(struct p_header80))
                                goto reconnect;
                }
                if (received == expect) {
                        D_ASSERT(cmd != NULL);
                        if (!cmd->process(mdev, h))
                                goto reconnect;

                        buf      = h;
                        received = 0;
                        expect   = sizeof(struct p_header80);
                        cmd      = NULL;
                }
        }

        if (0) {
reconnect:
                drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
        }
        if (0) {
disconnect:
                drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
        }
        clear_bit(SIGNAL_ASENDER, &mdev->flags);

        D_ASSERT(mdev->state.conn < C_CONNECTED);
        dev_info(DEV, "asender terminated\n");

        return 0;
}