[pandora-kernel.git] / drivers / block / drbd / drbd_worker.c
1 /*
2    drbd_worker.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    drbd is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; either version 2, or (at your option)
13    any later version.
14
15    drbd is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License for more details.
19
20    You should have received a copy of the GNU General Public License
21    along with drbd; see the file COPYING.  If not, write to
22    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24  */
25
26 #include <linux/module.h>
27 #include <linux/drbd.h>
28 #include <linux/sched.h>
29 #include <linux/wait.h>
30 #include <linux/mm.h>
31 #include <linux/memcontrol.h>
32 #include <linux/mm_inline.h>
33 #include <linux/slab.h>
34 #include <linux/random.h>
35 #include <linux/string.h>
36 #include <linux/scatterlist.h>
37
38 #include "drbd_int.h"
39 #include "drbd_req.h"
40
41 static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
42
43
44
45 /* defined here:
46    drbd_md_io_complete
47    drbd_endio_sec
48    drbd_endio_pri
49
50  * more endio handlers:
51    atodb_endio in drbd_actlog.c
52    drbd_bm_async_io_complete in drbd_bitmap.c
53
54  * For all these callbacks, note the following:
55  * The callbacks will be called in irq context by the IDE drivers,
56  * and in Softirqs/Tasklets/BH context by the SCSI drivers.
57  * Try to get the locking right :)
58  *
59  */
60
61
62 /* About the global_state_lock
63    Each state transition on a device holds a read lock. In case we have
64    to evaluate the sync after dependencies, we grab a write lock, because
65    we need stable states on all devices for that.  */
66 rwlock_t global_state_lock;
67
68 /* used for synchronous meta data and bitmap IO
69  * submitted by drbd_md_sync_page_io()
70  */
71 void drbd_md_io_complete(struct bio *bio, int error)
72 {
73         struct drbd_md_io *md_io;
74
75         md_io = (struct drbd_md_io *)bio->bi_private;
76         md_io->error = error;
77
78         complete(&md_io->event);
79 }
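/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the submitter points bio->bi_private at a struct drbd_md_io and then
 * blocks on the completion, roughly like this:
 *
 *	struct drbd_md_io md_io;
 *	init_completion(&md_io.event);
 *	bio->bi_private = &md_io;
 *	bio->bi_end_io  = drbd_md_io_complete;
 *	submit_bio(rw, bio);
 *	wait_for_completion(&md_io.event);
 *	\/\* md_io.error now holds the result set above \*\/
 *
 * The exact details live in drbd_md_sync_page_io(); the sketch only
 * illustrates the synchronous pattern this endio handler completes.
 */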
80
81 /* reads on behalf of the partner,
82  * "submitted" by the receiver
83  */
84 void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
85 {
86         unsigned long flags = 0;
87         struct drbd_conf *mdev = e->mdev;
88
89         D_ASSERT(e->block_id != ID_VACANT);
90
91         spin_lock_irqsave(&mdev->req_lock, flags);
92         mdev->read_cnt += e->size >> 9;
93         list_del(&e->w.list);
94         if (list_empty(&mdev->read_ee))
95                 wake_up(&mdev->ee_wait);
96         if (test_bit(__EE_WAS_ERROR, &e->flags))
97                 __drbd_chk_io_error(mdev, FALSE);
98         spin_unlock_irqrestore(&mdev->req_lock, flags);
99
100         drbd_queue_work(&mdev->data.work, &e->w);
101         put_ldev(mdev);
102 }
103
104 /* writes on behalf of the partner, or resync writes,
105  * "submitted" by the receiver, final stage.  */
106 static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
107 {
108         unsigned long flags = 0;
109         struct drbd_conf *mdev = e->mdev;
110         sector_t e_sector;
111         int do_wake;
112         int is_syncer_req;
113         int do_al_complete_io;
114
115         D_ASSERT(e->block_id != ID_VACANT);
116
117         /* after we moved e to done_ee,
118          * we may no longer access it,
119          * it may be freed/reused already!
120          * (as soon as we release the req_lock) */
121         e_sector = e->sector;
122         do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
123         is_syncer_req = is_syncer_block_id(e->block_id);
124
125         spin_lock_irqsave(&mdev->req_lock, flags);
126         mdev->writ_cnt += e->size >> 9;
127         list_del(&e->w.list); /* has been on active_ee or sync_ee */
128         list_add_tail(&e->w.list, &mdev->done_ee);
129
130         /* No hlist_del_init(&e->colision) here, we did not send the Ack yet,
131          * neither did we wake possibly waiting conflicting requests.
132          * done from "drbd_process_done_ee" within the appropriate w.cb
133          * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */
134
135         do_wake = is_syncer_req
136                 ? list_empty(&mdev->sync_ee)
137                 : list_empty(&mdev->active_ee);
138
139         if (test_bit(__EE_WAS_ERROR, &e->flags))
140                 __drbd_chk_io_error(mdev, FALSE);
141         spin_unlock_irqrestore(&mdev->req_lock, flags);
142
143         if (is_syncer_req)
144                 drbd_rs_complete_io(mdev, e_sector);
145
146         if (do_wake)
147                 wake_up(&mdev->ee_wait);
148
149         if (do_al_complete_io)
150                 drbd_al_complete_io(mdev, e_sector);
151
152         wake_asender(mdev);
153         put_ldev(mdev);
154 }
155
156 /* writes on behalf of the partner, or resync writes,
157  * "submitted" by the receiver.
158  */
159 void drbd_endio_sec(struct bio *bio, int error)
160 {
161         struct drbd_epoch_entry *e = bio->bi_private;
162         struct drbd_conf *mdev = e->mdev;
163         int uptodate = bio_flagged(bio, BIO_UPTODATE);
164         int is_write = bio_data_dir(bio) == WRITE;
165
166         if (error)
167                 dev_warn(DEV, "%s: error=%d s=%llus\n",
168                                 is_write ? "write" : "read", error,
169                                 (unsigned long long)e->sector);
170         if (!error && !uptodate) {
171                 dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
172                                 is_write ? "write" : "read",
173                                 (unsigned long long)e->sector);
174                 /* strange behavior of some lower level drivers...
175                  * fail the request by clearing the uptodate flag,
176                  * but do not return any error?! */
177                 error = -EIO;
178         }
179
180         if (error)
181                 set_bit(__EE_WAS_ERROR, &e->flags);
182
183         bio_put(bio); /* no need for the bio anymore */
184         if (atomic_dec_and_test(&e->pending_bios)) {
185                 if (is_write)
186                         drbd_endio_write_sec_final(e);
187                 else
188                         drbd_endio_read_sec_final(e);
189         }
190 }
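/*
 * Editor's note (added for clarity): one epoch entry may be backed by
 * several bios (multi-bio EEs, see w_make_resync_request below).  Each
 * completing bio records its error via __EE_WAS_ERROR and drops one
 * reference on e->pending_bios; only the bio that brings the count to
 * zero runs the final stage (drbd_endio_write_sec_final or
 * drbd_endio_read_sec_final), so the final handlers see the aggregated
 * error state exactly once.
 */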
191
192 /* read, readA or write requests on R_PRIMARY coming from drbd_make_request
193  */
194 void drbd_endio_pri(struct bio *bio, int error)
195 {
196         struct drbd_request *req = bio->bi_private;
197         struct drbd_conf *mdev = req->mdev;
198         enum drbd_req_event what;
199         int uptodate = bio_flagged(bio, BIO_UPTODATE);
200
201         if (!error && !uptodate) {
202                 dev_warn(DEV, "p %s: setting error to -EIO\n",
203                          bio_data_dir(bio) == WRITE ? "write" : "read");
204                 /* strange behavior of some lower level drivers...
205                  * fail the request by clearing the uptodate flag,
206                  * but do not return any error?! */
207                 error = -EIO;
208         }
209
210         /* to avoid recursion in __req_mod */
211         if (unlikely(error)) {
212                 what = (bio_data_dir(bio) == WRITE)
213                         ? write_completed_with_error
214                         : (bio_rw(bio) == READ)
215                           ? read_completed_with_error
216                           : read_ahead_completed_with_error;
217         } else
218                 what = completed_ok;
219
220         bio_put(req->private_bio);
221         req->private_bio = ERR_PTR(error);
222
223         req_mod(req, what);
224 }
225
226 int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
227 {
228         struct drbd_request *req = container_of(w, struct drbd_request, w);
229
230         /* We should not detach for read io-error,
231          * but try to WRITE the P_DATA_REPLY to the failed location,
232          * to give the disk the chance to relocate that block */
233
234         spin_lock_irq(&mdev->req_lock);
235         if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
236                 _req_mod(req, read_retry_remote_canceled);
237                 spin_unlock_irq(&mdev->req_lock);
238                 return 1;
239         }
240         spin_unlock_irq(&mdev->req_lock);
241
242         return w_send_read_req(mdev, w, 0);
243 }
244
245 int w_resync_inactive(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
246 {
247         ERR_IF(cancel) return 1;
248         dev_err(DEV, "resync inactive, but callback triggered??\n");
249         return 1; /* Simply ignore this! */
250 }
251
252 void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
253 {
254         struct hash_desc desc;
255         struct scatterlist sg;
256         struct page *page = e->pages;
257         struct page *tmp;
258         unsigned len;
259
260         desc.tfm = tfm;
261         desc.flags = 0;
262
263         sg_init_table(&sg, 1);
264         crypto_hash_init(&desc);
265
266         while ((tmp = page_chain_next(page))) {
267                 /* all but the last page will be fully used */
268                 sg_set_page(&sg, page, PAGE_SIZE, 0);
269                 crypto_hash_update(&desc, &sg, sg.length);
270                 page = tmp;
271         }
272         /* and now the last, possibly only partially used page */
273         len = e->size & (PAGE_SIZE - 1);
274         sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
275         crypto_hash_update(&desc, &sg, sg.length);
276         crypto_hash_final(&desc, digest);
277 }
278
279 void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
280 {
281         struct hash_desc desc;
282         struct scatterlist sg;
283         struct bio_vec *bvec;
284         int i;
285
286         desc.tfm = tfm;
287         desc.flags = 0;
288
289         sg_init_table(&sg, 1);
290         crypto_hash_init(&desc);
291
292         __bio_for_each_segment(bvec, bio, i, 0) {
293                 sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
294                 crypto_hash_update(&desc, &sg, sg.length);
295         }
296         crypto_hash_final(&desc, digest);
297 }
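/*
 * Editor's note (illustrative): both checksum helpers feed the data page
 * by page / segment by segment into one hash transform and write the
 * result into a caller supplied buffer.  A typical caller (compare
 * w_e_send_csum below) therefore looks roughly like this:
 *
 *	digest_size = crypto_hash_digestsize(tfm);
 *	digest = kmalloc(digest_size, GFP_NOIO);
 *	if (digest) {
 *		drbd_csum_ee(mdev, tfm, e, digest);
 *		\/\* ... send or compare the digest ... \*\/
 *		kfree(digest);
 *	}
 *
 * The buffer must be at least crypto_hash_digestsize(tfm) bytes; the
 * helpers themselves neither allocate nor check it.
 */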
298
299 static int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
300 {
301         struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
302         int digest_size;
303         void *digest;
304         int ok;
305
306         D_ASSERT(e->block_id == DRBD_MAGIC + 0xbeef);
307
308         if (unlikely(cancel)) {
309                 drbd_free_ee(mdev, e);
310                 return 1;
311         }
312
313         if (likely((e->flags & EE_WAS_ERROR) == 0)) {
314                 digest_size = crypto_hash_digestsize(mdev->csums_tfm);
315                 digest = kmalloc(digest_size, GFP_NOIO);
316                 if (digest) {
317                         drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
318
319                         inc_rs_pending(mdev);
320                         ok = drbd_send_drequest_csum(mdev,
321                                                      e->sector,
322                                                      e->size,
323                                                      digest,
324                                                      digest_size,
325                                                      P_CSUM_RS_REQUEST);
326                         kfree(digest);
327                 } else {
328                         dev_err(DEV, "kmalloc() of digest failed.\n");
329                         ok = 0;
330                 }
331         } else
332                 ok = 1;
333
334         drbd_free_ee(mdev, e);
335
336         if (unlikely(!ok))
337                 dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
338         return ok;
339 }
340
341 #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
342
343 static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
344 {
345         struct drbd_epoch_entry *e;
346
347         if (!get_ldev(mdev))
348                 return -EIO;
349
350         if (drbd_rs_should_slow_down(mdev))
351                 goto defer;
352
353         /* GFP_TRY, because if there is no memory available right now, this may
354          * be rescheduled for later. It is "only" background resync, after all. */
355         e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
356         if (!e)
357                 goto defer;
358
359         e->w.cb = w_e_send_csum;
360         spin_lock_irq(&mdev->req_lock);
361         list_add(&e->w.list, &mdev->read_ee);
362         spin_unlock_irq(&mdev->req_lock);
363
364         atomic_add(size >> 9, &mdev->rs_sect_ev);
365         if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
366                 return 0;
367
368         /* drbd_submit_ee currently fails for one reason only:
369          * not being able to allocate enough bios.
370          * Is dropping the connection going to help? */
371         spin_lock_irq(&mdev->req_lock);
372         list_del(&e->w.list);
373         spin_unlock_irq(&mdev->req_lock);
374
375         drbd_free_ee(mdev, e);
376 defer:
377         put_ldev(mdev);
378         return -EAGAIN;
379 }
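/*
 * Editor's note (added for clarity): the return convention of
 * read_for_csum() mirrors the switch in w_make_resync_request():
 *   0       - epoch entry allocated and submitted; w_e_send_csum will
 *             eventually send the checksum request,
 *   -EAGAIN - temporary condition (throttled, no memory, no bios);
 *             the caller rolls back and retries this sector later,
 *   -EIO    - no local disk (get_ldev failed); the caller stops
 *             generating resync requests.
 */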
380
381 void resync_timer_fn(unsigned long data)
382 {
383         struct drbd_conf *mdev = (struct drbd_conf *) data;
384         int queue;
385
386         queue = 1;
387         switch (mdev->state.conn) {
388         case C_VERIFY_S:
389                 mdev->resync_work.cb = w_make_ov_request;
390                 break;
391         case C_SYNC_TARGET:
392                 mdev->resync_work.cb = w_make_resync_request;
393                 break;
394         default:
395                 queue = 0;
396                 mdev->resync_work.cb = w_resync_inactive;
397         }
398
399         /* harmless race: list_empty outside data.work.q_lock */
400         if (list_empty(&mdev->resync_work.list) && queue)
401                 drbd_queue_work(&mdev->data.work, &mdev->resync_work);
402 }
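/*
 * Editor's note (added for clarity): timer and worker hand control back
 * and forth.  The timer only queues mdev->resync_work with the callback
 * matching the current connection state; the work callbacks
 * (w_make_resync_request / w_make_ov_request) re-arm the timer with
 * mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME) in their requeue
 * paths, so one batch of resync work is generated per SLEEP_TIME.
 */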
403
404 static void fifo_set(struct fifo_buffer *fb, int value)
405 {
406         int i;
407
408         for (i = 0; i < fb->size; i++)
409                 fb->values[i] = value;
410 }
411
412 static int fifo_push(struct fifo_buffer *fb, int value)
413 {
414         int ov;
415
416         ov = fb->values[fb->head_index];
417         fb->values[fb->head_index++] = value;
418
419         if (fb->head_index >= fb->size)
420                 fb->head_index = 0;
421
422         return ov;
423 }
424
425 static void fifo_add_val(struct fifo_buffer *fb, int value)
426 {
427         int i;
428
429         for (i = 0; i < fb->size; i++)
430                 fb->values[i] += value;
431 }
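/*
 * Editor's note (illustrative): rs_plan_s is used as a fixed-size ring
 * buffer of per-step corrections for the resync controller below.
 * fifo_push(fb, v) returns the entry the head currently points at and
 * replaces it with v: with a buffer of size 3 holding {5, 2, 7} and the
 * head at index 0, fifo_push(fb, 0) returns 5, leaves {0, 2, 7} and
 * advances the head to index 1.  fifo_add_val() spreads a correction
 * evenly over all planned steps.
 */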
432
433 int drbd_rs_controller(struct drbd_conf *mdev)
434 {
435         unsigned int sect_in;  /* Number of sectors that came in since the last turn */
436         unsigned int want;     /* The number of sectors we want in the proxy */
437         int req_sect; /* Number of sectors to request in this turn */
438         int correction; /* Number of additional sectors we need in the proxy */
439         int cps; /* correction per invocation of drbd_rs_controller() */
440         int steps; /* Number of time steps to plan ahead */
441         int curr_corr;
442         int max_sect;
443
444         sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
445         mdev->rs_in_flight -= sect_in;
446
447         spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */
448
449         steps = mdev->rs_plan_s.size; /* (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
450
451         if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
452                 want = ((mdev->sync_conf.rate * 2 * SLEEP_TIME) / HZ) * steps;
453         } else { /* normal path */
454                 want = mdev->sync_conf.c_fill_target ? mdev->sync_conf.c_fill_target :
455                         sect_in * mdev->sync_conf.c_delay_target * HZ / (SLEEP_TIME * 10);
456         }
457
458         correction = want - mdev->rs_in_flight - mdev->rs_planed;
459
460         /* Plan ahead */
461         cps = correction / steps;
462         fifo_add_val(&mdev->rs_plan_s, cps);
463         mdev->rs_planed += cps * steps;
464
465         /* What we do in this step */
466         curr_corr = fifo_push(&mdev->rs_plan_s, 0);
467         spin_unlock(&mdev->peer_seq_lock);
468         mdev->rs_planed -= curr_corr;
469
470         req_sect = sect_in + curr_corr;
471         if (req_sect < 0)
472                 req_sect = 0;
473
474         max_sect = (mdev->sync_conf.c_max_rate * 2 * SLEEP_TIME) / HZ;
475         if (req_sect > max_sect)
476                 req_sect = max_sect;
477
478         /*
479         dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
480                  sect_in, mdev->rs_in_flight, want, correction,
481                  steps, cps, mdev->rs_planed, curr_corr, req_sect);
482         */
483
484         return req_sect;
485 }
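/*
 * Editor's note (added for clarity, restating the arithmetic above for
 * the normal path; the start-of-resync case seeds the plan differently):
 * per controller invocation (every SLEEP_TIME),
 *
 *	want       = c_fill_target                       (if configured)
 *	             or sect_in * c_delay_target * HZ / (SLEEP_TIME * 10)
 *	correction = want - rs_in_flight - rs_planed
 *	cps        = correction / steps    (spread over the plan buffer)
 *	curr_corr  = oldest planned correction popped from rs_plan_s
 *	req_sect   = clamp(sect_in + curr_corr, 0,
 *	                   c_max_rate * 2 * SLEEP_TIME / HZ)
 *
 * i.e. the controller tries to keep "want" sectors in flight towards the
 * peer and returns how many sectors to request during the next step.
 */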
486
487 int w_make_resync_request(struct drbd_conf *mdev,
488                 struct drbd_work *w, int cancel)
489 {
490         unsigned long bit;
491         sector_t sector;
492         const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
493         int max_segment_size;
494         int number, rollback_i, size, pe, mx;
495         int align, queued, sndbuf;
496         int i = 0;
497
498         if (unlikely(cancel))
499                 return 1;
500
501         if (unlikely(mdev->state.conn < C_CONNECTED)) {
502                 dev_err(DEV, "Confused in w_make_resync_request()! cstate < Connected");
503                 return 0;
504         }
505
506         if (mdev->state.conn != C_SYNC_TARGET)
507                 dev_err(DEV, "%s in w_make_resync_request\n",
508                         drbd_conn_str(mdev->state.conn));
509
510         if (mdev->rs_total == 0) {
511                 /* empty resync? */
512                 drbd_resync_finished(mdev);
513                 return 1;
514         }
515
516         if (!get_ldev(mdev)) {
517                 /* Since we only need to access mdev->rsync, a
518                    get_ldev_if_state(mdev, D_FAILED) would be sufficient, but
519                    continuing resync with a broken disk makes no sense at
520                    all */
521                 dev_err(DEV, "Disk broke down during resync!\n");
522                 mdev->resync_work.cb = w_resync_inactive;
523                 return 1;
524         }
525
526         /* starting with drbd 8.3.8, we can handle multi-bio EEs,
527          * if it should be necessary */
528         max_segment_size =
529                 mdev->agreed_pro_version < 94 ? queue_max_segment_size(mdev->rq_queue) :
530                 mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_SEGMENT_SIZE;
531
532         if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
533                 number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
534                 mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
535         } else {
536                 mdev->c_sync_rate = mdev->sync_conf.rate;
537                 number = SLEEP_TIME * mdev->c_sync_rate  / ((BM_BLOCK_SIZE / 1024) * HZ);
538         }
539
540         /* Throttle resync on lower level disk activity, which may also be
541          * caused by application IO on Primary/SyncTarget.
542          * Keep this after the call to drbd_rs_controller, as that assumes
543          * Keep this after the call to drbd_rs_controller, as that expects
544          * and would be confused otherwise. */
545         if (drbd_rs_should_slow_down(mdev))
546                 goto requeue;
547
548         mutex_lock(&mdev->data.mutex);
549         if (mdev->data.socket)
550                 mx = mdev->data.socket->sk->sk_rcvbuf / sizeof(struct p_block_req);
551         else
552                 mx = 1;
553         mutex_unlock(&mdev->data.mutex);
554
555         /* For resync rates >160MB/sec, allow more pending RS requests */
556         if (number > mx)
557                 mx = number;
558
559         /* Limit the number of pending RS requests to no more than the peer's receive buffer */
560         pe = atomic_read(&mdev->rs_pending_cnt);
561         if ((pe + number) > mx) {
562                 number = mx - pe;
563         }
564
565         for (i = 0; i < number; i++) {
566                 /* Stop generating RS requests, when half of the send buffer is filled */
567                 mutex_lock(&mdev->data.mutex);
568                 if (mdev->data.socket) {
569                         queued = mdev->data.socket->sk->sk_wmem_queued;
570                         sndbuf = mdev->data.socket->sk->sk_sndbuf;
571                 } else {
572                         queued = 1;
573                         sndbuf = 0;
574                 }
575                 mutex_unlock(&mdev->data.mutex);
576                 if (queued > sndbuf / 2)
577                         goto requeue;
578
579 next_sector:
580                 size = BM_BLOCK_SIZE;
581                 bit  = drbd_bm_find_next(mdev, mdev->bm_resync_fo);
582
583                 if (bit == -1UL) {
584                         mdev->bm_resync_fo = drbd_bm_bits(mdev);
585                         mdev->resync_work.cb = w_resync_inactive;
586                         put_ldev(mdev);
587                         return 1;
588                 }
589
590                 sector = BM_BIT_TO_SECT(bit);
591
592                 if (drbd_try_rs_begin_io(mdev, sector)) {
593                         mdev->bm_resync_fo = bit;
594                         goto requeue;
595                 }
596                 mdev->bm_resync_fo = bit + 1;
597
598                 if (unlikely(drbd_bm_test_bit(mdev, bit) == 0)) {
599                         drbd_rs_complete_io(mdev, sector);
600                         goto next_sector;
601                 }
602
603 #if DRBD_MAX_SEGMENT_SIZE > BM_BLOCK_SIZE
604                 /* try to find some adjacent bits.
605                  * we stop if we have already the maximum req size.
606                  *
607                  * Additionally always align bigger requests, in order to
608                  * be prepared for all stripe sizes of software RAIDs.
609                  */
610                 align = 1;
611                 rollback_i = i;
612                 for (;;) {
613                         if (size + BM_BLOCK_SIZE > max_segment_size)
614                                 break;
615
616                         /* Be always aligned */
617                         if (sector & ((1<<(align+3))-1))
618                                 break;
619
620                         /* do not cross extent boundaries */
621                         if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
622                                 break;
623                         /* now, is it actually dirty, after all?
624                          * caution, drbd_bm_test_bit is tri-state for some
625                          * obscure reason; ( b == 0 ) would get the out-of-band
626                          * only accidentally right because of the "oddly sized"
627                          * adjustment below */
628                         if (drbd_bm_test_bit(mdev, bit+1) != 1)
629                                 break;
630                         bit++;
631                         size += BM_BLOCK_SIZE;
632                         if ((BM_BLOCK_SIZE << align) <= size)
633                                 align++;
634                         i++;
635                 }
636                 /* if we merged some,
637                  * reset the offset to start the next drbd_bm_find_next from */
638                 if (size > BM_BLOCK_SIZE)
639                         mdev->bm_resync_fo = bit + 1;
640 #endif
641
642                 /* adjust very last sectors, in case we are oddly sized */
643                 if (sector + (size>>9) > capacity)
644                         size = (capacity-sector)<<9;
645                 if (mdev->agreed_pro_version >= 89 && mdev->csums_tfm) {
646                         switch (read_for_csum(mdev, sector, size)) {
647                         case -EIO: /* Disk failure */
648                                 put_ldev(mdev);
649                                 return 0;
650                         case -EAGAIN: /* allocation failed, or ldev busy */
651                                 drbd_rs_complete_io(mdev, sector);
652                                 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
653                                 i = rollback_i;
654                                 goto requeue;
655                         case 0:
656                                 /* everything ok */
657                                 break;
658                         default:
659                                 BUG();
660                         }
661                 } else {
662                         inc_rs_pending(mdev);
663                         if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
664                                                sector, size, ID_SYNCER)) {
665                                 dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
666                                 dec_rs_pending(mdev);
667                                 put_ldev(mdev);
668                                 return 0;
669                         }
670                 }
671         }
672
673         if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
674                 /* last syncer _request_ was sent,
675                  * but the P_RS_DATA_REPLY has not been received yet.  sync will end
676                  * (and the next sync group will resume) as soon as we receive the
677                  * last resync data block and the last bit is cleared.
678                  * until then resync "work" is "inactive" ...
679                  */
680                 mdev->resync_work.cb = w_resync_inactive;
681                 put_ldev(mdev);
682                 return 1;
683         }
684
685  requeue:
686         mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
687         mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
688         put_ldev(mdev);
689         return 1;
690 }
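/*
 * Editor's note (illustrative): "number" above is counted in bitmap
 * blocks of BM_BLOCK_SIZE bytes (4 KiB per bitmap bit) issued per
 * SLEEP_TIME interval.  Without the dynamic controller, a configured
 * rate of e.g. 4000 KiB/s gives
 *
 *	number = SLEEP_TIME * 4000 / ((BM_BLOCK_SIZE / 1024) * HZ)
 *	       = 1000 * SLEEP_TIME / HZ
 *
 * so with a SLEEP_TIME of a tenth of a second that is 100 requests of
 * 4 KiB, i.e. 400 KiB issued per 100 ms.  (BM_BLOCK_SIZE and SLEEP_TIME
 * are defined in drbd_int.h; the concrete values here are only a worked
 * example.)
 */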
691
692 static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
693 {
694         int number, i, size;
695         sector_t sector;
696         const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
697
698         if (unlikely(cancel))
699                 return 1;
700
701         if (unlikely(mdev->state.conn < C_CONNECTED)) {
702                 dev_err(DEV, "Confused in w_make_ov_request()! cstate < Connected");
703                 return 0;
704         }
705
706         number = SLEEP_TIME*mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ);
707         if (atomic_read(&mdev->rs_pending_cnt) > number)
708                 goto requeue;
709
710         number -= atomic_read(&mdev->rs_pending_cnt);
711
712         sector = mdev->ov_position;
713         for (i = 0; i < number; i++) {
714                 if (sector >= capacity) {
715                         mdev->resync_work.cb = w_resync_inactive;
716                         return 1;
717                 }
718
719                 size = BM_BLOCK_SIZE;
720
721                 if (drbd_try_rs_begin_io(mdev, sector)) {
722                         mdev->ov_position = sector;
723                         goto requeue;
724                 }
725
726                 if (sector + (size>>9) > capacity)
727                         size = (capacity-sector)<<9;
728
729                 inc_rs_pending(mdev);
730                 if (!drbd_send_ov_request(mdev, sector, size)) {
731                         dec_rs_pending(mdev);
732                         return 0;
733                 }
734                 sector += BM_SECT_PER_BIT;
735         }
736         mdev->ov_position = sector;
737
738  requeue:
739         mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
740         return 1;
741 }
742
743
744 int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
745 {
746         kfree(w);
747         ov_oos_print(mdev);
748         drbd_resync_finished(mdev);
749
750         return 1;
751 }
752
753 static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
754 {
755         kfree(w);
756
757         drbd_resync_finished(mdev);
758
759         return 1;
760 }
761
762 static void ping_peer(struct drbd_conf *mdev)
763 {
764         clear_bit(GOT_PING_ACK, &mdev->flags);
765         request_ping(mdev);
766         wait_event(mdev->misc_wait,
767                    test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
768 }
769
770 int drbd_resync_finished(struct drbd_conf *mdev)
771 {
772         unsigned long db, dt, dbdt;
773         unsigned long n_oos;
774         union drbd_state os, ns;
775         struct drbd_work *w;
776         char *khelper_cmd = NULL;
777
778         /* Remove all elements from the resync LRU. Since future actions
779          * might set bits in the (main) bitmap, then the entries in the
780          * resync LRU would be wrong. */
781         if (drbd_rs_del_all(mdev)) {
782                 /* In case this is not possible now, most probably because
783                  * there are P_RS_DATA_REPLY Packets lingering on the worker's
784                  * queue (or even the read operations for those packets
785                  * are not finished by now).   Retry in 100ms. */
786
787                 drbd_kick_lo(mdev);
788                 __set_current_state(TASK_INTERRUPTIBLE);
789                 schedule_timeout(HZ / 10);
790                 w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
791                 if (w) {
792                         w->cb = w_resync_finished;
793                         drbd_queue_work(&mdev->data.work, w);
794                         return 1;
795                 }
796                 dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
797         }
798
799         dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
800         if (dt <= 0)
801                 dt = 1;
802         db = mdev->rs_total;
803         dbdt = Bit2KB(db/dt);
804         mdev->rs_paused /= HZ;
805
806         if (!get_ldev(mdev))
807                 goto out;
808
809         ping_peer(mdev);
810
811         spin_lock_irq(&mdev->req_lock);
812         os = mdev->state;
813
814         /* This protects us against multiple calls (that can happen in the presence
815            of application IO), and against connectivity loss just before we arrive here. */
816         if (os.conn <= C_CONNECTED)
817                 goto out_unlock;
818
819         ns = os;
820         ns.conn = C_CONNECTED;
821
822         dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
823              (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) ?
824              "Online verify " : "Resync",
825              dt + mdev->rs_paused, mdev->rs_paused, dbdt);
826
827         n_oos = drbd_bm_total_weight(mdev);
828
829         if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
830                 if (n_oos) {
831                         dev_alert(DEV, "Online verify found %lu %dk block out of sync!\n",
832                               n_oos, Bit2KB(1));
833                         khelper_cmd = "out-of-sync";
834                 }
835         } else {
836                 D_ASSERT((n_oos - mdev->rs_failed) == 0);
837
838                 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
839                         khelper_cmd = "after-resync-target";
840
841                 if (mdev->csums_tfm && mdev->rs_total) {
842                         const unsigned long s = mdev->rs_same_csum;
843                         const unsigned long t = mdev->rs_total;
844                         const int ratio =
845                                 (t == 0)     ? 0 :
846                         (t < 100000) ? ((s*100)/t) : (s/(t/100));
847                         dev_info(DEV, "%u %% had equal check sums, eliminated: %luK; "
848                              "transferred %luK total %luK\n",
849                              ratio,
850                              Bit2KB(mdev->rs_same_csum),
851                              Bit2KB(mdev->rs_total - mdev->rs_same_csum),
852                              Bit2KB(mdev->rs_total));
853                 }
854         }
855
856         if (mdev->rs_failed) {
857                 dev_info(DEV, "            %lu failed blocks\n", mdev->rs_failed);
858
859                 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
860                         ns.disk = D_INCONSISTENT;
861                         ns.pdsk = D_UP_TO_DATE;
862                 } else {
863                         ns.disk = D_UP_TO_DATE;
864                         ns.pdsk = D_INCONSISTENT;
865                 }
866         } else {
867                 ns.disk = D_UP_TO_DATE;
868                 ns.pdsk = D_UP_TO_DATE;
869
870                 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
871                         if (mdev->p_uuid) {
872                                 int i;
873                                 for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
874                                         _drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
875                                 drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_CURRENT]);
876                                 _drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]);
877                         } else {
878                                 dev_err(DEV, "mdev->p_uuid is NULL! BUG\n");
879                         }
880                 }
881
882                 drbd_uuid_set_bm(mdev, 0UL);
883
884                 if (mdev->p_uuid) {
885                         /* Now the two UUID sets are equal, update what we
886                          * know of the peer. */
887                         int i;
888                         for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
889                                 mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
890                 }
891         }
892
893         _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
894 out_unlock:
895         spin_unlock_irq(&mdev->req_lock);
896         put_ldev(mdev);
897 out:
898         mdev->rs_total  = 0;
899         mdev->rs_failed = 0;
900         mdev->rs_paused = 0;
901         mdev->ov_start_sector = 0;
902
903         drbd_md_sync(mdev);
904
905         if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
906                 dev_info(DEV, "Writing the whole bitmap\n");
907                 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
908         }
909
910         if (khelper_cmd)
911                 drbd_khelper(mdev, khelper_cmd);
912
913         return 1;
914 }
915
916 /* helper */
917 static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
918 {
919         if (drbd_ee_has_active_page(e)) {
920                 /* This might happen if sendpage() has not finished */
921                 int i = (e->size + PAGE_SIZE -1) >> PAGE_SHIFT;
922                 atomic_add(i, &mdev->pp_in_use_by_net);
923                 atomic_sub(i, &mdev->pp_in_use);
924                 spin_lock_irq(&mdev->req_lock);
925                 list_add_tail(&e->w.list, &mdev->net_ee);
926                 spin_unlock_irq(&mdev->req_lock);
927                 wake_up(&drbd_pp_wait);
928         } else
929                 drbd_free_ee(mdev, e);
930 }
931
932 /**
933  * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
934  * @mdev:       DRBD device.
935  * @w:          work object.
936  * @cancel:     The connection will be closed anyways
937  */
938 int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
939 {
940         struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
941         int ok;
942
943         if (unlikely(cancel)) {
944                 drbd_free_ee(mdev, e);
945                 dec_unacked(mdev);
946                 return 1;
947         }
948
949         if (likely((e->flags & EE_WAS_ERROR) == 0)) {
950                 ok = drbd_send_block(mdev, P_DATA_REPLY, e);
951         } else {
952                 if (__ratelimit(&drbd_ratelimit_state))
953                         dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
954                             (unsigned long long)e->sector);
955
956                 ok = drbd_send_ack(mdev, P_NEG_DREPLY, e);
957         }
958
959         dec_unacked(mdev);
960
961         move_to_net_ee_or_free(mdev, e);
962
963         if (unlikely(!ok))
964                 dev_err(DEV, "drbd_send_block() failed\n");
965         return ok;
966 }
967
968 /**
969  * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
970  * @mdev:       DRBD device.
971  * @w:          work object.
972  * @cancel:     The connection will be closed anyways
973  */
974 int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
975 {
976         struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
977         int ok;
978
979         if (unlikely(cancel)) {
980                 drbd_free_ee(mdev, e);
981                 dec_unacked(mdev);
982                 return 1;
983         }
984
985         if (get_ldev_if_state(mdev, D_FAILED)) {
986                 drbd_rs_complete_io(mdev, e->sector);
987                 put_ldev(mdev);
988         }
989
990         if (likely((e->flags & EE_WAS_ERROR) == 0)) {
991                 if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
992                         inc_rs_pending(mdev);
993                         ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
994                 } else {
995                         if (__ratelimit(&drbd_ratelimit_state))
996                                 dev_err(DEV, "Not sending RSDataReply, "
997                                     "partner DISKLESS!\n");
998                         ok = 1;
999                 }
1000         } else {
1001                 if (__ratelimit(&drbd_ratelimit_state))
1002                         dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
1003                             (unsigned long long)e->sector);
1004
1005                 ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
1006
1007                 /* update resync data with failure */
1008                 drbd_rs_failed_io(mdev, e->sector, e->size);
1009         }
1010
1011         dec_unacked(mdev);
1012
1013         move_to_net_ee_or_free(mdev, e);
1014
1015         if (unlikely(!ok))
1016                 dev_err(DEV, "drbd_send_block() failed\n");
1017         return ok;
1018 }
1019
1020 int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1021 {
1022         struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
1023         struct digest_info *di;
1024         int digest_size;
1025         void *digest = NULL;
1026         int ok, eq = 0;
1027
1028         if (unlikely(cancel)) {
1029                 drbd_free_ee(mdev, e);
1030                 dec_unacked(mdev);
1031                 return 1;
1032         }
1033
1034         if (get_ldev(mdev)) {
1035                 drbd_rs_complete_io(mdev, e->sector);
1036                 put_ldev(mdev);
1037         }
1038
1039         di = e->digest;
1040
1041         if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1042                 /* quick hack to try to avoid a race against reconfiguration.
1043                  * a real fix would be much more involved,
1044                  * introducing more locking mechanisms */
1045                 if (mdev->csums_tfm) {
1046                         digest_size = crypto_hash_digestsize(mdev->csums_tfm);
1047                         D_ASSERT(digest_size == di->digest_size);
1048                         digest = kmalloc(digest_size, GFP_NOIO);
1049                 }
1050                 if (digest) {
1051                         drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
1052                         eq = !memcmp(digest, di->digest, digest_size);
1053                         kfree(digest);
1054                 }
1055
1056                 if (eq) {
1057                         drbd_set_in_sync(mdev, e->sector, e->size);
1058                         /* rs_same_csums unit is BM_BLOCK_SIZE */
1059                         mdev->rs_same_csum += e->size >> BM_BLOCK_SHIFT;
1060                         ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e);
1061                 } else {
1062                         inc_rs_pending(mdev);
1063                         e->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
1064                         e->flags &= ~EE_HAS_DIGEST; /* This e no longer has a digest pointer */
1065                         kfree(di);
1066                         ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
1067                 }
1068         } else {
1069                 ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
1070                 if (__ratelimit(&drbd_ratelimit_state))
1071                         dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
1072         }
1073
1074         dec_unacked(mdev);
1075         move_to_net_ee_or_free(mdev, e);
1076
1077         if (unlikely(!ok))
1078                 dev_err(DEV, "drbd_send_block/ack() failed\n");
1079         return ok;
1080 }
1081
1082 int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1083 {
1084         struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
1085         int digest_size;
1086         void *digest;
1087         int ok = 1;
1088
1089         if (unlikely(cancel))
1090                 goto out;
1091
1092         if (unlikely((e->flags & EE_WAS_ERROR) != 0))
1093                 goto out;
1094
1095         digest_size = crypto_hash_digestsize(mdev->verify_tfm);
1096         /* FIXME if this allocation fails, online verify will not terminate! */
1097         digest = kmalloc(digest_size, GFP_NOIO);
1098         if (digest) {
1099                 drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
1100                 inc_rs_pending(mdev);
1101                 ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
1102                                              digest, digest_size, P_OV_REPLY);
1103                 if (!ok)
1104                         dec_rs_pending(mdev);
1105                 kfree(digest);
1106         }
1107
1108 out:
1109         drbd_free_ee(mdev, e);
1110
1111         dec_unacked(mdev);
1112
1113         return ok;
1114 }
1115
1116 void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
1117 {
1118         if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
1119                 mdev->ov_last_oos_size += size>>9;
1120         } else {
1121                 mdev->ov_last_oos_start = sector;
1122                 mdev->ov_last_oos_size = size>>9;
1123         }
1124         drbd_set_out_of_sync(mdev, sector, size);
1125         set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
1126 }
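/*
 * Editor's note (added for clarity): adjacent out-of-sync findings are
 * coalesced here: if the new sector directly follows the last recorded
 * range, that range is extended, otherwise a new range is started.
 * E.g. hits at sectors 2048 and 2056 with size 4096 each become one
 * range starting at sector 2048 covering 16 sectors.  ov_oos_print()
 * (defined elsewhere) then reports the coalesced range.
 */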
1127
1128 int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1129 {
1130         struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
1131         struct digest_info *di;
1132         int digest_size;
1133         void *digest;
1134         int ok, eq = 0;
1135
1136         if (unlikely(cancel)) {
1137                 drbd_free_ee(mdev, e);
1138                 dec_unacked(mdev);
1139                 return 1;
1140         }
1141
1142         /* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
1143          * the resync lru has been cleaned up already */
1144         if (get_ldev(mdev)) {
1145                 drbd_rs_complete_io(mdev, e->sector);
1146                 put_ldev(mdev);
1147         }
1148
1149         di = e->digest;
1150
1151         if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1152                 digest_size = crypto_hash_digestsize(mdev->verify_tfm);
1153                 digest = kmalloc(digest_size, GFP_NOIO);
1154                 if (digest) {
1155                         drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
1156
1157                         D_ASSERT(digest_size == di->digest_size);
1158                         eq = !memcmp(digest, di->digest, digest_size);
1159                         kfree(digest);
1160                 }
1161         } else {
1162                 ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
1163                 if (__ratelimit(&drbd_ratelimit_state))
1164                         dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
1165         }
1166
1167         dec_unacked(mdev);
1168         if (!eq)
1169                 drbd_ov_oos_found(mdev, e->sector, e->size);
1170         else
1171                 ov_oos_print(mdev);
1172
1173         ok = drbd_send_ack_ex(mdev, P_OV_RESULT, e->sector, e->size,
1174                               eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
1175
1176         drbd_free_ee(mdev, e);
1177
1178         if (--mdev->ov_left == 0) {
1179                 ov_oos_print(mdev);
1180                 drbd_resync_finished(mdev);
1181         }
1182
1183         return ok;
1184 }
1185
1186 int w_prev_work_done(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1187 {
1188         struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
1189         complete(&b->done);
1190         return 1;
1191 }
1192
1193 int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1194 {
1195         struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w);
1196         struct p_barrier *p = &mdev->data.sbuf.barrier;
1197         int ok = 1;
1198
1199         /* really avoid racing with tl_clear.  w.cb may have been referenced
1200          * just before it was reassigned and re-queued, so double check that.
1201          * actually, this race was harmless, since we only try to send the
1202          * barrier packet here, and otherwise do nothing with the object.
1203          * but compare with the head of w_clear_epoch */
1204         spin_lock_irq(&mdev->req_lock);
1205         if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
1206                 cancel = 1;
1207         spin_unlock_irq(&mdev->req_lock);
1208         if (cancel)
1209                 return 1;
1210
1211         if (!drbd_get_data_sock(mdev))
1212                 return 0;
1213         p->barrier = b->br_number;
1214         /* inc_ap_pending was done where this was queued.
1215          * dec_ap_pending will be done in got_BarrierAck
1216          * or (on connection loss) in w_clear_epoch.  */
1217         ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BARRIER,
1218                                 (struct p_header80 *)p, sizeof(*p), 0);
1219         drbd_put_data_sock(mdev);
1220
1221         return ok;
1222 }
1223
1224 int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1225 {
1226         if (cancel)
1227                 return 1;
1228         return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
1229 }
1230
1231 /**
1232  * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
1233  * @mdev:       DRBD device.
1234  * @w:          work object.
1235  * @cancel:     The connection will be closed anyways
1236  */
1237 int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1238 {
1239         struct drbd_request *req = container_of(w, struct drbd_request, w);
1240         int ok;
1241
1242         if (unlikely(cancel)) {
1243                 req_mod(req, send_canceled);
1244                 return 1;
1245         }
1246
1247         ok = drbd_send_dblock(mdev, req);
1248         req_mod(req, ok ? handed_over_to_network : send_failed);
1249
1250         return ok;
1251 }
1252
1253 /**
1254  * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
1255  * @mdev:       DRBD device.
1256  * @w:          work object.
1257  * @cancel:     The connection will be closed anyways
1258  */
1259 int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1260 {
1261         struct drbd_request *req = container_of(w, struct drbd_request, w);
1262         int ok;
1263
1264         if (unlikely(cancel)) {
1265                 req_mod(req, send_canceled);
1266                 return 1;
1267         }
1268
1269         ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->sector, req->size,
1270                                 (unsigned long)req);
1271
1272         if (!ok) {
1273                 /* ?? we set C_TIMEOUT or C_BROKEN_PIPE in drbd_send();
1274                  * so this is probably redundant */
1275                 if (mdev->state.conn >= C_CONNECTED)
1276                         drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
1277         }
1278         req_mod(req, ok ? handed_over_to_network : send_failed);
1279
1280         return ok;
1281 }
1282
1283 int w_restart_disk_io(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1284 {
1285         struct drbd_request *req = container_of(w, struct drbd_request, w);
1286
1287         if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
1288                 drbd_al_begin_io(mdev, req->sector);
1289         /* Calling drbd_al_begin_io() out of the worker might theoretically
1290            deadlock. In practice it cannot deadlock, since this is
1291            only used when unfreezing IOs. All the extents of the requests
1292            that made it into the TL are already active */
1293
1294         drbd_req_make_private_bio(req, req->master_bio);
1295         req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
1296         generic_make_request(req->private_bio);
1297
1298         return 1;
1299 }
1300
1301 static int _drbd_may_sync_now(struct drbd_conf *mdev)
1302 {
1303         struct drbd_conf *odev = mdev;
1304
1305         while (1) {
1306                 if (odev->sync_conf.after == -1)
1307                         return 1;
1308                 odev = minor_to_mdev(odev->sync_conf.after);
1309                 ERR_IF(!odev) return 1;
1310                 if ((odev->state.conn >= C_SYNC_SOURCE &&
1311                      odev->state.conn <= C_PAUSED_SYNC_T) ||
1312                     odev->state.aftr_isp || odev->state.peer_isp ||
1313                     odev->state.user_isp)
1314                         return 0;
1315         }
1316 }
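/*
 * Editor's note (illustrative): _drbd_may_sync_now() walks the
 * "sync-after" chain.  With a configuration like
 *
 *	drbd2.sync_conf.after = 1;	\/\* drbd2 resyncs after drbd1 \*\/
 *	drbd1.sync_conf.after = -1;	\/\* drbd1 has no dependency  \*\/
 *
 * drbd2 may only start (or resume) its resync while drbd1 is neither
 * actively resyncing (C_SYNC_SOURCE..C_PAUSED_SYNC_T) nor paused via any
 * of the aftr_isp/peer_isp/user_isp flags.  The minor numbers above are
 * made up for the example.
 */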
1317
1318 /**
1319  * _drbd_pause_after() - Pause resync on all devices that may not resync now
1320  * @mdev:       DRBD device.
1321  *
1322  * Called from process context only (admin command and after_state_ch).
1323  */
1324 static int _drbd_pause_after(struct drbd_conf *mdev)
1325 {
1326         struct drbd_conf *odev;
1327         int i, rv = 0;
1328
1329         for (i = 0; i < minor_count; i++) {
1330                 odev = minor_to_mdev(i);
1331                 if (!odev)
1332                         continue;
1333                 if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1334                         continue;
1335                 if (!_drbd_may_sync_now(odev))
1336                         rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
1337                                != SS_NOTHING_TO_DO);
1338         }
1339
1340         return rv;
1341 }
1342
1343 /**
1344  * _drbd_resume_next() - Resume resync on all devices that may resync now
1345  * @mdev:       DRBD device.
1346  *
1347  * Called from process context only (admin command and worker).
1348  */
1349 static int _drbd_resume_next(struct drbd_conf *mdev)
1350 {
1351         struct drbd_conf *odev;
1352         int i, rv = 0;
1353
1354         for (i = 0; i < minor_count; i++) {
1355                 odev = minor_to_mdev(i);
1356                 if (!odev)
1357                         continue;
1358                 if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1359                         continue;
1360                 if (odev->state.aftr_isp) {
1361                         if (_drbd_may_sync_now(odev))
1362                                 rv |= (__drbd_set_state(_NS(odev, aftr_isp, 0),
1363                                                         CS_HARD, NULL)
1364                                        != SS_NOTHING_TO_DO) ;
1365                 }
1366         }
1367         return rv;
1368 }
1369
1370 void resume_next_sg(struct drbd_conf *mdev)
1371 {
1372         write_lock_irq(&global_state_lock);
1373         _drbd_resume_next(mdev);
1374         write_unlock_irq(&global_state_lock);
1375 }
1376
1377 void suspend_other_sg(struct drbd_conf *mdev)
1378 {
1379         write_lock_irq(&global_state_lock);
1380         _drbd_pause_after(mdev);
1381         write_unlock_irq(&global_state_lock);
1382 }
1383
1384 static int sync_after_error(struct drbd_conf *mdev, int o_minor)
1385 {
1386         struct drbd_conf *odev;
1387
1388         if (o_minor == -1)
1389                 return NO_ERROR;
1390         if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
1391                 return ERR_SYNC_AFTER;
1392
1393         /* check for loops */
1394         odev = minor_to_mdev(o_minor);
1395         while (1) {
1396                 if (odev == mdev)
1397                         return ERR_SYNC_AFTER_CYCLE;
1398
1399                 /* dependency chain ends here, no cycles. */
1400                 if (odev->sync_conf.after == -1)
1401                         return NO_ERROR;
1402
1403                 /* follow the dependency chain */
1404                 odev = minor_to_mdev(odev->sync_conf.after);
1405         }
1406 }
1407
1408 int drbd_alter_sa(struct drbd_conf *mdev, int na)
1409 {
1410         int changes;
1411         int retcode;
1412
1413         write_lock_irq(&global_state_lock);
1414         retcode = sync_after_error(mdev, na);
1415         if (retcode == NO_ERROR) {
1416                 mdev->sync_conf.after = na;
1417                 do {
1418                         changes  = _drbd_pause_after(mdev);
1419                         changes |= _drbd_resume_next(mdev);
1420                 } while (changes);
1421         }
1422         write_unlock_irq(&global_state_lock);
1423         return retcode;
1424 }
1425
1426 /**
1427  * drbd_start_resync() - Start the resync process
1428  * @mdev:       DRBD device.
1429  * @side:       Either C_SYNC_SOURCE or C_SYNC_TARGET
1430  *
1431  * This function might bring you directly into one of the
1432  * C_PAUSED_SYNC_* states.
1433  */
1434 void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
1435 {
1436         union drbd_state ns;
1437         int r;
1438
1439         if (mdev->state.conn >= C_SYNC_SOURCE) {
1440                 dev_err(DEV, "Resync already running!\n");
1441                 return;
1442         }
1443
1444         /* In case a previous resync run was aborted by an IO error/detach on the peer. */
1445         drbd_rs_cancel_all(mdev);
1446
1447         if (side == C_SYNC_TARGET) {
1448                 /* Since application IO was locked out during C_WF_BITMAP_T and
1449                    C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
1450                    we check whether we may make the data inconsistent. */
1451                 r = drbd_khelper(mdev, "before-resync-target");
1452                 r = (r >> 8) & 0xff;
1453                 if (r > 0) {
1454                         dev_info(DEV, "before-resync-target handler returned %d, "
1455                              "dropping connection.\n", r);
1456                         drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
1457                         return;
1458                 }
1459         }
1460
1461         drbd_state_lock(mdev);
1462
1463         if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
1464                 drbd_state_unlock(mdev);
1465                 return;
1466         }
1467
1468         if (side == C_SYNC_TARGET) {
1469                 mdev->bm_resync_fo = 0;
1470         } else /* side == C_SYNC_SOURCE */ {
1471                 u64 uuid;
1472
1473                 get_random_bytes(&uuid, sizeof(u64));
1474                 drbd_uuid_set(mdev, UI_BITMAP, uuid);
1475                 drbd_send_sync_uuid(mdev, uuid);
1476
1477                 D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
1478         }
1479
1480         write_lock_irq(&global_state_lock);
1481         ns = mdev->state;
1482
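        /* Build the target state: start with aftr_isp set if a sync-after
         * dependency is not ready yet, switch conn to the requested sync role,
         * and mark the side that is about to be overwritten as Inconsistent. */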
1483         ns.aftr_isp = !_drbd_may_sync_now(mdev);
1484
1485         ns.conn = side;
1486
1487         if (side == C_SYNC_TARGET)
1488                 ns.disk = D_INCONSISTENT;
1489         else /* side == C_SYNC_SOURCE */
1490                 ns.pdsk = D_INCONSISTENT;
1491
1492         r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
1493         ns = mdev->state;
1494
1495         if (ns.conn < C_CONNECTED)
1496                 r = SS_UNKNOWN_ERROR;
1497
1498         if (r == SS_SUCCESS) {
1499                 unsigned long tw = drbd_bm_total_weight(mdev);
1500                 unsigned long now = jiffies;
1501                 int i;
1502
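                /* Reset the resync bookkeeping.  rs_total is the number of
                 * out-of-sync bits still to be handled; each sync mark is
                 * seeded with the current weight and timestamp, which the
                 * progress/rate reporting elsewhere builds on. */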
1503                 mdev->rs_failed    = 0;
1504                 mdev->rs_paused    = 0;
1505                 mdev->rs_same_csum = 0;
1506                 mdev->rs_last_events = 0;
1507                 mdev->rs_last_sect_ev = 0;
1508                 mdev->rs_total     = tw;
1509                 mdev->rs_start     = now;
1510                 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1511                         mdev->rs_mark_left[i] = tw;
1512                         mdev->rs_mark_time[i] = now;
1513                 }
1514                 _drbd_pause_after(mdev);
1515         }
1516         write_unlock_irq(&global_state_lock);
1517         put_ldev(mdev);
1518
1519         if (r == SS_SUCCESS) {
1520                 dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
1521                      drbd_conn_str(ns.conn),
1522                      (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
1523                      (unsigned long) mdev->rs_total);
1524
1525                 if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) {
1526                         /* This still has a race (about when exactly the peers
1527                          * detect connection loss) that can lead to a full sync
1528                          * on next handshake. In 8.3.9 we fixed this with explicit
1529                          * resync-finished notifications, but the fix
1530                          * introduces a protocol change.  Sleeping for some
1531                          * time longer than the ping interval + timeout on the
1532                          * SyncSource, to give the SyncTarget the chance to
1533                          * detect connection loss, then waiting for a ping
1534                          * response (implicit in drbd_resync_finished) reduces
1535                          * the race considerably, but does not solve it. */
1536                         if (side == C_SYNC_SOURCE)
1537                                 schedule_timeout_interruptible(
1538                                         mdev->net_conf->ping_int * HZ +
1539                                         mdev->net_conf->ping_timeo*HZ/9);
1540                         drbd_resync_finished(mdev);
1541                 }
1542
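                /* Reset the per-resync activity counters and clear the
                 * rs_plan_s fifo (under peer_seq_lock); these feed the resync
                 * rate planning done elsewhere. */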
1543                 atomic_set(&mdev->rs_sect_in, 0);
1544                 atomic_set(&mdev->rs_sect_ev, 0);
1545                 mdev->rs_in_flight = 0;
1546                 mdev->rs_planed = 0;
1547                 spin_lock(&mdev->peer_seq_lock);
1548                 fifo_set(&mdev->rs_plan_s, 0);
1549                 spin_unlock(&mdev->peer_seq_lock);
1550                 /* ns.conn may already be != mdev->state.conn,
1551                  * we may have been paused in between, or become paused until
1552                  * the timer triggers.
1553                  * No matter, that is handled in resync_timer_fn() */
1554                 if (ns.conn == C_SYNC_TARGET)
1555                         mod_timer(&mdev->resync_timer, jiffies);
1556
1557                 drbd_md_sync(mdev);
1558         }
1559         drbd_state_unlock(mdev);
1560 }
1561
1562 int drbd_worker(struct drbd_thread *thi)
1563 {
1564         struct drbd_conf *mdev = thi->mdev;
1565         struct drbd_work *w = NULL;
1566         LIST_HEAD(work_list);
1567         int intr = 0, i;
1568
1569         sprintf(current->comm, "drbd%d_worker", mdev_to_minor(mdev));
1570
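        /*
         * Main loop.  Work items live on mdev->data.work.q, a list protected
         * by q_lock, with the counting semaphore mdev->data.work.s tracking
         * the number of queued entries.  A producer (see drbd_queue_work(),
         * used elsewhere in this file) essentially does, as a sketch with a
         * hypothetical callback name:
         *
         *      w->cb = my_work_callback;
         *      drbd_queue_work(&mdev->data.work, w);
         *
         * Each iteration below consumes one semaphore count plus one list
         * entry and runs the callback, passing cancel != 0 once the
         * connection has dropped below C_CONNECTED.
         */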
1571         while (get_t_state(thi) == Running) {
1572                 drbd_thread_current_set_cpu(mdev);
1573
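                /* No work immediately available: uncork the data socket so any
                 * batched packets actually go out, sleep until something is
                 * queued (or we are interrupted), then cork again before we
                 * start sending.  The cork/uncork calls are skipped when
                 * no_cork is set. */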
1574                 if (down_trylock(&mdev->data.work.s)) {
1575                         mutex_lock(&mdev->data.mutex);
1576                         if (mdev->data.socket && !mdev->net_conf->no_cork)
1577                                 drbd_tcp_uncork(mdev->data.socket);
1578                         mutex_unlock(&mdev->data.mutex);
1579
1580                         intr = down_interruptible(&mdev->data.work.s);
1581
1582                         mutex_lock(&mdev->data.mutex);
1583                         if (mdev->data.socket && !mdev->net_conf->no_cork)
1584                                 drbd_tcp_cork(mdev->data.socket);
1585                         mutex_unlock(&mdev->data.mutex);
1586                 }
1587
1588                 if (intr) {
1589                         D_ASSERT(intr == -EINTR);
1590                         flush_signals(current);
1591                         ERR_IF (get_t_state(thi) == Running)
1592                                 continue;
1593                         break;
1594                 }
1595
1596                 if (get_t_state(thi) != Running)
1597                         break;
1598                 /* With this break, we have done a down() but not consumed
1599                    the entry from the list. The cleanup code takes care of
1600                    this...   */
1601
1602                 w = NULL;
1603                 spin_lock_irq(&mdev->data.work.q_lock);
1604                 ERR_IF(list_empty(&mdev->data.work.q)) {
1605                         /* something terribly wrong in our logic.
1606                          * we were able to down() the semaphore,
1607                          * but the list is empty... doh.
1608                          *
1609                          * what is the best thing to do now?
1610                          * try again from scratch, restarting the receiver,
1611                          * asender, whatnot? That could break things even more badly,
1612                          * e.g. when we are primary but have no good local data.
1613                          *
1614                          * I'll try to get away just starting over this loop.
1615                          */
1616                         spin_unlock_irq(&mdev->data.work.q_lock);
1617                         continue;
1618                 }
1619                 w = list_entry(mdev->data.work.q.next, struct drbd_work, list);
1620                 list_del_init(&w->list);
1621                 spin_unlock_irq(&mdev->data.work.q_lock);
1622
1623                 if (!w->cb(mdev, w, mdev->state.conn < C_CONNECTED)) {
1624                         /* dev_warn(DEV, "worker: a callback failed! \n"); */
1625                         if (mdev->state.conn >= C_CONNECTED)
1626                                 drbd_force_state(mdev,
1627                                                 NS(conn, C_NETWORK_FAILURE));
1628                 }
1629         }
1630         D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags));
1631         D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags));
1632
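        /* Tear-down: drain whatever is still queued, calling each callback
         * with cancel set, and reinitialize the semaphore to 0 while q_lock is
         * held so its count matches the now empty list again. */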
1633         spin_lock_irq(&mdev->data.work.q_lock);
1634         i = 0;
1635         while (!list_empty(&mdev->data.work.q)) {
1636                 list_splice_init(&mdev->data.work.q, &work_list);
1637                 spin_unlock_irq(&mdev->data.work.q_lock);
1638
1639                 while (!list_empty(&work_list)) {
1640                         w = list_entry(work_list.next, struct drbd_work, list);
1641                         list_del_init(&w->list);
1642                         w->cb(mdev, w, 1);
1643                         i++; /* dead debugging code */
1644                 }
1645
1646                 spin_lock_irq(&mdev->data.work.q_lock);
1647         }
1648         sema_init(&mdev->data.work.s, 0);
1649         /* DANGEROUS race: if someone queued work while holding the spinlock
1650          * but called up() only after dropping it, we could see an up() on the
1651          * semaphore without a corresponding list entry.
1652          * So don't do that.
1653          */
1654         spin_unlock_irq(&mdev->data.work.q_lock);
1655
1656         D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
1657         /* _drbd_set_state() only requests a non-blocking stop (stop_nowait);
1658          * wait here for the receiver to actually reach Exiting. */
1659         drbd_thread_stop(&mdev->receiver);
1660         drbd_mdev_cleanup(mdev);
1661
1662         dev_info(DEV, "worker terminated\n");
1663
1664         clear_bit(DEVICE_DYING, &mdev->flags);
1665         clear_bit(CONFIG_PENDING, &mdev->flags);
1666         wake_up(&mdev->state_wait);
1667
1668         return 0;
1669 }