/*
   drbd_worker.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

#include "drbd_int.h"
#include "drbd_req.h"

static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
 * more endio handlers:
   atodb_endio in drbd_actlog.c
   drbd_bm_async_io_complete in drbd_bitmap.c

 * For all these callbacks, note the following:
 * The callbacks will be called in irq context by the IDE drivers,
 * and in Softirqs/Tasklets/BH context by the SCSI drivers.
 * Try to get the locking right :)
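 * (In other words: none of these completion handlers may sleep.  In this
 * file they only update counters under req_lock with irqsave and defer the
 * real work to the worker thread via drbd_queue_work().)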
/* About the global_state_lock
   Each state transition on a device holds a read lock. In case we have
   to evaluate the sync-after dependencies, we grab a write lock, because
   we need stable states on all devices for that. */
rwlock_t global_state_lock;
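/* For illustration: drbd_alter_sa(), resume_next_sg(), suspend_other_sg()
 * and drbd_start_resync() below take write_lock_irq(&global_state_lock),
 * because they look at (or change) the sync-after state of several devices
 * at once; an ordinary single-device state change only needs the read side. */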
/* used for synchronous meta data and bitmap IO
 * submitted by drbd_md_sync_page_io()
void drbd_md_io_complete(struct bio *bio, int error)
struct drbd_md_io *md_io;
md_io = (struct drbd_md_io *)bio->bi_private;
complete(&md_io->event);
/* reads on behalf of the partner,
 * "submitted" by the receiver
void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
unsigned long flags = 0;
struct drbd_conf *mdev = e->mdev;
D_ASSERT(e->block_id != ID_VACANT);
spin_lock_irqsave(&mdev->req_lock, flags);
mdev->read_cnt += e->size >> 9;
if (list_empty(&mdev->read_ee))
wake_up(&mdev->ee_wait);
if (test_bit(__EE_WAS_ERROR, &e->flags))
__drbd_chk_io_error(mdev, FALSE);
spin_unlock_irqrestore(&mdev->req_lock, flags);
drbd_queue_work(&mdev->data.work, &e->w);
/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver, final stage. */
static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
unsigned long flags = 0;
struct drbd_conf *mdev = e->mdev;
int do_al_complete_io;
D_ASSERT(e->block_id != ID_VACANT);
/* after we moved e to done_ee,
 * we may no longer access it,
 * it may be freed/reused already!
 * (as soon as we release the req_lock) */
e_sector = e->sector;
do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
is_syncer_req = is_syncer_block_id(e->block_id);
spin_lock_irqsave(&mdev->req_lock, flags);
mdev->writ_cnt += e->size >> 9;
list_del(&e->w.list); /* has been on active_ee or sync_ee */
list_add_tail(&e->w.list, &mdev->done_ee);
/* No hlist_del_init(&e->colision) here, we did not send the Ack yet,
 * neither did we wake possibly waiting conflicting requests.
 * done from "drbd_process_done_ee" within the appropriate w.cb
 * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */
do_wake = is_syncer_req
? list_empty(&mdev->sync_ee)
: list_empty(&mdev->active_ee);
if (test_bit(__EE_WAS_ERROR, &e->flags))
__drbd_chk_io_error(mdev, FALSE);
spin_unlock_irqrestore(&mdev->req_lock, flags);
drbd_rs_complete_io(mdev, e_sector);
wake_up(&mdev->ee_wait);
if (do_al_complete_io)
drbd_al_complete_io(mdev, e_sector);
/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver.
void drbd_endio_sec(struct bio *bio, int error)
struct drbd_epoch_entry *e = bio->bi_private;
struct drbd_conf *mdev = e->mdev;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
int is_write = bio_data_dir(bio) == WRITE;
dev_warn(DEV, "%s: error=%d s=%llus\n",
is_write ? "write" : "read", error,
(unsigned long long)e->sector);
if (!error && !uptodate) {
dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
is_write ? "write" : "read",
(unsigned long long)e->sector);
/* strange behavior of some lower level drivers...
 * fail the request by clearing the uptodate flag,
 * but do not return any error?! */
set_bit(__EE_WAS_ERROR, &e->flags);
bio_put(bio); /* no need for the bio anymore */
if (atomic_dec_and_test(&e->pending_bios)) {
drbd_endio_write_sec_final(e);
drbd_endio_read_sec_final(e);
/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
void drbd_endio_pri(struct bio *bio, int error)
struct drbd_request *req = bio->bi_private;
struct drbd_conf *mdev = req->mdev;
struct bio_and_error m;
enum drbd_req_event what;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
if (!error && !uptodate) {
dev_warn(DEV, "p %s: setting error to -EIO\n",
bio_data_dir(bio) == WRITE ? "write" : "read");
/* strange behavior of some lower level drivers...
 * fail the request by clearing the uptodate flag,
 * but do not return any error?! */
/* to avoid recursion in __req_mod */
if (unlikely(error)) {
what = (bio_data_dir(bio) == WRITE)
? write_completed_with_error
: (bio_rw(bio) == READ)
? read_completed_with_error
: read_ahead_completed_with_error;
bio_put(req->private_bio);
req->private_bio = ERR_PTR(error);
/* not req_mod(), we need irqsave here! */
spin_lock_irqsave(&mdev->req_lock, flags);
__req_mod(req, what, &m);
spin_unlock_irqrestore(&mdev->req_lock, flags);
complete_master_bio(mdev, &m);
int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
struct drbd_request *req = container_of(w, struct drbd_request, w);
/* We should not detach for read io-error,
 * but try to WRITE the P_DATA_REPLY to the failed location,
 * to give the disk the chance to relocate that block */
spin_lock_irq(&mdev->req_lock);
if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
_req_mod(req, read_retry_remote_canceled);
spin_unlock_irq(&mdev->req_lock);
spin_unlock_irq(&mdev->req_lock);
return w_send_read_req(mdev, w, 0);
int w_resync_inactive(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
ERR_IF(cancel) return 1;
dev_err(DEV, "resync inactive, but callback triggered??\n");
return 1; /* Simply ignore this! */
void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
struct hash_desc desc;
struct scatterlist sg;
struct page *page = e->pages;
sg_init_table(&sg, 1);
crypto_hash_init(&desc);
while ((tmp = page_chain_next(page))) {
/* all but the last page will be fully used */
sg_set_page(&sg, page, PAGE_SIZE, 0);
crypto_hash_update(&desc, &sg, sg.length);
/* and now the last, possibly only partially used page */
len = e->size & (PAGE_SIZE - 1);
sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
crypto_hash_update(&desc, &sg, sg.length);
crypto_hash_final(&desc, digest);
void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
struct hash_desc desc;
struct scatterlist sg;
struct bio_vec *bvec;
sg_init_table(&sg, 1);
crypto_hash_init(&desc);
__bio_for_each_segment(bvec, bio, i, 0) {
sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
crypto_hash_update(&desc, &sg, sg.length);
crypto_hash_final(&desc, digest);
static int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
D_ASSERT(e->block_id == DRBD_MAGIC + 0xbeef);
if (unlikely(cancel)) {
drbd_free_ee(mdev, e);
if (likely((e->flags & EE_WAS_ERROR) == 0)) {
digest_size = crypto_hash_digestsize(mdev->csums_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
inc_rs_pending(mdev);
ok = drbd_send_drequest_csum(mdev,
dev_err(DEV, "kmalloc() of digest failed.\n");
drbd_free_ee(mdev, e);
dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
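/* read_for_csum() reads the given area into freshly allocated ee pages and
 * queues w_e_send_csum as the completion callback.  Judging by the caller's
 * switch in w_make_resync_request() below, it returns 0 on success, -EAGAIN
 * when the allocation failed or the local disk is busy (retry later), and
 * -EIO when the local disk is gone. */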
static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
struct drbd_epoch_entry *e;
if (drbd_rs_should_slow_down(mdev))
/* GFP_TRY, because if there is no memory available right now, this may
 * be rescheduled for later. It is "only" background resync, after all. */
e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
e->w.cb = w_e_send_csum;
spin_lock_irq(&mdev->req_lock);
list_add(&e->w.list, &mdev->read_ee);
spin_unlock_irq(&mdev->req_lock);
atomic_add(size >> 9, &mdev->rs_sect_ev);
if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
/* drbd_submit_ee currently fails for one reason only:
 * not being able to allocate enough bios.
 * Is dropping the connection going to help? */
spin_lock_irq(&mdev->req_lock);
list_del(&e->w.list);
spin_unlock_irq(&mdev->req_lock);
drbd_free_ee(mdev, e);
void resync_timer_fn(unsigned long data)
struct drbd_conf *mdev = (struct drbd_conf *) data;
switch (mdev->state.conn) {
mdev->resync_work.cb = w_make_ov_request;
mdev->resync_work.cb = w_make_resync_request;
mdev->resync_work.cb = w_resync_inactive;
/* harmless race: list_empty outside data.work.q_lock */
if (list_empty(&mdev->resync_work.list) && queue)
drbd_queue_work(&mdev->data.work, &mdev->resync_work);
static void fifo_set(struct fifo_buffer *fb, int value)
for (i = 0; i < fb->size; i++)
fb->values[i] = value;
static int fifo_push(struct fifo_buffer *fb, int value)
ov = fb->values[fb->head_index];
fb->values[fb->head_index++] = value;
if (fb->head_index >= fb->size)
static void fifo_add_val(struct fifo_buffer *fb, int value)
for (i = 0; i < fb->size; i++)
fb->values[i] += value;
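/* The fifo helpers above implement the "plan ahead" buffer used by the
 * dynamic resync-speed controller below (a sketch, in the controller's own
 * terms): each slot stands for one SLEEP_TIME step, fifo_push() takes out
 * the correction planned for the current step while queueing a new value at
 * the tail, and fifo_add_val() spreads a correction evenly over all planned
 * steps:
 *
 *   want       = c_fill_target   (or derived from c_delay_target)
 *   correction = want - rs_in_flight - rs_planed
 *   cps        = correction / steps
 *   req_sect   = sect_in + curr_corr   (capped by c_max_rate per step)
 */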
int drbd_rs_controller(struct drbd_conf *mdev)
unsigned int sect_in; /* Number of sectors that came in since the last turn */
unsigned int want; /* The number of sectors we want in the proxy */
int req_sect; /* Number of sectors to request in this turn */
int correction; /* Number of sectors more we need in the proxy */
int cps; /* correction per invocation of drbd_rs_controller() */
int steps; /* Number of time steps to plan ahead */
sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
mdev->rs_in_flight -= sect_in;
spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */
steps = mdev->rs_plan_s.size; /* (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
want = ((mdev->sync_conf.rate * 2 * SLEEP_TIME) / HZ) * steps;
} else { /* normal path */
want = mdev->sync_conf.c_fill_target ? mdev->sync_conf.c_fill_target :
sect_in * mdev->sync_conf.c_delay_target * HZ / (SLEEP_TIME * 10);
correction = want - mdev->rs_in_flight - mdev->rs_planed;
cps = correction / steps;
fifo_add_val(&mdev->rs_plan_s, cps);
mdev->rs_planed += cps * steps;
/* What we do in this step */
curr_corr = fifo_push(&mdev->rs_plan_s, 0);
spin_unlock(&mdev->peer_seq_lock);
mdev->rs_planed -= curr_corr;
req_sect = sect_in + curr_corr;
max_sect = (mdev->sync_conf.c_max_rate * 2 * SLEEP_TIME) / HZ;
if (req_sect > max_sect)
dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
sect_in, mdev->rs_in_flight, want, correction,
steps, cps, mdev->rs_planed, curr_corr, req_sect);
int w_make_resync_request(struct drbd_conf *mdev,
struct drbd_work *w, int cancel)
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
int max_segment_size;
int number, rollback_i, size, pe, mx;
int align, queued, sndbuf;
if (unlikely(cancel))
if (unlikely(mdev->state.conn < C_CONNECTED)) {
dev_err(DEV, "Confused in w_make_resync_request()! cstate < Connected");
if (mdev->state.conn != C_SYNC_TARGET)
dev_err(DEV, "%s in w_make_resync_request\n",
drbd_conn_str(mdev->state.conn));
if (mdev->rs_total == 0) {
drbd_resync_finished(mdev);
if (!get_ldev(mdev)) {
/* Since we only need to access mdev->rsync a
get_ldev_if_state(mdev,D_FAILED) would be sufficient, but
to continue resync with a broken disk makes no sense at
dev_err(DEV, "Disk broke down during resync!\n");
mdev->resync_work.cb = w_resync_inactive;
/* starting with drbd 8.3.8, we can handle multi-bio EEs,
 * if it should be necessary */
mdev->agreed_pro_version < 94 ? queue_max_segment_size(mdev->rq_queue) :
mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_SEGMENT_SIZE;
if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
mdev->c_sync_rate = mdev->sync_conf.rate;
number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
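/* Worked example (assuming SLEEP_TIME of 100 ms, i.e. HZ/10, and 4 KiB
 * bitmap blocks, as in mainline DRBD): with c_sync_rate = 10240 KiB/s this
 * gives number = (HZ/10) * 10240 / (4 * HZ) = 256 requests per step, i.e.
 * 256 * 4 KiB = 1 MiB every 100 ms, or roughly 10 MiB/s. */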
/* Throttle resync on lower level disk activity, which may also be
 * caused by application IO on Primary/SyncTarget.
 * Keep this after the call to drbd_rs_controller, as that expects
 * to be called as precisely as possible every SLEEP_TIME,
 * and would be confused otherwise. */
if (drbd_rs_should_slow_down(mdev))
mutex_lock(&mdev->data.mutex);
if (mdev->data.socket)
mx = mdev->data.socket->sk->sk_rcvbuf / sizeof(struct p_block_req);
mutex_unlock(&mdev->data.mutex);
/* For resync rates >160MB/sec, allow more pending RS requests */
/* Limit the number of pending RS requests to no more than the peer's receive buffer */
pe = atomic_read(&mdev->rs_pending_cnt);
if ((pe + number) > mx) {
for (i = 0; i < number; i++) {
/* Stop generating RS requests when half of the send buffer is filled */
mutex_lock(&mdev->data.mutex);
if (mdev->data.socket) {
queued = mdev->data.socket->sk->sk_wmem_queued;
sndbuf = mdev->data.socket->sk->sk_sndbuf;
mutex_unlock(&mdev->data.mutex);
if (queued > sndbuf / 2)
size = BM_BLOCK_SIZE;
bit = drbd_bm_find_next(mdev, mdev->bm_resync_fo);
mdev->bm_resync_fo = drbd_bm_bits(mdev);
mdev->resync_work.cb = w_resync_inactive;
sector = BM_BIT_TO_SECT(bit);
if (drbd_try_rs_begin_io(mdev, sector)) {
mdev->bm_resync_fo = bit;
mdev->bm_resync_fo = bit + 1;
if (unlikely(drbd_bm_test_bit(mdev, bit) == 0)) {
drbd_rs_complete_io(mdev, sector);
#if DRBD_MAX_SEGMENT_SIZE > BM_BLOCK_SIZE
/* try to find some adjacent bits.
 * we stop if we already have the maximum req size.
 * Additionally always align bigger requests, in order to
 * be prepared for all stripe sizes of software RAIDs.
if (size + BM_BLOCK_SIZE > max_segment_size)
/* always be aligned */
if (sector & ((1<<(align+3))-1))
/* do not cross extent boundaries */
if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
/* now, is it actually dirty, after all?
 * caution, drbd_bm_test_bit is tri-state for some
 * obscure reason; ( b == 0 ) would get the out-of-band
 * only accidentally right because of the "oddly sized"
 * adjustment below */
if (drbd_bm_test_bit(mdev, bit+1) != 1)
size += BM_BLOCK_SIZE;
if ((BM_BLOCK_SIZE << align) <= size)
/* if we merged some,
 * reset the offset to start the next drbd_bm_find_next from */
if (size > BM_BLOCK_SIZE)
mdev->bm_resync_fo = bit + 1;
/* adjust very last sectors, in case we are oddly sized */
if (sector + (size>>9) > capacity)
size = (capacity-sector)<<9;
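/* Example of the merging above (a sketch, with 4 KiB bitmap blocks and an
 * assumed maximum request size of 32 KiB): up to 8 consecutive dirty bits
 * are combined into a single 32 KiB request, and a grown request is only
 * kept if its start sector stays a multiple of its own size, so requests
 * line up with software-RAID stripe boundaries. */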
if (mdev->agreed_pro_version >= 89 && mdev->csums_tfm) {
switch (read_for_csum(mdev, sector, size)) {
case -EIO: /* Disk failure */
case -EAGAIN: /* allocation failed, or ldev busy */
drbd_rs_complete_io(mdev, sector);
mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
inc_rs_pending(mdev);
if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
sector, size, ID_SYNCER)) {
dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
dec_rs_pending(mdev);
if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
/* last syncer _request_ was sent,
 * but the P_RS_DATA_REPLY not yet received. sync will end (and
 * next sync group will resume), as soon as we receive the last
 * resync data block, and the last bit is cleared.
 * until then resync "work" is "inactive" ...
mdev->resync_work.cb = w_resync_inactive;
mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
if (unlikely(cancel))
if (unlikely(mdev->state.conn < C_CONNECTED)) {
dev_err(DEV, "Confused in w_make_ov_request()! cstate < Connected");
number = SLEEP_TIME*mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ);
if (atomic_read(&mdev->rs_pending_cnt) > number)
number -= atomic_read(&mdev->rs_pending_cnt);
sector = mdev->ov_position;
for (i = 0; i < number; i++) {
if (sector >= capacity) {
mdev->resync_work.cb = w_resync_inactive;
size = BM_BLOCK_SIZE;
if (drbd_try_rs_begin_io(mdev, sector)) {
mdev->ov_position = sector;
if (sector + (size>>9) > capacity)
size = (capacity-sector)<<9;
inc_rs_pending(mdev);
if (!drbd_send_ov_request(mdev, sector, size)) {
dec_rs_pending(mdev);
sector += BM_SECT_PER_BIT;
mdev->ov_position = sector;
mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
drbd_resync_finished(mdev);
static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
drbd_resync_finished(mdev);
static void ping_peer(struct drbd_conf *mdev)
clear_bit(GOT_PING_ACK, &mdev->flags);
wait_event(mdev->misc_wait,
test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
int drbd_resync_finished(struct drbd_conf *mdev)
unsigned long db, dt, dbdt;
union drbd_state os, ns;
char *khelper_cmd = NULL;
/* Remove all elements from the resync LRU. Since future actions
 * might set bits in the (main) bitmap, the entries in the
 * resync LRU would otherwise be wrong. */
if (drbd_rs_del_all(mdev)) {
/* In case this is not possible now, most probably because
 * there are P_RS_DATA_REPLY packets lingering on the worker's
 * queue (or even the read operations for those packets
 * are not finished by now). Retry in 100ms. */
__set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ / 10);
w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
w->cb = w_resync_finished;
drbd_queue_work(&mdev->data.work, w);
dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
dbdt = Bit2KB(db/dt);
mdev->rs_paused /= HZ;
spin_lock_irq(&mdev->req_lock);
verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
/* This protects us against multiple calls (that can happen in the presence
   of application IO), and against connectivity loss just before we arrive here. */
if (os.conn <= C_CONNECTED)
ns.conn = C_CONNECTED;
dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
verify_done ? "Online verify " : "Resync",
dt + mdev->rs_paused, mdev->rs_paused, dbdt);
n_oos = drbd_bm_total_weight(mdev);
if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
dev_alert(DEV, "Online verify found %lu %dk block out of sync!\n",
khelper_cmd = "out-of-sync";
D_ASSERT((n_oos - mdev->rs_failed) == 0);
if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
khelper_cmd = "after-resync-target";
if (mdev->csums_tfm && mdev->rs_total) {
const unsigned long s = mdev->rs_same_csum;
const unsigned long t = mdev->rs_total;
(t < 100000) ? ((s*100)/t) : (s/(t/100));
dev_info(DEV, "%u %% had equal check sums, eliminated: %luK; "
"transferred %luK total %luK\n",
Bit2KB(mdev->rs_same_csum),
Bit2KB(mdev->rs_total - mdev->rs_same_csum),
Bit2KB(mdev->rs_total));
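/* Note on the expression a few lines up: (t < 100000) ? ((s*100)/t) : (s/(t/100))
 * is just an overflow-safe way to compute s * 100 / t; for very large
 * bitmaps, multiplying s by 100 first could overflow an unsigned long on
 * 32 bit, so the divisor is scaled down instead. */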
if (mdev->rs_failed) {
dev_info(DEV, " %lu failed blocks\n", mdev->rs_failed);
if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
ns.disk = D_INCONSISTENT;
ns.pdsk = D_UP_TO_DATE;
ns.disk = D_UP_TO_DATE;
ns.pdsk = D_INCONSISTENT;
ns.disk = D_UP_TO_DATE;
ns.pdsk = D_UP_TO_DATE;
if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
_drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_CURRENT]);
_drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]);
dev_err(DEV, "mdev->p_uuid is NULL! BUG\n");
drbd_uuid_set_bm(mdev, 0UL);
/* Now the two UUID sets are equal, update what we
 * know of the peer. */
for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
spin_unlock_irq(&mdev->req_lock);
mdev->ov_start_sector = 0;
if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
dev_info(DEV, "Writing the whole bitmap\n");
drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
drbd_khelper(mdev, khelper_cmd);
static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
if (drbd_ee_has_active_page(e)) {
/* This might happen if sendpage() has not finished */
int i = (e->size + PAGE_SIZE -1) >> PAGE_SHIFT;
atomic_add(i, &mdev->pp_in_use_by_net);
atomic_sub(i, &mdev->pp_in_use);
spin_lock_irq(&mdev->req_lock);
list_add_tail(&e->w.list, &mdev->net_ee);
spin_unlock_irq(&mdev->req_lock);
wake_up(&drbd_pp_wait);
drbd_free_ee(mdev, e);
 * w_e_end_data_req() - Worker callback to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
 * @mdev:	DRBD device.
 * @cancel:	The connection will be closed anyway
int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
if (unlikely(cancel)) {
drbd_free_ee(mdev, e);
if (likely((e->flags & EE_WAS_ERROR) == 0)) {
ok = drbd_send_block(mdev, P_DATA_REPLY, e);
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
(unsigned long long)e->sector);
ok = drbd_send_ack(mdev, P_NEG_DREPLY, e);
move_to_net_ee_or_free(mdev, e);
dev_err(DEV, "drbd_send_block() failed\n");
 * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
 * @mdev:	DRBD device.
 * @cancel:	The connection will be closed anyway
int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
if (unlikely(cancel)) {
drbd_free_ee(mdev, e);
if (get_ldev_if_state(mdev, D_FAILED)) {
drbd_rs_complete_io(mdev, e->sector);
if (likely((e->flags & EE_WAS_ERROR) == 0)) {
if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
inc_rs_pending(mdev);
ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Not sending RSDataReply, "
"partner DISKLESS!\n");
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
(unsigned long long)e->sector);
ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
/* update resync data with failure */
drbd_rs_failed_io(mdev, e->sector, e->size);
move_to_net_ee_or_free(mdev, e);
dev_err(DEV, "drbd_send_block() failed\n");
int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
struct digest_info *di;
void *digest = NULL;
if (unlikely(cancel)) {
drbd_free_ee(mdev, e);
if (get_ldev(mdev)) {
drbd_rs_complete_io(mdev, e->sector);
if (likely((e->flags & EE_WAS_ERROR) == 0)) {
/* quick hack to try to avoid a race against reconfiguration.
 * a real fix would be much more involved,
 * introducing more locking mechanisms */
if (mdev->csums_tfm) {
digest_size = crypto_hash_digestsize(mdev->csums_tfm);
D_ASSERT(digest_size == di->digest_size);
digest = kmalloc(digest_size, GFP_NOIO);
drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
eq = !memcmp(digest, di->digest, digest_size);
drbd_set_in_sync(mdev, e->sector, e->size);
/* rs_same_csums unit is BM_BLOCK_SIZE */
mdev->rs_same_csum += e->size >> BM_BLOCK_SHIFT;
ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e);
inc_rs_pending(mdev);
e->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
e->flags &= ~EE_HAS_DIGEST; /* This e no longer has a digest pointer */
ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
move_to_net_ee_or_free(mdev, e);
dev_err(DEV, "drbd_send_block/ack() failed\n");
int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
if (unlikely(cancel))
if (unlikely((e->flags & EE_WAS_ERROR) != 0))
digest_size = crypto_hash_digestsize(mdev->verify_tfm);
/* FIXME if this allocation fails, online verify will not terminate! */
digest = kmalloc(digest_size, GFP_NOIO);
drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
inc_rs_pending(mdev);
ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
digest, digest_size, P_OV_REPLY);
dec_rs_pending(mdev);
drbd_free_ee(mdev, e);
void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
mdev->ov_last_oos_size += size>>9;
mdev->ov_last_oos_start = sector;
mdev->ov_last_oos_size = size>>9;
drbd_set_out_of_sync(mdev, sector, size);
set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
struct digest_info *di;
if (unlikely(cancel)) {
drbd_free_ee(mdev, e);
/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
 * the resync lru has been cleaned up already */
if (get_ldev(mdev)) {
drbd_rs_complete_io(mdev, e->sector);
if (likely((e->flags & EE_WAS_ERROR) == 0)) {
digest_size = crypto_hash_digestsize(mdev->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
D_ASSERT(digest_size == di->digest_size);
eq = !memcmp(digest, di->digest, digest_size);
ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
drbd_ov_oos_found(mdev, e->sector, e->size);
ok = drbd_send_ack_ex(mdev, P_OV_RESULT, e->sector, e->size,
eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
drbd_free_ee(mdev, e);
/* let's advance progress step marks only for every other megabyte */
if ((mdev->ov_left & 0x200) == 0x200)
drbd_advance_rs_marks(mdev, mdev->ov_left);
if (mdev->ov_left == 0) {
drbd_resync_finished(mdev);
int w_prev_work_done(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w);
struct p_barrier *p = &mdev->data.sbuf.barrier;
/* really avoid racing with tl_clear. w.cb may have been referenced
 * just before it was reassigned and re-queued, so double check that.
 * actually, this race was harmless, since we only try to send the
 * barrier packet here, and otherwise do nothing with the object.
 * but compare with the head of w_clear_epoch */
spin_lock_irq(&mdev->req_lock);
if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
spin_unlock_irq(&mdev->req_lock);
if (!drbd_get_data_sock(mdev))
p->barrier = b->br_number;
/* inc_ap_pending was done where this was queued.
 * dec_ap_pending will be done in got_BarrierAck
 * or (on connection loss) in w_clear_epoch. */
ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BARRIER,
(struct p_header80 *)p, sizeof(*p), 0);
drbd_put_data_sock(mdev);
int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
 * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
 * @mdev:	DRBD device.
 * @cancel:	The connection will be closed anyway
int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
struct drbd_request *req = container_of(w, struct drbd_request, w);
if (unlikely(cancel)) {
req_mod(req, send_canceled);
ok = drbd_send_dblock(mdev, req);
req_mod(req, ok ? handed_over_to_network : send_failed);
 * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
 * @mdev:	DRBD device.
 * @cancel:	The connection will be closed anyway
int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
struct drbd_request *req = container_of(w, struct drbd_request, w);
if (unlikely(cancel)) {
req_mod(req, send_canceled);
ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->sector, req->size,
(unsigned long)req);
/* ?? we set C_TIMEOUT or C_BROKEN_PIPE in drbd_send();
 * so this is probably redundant */
if (mdev->state.conn >= C_CONNECTED)
drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
req_mod(req, ok ? handed_over_to_network : send_failed);
int w_restart_disk_io(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
struct drbd_request *req = container_of(w, struct drbd_request, w);
if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
drbd_al_begin_io(mdev, req->sector);
/* Calling drbd_al_begin_io() out of the worker might deadlock
   theoretically. Practically it cannot deadlock, since this is
   only used when unfreezing IOs. All the extents of the requests
   that made it into the TL are already active */
drbd_req_make_private_bio(req, req->master_bio);
req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
generic_make_request(req->private_bio);
static int _drbd_may_sync_now(struct drbd_conf *mdev)
struct drbd_conf *odev = mdev;
if (odev->sync_conf.after == -1)
odev = minor_to_mdev(odev->sync_conf.after);
ERR_IF(!odev) return 1;
if ((odev->state.conn >= C_SYNC_SOURCE &&
odev->state.conn <= C_PAUSED_SYNC_T) ||
odev->state.aftr_isp || odev->state.peer_isp ||
odev->state.user_isp)
 * _drbd_pause_after() - Pause resync on all devices that may not resync now
 * @mdev:	DRBD device.
 * Called from process context only (admin command and after_state_ch).
static int _drbd_pause_after(struct drbd_conf *mdev)
struct drbd_conf *odev;
for (i = 0; i < minor_count; i++) {
odev = minor_to_mdev(i);
if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
if (!_drbd_may_sync_now(odev))
rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
!= SS_NOTHING_TO_DO);
 * _drbd_resume_next() - Resume resync on all devices that may resync now
 * @mdev:	DRBD device.
 * Called from process context only (admin command and worker).
static int _drbd_resume_next(struct drbd_conf *mdev)
struct drbd_conf *odev;
for (i = 0; i < minor_count; i++) {
odev = minor_to_mdev(i);
if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
if (odev->state.aftr_isp) {
if (_drbd_may_sync_now(odev))
rv |= (__drbd_set_state(_NS(odev, aftr_isp, 0),
!= SS_NOTHING_TO_DO) ;
void resume_next_sg(struct drbd_conf *mdev)
write_lock_irq(&global_state_lock);
_drbd_resume_next(mdev);
write_unlock_irq(&global_state_lock);
void suspend_other_sg(struct drbd_conf *mdev)
write_lock_irq(&global_state_lock);
_drbd_pause_after(mdev);
write_unlock_irq(&global_state_lock);
static int sync_after_error(struct drbd_conf *mdev, int o_minor)
struct drbd_conf *odev;
if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
return ERR_SYNC_AFTER;
/* check for loops */
odev = minor_to_mdev(o_minor);
return ERR_SYNC_AFTER_CYCLE;
/* dependency chain ends here, no cycles. */
if (odev->sync_conf.after == -1)
/* follow the dependency chain */
odev = minor_to_mdev(odev->sync_conf.after);
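/* Illustration: with "sync-after" configured so that minor 2 depends on
 * minor 1, which depends on minor 0, _drbd_may_sync_now() walks 2 -> 1 -> 0
 * and only reports "may sync now" when none of the devices it depends on is
 * currently resyncing (SyncSource..PausedSyncT) or paused; _drbd_pause_after()
 * uses that to set aftr_isp on devices that must wait.  A chain that loops
 * back to the starting device is rejected here with ERR_SYNC_AFTER_CYCLE. */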
int drbd_alter_sa(struct drbd_conf *mdev, int na)
write_lock_irq(&global_state_lock);
retcode = sync_after_error(mdev, na);
if (retcode == NO_ERROR) {
mdev->sync_conf.after = na;
changes = _drbd_pause_after(mdev);
changes |= _drbd_resume_next(mdev);
write_unlock_irq(&global_state_lock);
 * drbd_start_resync() - Start the resync process
 * @mdev:	DRBD device.
 * @side:	Either C_SYNC_SOURCE or C_SYNC_TARGET
 * This function might bring you directly into one of the
 * C_PAUSED_SYNC_* states.
void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
union drbd_state ns;
if (mdev->state.conn >= C_SYNC_SOURCE) {
dev_err(DEV, "Resync already running!\n");
/* In case a previous resync run was aborted by an IO error/detach on the peer. */
drbd_rs_cancel_all(mdev);
if (side == C_SYNC_TARGET) {
/* Since application IO was locked out during C_WF_BITMAP_T and
   C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
   we check that we might make the data inconsistent. */
r = drbd_khelper(mdev, "before-resync-target");
r = (r >> 8) & 0xff;
dev_info(DEV, "before-resync-target handler returned %d, "
"dropping connection.\n", r);
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
drbd_state_lock(mdev);
if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
drbd_state_unlock(mdev);
if (side == C_SYNC_TARGET) {
mdev->bm_resync_fo = 0;
} else /* side == C_SYNC_SOURCE */ {
get_random_bytes(&uuid, sizeof(u64));
drbd_uuid_set(mdev, UI_BITMAP, uuid);
drbd_send_sync_uuid(mdev, uuid);
D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
write_lock_irq(&global_state_lock);
ns.aftr_isp = !_drbd_may_sync_now(mdev);
if (side == C_SYNC_TARGET)
ns.disk = D_INCONSISTENT;
else /* side == C_SYNC_SOURCE */
ns.pdsk = D_INCONSISTENT;
r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
if (ns.conn < C_CONNECTED)
r = SS_UNKNOWN_ERROR;
if (r == SS_SUCCESS) {
unsigned long tw = drbd_bm_total_weight(mdev);
unsigned long now = jiffies;
mdev->rs_failed = 0;
mdev->rs_paused = 0;
mdev->rs_same_csum = 0;
mdev->rs_last_events = 0;
mdev->rs_last_sect_ev = 0;
mdev->rs_total = tw;
mdev->rs_start = now;
for (i = 0; i < DRBD_SYNC_MARKS; i++) {
mdev->rs_mark_left[i] = tw;
mdev->rs_mark_time[i] = now;
_drbd_pause_after(mdev);
write_unlock_irq(&global_state_lock);
if (r == SS_SUCCESS) {
dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
drbd_conn_str(ns.conn),
(unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
(unsigned long) mdev->rs_total);
if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) {
/* This still has a race (about when exactly the peers
 * detect connection loss) that can lead to a full sync
 * on next handshake. In 8.3.9 we fixed this with explicit
 * resync-finished notifications, but the fix
 * introduces a protocol change. Sleeping for some
 * time longer than the ping interval + timeout on the
 * SyncSource, to give the SyncTarget the chance to
 * detect connection loss, then waiting for a ping
 * response (implicit in drbd_resync_finished) reduces
 * the race considerably, but does not solve it. */
if (side == C_SYNC_SOURCE)
schedule_timeout_interruptible(
mdev->net_conf->ping_int * HZ +
mdev->net_conf->ping_timeo*HZ/9);
drbd_resync_finished(mdev);
atomic_set(&mdev->rs_sect_in, 0);
atomic_set(&mdev->rs_sect_ev, 0);
mdev->rs_in_flight = 0;
mdev->rs_planed = 0;
spin_lock(&mdev->peer_seq_lock);
fifo_set(&mdev->rs_plan_s, 0);
spin_unlock(&mdev->peer_seq_lock);
/* ns.conn may already be != mdev->state.conn,
 * we may have been paused in between, or become paused until
 * the timer triggers.
 * No matter, that is handled in resync_timer_fn() */
if (ns.conn == C_SYNC_TARGET)
mod_timer(&mdev->resync_timer, jiffies);
int drbd_worker(struct drbd_thread *thi)
struct drbd_conf *mdev = thi->mdev;
struct drbd_work *w = NULL;
LIST_HEAD(work_list);
sprintf(current->comm, "drbd%d_worker", mdev_to_minor(mdev));
while (get_t_state(thi) == Running) {
drbd_thread_current_set_cpu(mdev);
if (down_trylock(&mdev->data.work.s)) {
mutex_lock(&mdev->data.mutex);
if (mdev->data.socket && !mdev->net_conf->no_cork)
drbd_tcp_uncork(mdev->data.socket);
mutex_unlock(&mdev->data.mutex);
intr = down_interruptible(&mdev->data.work.s);
mutex_lock(&mdev->data.mutex);
if (mdev->data.socket && !mdev->net_conf->no_cork)
drbd_tcp_cork(mdev->data.socket);
mutex_unlock(&mdev->data.mutex);
D_ASSERT(intr == -EINTR);
flush_signals(current);
ERR_IF (get_t_state(thi) == Running)
if (get_t_state(thi) != Running)
/* With this break, we have done a down() but not consumed
   the entry from the list. The cleanup code takes care of
spin_lock_irq(&mdev->data.work.q_lock);
ERR_IF(list_empty(&mdev->data.work.q)) {
/* something terribly wrong in our logic.
 * we were able to down() the semaphore,
 * but the list is empty... doh.
 * what is the best thing to do now?
 * try again from scratch, restarting the receiver,
 * asender, whatnot? could break even more ugly,
 * e.g. when we are primary, but no good local data.
 * I'll try to get away just starting over this loop.
spin_unlock_irq(&mdev->data.work.q_lock);
w = list_entry(mdev->data.work.q.next, struct drbd_work, list);
list_del_init(&w->list);
spin_unlock_irq(&mdev->data.work.q_lock);
if (!w->cb(mdev, w, mdev->state.conn < C_CONNECTED)) {
/* dev_warn(DEV, "worker: a callback failed! \n"); */
if (mdev->state.conn >= C_CONNECTED)
drbd_force_state(mdev,
NS(conn, C_NETWORK_FAILURE));
D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags));
D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags));
spin_lock_irq(&mdev->data.work.q_lock);
while (!list_empty(&mdev->data.work.q)) {
list_splice_init(&mdev->data.work.q, &work_list);
spin_unlock_irq(&mdev->data.work.q_lock);
while (!list_empty(&work_list)) {
w = list_entry(work_list.next, struct drbd_work, list);
list_del_init(&w->list);
i++; /* dead debugging code */
spin_lock_irq(&mdev->data.work.q_lock);
sema_init(&mdev->data.work.s, 0);
/* DANGEROUS race: if someone did queue his work within the spinlock,
 * but up() ed outside the spinlock, we could get an up() on the
 * semaphore without corresponding list entry.
spin_unlock_irq(&mdev->data.work.q_lock);
D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
/* _drbd_set_state only uses stop_nowait.
 * wait here for the Exiting receiver. */
drbd_thread_stop(&mdev->receiver);
drbd_mdev_cleanup(mdev);
dev_info(DEV, "worker terminated\n");
clear_bit(DEVICE_DYING, &mdev->flags);
clear_bit(CONFIG_PENDING, &mdev->flags);
wake_up(&mdev->state_wait);