Merge commit 'v2.6.39-rc3' into for-2.6.39

diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 53e7cc5..5c0c8be 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -445,7 +445,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                req->rq_state |= RQ_LOCAL_COMPLETED;
                req->rq_state &= ~RQ_LOCAL_PENDING;
 
-               __drbd_chk_io_error(mdev, FALSE);
+               __drbd_chk_io_error(mdev, false);
                _req_may_be_done_not_susp(req, m);
                put_ldev(mdev);
                break;
@@ -466,7 +466,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
                D_ASSERT(!(req->rq_state & RQ_NET_MASK));
 
-               __drbd_chk_io_error(mdev, FALSE);
+               __drbd_chk_io_error(mdev, false);
                put_ldev(mdev);
 
                /* no point in retrying if there is no good remote data,
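(The two identical FALSE -> false conversions above are mechanical: DRBD carried homegrown TRUE/FALSE macros, and this series moves the flag over to the kernel-wide bool/false from <linux/types.h>. The second argument of __drbd_chk_io_error() is the force-detach flag; a rough sketch of its semantics, assuming the usual drbd_int.h shape, not the verbatim helper:)

    /* Sketch only: forcedetach == false defers to the configured
     * on-io-error policy; true detaches the local disk regardless. */
    static inline void __drbd_chk_io_error(struct drbd_conf *mdev, bool forcedetach)
    {
            if (!forcedetach && mdev->ldev->dc.on_io_error == EP_PASS_ON)
                    return; /* keep the disk, let the error propagate upward */
            _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
    }
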
@@ -712,10 +712,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                        dev_err(DEV, "FIXME (barrier_acked but pending)\n");
                        list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
                }
-               D_ASSERT(req->rq_state & RQ_NET_SENT);
-               req->rq_state |= RQ_NET_DONE;
-               if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
-                       atomic_sub(req->size>>9, &mdev->ap_in_flight);
+               if ((req->rq_state & RQ_NET_MASK) != 0) {
+                       req->rq_state |= RQ_NET_DONE;
+                       if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
+                               atomic_sub(req->size>>9, &mdev->ap_in_flight);
+               }
                _req_may_be_done(req, m); /* Allowed while state.susp */
                break;
 
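A note on the barrier-ack change above: a request can reach the barrier-acked stage without ever having entered a network state, for example when a write completed only locally while the connection was going down, so the old D_ASSERT(req->rq_state & RQ_NET_SENT) could fire spuriously. Testing RQ_NET_MASK instead skips the network bookkeeping for such requests; in particular, the protocol-A ap_in_flight counter is only decremented for requests that were actually handed to the network, which keeps the add/sub pairing balanced. A condensed sketch of that pairing (hypothetical helper names; the real sites are the __req_mod() cases):

    /* on handed_over_to_network, protocol A (sketch): */
    static void net_write_started(struct drbd_conf *mdev, struct drbd_request *req)
    {
            atomic_add(req->size >> 9, &mdev->ap_in_flight); /* sectors */
    }

    /* on barrier_acked (sketch of the new guard): */
    static void net_write_barrier_acked(struct drbd_conf *mdev, struct drbd_request *req)
    {
            if (!(req->rq_state & RQ_NET_MASK))
                    return; /* never on the wire: nothing was added */
            req->rq_state |= RQ_NET_DONE;
            atomic_sub(req->size >> 9, &mdev->ap_in_flight);
    }
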
@@ -762,23 +763,6 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
        return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
 }
 
-static int drbd_should_do_remote(struct drbd_conf *mdev)
-{
-       union drbd_state s = mdev->state;
-
-       return s.pdsk == D_UP_TO_DATE ||
-               (s.pdsk >= D_INCONSISTENT &&
-                s.conn >= C_WF_BITMAP_T &&
-                s.conn < C_AHEAD);
-}
-static int drbd_should_send_oos(struct drbd_conf *mdev)
-{
-       union drbd_state s = mdev->state;
-
-       return s.pdsk >= D_INCONSISTENT &&
-               (s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S);
-}
-
 static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
 {
        const int rw = bio_rw(bio);
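The two predicates removed above are not gone for good: the updated call sites below hand in mdev->state, so drbd_should_do_remote() and drbd_should_send_oos() evidently take the sampled union drbd_state by value now (presumably moved to a shared header). Reconstructed from the removed bodies, the new form would be:

    static inline int drbd_should_do_remote(union drbd_state s)
    {
            return s.pdsk == D_UP_TO_DATE ||
                    (s.pdsk >= D_INCONSISTENT &&
                     s.conn >= C_WF_BITMAP_T &&
                     s.conn < C_AHEAD);
    }

    static inline int drbd_should_send_oos(union drbd_state s)
    {
            return s.pdsk >= D_INCONSISTENT &&
                    (s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S);
    }

Passing the state by value lets a caller evaluate both predicates against one consistent snapshot instead of re-reading the live mdev->state between the two calls.
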
@@ -850,8 +834,8 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
                drbd_al_begin_io(mdev, sector);
        }
 
-       remote = remote && drbd_should_do_remote(mdev);
-       send_oos = rw == WRITE && drbd_should_send_oos(mdev);
+       remote = remote && drbd_should_do_remote(mdev->state);
+       send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
        D_ASSERT(!(remote && send_oos));
 
        if (!(local || remote) && !is_susp(mdev->state)) {
@@ -892,8 +876,8 @@ allocate_barrier:
        }
 
        if (remote || send_oos) {
-               remote = drbd_should_do_remote(mdev);
-               send_oos = rw == WRITE && drbd_should_send_oos(mdev);
+               remote = drbd_should_do_remote(mdev->state);
+               send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
                D_ASSERT(!(remote && send_oos));
 
                if (!(remote || send_oos))
@@ -999,7 +983,7 @@ allocate_barrier:
                }
 
                if (congested) {
-                       queue_barrier(mdev);
+                       queue_barrier(mdev); /* last barrier, after mirrored writes */
 
                        if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
                                _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
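For context on the hunk above: queue_barrier() gets its new comment because, when the congestion policy kicks in, this is the last barrier queued before the node stops mirroring, fencing the writes already in flight; with on_congestion == OC_PULL_AHEAD the connection then moves to C_AHEAD, where further writes are only recorded as out of sync. The congestion test feeding this branch is not part of the hunk; presumably it is roughly:

    /* sketch, inferred from the surrounding code not shown in this hunk: */
    int congested = 0;

    if (mdev->net_conf->cong_fill &&
        atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill)
            congested = 1;  /* too much data on the wire */
    if (mdev->act_log->used >= mdev->net_conf->cong_extents)
            congested = 1;  /* activity log filling up */
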
@@ -1143,11 +1127,7 @@ int drbd_make_request(struct request_queue *q, struct bio *bio)
                const int sps = 1 << HT_SHIFT; /* sectors per slot */
                const int mask = sps - 1;
                const sector_t first_sectors = sps - (sect & mask);
-               bp = bio_split(bio,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-                               bio_split_pool,
-#endif
-                               first_sectors);
+               bp = bio_split(bio, first_sectors);
 
                /* we need to get a "reference count" (ap_bio_cnt)
                 * to avoid races with the disconnect/reconnect/suspend code.
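The bio_split() call above sheds a pre-2.6.28 compatibility wart: since 2.6.28 the in-kernel bio_split() allocates from its own global pool and takes only the bio and the sector count of the first fragment. The usual pattern around it looks like this (sketch; submit_one() stands in for the driver's actual submit path):

    struct bio_pair *bp = bio_split(bio, first_sectors);

    /* bp->bio1 covers the first first_sectors sectors, bp->bio2 the rest */
    submit_one(mdev, &bp->bio1);
    submit_one(mdev, &bp->bio2);
    bio_pair_release(bp); /* drop the pair's bookkeeping reference */
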
@@ -1210,3 +1190,42 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
        }
        return limit;
 }
+
+void request_timer_fn(unsigned long data)
+{
+       struct drbd_conf *mdev = (struct drbd_conf *) data;
+       struct drbd_request *req; /* oldest request */
+       struct list_head *le;
+       unsigned long et = 0; /* effective timeout = ko_count * timeout */
+
+       if (get_net_conf(mdev)) {
+               et = mdev->net_conf->timeout*HZ/10 * mdev->net_conf->ko_count;
+               put_net_conf(mdev);
+       }
+       if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
+               return; /* Recurring timer stopped */
+
+       spin_lock_irq(&mdev->req_lock);
+       le = &mdev->oldest_tle->requests;
+       if (list_empty(le)) {
+               spin_unlock_irq(&mdev->req_lock);
+               mod_timer(&mdev->request_timer, jiffies + et);
+               return;
+       }
+
+       le = le->prev;
+       req = list_entry(le, struct drbd_request, tl_requests);
+       if (time_is_before_eq_jiffies(req->start_time + et)) {
+               if (req->rq_state & RQ_NET_PENDING) {
+                       dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
+                       _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE, NULL);
+               } else {
+                       dev_warn(DEV, "Local backing block device frozen?\n");
+                       mod_timer(&mdev->request_timer, jiffies + et);
+               }
+       } else {
+               mod_timer(&mdev->request_timer, req->start_time + et);
+       }
+
+       spin_unlock_irq(&mdev->req_lock);
+}
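
The new request_timer_fn() above polls the oldest entry in the transfer log: net_conf->timeout is configured in deciseconds (hence the *HZ/10), multiplied by ko_count to give the effective timeout et. If the oldest request has been waiting longer than et and is still RQ_NET_PENDING, the peer is declared dead via C_TIMEOUT and the recurring timer stops; if the request is only stuck locally, the function just warns and re-arms. The timer itself has to be set up and started elsewhere; presumably along these lines at device init and after connect (sketch, not the verbatim hook-up):

    /* at device initialization: */
    setup_timer(&mdev->request_timer, request_timer_fn, (unsigned long) mdev);

    /* once the connection reaches C_WF_REPORT_PARAMS or better,
     * start the recurring check: */
    mod_timer(&mdev->request_timer, jiffies + HZ);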