Merge branch 'master' into for-2.6.35
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index d771b1e..727ff63 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -127,7 +127,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
                drbd_bump_write_ordering(mdev, WO_bdev_flush);
                spin_lock_irqsave(&mdev->req_lock, flags);
                list_del(&e->w.list);
-               e->flags |= EE_RESUBMITTED;
+               e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED;
                e->w.cb = w_e_reissue;
                /* put_ldev actually happens below, once we come here again. */
                __release(local);
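
This hunk keeps a failed barrier write from poisoning its own retry: the completion path has already set EE_WAS_ERROR on the epoch entry, and a plain |= would leave that bit set when w_e_reissue resubmits the request without the barrier flag, so even a successful retry would complete as an error. A minimal userspace sketch of the flag idiom (the bit positions here are invented for illustration, not the kernel's):

/* Sketch only: EE_* values are made up, not the kernel's definitions. */
#include <assert.h>

#define EE_WAS_ERROR   (1UL << 0)
#define EE_RESUBMITTED (1UL << 1)

int main(void)
{
	unsigned long flags = EE_WAS_ERROR;   /* barrier write just failed */

	/* old code: OR-ing in the new bit leaves the stale error set */
	unsigned long old_way = flags | EE_RESUBMITTED;
	assert(old_way & EE_WAS_ERROR);

	/* new code: clear the error while marking the resubmission */
	unsigned long new_way = (flags & ~EE_WAS_ERROR) | EE_RESUBMITTED;
	assert(!(new_way & EE_WAS_ERROR));
	assert(new_way & EE_RESUBMITTED);
	return 0;
}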
@@ -240,7 +240,7 @@ void drbd_endio_pri(struct bio *bio, int error)
        if (unlikely(error)) {
                what = (bio_data_dir(bio) == WRITE)
                        ? write_completed_with_error
-                       : (bio_rw(bio) == READA)
+                       : (bio_rw(bio) == READ)
                          ? read_completed_with_error
                          : read_ahead_completed_with_error;
        } else
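
This hunk fixes an inverted test in the completion classifier: bio_rw() yields READA for read-ahead bios, so the old comparison routed a failed plain read to read_ahead_completed_with_error and a failed read-ahead to read_completed_with_error. Comparing against READ restores the intended mapping. A condensed userspace sketch of the corrected chain (the OP_* constants are stand-ins for the kernel's bio_rw() results; the kernel distinguishes writes via bio_data_dir() first):

/* Sketch only: simplified stand-ins for bio_rw()/bio_data_dir(). */
enum bio_op { OP_READ, OP_READA, OP_WRITE };

enum req_event {
	write_completed_with_error,
	read_completed_with_error,
	read_ahead_completed_with_error,
};

static enum req_event classify_error(enum bio_op op)
{
	return (op == OP_WRITE) ? write_completed_with_error
	     : (op == OP_READ)  ? read_completed_with_error
	     :                    read_ahead_completed_with_error; /* OP_READA */
}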
@@ -462,7 +462,7 @@ int w_make_resync_request(struct drbd_conf *mdev,
        unsigned long bit;
        sector_t sector;
        const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
-       int max_segment_size = queue_max_segment_size(mdev->rq_queue);
+       int max_segment_size;
        int number, i, size, pe, mx;
        int align, queued, sndbuf;
 
@@ -488,6 +488,11 @@ int w_make_resync_request(struct drbd_conf *mdev,
                return 1;
        }
 
+       /* starting with drbd 8.3.8, we can handle multi-bio EEs,
+        * if it should be necessary */
+       max_segment_size = mdev->agreed_pro_version < 94 ?
+               queue_max_segment_size(mdev->rq_queue) : DRBD_MAX_SEGMENT_SIZE;
+
        mdev->c_sync_rate = calc_resync_rate(mdev);
        number = SLEEP_TIME * mdev->c_sync_rate  / ((BM_BLOCK_SIZE / 1024) * HZ);
        pe = atomic_read(&mdev->rs_pending_cnt);
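
Taken together, the -462 and -488 hunks make the resync request size depend on the negotiated protocol: peers that agreed on protocol version 94 (drbd 8.3.8) or newer can split an oversized epoch entry into several bios on their side, so the sender may use the fixed DRBD_MAX_SEGMENT_SIZE instead of the local queue's per-segment limit. A hedged sketch of the gate (the constant's value below is invented; the real definition lives elsewhere in drbd):

/* Sketch only: the value of DRBD_MAX_SEGMENT_SIZE is illustrative. */
#define DRBD_MAX_SEGMENT_SIZE (1U << 15)

static unsigned int resync_request_limit(int agreed_pro_version,
					 unsigned int queue_max_seg)
{
	/* pre-94 peers cannot split a request into multiple bios,
	 * so stay within what the local queue advertises */
	return agreed_pro_version < 94 ? queue_max_seg
				       : DRBD_MAX_SEGMENT_SIZE;
}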
@@ -552,12 +557,6 @@ next_sector:
                 *
                 * Additionally always align bigger requests, in order to
                 * be prepared for all stripe sizes of software RAIDs.
-                *
-                * we _do_ care about the agreed-upon q->max_segment_size
-                * here, as splitting up the requests on the other side is more
-                * difficult.  the consequence is, that on lvm and md and other
-                * "indirect" devices, this is dead code, since
-                * q->max_segment_size will be PAGE_SIZE.
                 */
                align = 1;
                for (;;) {
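
The deleted comment is stale once the segment-size handling above is version-gated; the alignment rationale that remains is what the loop implements: grow the request in bitmap-block steps while its start sector stays aligned to the grown size, so large resync requests sit on power-of-two boundaries and cannot straddle a software-RAID stripe. A simplified sketch of that growth logic (BM_BLOCK_SIZE and the sector math are illustrative; the real loop also stops at bitmap-extent boundaries and when the next bitmap bit is clean):

/* Sketch only: simplified from the loop that follows `for (;;)`. */
#define BM_BLOCK_SIZE 4096	/* one bitmap bit covers 4 KiB */

static int grow_aligned(unsigned long long sector, int max_size)
{
	int size = BM_BLOCK_SIZE;
	int align = 1;		/* raises the alignment bar as size grows */

	for (;;) {
		if (size + BM_BLOCK_SIZE > max_size)
			break;
		/* sector counts 512-byte units; one bitmap block is
		 * 8 sectors, hence the +3 in the alignment mask */
		if (sector & ((1ULL << (align + 3)) - 1))
			break;
		size += BM_BLOCK_SIZE;
		if ((BM_BLOCK_SIZE << align) <= size)
			align++;
	}
	return size;
}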