/*
   drbd_req.h

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2006-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2006-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
   Copyright (C) 2006-2008, Philipp Reisner <philipp.reisner@linbit.com>.

   DRBD is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   DRBD is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"
/* The request callbacks will be called in irq context by the IDE drivers,
   and in Softirqs/Tasklets/BH context by the SCSI drivers,
   and by the receiver and worker in kernel-thread context.
   Try to get the locking right :) */
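
/* Illustrative sketch only (not part of the driver): a callback that may
 * run with interrupts disabled has to use the irqsave variants on
 * req_lock, roughly
 *
 *	unsigned long flags;
 *	spin_lock_irqsave(&mdev->req_lock, flags);
 *	... look at connection state, move the request between lists ...
 *	spin_unlock_irqrestore(&mdev->req_lock, flags);
 *
 * while the worker and receiver kernel threads know interrupts are
 * enabled and can use plain spin_lock_irq(), as req_mod() below does. */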
/*
 * Objects of type struct drbd_request only exist on a R_PRIMARY node, and are
 * associated with IO requests originating from the block layer above us.
 *
 * There are quite a few things that may happen to a drbd request
 * during its lifetime.
 *
 *  It will be created.
 *  It will be marked with the intention to be
 *    submitted to local disk and/or
 *    sent via the network.
 *
 *  It has to be placed on the transfer log and other housekeeping lists,
 *  in case we have a network connection.
 *
 *  It may be identified as a concurrent (write) request
 *    and be handled accordingly.
 *
 *  It may be handed over to the local disk subsystem.
 *  It may be completed by the local disk subsystem,
 *    either successfully or with io-error.
 *  In case it is a READ request, and it failed locally,
 *    it may be retried remotely.
 *
 *  It may be queued for sending.
 *  It may be handed over to the network stack,
 *    which may fail.
 *  It may be acknowledged by the "peer" according to the wire_protocol in use.
 *    This may be a negative ack.
 *  It may receive a faked ack when the network connection is lost and the
 *  transfer log is cleaned up.
 *  Sending may be canceled due to network connection loss.
 *  When it finally has outlived its time,
 *    corresponding dirty bits in the resync-bitmap may be cleared or set,
 *  it will be destroyed,
 *  and completion will be signalled to the originator,
 *    with or without "success".
 */
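
/* A hedged illustration of the above (the authoritative state machine is
 * __req_mod() in drbd_req.c; local and remote completion may interleave
 * differently): a successful protocol C write would typically see
 *
 *	to_be_submitted, to_be_send	(intent is recorded)
 *	queue_for_net_write		(placed on the transfer log)
 *	handed_over_to_network		(given to the network stack)
 *	write_acked_by_peer		(peer confirms stable storage)
 *	completed_ok			(local disk is done, too)
 *	barrier_acked			(epoch closed, request may die)
 *
 * while a lost connection injects connection_lost_while_pending, which
 * fakes the ack and lets the request be cleaned up. */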
enum drbd_req_event {
	created,
	to_be_send,
	to_be_submitted,

	/* XXX yes, now I am inconsistent...
	 * these two are not "events" but "actions"
	 * oh, well... */
	queue_for_net_write,
	queue_for_net_read,

	send_canceled,
	send_failed,
	handed_over_to_network,
	connection_lost_while_pending,
	read_retry_remote_canceled,
	recv_acked_by_peer,
	write_acked_by_peer,
	write_acked_by_peer_and_sis, /* and set_in_sync */
	conflict_discarded_by_peer,
	neg_acked,
	barrier_acked, /* in protocol A and B */
	data_received, /* (remote read) */

	read_completed_with_error,
	read_ahead_completed_with_error,
	write_completed_with_error,
	completed_ok,
	nothing, /* for tracing only */
};
/* encoding of request states for now.  we don't actually need that many bits.
 * we don't need to do atomic bit operations either, since most of the time we
 * need to look at the connection state and/or manipulate some lists at the
 * same time, so we should hold the request lock anyways.
 */
enum drbd_req_state_bits {
	/* 210
	 * 000: no local possible
	 * 001: to be submitted
	 *    UNUSED, we could map: 011: submitted, completion still pending
	 * 110: completed ok
	 * 010: completed with error
	 */
	__RQ_LOCAL_PENDING,
	__RQ_LOCAL_COMPLETED,
	__RQ_LOCAL_OK,

	/* 76543
	 * 00000: no network possible
	 * 00001: to be sent
	 * 00011: to be sent, on worker queue
	 * 00101: sent, expecting recv_ack (B) or write_ack (C)
	 * 11101: sent,
	 *        recv_ack (B) or implicit "ack" (A),
	 *        still waiting for the barrier ack.
	 *        master_bio may already be completed and invalidated.
	 * 11100: write_acked (C),
	 *        data_received (for remote read, any protocol)
	 *        or finally the barrier ack has arrived (B,A)...
	 *        request can be freed
	 * 01100: neg-acked (write, protocol C)
	 *        or neg-d-acked (read, any protocol)
	 *        or killed from the transfer log
	 *        during cleanup after connection loss
	 *        request can be freed
	 * 01000: canceled or send failed...
	 *        request can be freed
	 */
150 /* if "SENT" is not set, yet, this can still fail or be canceled.
151 * if "SENT" is set already, we still wait for an Ack packet.
152 * when cleared, the master_bio may be completed.
153 * in (B,A) the request object may still linger on the transaction log
154 * until the corresponding barrier ack comes in */
	/* If it is QUEUED, and it is a WRITE, it is also registered in the
	 * transfer log. Currently we need this flag to avoid conflicts between
	 * the worker canceling the request and tl_clear_barrier killing it
	 * from the transfer log. We should restructure the code so this
	 * conflict no longer occurs. */
	__RQ_NET_QUEUED,
	/* well, actually only "handed over to the network stack".
	 *
	 * TODO can potentially be dropped because of the similar meaning
	 * of RQ_NET_SENT and ~RQ_NET_QUEUED.
	 * however it is not exactly the same. before we drop it
	 * we must ensure that we can tell a request with network part
	 * from a request without, regardless of what happens to it. */
	__RQ_NET_SENT,
	/* when set, the request may be freed (if RQ_NET_QUEUED is clear).
	 * basically this means the corresponding P_BARRIER_ACK was received */
	__RQ_NET_DONE,
	/* whether or not we know (C) or pretend (B,A) that the write
	 * was successfully written on the peer.
	 */
	__RQ_NET_OK,
	/* peer called drbd_set_in_sync() for this write */
	__RQ_NET_SIS,
	/* keep this last, it's for the RQ_NET_MASK */
	__RQ_NET_MAX,
	/* Set when this is a write, clear for a read */
	__RQ_WRITE,
};
#define RQ_LOCAL_PENDING   (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK        (1UL << __RQ_LOCAL_OK)

#define RQ_LOCAL_MASK      ((RQ_LOCAL_OK << 1)-1) /* 0x07 */

#define RQ_NET_PENDING     (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED      (1UL << __RQ_NET_QUEUED)
#define RQ_NET_SENT        (1UL << __RQ_NET_SENT)
#define RQ_NET_DONE        (1UL << __RQ_NET_DONE)
#define RQ_NET_OK          (1UL << __RQ_NET_OK)
#define RQ_NET_SIS         (1UL << __RQ_NET_SIS)

/* 0x1f8 */
#define RQ_NET_MASK        (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)

#define RQ_WRITE           (1UL << __RQ_WRITE)
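
/* Illustrative helpers only (assumptions, not used elsewhere in DRBD):
 * how the mask macros above are meant to be combined.  "To be sent, on
 * worker queue" (00011 in the encoding comment above) is
 * RQ_NET_PENDING|RQ_NET_QUEUED; a request with no RQ_NET_MASK bit set
 * never had a network part. */
static inline int req_example_on_worker_queue(unsigned long rq_state)
{
	return (rq_state & (RQ_NET_PENDING|RQ_NET_QUEUED))
		== (RQ_NET_PENDING|RQ_NET_QUEUED);
}

static inline int req_example_has_net_part(unsigned long rq_state)
{
	return (rq_state & RQ_NET_MASK) != 0;
}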
/* For waking up the frozen transfer log, __req_mod() (and its wrappers)
   has to return whether the request should be counted in the epoch
   object. */
#define MR_WRITE_SHIFT 0
#define MR_WRITE       (1 << MR_WRITE_SHIFT)
#define MR_READ_SHIFT  1
#define MR_READ        (1 << MR_READ_SHIFT)
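
/* Sketch of the intended use (an assumption; the real accounting lives
 * in drbd_req.c): the state machine reports what kind of request it
 * handled roughly like
 *
 *	return (req->rq_state & RQ_WRITE) ? MR_WRITE : MR_READ;
 */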
/* epoch entries */
static inline
struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->ee_hash_s == 0);
	return mdev->ee_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->ee_hash_s);
}
/* transfer log (drbd_request objects) */
static inline
struct hlist_head *tl_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->tl_hash_s == 0);
	return mdev->tl_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->tl_hash_s);
}
/* application reads (drbd_request objects) */
static inline
struct hlist_head *ar_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	return mdev->app_reads_hash
		+ ((unsigned int)(sector) % APP_R_HSIZE);
}
/* when we receive the answer for a read request,
 * verify that we actually know about it */
static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = ar_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, colision) {
		if ((unsigned long)req == (unsigned long)id) {
			D_ASSERT(req->sector == sector);
			return req;
		}
	}
	return NULL;
}
static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
	struct bio *bio_src)
{
	struct bio *bio;
	struct drbd_request *req =
		mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (likely(req)) {
		bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */

		req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
		req->mdev = mdev;
		req->master_bio = bio_src;
		req->private_bio = bio;
		req->sector = bio->bi_sector;
		req->size = bio->bi_size;
		req->start_time = jiffies;
		INIT_HLIST_NODE(&req->colision);
		INIT_LIST_HEAD(&req->tl_requests);
		INIT_LIST_HEAD(&req->w.list);

		bio->bi_private = req;
		bio->bi_end_io = drbd_endio_pri;
	}
	return req;
}
static inline void drbd_req_free(struct drbd_request *req)
{
	mempool_free(req, drbd_request_mempool);
}
static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
{
	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
}
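
/* Example: s1/s2 are 512-byte sector numbers, l1/l2 are byte lengths.
 * overlaps(0, 4096, 7, 512) is true  (sectors 0..7 vs sector 7),
 * overlaps(0, 4096, 8, 512) is false (sectors 0..7 vs sector 8). */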
/* Short lived temporary struct on the stack.
 * We could squirrel the error to be returned into
 * bio->bi_size, or similar. But that would be too ugly. */
struct bio_and_error {
	struct bio *bio;
	int error;
};
extern void _req_may_be_done(struct drbd_request *req,
		struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m);
extern void complete_master_bio(struct drbd_conf *mdev,
		struct bio_and_error *m);
/* use this if you don't want to deal with calling complete_master_bio()
 * outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	int rv;

	/* __req_mod possibly frees req, do not touch req after that! */
	rv = __req_mod(req, what, &m);
	if (m.bio)
		complete_master_bio(mdev, &m);

	return rv;
}
/* completion of master bio is outside of spinlock.
 * If you need it irqsave, do it yourself! */
static inline int req_mod(struct drbd_request *req,
		enum drbd_req_event what)
{
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	int rv;

	spin_lock_irq(&mdev->req_lock);
	rv = __req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);

	return rv;
}
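
/* Hedged usage sketch: plain thread context that does not yet hold
 * req_lock uses req_mod(), e.g.
 *
 *	req_mod(req, connection_lost_while_pending);
 *
 * Code already running under req_lock calls __req_mod() directly and
 * completes m.bio itself after dropping the lock, or uses _req_mod()
 * where completing under the lock is acceptable. */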