4 typedef struct mirror_info mirror_info_t;
/* Fields of 'struct mirror_info' (per member device of the RAID1 set).
 * NOTE(review): the struct's opening and closing lines are not visible
 * in this excerpt, so only the field shown below is documented.
 */
8 sector_t head_position;	/* presumably the last sector serviced on this
				 * device, used for head-proximity read
				 * balancing — TODO confirm in read_balance() */
/*
12 * memory pools need a pointer to the mddev, so they can force an unplug
13 * when memory is tight, and a count of the number of drives that the
14 * pool was allocated for, so they know how much to allocate and free.
15 * mddev->raid_disks cannot be used, as it can change while a pool is active.
16 * These two datums are stored in a kmalloced struct.
 */
/*
 * Per-array private state for the RAID1 personality ("conf").
 * NOTE(review): several fields of this struct are missing from this
 * excerpt (including its closing brace); the comments below cover only
 * what is visible.
 */
24 struct r1_private_data_s {
26 mirror_info_t *mirrors;	/* per-device info; array size presumably
				 * fixed by poolinfo rather than
				 * mddev->raid_disks — TODO confirm */
29 /* When choosing the best device for a read (read_balance())
30 * we try to keep sequential reads on the same device
31 * using 'last_used' and 'next_seq_sect'
 */
34 sector_t next_seq_sect;	/* next expected sector of a sequential read */
35 /* During resync, read_balancing is only allowed on the part
36 * of the array that has been resynced. 'next_resync' tells us
 * ... [comment truncated in source] */
41 spinlock_t device_lock;	/* NOTE(review): presumably guards the lists
				 * below; exact coverage not visible here */
43 /* list of 'struct r1bio' that need to be processed by raid1d,
44 * whether to retry a read, writeout a resync or recovery
45 * block, or anything else.
 */
47 struct list_head retry_list;
49 /* queue pending writes to be submitted on unplug */
50 struct bio_list pending_bio_list;
52 /* for use when syncing mirrors:
53 * We don't allow both normal IO and resync/recovery IO at
54 * the same time - resync/recovery can only happen when there
55 * is no other IO. So when either is active, the other has to wait.
56 * See more details description in raid1.c near raise_barrier().
 */
58 wait_queue_head_t wait_barrier;
59 spinlock_t resync_lock;
65 /* Set to 1 if a full sync is needed, (fresh device added).
66 * Cleared when a sync completes.
 * (NOTE(review): the flag field itself is not visible in this excerpt.)
 */
70 /* When the same as mddev->recovery_disabled we don't allow
71 * recovery to be attempted as we expect a read error.
 */
73 int recovery_disabled;
76 /* poolinfo contains information about the content of the
77 * mempools - it changes when the array grows or shrinks
 */
79 struct pool_info *poolinfo;
80 mempool_t *r1bio_pool;
81 mempool_t *r1buf_pool;
83 /* temporary buffer to synchronous IO when attempting to repair
 * ... [comment truncated in source; the buffer field is not visible] */
89 /* When taking over an array from a different personality, we store
90 * the new thread here until we fully activate the array.
 */
92 struct md_thread *thread;
95 typedef struct r1_private_data_s conf_t;
/*
98 * this is our 'private' RAID1 bio.
100 * it contains information about what kind of IO operations were started
101 * for this RAID1 operation, and about their status:
 *
 * NOTE(review): the struct's own opening and closing lines (and the
 * trailing 'bios' array the warning below refers to) are not visible
 * in this excerpt.
 */
105 atomic_t remaining; /* 'have we finished' count,
106 * used from IRQ handlers
 */
108 atomic_t behind_remaining; /* number of write-behind ios remaining
109 * in this BehindIO request
 */
/*
116 * original bio going to /dev/mdx
 */
118 struct bio *master_bio;
/*
120 * if the IO is in READ direction, then this is where we read
 * ... [comment truncated in source; the read-target field is not visible]
 */
124 struct list_head retry_list;
125 /* Next two are only valid when R1BIO_BehindIO is set */
126 struct bio_vec *behind_bvecs;
127 int behind_page_count;
/*
129 * if the IO is in WRITE direction, then multiple bios are used.
130 * We choose the number when they are allocated.
 */
133 /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
/* Sentinel values stored in an r1bio's 'bios[]' slots in place of a real
 * 'struct bio *'; BIO_SPECIAL() tests for either of them.
 */
136 /* when we get a read error on a read-only array, we redirect to another
137 * device without failing the first device, or trying to over-write to
138 * correct the read error. To keep track of bad blocks on a per-bio
139 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
141 #define IO_BLOCKED ((struct bio *)1)
142 /* When we successfully write to a known bad-block, we need to remove the
143 * bad-block marking which must be done from process context. So we record
144 * the success by setting bios[n] to IO_MADE_GOOD
 */
146 #define IO_MADE_GOOD ((struct bio *)2)
/* True when a 'bios[n]' slot holds one of the small sentinel values
 * (IO_BLOCKED / IO_MADE_GOOD) rather than a real bio pointer.
 * The argument is parenthesized (CERT PRE01-C) so the macro expands
 * safely for any pointer expression, e.g. BIO_SPECIAL(p ? p : q).
 */
#define BIO_SPECIAL(bio) ((unsigned long)(bio) <= 2)
150 /* bits for r1bio.state */
151 #define R1BIO_Uptodate 0
152 #define R1BIO_IsSync 1
153 #define R1BIO_Degraded 2
154 #define R1BIO_BehindIO 3
155 /* Set ReadError on bios that experience a readerror so that
156 * raid1d knows what to do with them.
 */
158 #define R1BIO_ReadError 4
159 /* For write-behind requests, we call bi_end_io when
160 * the last non-write-behind device completes, providing
161 * any write was successful. Otherwise we call when
162 * any write-behind write succeeds, otherwise we call
163 * with failure when last write completes (and all failed).
164 * Record that bi_end_io was called with this flag...
 */
/* NOTE(review): bit 5 is skipped here — cannot tell from this excerpt
 * whether that is intentional or a removed flag; confirm before reusing it. */
166 #define R1BIO_Returned 6
167 /* If a write for this request means we can clear some
168 * known-bad-block records, we set this flag
 */
170 #define R1BIO_MadeGood 7
171 #define R1BIO_WriteError 8
/* Presumably reports whether the RAID1 array is congested for the given
 * BDI 'bits' — implementation not visible here; TODO confirm in raid1.c. */
173 extern int md_raid1_congested(struct mddev *mddev, int bits);