[PATCH] md: make sure md bitmap is cleared on a clean start.
drivers/md/bitmap.c  [pandora-kernel.git]
1 /*
2  * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
3  *
4  * bitmap_create  - sets up the bitmap structure
5  * bitmap_destroy - destroys the bitmap structure
6  *
7  * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
8  * - added disk storage for bitmap
9  * - changes to allow various bitmap chunk sizes
10  * - added bitmap daemon (to asynchronously clear bitmap bits from disk)
11  */
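/*
 * Rough overview of the scheme implemented below:
 *
 * The two levels are an in-memory array of per-chunk counters (struct
 * bitmap_page / bitmap->bp[]) and an on-disk bitmap file with one bit per
 * chunk, where a chunk is a fixed slice of the array (bitmap->chunksize
 * bytes).  Before a write goes to a chunk, its counter is raised and the
 * matching file bit is set and flushed out (bitmap_startwrite,
 * bitmap_file_set_bit, bitmap_unplug).  As writes complete
 * (bitmap_endwrite) and the chunk goes idle, the daemon
 * (bitmap_daemon_work) slowly counts back down and eventually clears the
 * file bit again.  After a crash, any bit still set on disk marks a chunk
 * that may be out of sync and needs to be resynced.
 */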
12
13 /*
14  * Still to do:
15  *
16  * flush after percent set rather than just time based. (maybe both).
17  * wait if count gets too high, wake when it drops to half.
18  * allow bitmap to be mirrored with superblock (before or after...)
19  * allow hot-add to re-instate a current device.
20  * allow hot-add of bitmap after quiescing device
21  */
22
23 #include <linux/module.h>
24 #include <linux/version.h>
25 #include <linux/errno.h>
26 #include <linux/slab.h>
27 #include <linux/init.h>
28 #include <linux/config.h>
29 #include <linux/timer.h>
30 #include <linux/sched.h>
31 #include <linux/list.h>
32 #include <linux/file.h>
33 #include <linux/mount.h>
34 #include <linux/buffer_head.h>
35 #include <linux/raid/md.h>
36 #include <linux/raid/bitmap.h>
37
38 /* debug macros */
39
40 #define DEBUG 0
41
42 #if DEBUG
43 /* these are for debugging purposes only! */
44
45 /* define one and only one of these */
46 #define INJECT_FAULTS_1 0 /* cause bitmap_alloc_page to fail always */
47 #define INJECT_FAULTS_2 0 /* cause bitmap file to be kicked when first bit set*/
48 #define INJECT_FAULTS_3 0 /* treat bitmap file as kicked at init time */
49 #define INJECT_FAULTS_4 0 /* undef */
50 #define INJECT_FAULTS_5 0 /* undef */
51 #define INJECT_FAULTS_6 0
52
53 /* if these are defined, the driver will fail! debug only */
54 #define INJECT_FATAL_FAULT_1 0 /* fail kmalloc, causing bitmap_create to fail */
55 #define INJECT_FATAL_FAULT_2 0 /* undef */
56 #define INJECT_FATAL_FAULT_3 0 /* undef */
57 #endif
58
59 //#define DPRINTK PRINTK /* set this NULL to avoid verbose debug output */
60 #define DPRINTK(x...) do { } while(0)
61
62 #ifndef PRINTK
63 #  if DEBUG > 0
64 #    define PRINTK(x...) printk(KERN_DEBUG x)
65 #  else
66 #    define PRINTK(x...)
67 #  endif
68 #endif
69
70 static inline char * bmname(struct bitmap *bitmap)
71 {
72         return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
73 }
74
75
76 /*
77  * test if the bitmap is active
78  */
79 int bitmap_active(struct bitmap *bitmap)
80 {
81         unsigned long flags;
82         int res = 0;
83
84         if (!bitmap)
85                 return res;
86         spin_lock_irqsave(&bitmap->lock, flags);
87         res = bitmap->flags & BITMAP_ACTIVE;
88         spin_unlock_irqrestore(&bitmap->lock, flags);
89         return res;
90 }
91
92 #define WRITE_POOL_SIZE 256
93 /* mempool for queueing pending writes on the bitmap file */
94 static void *write_pool_alloc(unsigned int gfp_flags, void *data)
95 {
96         return kmalloc(sizeof(struct page_list), gfp_flags);
97 }
98
99 static void write_pool_free(void *ptr, void *data)
100 {
101         kfree(ptr);
102 }
103
104 /*
105  * just a placeholder - calls kmalloc for bitmap pages
106  */
107 static unsigned char *bitmap_alloc_page(struct bitmap *bitmap)
108 {
109         unsigned char *page;
110
111 #if INJECT_FAULTS_1
112         page = NULL;
113 #else
114         page = kmalloc(PAGE_SIZE, GFP_NOIO);
115 #endif
116         if (!page)
117                 printk("%s: bitmap_alloc_page FAILED\n", bmname(bitmap));
118         else
119                 PRINTK("%s: bitmap_alloc_page: allocated page at %p\n",
120                         bmname(bitmap), page);
121         return page;
122 }
123
124 /*
125  * for now just a placeholder -- just calls kfree for bitmap pages
126  */
127 static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page)
128 {
129         PRINTK("%s: bitmap_free_page: free page %p\n", bmname(bitmap), page);
130         kfree(page);
131 }
132
133 /*
134  * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
135  *
136  * 1) check to see if this page is allocated, if it's not then try to alloc
137  * 2) if the alloc fails, set the page's hijacked flag so we'll use the
138  *    page pointer directly as a counter
139  *
140  * if we find our page, we increment the page's refcount so that it stays
141  * allocated while we're using it
142  */
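/*
 * Sketch of the "hijack" fallback (matching bitmap_get_counter() below;
 * counter widths as in bitmap.h of this era -- treat as illustrative):
 *
 * Normally bp[page].map points to a page full of 16-bit counters, one per
 * chunk.  If that allocation fails, the pointer field itself is reused as
 * storage: it is treated as an array of two bitmap_counter_t values,
 *
 *      ((bitmap_counter_t *)&bitmap->bp[page].map)[0 or 1]
 *
 * so each of the two counters stands in for half of the chunks the page
 * would have covered (see the coarser *blocks value returned for the
 * hijacked case in bitmap_get_counter()).  Resolution is lost, but writes
 * can still be tracked without allocating any extra memory.
 */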
143 static int bitmap_checkpage(struct bitmap *bitmap, unsigned long page, int create)
144 {
145         unsigned char *mappage;
146
147         if (page >= bitmap->pages) {
148                 printk(KERN_ALERT
149                         "%s: invalid bitmap page request: %lu (> %lu)\n",
150                         bmname(bitmap), page, bitmap->pages-1);
151                 return -EINVAL;
152         }
153
154
155         if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
156                 return 0;
157
158         if (bitmap->bp[page].map) /* page is already allocated, just return */
159                 return 0;
160
161         if (!create)
162                 return -ENOENT;
163
164         spin_unlock_irq(&bitmap->lock);
165
166         /* this page has not been allocated yet */
167
168         if ((mappage = bitmap_alloc_page(bitmap)) == NULL) {
169                 PRINTK("%s: bitmap map page allocation failed, hijacking\n",
170                         bmname(bitmap));
171                 /* failed - set the hijacked flag so that we can use the
172                  * pointer as a counter */
173                 spin_lock_irq(&bitmap->lock);
174                 if (!bitmap->bp[page].map)
175                         bitmap->bp[page].hijacked = 1;
176                 goto out;
177         }
178
179         /* got a page */
180
181         spin_lock_irq(&bitmap->lock);
182
183         /* recheck the page */
184
185         if (bitmap->bp[page].map || bitmap->bp[page].hijacked) {
186                 /* somebody beat us to getting the page */
187                 bitmap_free_page(bitmap, mappage);
188                 return 0;
189         }
190
191         /* no page was in place and we have one, so install it */
192
193         memset(mappage, 0, PAGE_SIZE);
194         bitmap->bp[page].map = mappage;
195         bitmap->missing_pages--;
196 out:
197         return 0;
198 }
199
200
201 /* if page is completely empty, put it back on the free list, or dealloc it */
202 /* if page was hijacked, unmark the flag so it might get alloced next time */
203 /* Note: lock should be held when calling this */
204 static inline void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
205 {
206         char *ptr;
207
208         if (bitmap->bp[page].count) /* page is still busy */
209                 return;
210
211         /* page is no longer in use, it can be released */
212
213         if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
214                 bitmap->bp[page].hijacked = 0;
215                 bitmap->bp[page].map = NULL;
216                 return;
217         }
218
219         /* normal case, free the page */
220
221 #if 0
222 /* actually ... let's not.  We will probably need the page again exactly when
223  * memory is tight and we are flushing to disk
224  */
225         return;
226 #else
227         ptr = bitmap->bp[page].map;
228         bitmap->bp[page].map = NULL;
229         bitmap->missing_pages++;
230         bitmap_free_page(bitmap, ptr);
231         return;
232 #endif
233 }
234
235
236 /*
237  * bitmap file handling - read and write the bitmap file and its superblock
238  */
239
240 /* copy the pathname of a file to a buffer */
241 char *file_path(struct file *file, char *buf, int count)
242 {
243         struct dentry *d;
244         struct vfsmount *v;
245
246         if (!buf)
247                 return NULL;
248
249         d = file->f_dentry;
250         v = file->f_vfsmnt;
251
252         buf = d_path(d, v, buf, count);
253
254         return IS_ERR(buf) ? NULL : buf;
255 }
256
257 /*
258  * basic page I/O operations
259  */
260
261 /*
262  * write out a page
263  */
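/*
 * (The prepare_write/commit_write pair below is run over the whole page,
 * with a NULL struct file, before the page is dirtied: the intent appears
 * to be to have the filesystem map/allocate blocks for the page so that
 * write_one_page() can push the bitmap data straight out to disk.)
 */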
264 static int write_page(struct page *page, int wait)
265 {
266         int ret = -ENOMEM;
267
268         lock_page(page);
269
270         if (page->mapping == NULL)
271                 goto unlock_out;
272         else if (i_size_read(page->mapping->host) < page->index << PAGE_SHIFT) {
273                 ret = -ENOENT;
274                 goto unlock_out;
275         }
276
277         ret = page->mapping->a_ops->prepare_write(NULL, page, 0, PAGE_SIZE);
278         if (!ret)
279                 ret = page->mapping->a_ops->commit_write(NULL, page, 0,
280                         PAGE_SIZE);
281         if (ret) {
282 unlock_out:
283                 unlock_page(page);
284                 return ret;
285         }
286
287         set_page_dirty(page); /* force it to be written out */
288         return write_one_page(page, wait);
289 }
290
291 /* read a page from a file, pinning it into cache, and return bytes_read */
292 static struct page *read_page(struct file *file, unsigned long index,
293                                         unsigned long *bytes_read)
294 {
295         struct inode *inode = file->f_mapping->host;
296         struct page *page = NULL;
297         loff_t isize = i_size_read(inode);
298         unsigned long end_index = isize >> PAGE_CACHE_SHIFT;
299
300         PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_CACHE_SIZE,
301                         (unsigned long long)index << PAGE_CACHE_SHIFT);
302
303         page = read_cache_page(inode->i_mapping, index,
304                         (filler_t *)inode->i_mapping->a_ops->readpage, file);
305         if (IS_ERR(page))
306                 goto out;
307         wait_on_page_locked(page);
308         if (!PageUptodate(page) || PageError(page)) {
309                 page_cache_release(page);
310                 page = ERR_PTR(-EIO);
311                 goto out;
312         }
313
314         if (index > end_index) /* we have read beyond EOF */
315                 *bytes_read = 0;
316         else if (index == end_index) /* possible short read */
317                 *bytes_read = isize & ~PAGE_CACHE_MASK;
318         else
319                 *bytes_read = PAGE_CACHE_SIZE; /* got a full page */
320 out:
321         if (IS_ERR(page))
322                 printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n",
323                         (int)PAGE_CACHE_SIZE,
324                         (unsigned long long)index << PAGE_CACHE_SHIFT,
325                         PTR_ERR(page));
326         return page;
327 }
328
329 /*
330  * bitmap file superblock operations
331  */
332
333 /* update the event counter and sync the superblock to disk */
334 int bitmap_update_sb(struct bitmap *bitmap)
335 {
336         bitmap_super_t *sb;
337         unsigned long flags;
338
339         if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
340                 return 0;
341         spin_lock_irqsave(&bitmap->lock, flags);
342         if (!bitmap->sb_page) { /* no superblock */
343                 spin_unlock_irqrestore(&bitmap->lock, flags);
344                 return 0;
345         }
346         page_cache_get(bitmap->sb_page);
347         spin_unlock_irqrestore(&bitmap->lock, flags);
348         sb = (bitmap_super_t *)kmap(bitmap->sb_page);
349         sb->events = cpu_to_le64(bitmap->mddev->events);
350         if (!bitmap->mddev->degraded)
351                 sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
352         kunmap(bitmap->sb_page);
353         write_page(bitmap->sb_page, 0);
354         return 0;
355 }
356
357 /* print out the bitmap file superblock */
358 void bitmap_print_sb(struct bitmap *bitmap)
359 {
360         bitmap_super_t *sb;
361
362         if (!bitmap || !bitmap->sb_page)
363                 return;
364         sb = (bitmap_super_t *)kmap(bitmap->sb_page);
365         printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
366         printk(KERN_DEBUG "       magic: %08x\n", le32_to_cpu(sb->magic));
367         printk(KERN_DEBUG "     version: %d\n", le32_to_cpu(sb->version));
368         printk(KERN_DEBUG "        uuid: %08x.%08x.%08x.%08x\n",
369                                         *(__u32 *)(sb->uuid+0),
370                                         *(__u32 *)(sb->uuid+4),
371                                         *(__u32 *)(sb->uuid+8),
372                                         *(__u32 *)(sb->uuid+12));
373         printk(KERN_DEBUG "      events: %llu\n",
374                         (unsigned long long) le64_to_cpu(sb->events));
375         printk(KERN_DEBUG "events cleared: %llu\n",
376                         (unsigned long long) le64_to_cpu(sb->events_cleared));
377         printk(KERN_DEBUG "       state: %08x\n", le32_to_cpu(sb->state));
378         printk(KERN_DEBUG "   chunksize: %d B\n", le32_to_cpu(sb->chunksize));
379         printk(KERN_DEBUG "daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
380         printk(KERN_DEBUG "   sync size: %llu KB\n", (unsigned long long) le64_to_cpu(sb->sync_size) / 2);
381         kunmap(bitmap->sb_page);
382 }
383
384 /* read the superblock from the bitmap file and initialize some bitmap fields */
385 static int bitmap_read_sb(struct bitmap *bitmap)
386 {
387         char *reason = NULL;
388         bitmap_super_t *sb;
389         unsigned long chunksize, daemon_sleep;
390         unsigned long bytes_read;
391         unsigned long long events;
392         int err = -EINVAL;
393
394         /* page 0 is the superblock, read it... */
395         bitmap->sb_page = read_page(bitmap->file, 0, &bytes_read);
396         if (IS_ERR(bitmap->sb_page)) {
397                 err = PTR_ERR(bitmap->sb_page);
398                 bitmap->sb_page = NULL;
399                 return err;
400         }
401
402         sb = (bitmap_super_t *)kmap(bitmap->sb_page);
403
404         if (bytes_read < sizeof(*sb)) { /* short read */
405                 printk(KERN_INFO "%s: bitmap file superblock truncated\n",
406                         bmname(bitmap));
407                 err = -ENOSPC;
408                 goto out;
409         }
410
411         chunksize = le32_to_cpu(sb->chunksize);
412         daemon_sleep = le32_to_cpu(sb->daemon_sleep);
413
414         /* verify that the bitmap-specific fields are valid */
415         if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
416                 reason = "bad magic";
417         else if (sb->version != cpu_to_le32(BITMAP_MAJOR))
418                 reason = "unrecognized superblock version";
419         else if (chunksize < 512 || chunksize > (1024 * 1024 * 4))
420                 reason = "bitmap chunksize out of range (512B - 4MB)";
421         else if ((1 << ffz(~chunksize)) != chunksize)
422                 reason = "bitmap chunksize not a power of 2";
423         else if (daemon_sleep < 1 || daemon_sleep > 15)
424                 reason = "daemon sleep period out of range";
425         if (reason) {
426                 printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n",
427                         bmname(bitmap), reason);
428                 goto out;
429         }
430
431         /* keep the array size field of the bitmap superblock up to date */
432         sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
433
434         if (!bitmap->mddev->persistent)
435                 goto success;
436
437         /*
438          * if we have a persistent array superblock, compare the
439          * bitmap's UUID and event counter to the mddev's
440          */
441         if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
442                 printk(KERN_INFO "%s: bitmap superblock UUID mismatch\n",
443                         bmname(bitmap));
444                 goto out;
445         }
446         events = le64_to_cpu(sb->events);
447         if (events < bitmap->mddev->events) {
448                 printk(KERN_INFO "%s: bitmap file is out of date (%llu < %llu) "
449                         "-- forcing full recovery\n", bmname(bitmap), events,
450                         (unsigned long long) bitmap->mddev->events);
451                 sb->state |= BITMAP_STALE;
452         }
453 success:
454         /* assign fields using values from superblock */
455         bitmap->chunksize = chunksize;
456         bitmap->daemon_sleep = daemon_sleep;
457         bitmap->flags |= sb->state;
458         bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
459         err = 0;
460 out:
461         kunmap(bitmap->sb_page);
462         if (err)
463                 bitmap_print_sb(bitmap);
464         return err;
465 }
466
467 enum bitmap_mask_op {
468         MASK_SET,
469         MASK_UNSET
470 };
471
472 /* record the state of the bitmap in the superblock */
473 static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
474                                 enum bitmap_mask_op op)
475 {
476         bitmap_super_t *sb;
477         unsigned long flags;
478
479         spin_lock_irqsave(&bitmap->lock, flags);
480         if (!bitmap || !bitmap->sb_page) { /* can't set the state */
481                 spin_unlock_irqrestore(&bitmap->lock, flags);
482                 return;
483         }
484         page_cache_get(bitmap->sb_page);
485         spin_unlock_irqrestore(&bitmap->lock, flags);
486         sb = (bitmap_super_t *)kmap(bitmap->sb_page);
487         switch (op) {
488                 case MASK_SET: sb->state |= bits;
489                                 break;
490                 case MASK_UNSET: sb->state &= ~bits;
491                                 break;
492                 default: BUG();
493         }
494         kunmap(bitmap->sb_page);
495         page_cache_release(bitmap->sb_page);
496 }
497
498 /*
499  * general bitmap file operations
500  */
501
502 /* calculate the index of the page that contains this bit */
503 static inline unsigned long file_page_index(unsigned long chunk)
504 {
505         return CHUNK_BIT_OFFSET(chunk) >> PAGE_BIT_SHIFT;
506 }
507
508 /* calculate the (bit) offset of this bit within a page */
509 static inline unsigned long file_page_offset(unsigned long chunk)
510 {
511         return CHUNK_BIT_OFFSET(chunk) & (PAGE_BITS - 1);
512 }
513
514 /*
515  * return a pointer to the page in the filemap that contains the given bit
516  *
517  * this lookup is complicated by the fact that the bitmap sb might be exactly
518  * 1 page (e.g., x86) or less than 1 page -- so the bitmap might start on page
519  * 0 or page 1
520  */
521 static inline struct page *filemap_get_page(struct bitmap *bitmap,
522                                         unsigned long chunk)
523 {
524         return bitmap->filemap[file_page_index(chunk) - file_page_index(0)];
525 }
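/*
 * On-disk layout of the bitmap file (sizes assume the usual 256-byte
 * bitmap_super_t and 4K pages; adjust for other configurations):
 *
 *   byte 0                      256                                  EOF
 *   +----------------------------+------------------------------------+
 *   |       bitmap_super_t       |  one bit per chunk, chunk 0 first   |
 *   +----------------------------+------------------------------------+
 *
 * CHUNK_BIT_OFFSET(chunk) gives the chunk's bit position counted from the
 * start of the file (i.e. it skips over the superblock bytes in front),
 * which file_page_index()/file_page_offset() then split into a page number
 * and an offset within that page.  filemap_get_page() subtracts
 * file_page_index(0) because filemap[0] is the first page that holds
 * bitmap data: page 0 when the superblock is smaller than a page (chunk 0
 * then shares page 0 with the superblock), page 1 when the superblock
 * fills page 0 exactly.
 */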
526
527
528 static void bitmap_file_unmap(struct bitmap *bitmap)
529 {
530         struct page **map, *sb_page;
531         unsigned long *attr;
532         int pages;
533         unsigned long flags;
534
535         spin_lock_irqsave(&bitmap->lock, flags);
536         map = bitmap->filemap;
537         bitmap->filemap = NULL;
538         attr = bitmap->filemap_attr;
539         bitmap->filemap_attr = NULL;
540         pages = bitmap->file_pages;
541         bitmap->file_pages = 0;
542         sb_page = bitmap->sb_page;
543         bitmap->sb_page = NULL;
544         spin_unlock_irqrestore(&bitmap->lock, flags);
545
546         while (pages--)
547                 if (map[pages]->index != 0) /* 0 is sb_page, release it below */
548                         page_cache_release(map[pages]);
549         kfree(map);
550         kfree(attr);
551
552         if (sb_page)
553                 page_cache_release(sb_page);
554 }
555
556 static void bitmap_stop_daemons(struct bitmap *bitmap);
557
558 /* dequeue the next item in a page list -- don't call from irq context */
559 static struct page_list *dequeue_page(struct bitmap *bitmap,
560                                         struct list_head *head)
561 {
562         struct page_list *item = NULL;
563
564         spin_lock(&bitmap->write_lock);
565         if (list_empty(head))
566                 goto out;
567         item = list_entry(head->prev, struct page_list, list);
568         list_del(head->prev);
569 out:
570         spin_unlock(&bitmap->write_lock);
571         return item;
572 }
573
574 static void drain_write_queues(struct bitmap *bitmap)
575 {
576         struct list_head *queues[] = {  &bitmap->complete_pages, NULL };
577         struct list_head *head;
578         struct page_list *item;
579         int i;
580
581         for (i = 0; queues[i]; i++) {
582                 head = queues[i];
583                 while ((item = dequeue_page(bitmap, head))) {
584                         page_cache_release(item->page);
585                         mempool_free(item, bitmap->write_pool);
586                 }
587         }
588
589         spin_lock(&bitmap->write_lock);
590         bitmap->writes_pending = 0; /* make sure waiters continue */
591         wake_up(&bitmap->write_wait);
592         spin_unlock(&bitmap->write_lock);
593 }
594
595 static void bitmap_file_put(struct bitmap *bitmap)
596 {
597         struct file *file;
598         struct inode *inode;
599         unsigned long flags;
600
601         spin_lock_irqsave(&bitmap->lock, flags);
602         file = bitmap->file;
603         bitmap->file = NULL;
604         spin_unlock_irqrestore(&bitmap->lock, flags);
605
606         bitmap_stop_daemons(bitmap);
607
608         drain_write_queues(bitmap);
609
610         bitmap_file_unmap(bitmap);
611
612         if (file) {
613                 inode = file->f_mapping->host;
614                 spin_lock(&inode->i_lock);
615                 atomic_set(&inode->i_writecount, 1); /* allow writes again */
616                 spin_unlock(&inode->i_lock);
617                 fput(file);
618         }
619 }
620
621
622 /*
623  * bitmap_file_kick - if an error occurs while manipulating the bitmap file
624  * then it is no longer reliable, so we stop using it and we mark the file
625  * as failed in the superblock
626  */
627 static void bitmap_file_kick(struct bitmap *bitmap)
628 {
629         char *path, *ptr = NULL;
630
631         bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET);
632         bitmap_update_sb(bitmap);
633
634         path = kmalloc(PAGE_SIZE, GFP_KERNEL);
635         if (path)
636                 ptr = file_path(bitmap->file, path, PAGE_SIZE);
637
638         printk(KERN_ALERT "%s: kicking failed bitmap file %s from array!\n",
639                 bmname(bitmap), ptr ? ptr : "");
640
641         kfree(path);
642
643         bitmap_file_put(bitmap);
644
645         return;
646 }
647
648 enum bitmap_page_attr {
649         BITMAP_PAGE_DIRTY = 1, // there are set bits that need to be synced
650         BITMAP_PAGE_CLEAN = 2, // there are bits that might need to be cleared
651         BITMAP_PAGE_NEEDWRITE=4, // there are cleared bits that need to be synced
652 };
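/*
 * Rough lifecycle of these flags (as used below):
 *
 *  DIRTY     - set by bitmap_file_set_bit() when a file bit is set ahead of
 *              a write; bitmap_unplug() writes such pages out and waits for
 *              them before the md writes are allowed to proceed.
 *  CLEAN     - set (from bitmap_endwrite/bitmap_end_sync/
 *              bitmap_set_memory_bits) when a chunk's counter drops low
 *              enough that the daemon should look at the page and consider
 *              clearing bits in it.
 *  NEEDWRITE - set by the daemon after it has cleared bits in a page; the
 *              page is then written out lazily on a later daemon pass or at
 *              the next unplug, since losing a cleared bit only costs a
 *              little unnecessary resync.
 */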
653
654 static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
655                                 enum bitmap_page_attr attr)
656 {
657         bitmap->filemap_attr[page->index] |= attr;
658 }
659
660 static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
661                                 enum bitmap_page_attr attr)
662 {
663         bitmap->filemap_attr[page->index] &= ~attr;
664 }
665
666 static inline unsigned long get_page_attr(struct bitmap *bitmap, struct page *page)
667 {
668         return bitmap->filemap_attr[page->index];
669 }
670
671 /*
672  * bitmap_file_set_bit -- called before performing a write to the md device
673  * to set (and eventually sync) a particular bit in the bitmap file
674  *
675  * we set the bit immediately, then we record the page number so that
676  * when an unplug occurs, we can flush the dirty pages out to disk
677  */
678 static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
679 {
680         unsigned long bit;
681         struct page *page;
682         void *kaddr;
683         unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap);
684
685         if (!bitmap->file || !bitmap->filemap) {
686                 return;
687         }
688
689         page = filemap_get_page(bitmap, chunk);
690         bit = file_page_offset(chunk);
691
692
693         /* make sure the page stays cached until it gets written out */
694         if (! (get_page_attr(bitmap, page) & BITMAP_PAGE_DIRTY))
695                 page_cache_get(page);
696
697         /* set the bit */
698         kaddr = kmap_atomic(page, KM_USER0);
699         set_bit(bit, kaddr);
700         kunmap_atomic(kaddr, KM_USER0);
701         PRINTK("set file bit %lu page %lu\n", bit, page->index);
702
703         /* record page number so it gets flushed to disk when unplug occurs */
704         set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
705
706 }
707
708 /* this gets called when the md device is ready to unplug its underlying
709  * (slave) device queues -- before we let any writes go down, we need to
710  * sync the dirty pages of the bitmap file to disk */
711 int bitmap_unplug(struct bitmap *bitmap)
712 {
713         unsigned long i, attr, flags;
714         struct page *page;
715         int wait = 0;
716
717         if (!bitmap)
718                 return 0;
719
720         /* look at each page to see if there are any set bits that need to be
721          * flushed out to disk */
722         for (i = 0; i < bitmap->file_pages; i++) {
723                 spin_lock_irqsave(&bitmap->lock, flags);
724                 if (!bitmap->file || !bitmap->filemap) {
725                         spin_unlock_irqrestore(&bitmap->lock, flags);
726                         return 0;
727                 }
728                 page = bitmap->filemap[i];
729                 attr = get_page_attr(bitmap, page);
730                 clear_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
731                 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
732                 if ((attr & BITMAP_PAGE_DIRTY))
733                         wait = 1;
734                 spin_unlock_irqrestore(&bitmap->lock, flags);
735
736                 if (attr & (BITMAP_PAGE_DIRTY | BITMAP_PAGE_NEEDWRITE))
737                         write_page(page, 0);
738         }
739         if (wait) { /* if any writes were performed, we need to wait on them */
740                 spin_lock_irq(&bitmap->write_lock);
741                 wait_event_lock_irq(bitmap->write_wait,
742                         bitmap->writes_pending == 0, bitmap->write_lock,
743                         wake_up_process(bitmap->writeback_daemon->tsk));
744                 spin_unlock_irq(&bitmap->write_lock);
745         }
746         return 0;
747 }
748
749 static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset,
750         unsigned long sectors, int in_sync);
751 /* bitmap_init_from_disk -- called at bitmap_create time to initialize
752  * the in-memory bitmap from the on-disk bitmap -- also, sets up the
753  * memory mapping of the bitmap file
754  * Special cases:
755  *   if there's no bitmap file, or if the bitmap file had been
756  *   previously kicked from the array, we mark all the bits as
757  *   1's in order to cause a full resync.
758  */
759 static int bitmap_init_from_disk(struct bitmap *bitmap, int in_sync)
760 {
761         unsigned long i, chunks, index, oldindex, bit;
762         struct page *page = NULL, *oldpage = NULL;
763         unsigned long num_pages, bit_cnt = 0;
764         struct file *file;
765         unsigned long bytes, offset, dummy;
766         int outofdate;
767         int ret = -ENOSPC;
768
769         chunks = bitmap->chunks;
770         file = bitmap->file;
771
772         BUG_ON(!file);
773
774 #if INJECT_FAULTS_3
775         outofdate = 1;
776 #else
777         outofdate = bitmap->flags & BITMAP_STALE;
778 #endif
779         if (outofdate)
780                 printk(KERN_INFO "%s: bitmap file is out of date, doing full "
781                         "recovery\n", bmname(bitmap));
782
783         bytes = (chunks + 7) / 8;
784
785         num_pages = (bytes + sizeof(bitmap_super_t) + PAGE_SIZE - 1) / PAGE_SIZE;
786
787         if (i_size_read(file->f_mapping->host) < bytes + sizeof(bitmap_super_t)) {
788                 printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
789                         bmname(bitmap),
790                         (unsigned long) i_size_read(file->f_mapping->host),
791                         bytes + sizeof(bitmap_super_t));
792                 goto out;
793         }
794
795         ret = -ENOMEM;
796
797         bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
798         if (!bitmap->filemap)
799                 goto out;
800
801         bitmap->filemap_attr = kmalloc(sizeof(long) * num_pages, GFP_KERNEL);
802         if (!bitmap->filemap_attr)
803                 goto out;
804
805         memset(bitmap->filemap_attr, 0, sizeof(long) * num_pages);
806
807         oldindex = ~0L;
808
809         for (i = 0; i < chunks; i++) {
810                 index = file_page_index(i);
811                 bit = file_page_offset(i);
812                 if (index != oldindex) { /* this is a new page, read it in */
813                         /* unmap the old page, we're done with it */
814                         if (oldpage != NULL)
815                                 kunmap(oldpage);
816                         if (index == 0) {
817                                 /*
818                                  * if we're here then the superblock page
819                                  * contains some bits (PAGE_SIZE != sizeof sb)
820                                  * we've already read it in, so just use it
821                                  */
822                                 page = bitmap->sb_page;
823                                 offset = sizeof(bitmap_super_t);
824                         } else {
825                                 page = read_page(file, index, &dummy);
826                                 if (IS_ERR(page)) { /* read error */
827                                         ret = PTR_ERR(page);
828                                         goto out;
829                                 }
830                                 offset = 0;
831                         }
832                         oldindex = index;
833                         oldpage = page;
834                         kmap(page);
835
836                         if (outofdate) {
837                                 /*
838                                  * if bitmap is out of date, dirty the
839                                  * whole page and write it out
840                                  */
841                                 memset(page_address(page) + offset, 0xff,
842                                         PAGE_SIZE - offset);
843                                 ret = write_page(page, 1);
844                                 if (ret) {
845                                         kunmap(page);
846                                         /* release, page not in filemap yet */
847                                         page_cache_release(page);
848                                         goto out;
849                                 }
850                         }
851
852                         bitmap->filemap[bitmap->file_pages++] = page;
853                 }
854                 if (test_bit(bit, page_address(page))) {
855                         /* if the disk bit is set, set the memory bit */
856                         bitmap_set_memory_bits(bitmap,
857                                         i << CHUNK_BLOCK_SHIFT(bitmap), 1, in_sync);
858                         bit_cnt++;
859                 }
860         }
861
862         /* everything went OK */
863         ret = 0;
864         bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET);
865
866         if (page) /* unmap the last page */
867                 kunmap(page);
868
869         if (bit_cnt) { /* Kick recovery if any bits were set */
870                 set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
871                 md_wakeup_thread(bitmap->mddev->thread);
872         }
873
874 out:
875         printk(KERN_INFO "%s: bitmap initialized from disk: "
876                 "read %lu/%lu pages, set %lu bits, status: %d\n",
877                 bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, ret);
878
879         return ret;
880 }
881
882
883 static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
884 {
885         sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap);
886         unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
887         bitmap->bp[page].count += inc;
888 /*
889         if (page == 0) printk("count page 0, offset %llu: %d gives %d\n",
890                               (unsigned long long)offset, inc, bitmap->bp[page].count);
891 */
892         bitmap_checkfree(bitmap, page);
893 }
894 static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
895                                             sector_t offset, int *blocks,
896                                             int create);
897
898 /*
899  * bitmap daemon -- periodically wakes up to clean bits and flush pages
900  *                      out to disk
901  */
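/*
 * Sketch of the lazy clearing implemented below (the daemon only visits
 * pages flagged BITMAP_PAGE_CLEAN; see bitmap_startwrite/bitmap_endwrite
 * for the counting half):
 *
 *   counter == 2   file bit set, no writes in flight, but written recently;
 *                  one daemon pass drops it to 1 and re-flags the page.
 *   counter == 1   still set on disk but idle; the next pass drops it to 0
 *                  and clears the bit in the cached file page.
 *   counter == 0   chunk is clean; a new write re-sets the file bit and
 *                  restarts the counter at 2 plus the writes in flight.
 *
 * A page whose bits were cleared is only marked NEEDWRITE when the daemon
 * moves on to the next page, and is written out a pass later still (or at
 * the next unplug), so the on-disk bit stays set for several daemon periods
 * after the last write -- cheap insurance against a crash while writes are
 * in flight or very recent.
 */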
902
903 int bitmap_daemon_work(struct bitmap *bitmap)
904 {
905         unsigned long bit, j;
906         unsigned long flags;
907         struct page *page = NULL, *lastpage = NULL;
908         int err = 0;
909         int blocks;
910         int attr;
911
912         if (bitmap == NULL)
913                 return 0;
914         if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ))
915                 return 0;
916         bitmap->daemon_lastrun = jiffies;
917
918         for (j = 0; j < bitmap->chunks; j++) {
919                 bitmap_counter_t *bmc;
920                 spin_lock_irqsave(&bitmap->lock, flags);
921                 if (!bitmap->file || !bitmap->filemap) {
922                         /* error or shutdown */
923                         spin_unlock_irqrestore(&bitmap->lock, flags);
924                         break;
925                 }
926
927                 page = filemap_get_page(bitmap, j);
928                 /* skip this page unless it's marked as needing cleaning */
929                 if (!((attr=get_page_attr(bitmap, page)) & BITMAP_PAGE_CLEAN)) {
930                         if (attr & BITMAP_PAGE_NEEDWRITE) {
931                                 page_cache_get(page);
932                                 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
933                         }
934                         spin_unlock_irqrestore(&bitmap->lock, flags);
935                         if (attr & BITMAP_PAGE_NEEDWRITE) {
936                                 if (write_page(page, 0))
937                                         bitmap_file_kick(bitmap);
938                                 page_cache_release(page);
939                         }
940                         continue;
941                 }
942
943                 bit = file_page_offset(j);
944
945                 if (page != lastpage) {
946                         /* grab the new page, sync and release the old */
947                         page_cache_get(page);
948                         if (lastpage != NULL) {
949                                 if (get_page_attr(bitmap, lastpage) & BITMAP_PAGE_NEEDWRITE) {
950                                         clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
951                                         spin_unlock_irqrestore(&bitmap->lock, flags);
952                                         write_page(lastpage, 0);
953                                 } else {
954                                         set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
955                                         spin_unlock_irqrestore(&bitmap->lock, flags);
956                                 }
957                                 kunmap(lastpage);
958                                 page_cache_release(lastpage);
959                                 if (err)
960                                         bitmap_file_kick(bitmap);
961                         } else
962                                 spin_unlock_irqrestore(&bitmap->lock, flags);
963                         lastpage = page;
964                         kmap(page);
965 /*
966                         printk("bitmap clean at page %lu\n", j);
967 */
968                         spin_lock_irqsave(&bitmap->lock, flags);
969                         clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
970                 }
971                 bmc = bitmap_get_counter(bitmap, j << CHUNK_BLOCK_SHIFT(bitmap),
972                                         &blocks, 0);
973                 if (bmc) {
974 /*
975   if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc);
976 */
977                         if (*bmc == 2) {
978                                 *bmc=1; /* maybe clear the bit next time */
979                                 set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
980                         } else if (*bmc == 1) {
981                                 /* we can clear the bit */
982                                 *bmc = 0;
983                                 bitmap_count_page(bitmap, j << CHUNK_BLOCK_SHIFT(bitmap),
984                                                   -1);
985
986                                 /* clear the bit */
987                                 clear_bit(bit, page_address(page));
988                         }
989                 }
990                 spin_unlock_irqrestore(&bitmap->lock, flags);
991         }
992
993         /* now sync the final page */
994         if (lastpage != NULL) {
995                 kunmap(lastpage);
996                 spin_lock_irqsave(&bitmap->lock, flags);
997                 if (get_page_attr(bitmap, lastpage) & BITMAP_PAGE_NEEDWRITE) {
998                         clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
999                         spin_unlock_irqrestore(&bitmap->lock, flags);
1000                         write_page(lastpage, 0);
1001                 } else {
1002                         set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1003                         spin_unlock_irqrestore(&bitmap->lock, flags);
1004                 }
1005
1006                 page_cache_release(lastpage);
1007         }
1008
1009         return err;
1010 }
1011
1012 static void daemon_exit(struct bitmap *bitmap, mdk_thread_t **daemon)
1013 {
1014         mdk_thread_t *dmn;
1015         unsigned long flags;
1016
1017         /* if no one is waiting on us, we'll free the md thread struct
1018          * and exit, otherwise we let the waiter clean things up */
1019         spin_lock_irqsave(&bitmap->lock, flags);
1020         if ((dmn = *daemon)) { /* no one is waiting, cleanup and exit */
1021                 *daemon = NULL;
1022                 spin_unlock_irqrestore(&bitmap->lock, flags);
1023                 kfree(dmn);
1024                 complete_and_exit(NULL, 0); /* do_exit not exported */
1025         }
1026         spin_unlock_irqrestore(&bitmap->lock, flags);
1027 }
1028
1029 static void bitmap_writeback_daemon(mddev_t *mddev)
1030 {
1031         struct bitmap *bitmap = mddev->bitmap;
1032         struct page *page;
1033         struct page_list *item;
1034         int err = 0;
1035
1036         while (1) {
1037                 PRINTK("%s: bitmap writeback daemon waiting...\n", bmname(bitmap));
1038                 down_interruptible(&bitmap->write_done);
1039                 if (signal_pending(current)) {
1040                         printk(KERN_INFO
1041                             "%s: bitmap writeback daemon got signal, exiting...\n",
1042                             bmname(bitmap));
1043                         break;
1044                 }
1045
1046                 PRINTK("%s: bitmap writeback daemon woke up...\n", bmname(bitmap));
1047                 /* wait on bitmap page writebacks */
1048                 while ((item = dequeue_page(bitmap, &bitmap->complete_pages))) {
1049                         page = item->page;
1050                         mempool_free(item, bitmap->write_pool);
1051                         PRINTK("wait on page writeback: %p %lu\n", page, bitmap->writes_pending);
1052                         wait_on_page_writeback(page);
1053                         PRINTK("finished page writeback: %p %lu\n", page, bitmap->writes_pending);
1054                         spin_lock(&bitmap->write_lock);
1055                         if (!--bitmap->writes_pending)
1056                                 wake_up(&bitmap->write_wait);
1057                         spin_unlock(&bitmap->write_lock);
1058                         err = PageError(page);
1059                         page_cache_release(page);
1060                         if (err) {
1061                                 printk(KERN_WARNING "%s: bitmap file writeback "
1062                                         "failed (page %lu): %d\n",
1063                                         bmname(bitmap), page->index, err);
1064                                 bitmap_file_kick(bitmap);
1065                                 goto out;
1066                         }
1067                 }
1068         }
1069 out:
1070         if (err) {
1071                 printk(KERN_INFO "%s: bitmap writeback daemon exiting (%d)\n",
1072                         bmname(bitmap), err);
1073                 daemon_exit(bitmap, &bitmap->writeback_daemon);
1074         }
1075         return;
1076 }
1077
1078 static int bitmap_start_daemon(struct bitmap *bitmap, mdk_thread_t **ptr,
1079                                 void (*func)(mddev_t *), char *name)
1080 {
1081         mdk_thread_t *daemon;
1082         unsigned long flags;
1083         char namebuf[32];
1084
1085         spin_lock_irqsave(&bitmap->lock, flags);
1086         *ptr = NULL;
1087         if (!bitmap->file) /* no need for daemon if there's no backing file */
1088                 goto out_unlock;
1089
1090         spin_unlock_irqrestore(&bitmap->lock, flags);
1091
1092 #if INJECT_FATAL_FAULT_2
1093         daemon = NULL;
1094 #else
1095         sprintf(namebuf, "%%s_%s", name);
1096         daemon = md_register_thread(func, bitmap->mddev, namebuf);
1097 #endif
1098         if (!daemon) {
1099                 printk(KERN_ERR "%s: failed to start bitmap daemon\n",
1100                         bmname(bitmap));
1101                 return -ECHILD;
1102         }
1103
1104         spin_lock_irqsave(&bitmap->lock, flags);
1105         *ptr = daemon;
1106
1107         md_wakeup_thread(daemon); /* start it running */
1108
1109         PRINTK("%s: %s daemon (pid %d) started...\n",
1110                 bmname(bitmap), name, daemon->tsk->pid);
1111 out_unlock:
1112         spin_unlock_irqrestore(&bitmap->lock, flags);
1113         return 0;
1114 }
1115
1116 static int bitmap_start_daemons(struct bitmap *bitmap)
1117 {
1118         int err = bitmap_start_daemon(bitmap, &bitmap->writeback_daemon,
1119                                         bitmap_writeback_daemon, "bitmap_wb");
1120         return err;
1121 }
1122
1123 static void bitmap_stop_daemon(struct bitmap *bitmap, mdk_thread_t **ptr)
1124 {
1125         mdk_thread_t *daemon;
1126         unsigned long flags;
1127
1128         spin_lock_irqsave(&bitmap->lock, flags);
1129         daemon = *ptr;
1130         *ptr = NULL;
1131         spin_unlock_irqrestore(&bitmap->lock, flags);
1132         if (daemon)
1133                 md_unregister_thread(daemon); /* destroy the thread */
1134 }
1135
1136 static void bitmap_stop_daemons(struct bitmap *bitmap)
1137 {
1138         /* the daemons can't stop themselves... they'll just exit instead... */
1139         if (bitmap->writeback_daemon &&
1140             current->pid != bitmap->writeback_daemon->tsk->pid)
1141                 bitmap_stop_daemon(bitmap, &bitmap->writeback_daemon);
1142 }
1143
1144 static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
1145                                             sector_t offset, int *blocks,
1146                                             int create)
1147 {
1148         /* If 'create', we might release the lock and reclaim it.
1149          * The lock must have been taken with interrupts enabled.
1150          * If !create, we don't release the lock.
1151          */
1152         sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap);
1153         unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1154         unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
1155         sector_t csize;
1156
1157         if (bitmap_checkpage(bitmap, page, create) < 0) {
1158                 csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap));
1159                 *blocks = csize - (offset & (csize- 1));
1160                 return NULL;
1161         }
1162         /* now locked ... */
1163
1164         if (bitmap->bp[page].hijacked) { /* hijacked pointer */
1165                 /* should we use the first or second counter field
1166                  * of the hijacked pointer? */
1167                 int hi = (pageoff > PAGE_COUNTER_MASK);
1168                 csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap) +
1169                                           PAGE_COUNTER_SHIFT - 1);
1170                 *blocks = csize - (offset & (csize- 1));
1171                 return  &((bitmap_counter_t *)
1172                           &bitmap->bp[page].map)[hi];
1173         } else { /* page is allocated */
1174                 csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap));
1175                 *blocks = csize - (offset & (csize- 1));
1176                 return (bitmap_counter_t *)
1177                         &(bitmap->bp[page].map[pageoff]);
1178         }
1179 }
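/*
 * For reference, the bitmap_counter_t layout assumed above and below (as
 * defined in include/linux/raid/bitmap.h of this era -- the exact widths
 * are illustrative):
 *
 *   bit 15    14    13 ................................ 0
 *   +-------+-------+-----------------------------------+
 *   | NEEDED| RESYNC|      count (at most COUNTER_MAX)   |
 *   +-------+-------+-----------------------------------+
 *
 *  NEEDED - chunk must be resynced: set when a write fails, when a resync
 *           of the chunk is aborted, or at init time for chunks whose
 *           on-disk bit is set while the array is not known to be in sync.
 *  RESYNC - a resync of this chunk is currently in progress
 *           (bitmap_start_sync/bitmap_end_sync flip this).
 *  count  - writes in flight plus the "recently written" credit of 2 that
 *           bitmap_daemon_work() slowly drains (see above).
 */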
1180
1181 int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors)
1182 {
1183         if (!bitmap) return 0;
1184         while (sectors) {
1185                 int blocks;
1186                 bitmap_counter_t *bmc;
1187
1188                 spin_lock_irq(&bitmap->lock);
1189                 bmc = bitmap_get_counter(bitmap, offset, &blocks, 1);
1190                 if (!bmc) {
1191                         spin_unlock_irq(&bitmap->lock);
1192                         return 0;
1193                 }
1194
1195                 switch(*bmc) {
1196                 case 0:
1197                         bitmap_file_set_bit(bitmap, offset);
1198                         bitmap_count_page(bitmap,offset, 1);
1199                         blk_plug_device(bitmap->mddev->queue);
1200                         /* fall through */
1201                 case 1:
1202                         *bmc = 2;
1203                 }
1204                 if ((*bmc & COUNTER_MAX) == COUNTER_MAX) BUG();
1205                 (*bmc)++;
1206
1207                 spin_unlock_irq(&bitmap->lock);
1208
1209                 offset += blocks;
1210                 if (sectors > blocks)
1211                         sectors -= blocks;
1212                 else sectors = 0;
1213         }
1214         return 0;
1215 }
1216
1217 void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
1218                      int success)
1219 {
1220         if (!bitmap) return;
1221         while (sectors) {
1222                 int blocks;
1223                 unsigned long flags;
1224                 bitmap_counter_t *bmc;
1225
1226                 spin_lock_irqsave(&bitmap->lock, flags);
1227                 bmc = bitmap_get_counter(bitmap, offset, &blocks, 0);
1228                 if (!bmc) {
1229                         spin_unlock_irqrestore(&bitmap->lock, flags);
1230                         return;
1231                 }
1232
1233                 if (!success && ! (*bmc & NEEDED_MASK))
1234                         *bmc |= NEEDED_MASK;
1235
1236                 (*bmc)--;
1237                 if (*bmc <= 2) {
1238                         set_page_attr(bitmap,
1239                                       filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)),
1240                                       BITMAP_PAGE_CLEAN);
1241                 }
1242                 spin_unlock_irqrestore(&bitmap->lock, flags);
1243                 offset += blocks;
1244                 if (sectors > blocks)
1245                         sectors -= blocks;
1246                 else sectors = 0;
1247         }
1248 }
1249
1250 int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks)
1251 {
1252         bitmap_counter_t *bmc;
1253         int rv;
1254         if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
1255                 *blocks = 1024;
1256                 return 1; /* always resync if no bitmap */
1257         }
1258         spin_lock_irq(&bitmap->lock);
1259         bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
1260         rv = 0;
1261         if (bmc) {
1262                 /* locked */
1263                 if (RESYNC(*bmc))
1264                         rv = 1;
1265                 else if (NEEDED(*bmc)) {
1266                         rv = 1;
1267                         *bmc |= RESYNC_MASK;
1268                         *bmc &= ~NEEDED_MASK;
1269                 }
1270         }
1271         spin_unlock_irq(&bitmap->lock);
1272         return rv;
1273 }
1274
1275 void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted)
1276 {
1277         bitmap_counter_t *bmc;
1278         unsigned long flags;
1279 /*
1280         if (offset == 0) printk("bitmap_end_sync 0 (%d)\n", aborted);
1281 */      if (bitmap == NULL) {
1282                 *blocks = 1024;
1283                 return;
1284         }
1285         spin_lock_irqsave(&bitmap->lock, flags);
1286         bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
1287         if (bmc == NULL)
1288                 goto unlock;
1289         /* locked */
1290 /*
1291         if (offset == 0) printk("bitmap_end sync found 0x%x, blocks %d\n", *bmc, *blocks);
1292 */
1293         if (RESYNC(*bmc)) {
1294                 *bmc &= ~RESYNC_MASK;
1295
1296                 if (!NEEDED(*bmc) && aborted)
1297                         *bmc |= NEEDED_MASK;
1298                 else {
1299                         if (*bmc <= 2) {
1300                                 set_page_attr(bitmap,
1301                                               filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)),
1302                                               BITMAP_PAGE_CLEAN);
1303                         }
1304                 }
1305         }
1306  unlock:
1307         spin_unlock_irqrestore(&bitmap->lock, flags);
1308 }
1309
1310 void bitmap_close_sync(struct bitmap *bitmap)
1311 {
1312         /* Sync has finished, and any bitmap chunks that weren't synced
1313          * properly have been aborted.  It remains to us to clear the
1314          * RESYNC bit wherever it is still on
1315          */
1316         sector_t sector = 0;
1317         int blocks;
1318         if (!bitmap) return;
1319         while (sector < bitmap->mddev->resync_max_sectors) {
1320                 bitmap_end_sync(bitmap, sector, &blocks, 0);
1321 /*
1322                 if (sector < 500) printk("bitmap_close_sync: sec %llu blks %d\n",
1323                                          (unsigned long long)sector, blocks);
1324 */              sector += blocks;
1325         }
1326 }
1327
1328 static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset,
1329                                    unsigned long sectors, int in_sync)
1330 {
1331         /* For each chunk covered by any of these sectors, set the
1332          * counter to 1 and set resync_needed unless in_sync.  They should all
1333          * be 0 at this point
1334          */
1335         while (sectors) {
1336                 int secs;
1337                 bitmap_counter_t *bmc;
1338                 spin_lock_irq(&bitmap->lock);
1339                 bmc = bitmap_get_counter(bitmap, offset, &secs, 1);
1340                 if (!bmc) {
1341                         spin_unlock_irq(&bitmap->lock);
1342                         return;
1343                 }
1344                 if (! *bmc) {
1345                         struct page *page;
1346                         *bmc = 1 | (in_sync? 0 : NEEDED_MASK);
1347                         bitmap_count_page(bitmap, offset, 1);
1348                         page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
1349                         set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
1350                 }
1351                 spin_unlock_irq(&bitmap->lock);
1352                 if (sectors > secs)
1353                         sectors -= secs;
1354                 else
1355                         sectors = 0;
1356         }
1357 }
1358
1359 /* dirty the entire bitmap */
1360 int bitmap_setallbits(struct bitmap *bitmap)
1361 {
1362         unsigned long flags;
1363         unsigned long j;
1364
1365         /* dirty the in-memory bitmap */
1366         bitmap_set_memory_bits(bitmap, 0, bitmap->chunks << CHUNK_BLOCK_SHIFT(bitmap), 1);
1367
1368         /* dirty the bitmap file */
1369         for (j = 0; j < bitmap->file_pages; j++) {
1370                 struct page *page = bitmap->filemap[j];
1371
1372                 spin_lock_irqsave(&bitmap->lock, flags);
1373                 page_cache_get(page);
1374                 spin_unlock_irqrestore(&bitmap->lock, flags);
1375                 memset(kmap(page), 0xff, PAGE_SIZE);
1376                 kunmap(page);
1377                 write_page(page, 0);
1378         }
1379
1380         return 0;
1381 }
1382
1383 /*
1384  * free memory that was allocated
1385  */
1386 void bitmap_destroy(mddev_t *mddev)
1387 {
1388         unsigned long k, pages;
1389         struct bitmap_page *bp;
1390         struct bitmap *bitmap = mddev->bitmap;
1391
1392         if (!bitmap) /* there was no bitmap */
1393                 return;
1394
1395         mddev->bitmap = NULL; /* disconnect from the md device */
1396
1397         /* release the bitmap file and kill the daemon */
1398         bitmap_file_put(bitmap);
1399
1400         bp = bitmap->bp;
1401         pages = bitmap->pages;
1402
1403         /* free all allocated memory */
1404
1405         mempool_destroy(bitmap->write_pool);
1406
1407         if (bp) /* deallocate the page memory */
1408                 for (k = 0; k < pages; k++)
1409                         if (bp[k].map && !bp[k].hijacked)
1410                                 kfree(bp[k].map);
1411         kfree(bp);
1412         kfree(bitmap);
1413 }
1414
1415 /*
1416  * initialize the bitmap structure
1417  * if this returns an error, bitmap_destroy must be called to do clean up
1418  */
1419 int bitmap_create(mddev_t *mddev)
1420 {
1421         struct bitmap *bitmap;
1422         unsigned long blocks = mddev->resync_max_sectors;
1423         unsigned long chunks;
1424         unsigned long pages;
1425         struct file *file = mddev->bitmap_file;
1426         int err;
1427
1428         BUG_ON(sizeof(bitmap_super_t) != 256);
1429
1430         if (!file) /* bitmap disabled, nothing to do */
1431                 return 0;
1432
1433         bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);
1434         if (!bitmap)
1435                 return -ENOMEM;
1436
1437         memset(bitmap, 0, sizeof(*bitmap));
1438
1439         spin_lock_init(&bitmap->lock);
1440         bitmap->mddev = mddev;
1441         mddev->bitmap = bitmap;
1442
1443         spin_lock_init(&bitmap->write_lock);
1444         init_MUTEX_LOCKED(&bitmap->write_done);
1445         INIT_LIST_HEAD(&bitmap->complete_pages);
1446         init_waitqueue_head(&bitmap->write_wait);
1447         bitmap->write_pool = mempool_create(WRITE_POOL_SIZE, write_pool_alloc,
1448                                 write_pool_free, NULL);
1449         if (!bitmap->write_pool)
1450                 return -ENOMEM;
1451
1452         bitmap->file = file;
1453         get_file(file);
1454         /* read superblock from bitmap file (this sets bitmap->chunksize) */
1455         err = bitmap_read_sb(bitmap);
1456         if (err)
1457                 return err;
1458
1459         /* ffz(~x) == index of the lowest set bit; chunksize is a power of 2 */
1460         bitmap->chunkshift = ffz(~bitmap->chunksize);
1461
1462         /* now that chunksize and chunkshift are set, we can use these macros */
1463         chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) /
1464                         CHUNK_BLOCK_RATIO(bitmap);
1465         pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
1466
1467         BUG_ON(!pages);
1468
1469         bitmap->chunks = chunks;
1470         bitmap->pages = pages;
1471         bitmap->missing_pages = pages;
1472         bitmap->counter_bits = COUNTER_BITS;
1473
1474         bitmap->syncchunk = ~0UL;
1475
1476 #if INJECT_FATAL_FAULT_1
1477         bitmap->bp = NULL;
1478 #else
1479         bitmap->bp = kmalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL);
1480 #endif
1481         if (!bitmap->bp)
1482                 return -ENOMEM;
1483         memset(bitmap->bp, 0, pages * sizeof(*bitmap->bp));
1484
1485         bitmap->flags |= BITMAP_ACTIVE;
1486
1487         /* now that we have some pages available, initialize the in-memory
1488          * bitmap from the on-disk bitmap */
1489         err = bitmap_init_from_disk(bitmap, mddev->recovery_cp == MaxSector);
1490         if (err)
1491                 return err;
1492
1493         printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
1494                 pages, bmname(bitmap));
1495
1496         /* kick off the bitmap daemons */
1497         err = bitmap_start_daemons(bitmap);
1498         if (err)
1499                 return err;
1500         return bitmap_update_sb(bitmap);
1501 }
1502
1503 /* the bitmap API -- for raid personalities */
1504 EXPORT_SYMBOL(bitmap_startwrite);
1505 EXPORT_SYMBOL(bitmap_endwrite);
1506 EXPORT_SYMBOL(bitmap_start_sync);
1507 EXPORT_SYMBOL(bitmap_end_sync);
1508 EXPORT_SYMBOL(bitmap_unplug);
1509 EXPORT_SYMBOL(bitmap_close_sync);
1510 EXPORT_SYMBOL(bitmap_daemon_work);