4 * bitmap_create - sets up the bitmap structure
5 * bitmap_destroy - destroys the bitmap structure
7 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
8 * - added disk storage for bitmap
9 * - changes to allow various bitmap chunk sizes
15 * flush after a given percentage of bits is set, rather than just time based (maybe both).
16 * wait if count gets too high, wake when it drops to half.
19 #include <linux/module.h>
20 #include <linux/errno.h>
21 #include <linux/slab.h>
22 #include <linux/init.h>
23 #include <linux/config.h>
24 #include <linux/timer.h>
25 #include <linux/sched.h>
26 #include <linux/list.h>
27 #include <linux/file.h>
28 #include <linux/mount.h>
29 #include <linux/buffer_head.h>
30 #include <linux/raid/md.h>
31 #include <linux/raid/bitmap.h>
38 /* these are for debugging purposes only! */
40 /* define one and only one of these */
41 #define INJECT_FAULTS_1 0 /* cause bitmap_alloc_page to fail always */
42 #define INJECT_FAULTS_2 0 /* cause bitmap file to be kicked when first bit set*/
43 #define INJECT_FAULTS_3 0 /* treat bitmap file as kicked at init time */
44 #define INJECT_FAULTS_4 0 /* undef */
45 #define INJECT_FAULTS_5 0 /* undef */
46 #define INJECT_FAULTS_6 0
48 /* if these are defined, the driver will fail! debug only */
49 #define INJECT_FATAL_FAULT_1 0 /* fail kmalloc, causing bitmap_create to fail */
50 #define INJECT_FATAL_FAULT_2 0 /* undef */
51 #define INJECT_FATAL_FAULT_3 0 /* undef */
54 //#define DPRINTK PRINTK /* set this NULL to avoid verbose debug output */
55 #define DPRINTK(x...) do { } while(0)
59 # define PRINTK(x...) printk(KERN_DEBUG x)
65 static inline char * bmname(struct bitmap *bitmap)
67 return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
70 #define WRITE_POOL_SIZE 256
73 * just a placeholder - calls kmalloc for bitmap pages
75 static unsigned char *bitmap_alloc_page(struct bitmap *bitmap)
79 #ifdef INJECT_FAULTS_1
82 page = kmalloc(PAGE_SIZE, GFP_NOIO);
85 printk("%s: bitmap_alloc_page FAILED\n", bmname(bitmap));
87 PRINTK("%s: bitmap_alloc_page: allocated page at %p\n",
88 bmname(bitmap), page);
93 * for now just a placeholder -- calls kfree for bitmap pages
95 static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page)
97 PRINTK("%s: bitmap_free_page: free page %p\n", bmname(bitmap), page);
102 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
104 * 1) check to see if this page is allocated, if it's not then try to alloc
105 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
106 * page pointer directly as a counter
108 * if we find our page, we increment the page's refcount so that it stays
109 * allocated while we're using it
111 static int bitmap_checkpage(struct bitmap *bitmap, unsigned long page, int create)
113 unsigned char *mappage;
115 if (page >= bitmap->pages) {
117 "%s: invalid bitmap page request: %lu (> %lu)\n",
118 bmname(bitmap), page, bitmap->pages-1);
123 if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
126 if (bitmap->bp[page].map) /* page is already allocated, just return */
132 spin_unlock_irq(&bitmap->lock);
134 /* this page has not been allocated yet */
136 if ((mappage = bitmap_alloc_page(bitmap)) == NULL) {
137 PRINTK("%s: bitmap map page allocation failed, hijacking\n",
139 /* failed - set the hijacked flag so that we can use the
140 * pointer as a counter */
141 spin_lock_irq(&bitmap->lock);
142 if (!bitmap->bp[page].map)
143 bitmap->bp[page].hijacked = 1;
149 spin_lock_irq(&bitmap->lock);
151 /* recheck the page */
153 if (bitmap->bp[page].map || bitmap->bp[page].hijacked) {
154 /* somebody beat us to getting the page */
155 bitmap_free_page(bitmap, mappage);
159 /* no page was in place and we have one, so install it */
161 memset(mappage, 0, PAGE_SIZE);
162 bitmap->bp[page].map = mappage;
163 bitmap->missing_pages--;
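/*
 * Illustrative sketch (not compiled, not part of this driver): the
 * unlock/allocate/relock/recheck ordering that bitmap_checkpage() follows,
 * shown with hypothetical names.  GFP_NOIO allocations may sleep, so the
 * spinlock has to be dropped around kmalloc() and the slot re-checked once
 * it is re-taken, because another CPU may have installed (or hijacked) the
 * page in the meantime.
 */
#if 0
static int example_install_slot(spinlock_t *lock, void **slot)
{
	void *new;
	int ret;

	spin_lock_irq(lock);
	if (*slot) {				/* fast path: already installed */
		spin_unlock_irq(lock);
		return 0;
	}
	spin_unlock_irq(lock);			/* kmalloc(GFP_NOIO) may sleep */

	new = kmalloc(PAGE_SIZE, GFP_NOIO);

	spin_lock_irq(lock);
	if (*slot)				/* somebody beat us to it */
		kfree(new);			/* kfree(NULL) is harmless */
	else if (new) {
		memset(new, 0, PAGE_SIZE);
		*slot = new;
	}
	ret = *slot ? 0 : -ENOMEM;
	spin_unlock_irq(lock);
	return ret;
}
#endif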
169 /* if page is completely empty, put it back on the free list, or dealloc it */
170 /* if page was hijacked, unmark the flag so it might get allocated next time */
171 /* Note: lock should be held when calling this */
172 static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
176 if (bitmap->bp[page].count) /* page is still busy */
179 /* page is no longer in use, it can be released */
181 if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
182 bitmap->bp[page].hijacked = 0;
183 bitmap->bp[page].map = NULL;
187 /* normal case, free the page */
190 /* actually ... let's not. We will probably need the page again exactly when
191 * memory is tight and we are flushing to disk
195 ptr = bitmap->bp[page].map;
196 bitmap->bp[page].map = NULL;
197 bitmap->missing_pages++;
198 bitmap_free_page(bitmap, ptr);
205 * bitmap file handling - read and write the bitmap file and its superblock
208 /* copy the pathname of a file to a buffer */
209 char *file_path(struct file *file, char *buf, int count)
220 buf = d_path(d, v, buf, count);
222 return IS_ERR(buf) ? NULL : buf;
226 * basic page I/O operations
229 /* IO operations when bitmap is stored near all superblocks */
230 static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long index)
232 /* choose a good rdev and read the page from there */
235 struct list_head *tmp;
236 struct page *page = alloc_page(GFP_KERNEL);
240 return ERR_PTR(-ENOMEM);
242 ITERATE_RDEV(mddev, rdev, tmp) {
243 if (! test_bit(In_sync, &rdev->flags)
244 || test_bit(Faulty, &rdev->flags))
247 target = (rdev->sb_offset << 1) + offset + index * (PAGE_SIZE/512);
249 if (sync_page_io(rdev->bdev, target, PAGE_SIZE, page, READ)) {
254 return ERR_PTR(-EIO);
258 static int write_sb_page(mddev_t *mddev, long offset, struct page *page, int wait)
261 struct list_head *tmp;
263 ITERATE_RDEV(mddev, rdev, tmp)
264 if (test_bit(In_sync, &rdev->flags)
265 && !test_bit(Faulty, &rdev->flags))
266 md_super_write(mddev, rdev,
267 (rdev->sb_offset<<1) + offset
268 + page->index * (PAGE_SIZE/512),
273 md_super_wait(mddev);
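/*
 * Worked example (not compiled) of the target-sector arithmetic used by
 * read_sb_page()/write_sb_page() above, assuming 4 KB pages and taking
 * rdev->sb_offset to be in 1 KB units (hence the "<< 1" to reach 512-byte
 * sectors).  The concrete numbers are invented for illustration only.
 */
#if 0
static void example_sb_page_sector(void)
{
	unsigned long long sb_offset = 976762496;	/* hypothetical, in KB */
	long bitmap_offset = 8;				/* sectors past the sb */
	unsigned long index = 3;			/* fourth bitmap page  */
	unsigned long long target;

	target = (sb_offset << 1)			/* KB -> 512-byte sectors */
		+ bitmap_offset
		+ index * (PAGE_SIZE / 512);		/* 8 sectors per 4K page  */

	printk(KERN_DEBUG "bitmap page %lu lives at sector %llu\n",
	       index, target);
}
#endif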
278 * write out a page to a file
280 static int write_page(struct bitmap *bitmap, struct page *page, int wait)
284 if (bitmap->file == NULL)
285 return write_sb_page(bitmap->mddev, bitmap->offset, page, wait);
287 flush_dcache_page(page); /* make sure visible to anyone reading the file */
292 if (TestSetPageLocked(page))
293 return -EAGAIN; /* already locked */
294 if (PageWriteback(page)) {
300 ret = page->mapping->a_ops->prepare_write(bitmap->file, page, 0, PAGE_SIZE);
302 ret = page->mapping->a_ops->commit_write(bitmap->file, page, 0,
309 set_page_dirty(page); /* force it to be written out */
312 /* add to list to be waited for */
313 struct page_list *item = mempool_alloc(bitmap->write_pool, GFP_NOIO);
315 spin_lock(&bitmap->write_lock);
316 list_add(&item->list, &bitmap->complete_pages);
317 spin_unlock(&bitmap->write_lock);
319 return write_one_page(page, wait);
322 /* read a page from a file, pinning it into cache, and return bytes_read */
323 static struct page *read_page(struct file *file, unsigned long index,
324 unsigned long *bytes_read)
326 struct inode *inode = file->f_mapping->host;
327 struct page *page = NULL;
328 loff_t isize = i_size_read(inode);
329 unsigned long end_index = isize >> PAGE_SHIFT;
331 PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_SIZE,
332 (unsigned long long)index << PAGE_SHIFT);
334 page = read_cache_page(inode->i_mapping, index,
335 (filler_t *)inode->i_mapping->a_ops->readpage, file);
338 wait_on_page_locked(page);
339 if (!PageUptodate(page) || PageError(page)) {
341 page = ERR_PTR(-EIO);
345 if (index > end_index) /* we have read beyond EOF */
347 else if (index == end_index) /* possible short read */
348 *bytes_read = isize & ~PAGE_MASK;
350 *bytes_read = PAGE_SIZE; /* got a full page */
353 printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n",
355 (unsigned long long)index << PAGE_SHIFT,
361 * bitmap file superblock operations
364 /* update the event counter and sync the superblock to disk */
365 int bitmap_update_sb(struct bitmap *bitmap)
370 if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
372 spin_lock_irqsave(&bitmap->lock, flags);
373 if (!bitmap->sb_page) { /* no superblock */
374 spin_unlock_irqrestore(&bitmap->lock, flags);
377 spin_unlock_irqrestore(&bitmap->lock, flags);
378 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
379 sb->events = cpu_to_le64(bitmap->mddev->events);
380 if (!bitmap->mddev->degraded)
381 sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
382 kunmap_atomic(sb, KM_USER0);
383 return write_page(bitmap, bitmap->sb_page, 1);
386 /* print out the bitmap file superblock */
387 void bitmap_print_sb(struct bitmap *bitmap)
391 if (!bitmap || !bitmap->sb_page)
393 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
394 printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
395 printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic));
396 printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version));
397 printk(KERN_DEBUG " uuid: %08x.%08x.%08x.%08x\n",
398 *(__u32 *)(sb->uuid+0),
399 *(__u32 *)(sb->uuid+4),
400 *(__u32 *)(sb->uuid+8),
401 *(__u32 *)(sb->uuid+12));
402 printk(KERN_DEBUG " events: %llu\n",
403 (unsigned long long) le64_to_cpu(sb->events));
404 printk(KERN_DEBUG "events cleared: %llu\n",
405 (unsigned long long) le64_to_cpu(sb->events_cleared));
406 printk(KERN_DEBUG " state: %08x\n", le32_to_cpu(sb->state));
407 printk(KERN_DEBUG " chunksize: %d B\n", le32_to_cpu(sb->chunksize));
408 printk(KERN_DEBUG " daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
409 printk(KERN_DEBUG " sync size: %llu KB\n",
410 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
411 printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
412 kunmap_atomic(sb, KM_USER0);
415 /* read the superblock from the bitmap file and initialize some bitmap fields */
416 static int bitmap_read_sb(struct bitmap *bitmap)
420 unsigned long chunksize, daemon_sleep, write_behind;
421 unsigned long bytes_read;
422 unsigned long long events;
425 /* page 0 is the superblock, read it... */
427 bitmap->sb_page = read_page(bitmap->file, 0, &bytes_read);
429 bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, 0);
430 bytes_read = PAGE_SIZE;
432 if (IS_ERR(bitmap->sb_page)) {
433 err = PTR_ERR(bitmap->sb_page);
434 bitmap->sb_page = NULL;
438 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
440 if (bytes_read < sizeof(*sb)) { /* short read */
441 printk(KERN_INFO "%s: bitmap file superblock truncated\n",
447 chunksize = le32_to_cpu(sb->chunksize);
448 daemon_sleep = le32_to_cpu(sb->daemon_sleep);
449 write_behind = le32_to_cpu(sb->write_behind);
451 /* verify that the bitmap-specific fields are valid */
452 if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
453 reason = "bad magic";
454 else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
455 le32_to_cpu(sb->version) > BITMAP_MAJOR_HI)
456 reason = "unrecognized superblock version";
457 else if (chunksize < PAGE_SIZE)
458 reason = "bitmap chunksize too small";
459 else if ((1 << ffz(~chunksize)) != chunksize)
460 reason = "bitmap chunksize not a power of 2";
461 else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT / HZ)
462 reason = "daemon sleep period out of range";
463 else if (write_behind > COUNTER_MAX)
464 reason = "write-behind limit out of range (0 - 16383)";
466 printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n",
467 bmname(bitmap), reason);
471 /* keep the array size field of the bitmap superblock up to date */
472 sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
474 if (!bitmap->mddev->persistent)
478 * if we have a persistent array superblock, compare the
479 * bitmap's UUID and event counter to the mddev's
481 if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
482 printk(KERN_INFO "%s: bitmap superblock UUID mismatch\n",
486 events = le64_to_cpu(sb->events);
487 if (events < bitmap->mddev->events) {
488 printk(KERN_INFO "%s: bitmap file is out of date (%llu < %llu) "
489 "-- forcing full recovery\n", bmname(bitmap), events,
490 (unsigned long long) bitmap->mddev->events);
491 sb->state |= BITMAP_STALE;
494 /* assign fields using values from superblock */
495 bitmap->chunksize = chunksize;
496 bitmap->daemon_sleep = daemon_sleep;
497 bitmap->daemon_lastrun = jiffies;
498 bitmap->max_write_behind = write_behind;
499 bitmap->flags |= sb->state;
500 if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
501 bitmap->flags |= BITMAP_HOSTENDIAN;
502 bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
503 if (sb->state & BITMAP_STALE)
504 bitmap->events_cleared = bitmap->mddev->events;
507 kunmap_atomic(sb, KM_USER0);
509 bitmap_print_sb(bitmap);
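/*
 * Sketch (not compiled) of the power-of-two test used in the validation
 * above: ffz(~x) is the index of the lowest set bit of x, so rebuilding
 * 1 << ffz(~x) gives x back only when exactly one bit is set.  The sample
 * values are arbitrary.
 */
#if 0
static void example_chunksize_check(void)
{
	unsigned long good = 65536;		/* 64 KB chunk: power of two    */
	unsigned long bad = 65536 + 4096;	/* two bits set: rejected       */

	BUG_ON((1 << ffz(~good)) != good);	/* passes the sb check          */
	BUG_ON((1 << ffz(~bad)) == bad);	/* "chunksize not a power of 2" */
}
#endif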
513 enum bitmap_mask_op {
518 /* record the state of the bitmap in the superblock */
519 static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
520 enum bitmap_mask_op op)
525 spin_lock_irqsave(&bitmap->lock, flags);
526 if (!bitmap->sb_page) { /* can't set the state */
527 spin_unlock_irqrestore(&bitmap->lock, flags);
530 spin_unlock_irqrestore(&bitmap->lock, flags);
531 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
533 case MASK_SET: sb->state |= bits;
535 case MASK_UNSET: sb->state &= ~bits;
539 kunmap_atomic(sb, KM_USER0);
543 * general bitmap file operations
546 /* calculate the index of the page that contains this bit */
547 static inline unsigned long file_page_index(unsigned long chunk)
549 return CHUNK_BIT_OFFSET(chunk) >> PAGE_BIT_SHIFT;
552 /* calculate the (bit) offset of this bit within a page */
553 static inline unsigned long file_page_offset(unsigned long chunk)
555 return CHUNK_BIT_OFFSET(chunk) & (PAGE_BITS - 1);
559 * return a pointer to the page in the filemap that contains the given bit
561 * this lookup is complicated by the fact that the bitmap sb might be exactly
562 * 1 page (e.g., x86) or less than 1 page -- so the bitmap might start on page 0 or page 1
565 static inline struct page *filemap_get_page(struct bitmap *bitmap, unsigned long chunk)
568 return bitmap->filemap[file_page_index(chunk) - file_page_index(0)];
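/*
 * Worked example (not compiled) of the chunk-to-file-page mapping above,
 * assuming 4 KB pages and that CHUNK_BIT_OFFSET(chunk) adds the bits taken
 * by the 256-byte on-disk superblock (2048 bits), which is why the data
 * bitmap can start on page 0 or page 1.
 */
#if 0
static void example_file_page_mapping(void)
{
	unsigned long sb_bits = 256 * 8;		/* bits used by the sb */
	unsigned long page_bits = PAGE_SIZE * 8;	/* 32768 bits per page */
	unsigned long chunk = 40000;

	unsigned long index = (chunk + sb_bits) / page_bits;	/* == 1    */
	unsigned long offset = (chunk + sb_bits) % page_bits;	/* == 9280 */

	printk(KERN_DEBUG "chunk %lu -> file page %lu, bit %lu\n",
	       chunk, index, offset);
}
#endif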
572 static void bitmap_file_unmap(struct bitmap *bitmap)
574 struct page **map, *sb_page;
579 spin_lock_irqsave(&bitmap->lock, flags);
580 map = bitmap->filemap;
581 bitmap->filemap = NULL;
582 attr = bitmap->filemap_attr;
583 bitmap->filemap_attr = NULL;
584 pages = bitmap->file_pages;
585 bitmap->file_pages = 0;
586 sb_page = bitmap->sb_page;
587 bitmap->sb_page = NULL;
588 spin_unlock_irqrestore(&bitmap->lock, flags);
591 if (map[pages]->index != 0) /* 0 is sb_page, release it below */
592 put_page(map[pages]);
596 safe_put_page(sb_page);
599 /* dequeue the next item in a page list -- don't call from irq context */
600 static struct page_list *dequeue_page(struct bitmap *bitmap)
602 struct page_list *item = NULL;
603 struct list_head *head = &bitmap->complete_pages;
605 spin_lock(&bitmap->write_lock);
606 if (list_empty(head))
608 item = list_entry(head->prev, struct page_list, list);
609 list_del(head->prev);
611 spin_unlock(&bitmap->write_lock);
615 static void drain_write_queues(struct bitmap *bitmap)
617 struct page_list *item;
619 while ((item = dequeue_page(bitmap))) {
620 /* don't bother to wait */
621 mempool_free(item, bitmap->write_pool);
625 static void bitmap_file_put(struct bitmap *bitmap)
631 spin_lock_irqsave(&bitmap->lock, flags);
634 spin_unlock_irqrestore(&bitmap->lock, flags);
636 drain_write_queues(bitmap);
638 bitmap_file_unmap(bitmap);
641 inode = file->f_mapping->host;
642 spin_lock(&inode->i_lock);
643 atomic_set(&inode->i_writecount, 1); /* allow writes again */
644 spin_unlock(&inode->i_lock);
651 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
652 * then it is no longer reliable, so we stop using it and we mark the file
653 * as failed in the superblock
655 static void bitmap_file_kick(struct bitmap *bitmap)
657 char *path, *ptr = NULL;
659 bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET);
660 bitmap_update_sb(bitmap);
663 path = kmalloc(PAGE_SIZE, GFP_KERNEL);
665 ptr = file_path(bitmap->file, path, PAGE_SIZE);
667 printk(KERN_ALERT "%s: kicking failed bitmap file %s from array!\n",
668 bmname(bitmap), ptr ? ptr : "");
673 bitmap_file_put(bitmap);
678 enum bitmap_page_attr {
679 BITMAP_PAGE_DIRTY = 0, // there are set bits that need to be synced
680 BITMAP_PAGE_CLEAN = 1, // there are bits that might need to be cleared
681 BITMAP_PAGE_NEEDWRITE=2, // there are cleared bits that need to be synced
684 static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
685 enum bitmap_page_attr attr)
687 __set_bit((page->index<<2) + attr, bitmap->filemap_attr);
690 static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
691 enum bitmap_page_attr attr)
693 __clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
696 static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page,
697 enum bitmap_page_attr attr)
699 return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
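/*
 * Sketch (not compiled) of the attribute layout used by set_page_attr(),
 * clear_page_attr() and test_page_attr(): four flag bits per filemap page,
 * packed into one bit array, so page i owns bits i*4 .. i*4+3.  The helper
 * below is hypothetical and assumes filemap_attr starts out zeroed.
 */
#if 0
static void example_page_attr_layout(unsigned long *filemap_attr)
{
	unsigned long page_index = 5;	/* bits 20..23 belong to page 5 */

	__set_bit((page_index << 2) + BITMAP_PAGE_DIRTY, filemap_attr);
	__set_bit((page_index << 2) + BITMAP_PAGE_NEEDWRITE, filemap_attr);

	BUG_ON(!test_bit((page_index << 2) + BITMAP_PAGE_DIRTY, filemap_attr));
	BUG_ON(test_bit((page_index << 2) + BITMAP_PAGE_CLEAN, filemap_attr));
}
#endif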
703 * bitmap_file_set_bit -- called before performing a write to the md device
704 * to set (and eventually sync) a particular bit in the bitmap file
706 * we set the bit immediately, then we record the page number so that
707 * when an unplug occurs, we can flush the dirty pages out to disk
709 static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
714 unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap);
716 if (!bitmap->filemap) {
720 page = filemap_get_page(bitmap, chunk);
721 bit = file_page_offset(chunk);
724 kaddr = kmap_atomic(page, KM_USER0);
725 if (bitmap->flags & BITMAP_HOSTENDIAN)
728 ext2_set_bit(bit, kaddr);
729 kunmap_atomic(kaddr, KM_USER0);
730 PRINTK("set file bit %lu page %lu\n", bit, page->index);
732 /* record page number so it gets flushed to disk when unplug occurs */
733 set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
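/*
 * Plain-C sketch (not compiled) of the little-endian bit convention that the
 * ext2_set_bit() path above relies on: byte bit/8, least-significant bit
 * first within each byte, so the on-disk layout is the same on every
 * architecture.  The BITMAP_HOSTENDIAN path uses the CPU's native long-word
 * bit order instead.  The helper name is hypothetical.
 */
#if 0
static void example_le_set_bit(unsigned long bit, unsigned char *kaddr)
{
	kaddr[bit >> 3] |= 1 << (bit & 7);	/* e.g. bit 11 -> byte 1, mask 0x08 */
}
#endif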
737 static void bitmap_writeback(struct bitmap *bitmap);
739 /* this gets called when the md device is ready to unplug its underlying
740 * (slave) device queues -- before we let any writes go down, we need to
741 * sync the dirty pages of the bitmap file to disk */
742 int bitmap_unplug(struct bitmap *bitmap)
744 unsigned long i, flags;
745 int dirty, need_write;
753 /* look at each page to see if there are any set bits that need to be
754 * flushed out to disk */
755 for (i = 0; i < bitmap->file_pages; i++) {
756 spin_lock_irqsave(&bitmap->lock, flags);
757 if (!bitmap->filemap) {
758 spin_unlock_irqrestore(&bitmap->lock, flags);
761 page = bitmap->filemap[i];
762 dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
763 need_write = test_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
764 clear_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
765 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
768 spin_unlock_irqrestore(&bitmap->lock, flags);
770 if (dirty | need_write) {
771 err = write_page(bitmap, page, 0);
772 if (err == -EAGAIN) {
774 err = write_page(bitmap, page, 1);
782 if (wait) { /* if any writes were performed, we need to wait on them */
784 bitmap_writeback(bitmap);
786 md_super_wait(bitmap->mddev);
791 static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
792 /* bitmap_init_from_disk -- called at bitmap_create time to initialize
793 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
794 * memory mapping of the bitmap file
796 * if there's no bitmap file, or if the bitmap file had been
797 * previously kicked from the array, we mark all the bits as
798 * 1's in order to cause a full resync.
800 * We ignore all bits for sectors that end earlier than 'start'.
801 * This is used when reading an out-of-date bitmap...
803 static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
805 unsigned long i, chunks, index, oldindex, bit;
806 struct page *page = NULL, *oldpage = NULL;
807 unsigned long num_pages, bit_cnt = 0;
809 unsigned long bytes, offset, dummy;
814 chunks = bitmap->chunks;
817 BUG_ON(!file && !bitmap->offset);
819 #ifdef INJECT_FAULTS_3
822 outofdate = bitmap->flags & BITMAP_STALE;
825 printk(KERN_INFO "%s: bitmap file is out of date, doing full "
826 "recovery\n", bmname(bitmap));
828 bytes = (chunks + 7) / 8;
830 num_pages = (bytes + sizeof(bitmap_super_t) + PAGE_SIZE - 1) / PAGE_SIZE;
832 if (file && i_size_read(file->f_mapping->host) < bytes + sizeof(bitmap_super_t)) {
833 printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
835 (unsigned long) i_size_read(file->f_mapping->host),
836 bytes + sizeof(bitmap_super_t));
842 bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
843 if (!bitmap->filemap)
846 /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
847 bitmap->filemap_attr = kzalloc(
848 (((num_pages*4/8)+sizeof(unsigned long)-1)
849 /sizeof(unsigned long))
850 *sizeof(unsigned long),
852 if (!bitmap->filemap_attr)
857 for (i = 0; i < chunks; i++) {
859 index = file_page_index(i);
860 bit = file_page_offset(i);
861 if (index != oldindex) { /* this is a new page, read it in */
862 /* unmap the old page, we're done with it */
865 * if we're here then the superblock page
866 * contains some bits (PAGE_SIZE != sizeof sb)
867 * we've already read it in, so just use it
869 page = bitmap->sb_page;
870 offset = sizeof(bitmap_super_t);
872 page = read_page(file, index, &dummy);
875 page = read_sb_page(bitmap->mddev, bitmap->offset, index);
878 if (IS_ERR(page)) { /* read error */
888 * if bitmap is out of date, dirty the
889 * whole page and write it out
891 paddr = kmap_atomic(page, KM_USER0);
892 memset(paddr + offset, 0xff, PAGE_SIZE - offset);
894 kunmap_atomic(paddr, KM_USER0);
895 ret = write_page(bitmap, page, 1);
897 /* release, page not in filemap yet */
903 bitmap->filemap[bitmap->file_pages++] = page;
905 paddr = kmap_atomic(page, KM_USER0);
906 if (bitmap->flags & BITMAP_HOSTENDIAN)
907 b = test_bit(bit, paddr);
909 b = ext2_test_bit(bit, paddr);
910 kunmap_atomic(paddr, KM_USER0);
912 /* if the disk bit is set, set the memory bit */
913 bitmap_set_memory_bits(bitmap, i << CHUNK_BLOCK_SHIFT(bitmap),
914 ((i+1) << (CHUNK_BLOCK_SHIFT(bitmap)) >= start));
917 set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
921 /* everything went OK */
923 bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET);
925 if (bit_cnt) { /* Kick recovery if any bits were set */
926 set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
927 md_wakeup_thread(bitmap->mddev->thread);
931 printk(KERN_INFO "%s: bitmap initialized from disk: "
932 "read %lu/%lu pages, set %lu bits, status: %d\n",
933 bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, ret);
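/*
 * Worked example (not compiled) of the sizing arithmetic above, assuming
 * 4 KB pages and the 256-byte superblock.  The chunk count is invented.
 */
#if 0
static void example_filemap_sizing(void)
{
	unsigned long chunks = 250000;
	unsigned long bytes, num_pages, attr_bytes;

	bytes = (chunks + 7) / 8;			/* 31250 bitmap bytes */
	num_pages = (bytes + sizeof(bitmap_super_t)
		     + PAGE_SIZE - 1) / PAGE_SIZE;	/* 8 file pages       */
	attr_bytes = (((num_pages * 4 / 8) + sizeof(unsigned long) - 1)
		      / sizeof(unsigned long)) * sizeof(unsigned long);

	printk(KERN_DEBUG "%lu chunks -> %lu file pages, %lu attr bytes\n",
	       chunks, num_pages, attr_bytes);
}
#endif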
938 void bitmap_write_all(struct bitmap *bitmap)
940 /* We don't actually write all bitmap blocks here,
941 * just flag them as needing to be written
945 for (i=0; i < bitmap->file_pages; i++)
946 set_page_attr(bitmap, bitmap->filemap[i],
947 BITMAP_PAGE_NEEDWRITE);
951 static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
953 sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap);
954 unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
955 bitmap->bp[page].count += inc;
957 if (page == 0) printk("count page 0, offset %llu: %d gives %d\n",
958 (unsigned long long)offset, inc, bitmap->bp[page].count);
960 bitmap_checkfree(bitmap, page);
962 static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
963 sector_t offset, int *blocks, int create);
967 * bitmap daemon -- periodically wakes up to clean bits and flush pages
971 int bitmap_daemon_work(struct bitmap *bitmap)
975 struct page *page = NULL, *lastpage = NULL;
982 if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ))
984 bitmap->daemon_lastrun = jiffies;
986 for (j = 0; j < bitmap->chunks; j++) {
987 bitmap_counter_t *bmc;
988 spin_lock_irqsave(&bitmap->lock, flags);
989 if (!bitmap->filemap) {
990 /* error or shutdown */
991 spin_unlock_irqrestore(&bitmap->lock, flags);
995 page = filemap_get_page(bitmap, j);
997 if (page != lastpage) {
998 /* skip this page unless it's marked as needing cleaning */
999 if (!test_page_attr(bitmap, page, BITMAP_PAGE_CLEAN)) {
1000 int need_write = test_page_attr(bitmap, page,
1001 BITMAP_PAGE_NEEDWRITE);
1003 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
1005 spin_unlock_irqrestore(&bitmap->lock, flags);
1007 switch (write_page(bitmap, page, 0)) {
1009 set_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
1014 bitmap_file_kick(bitmap);
1020 /* grab the new page, sync and release the old */
1021 if (lastpage != NULL) {
1022 if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
1023 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1024 spin_unlock_irqrestore(&bitmap->lock, flags);
1025 err = write_page(bitmap, lastpage, 0);
1026 if (err == -EAGAIN) {
1028 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1031 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1032 spin_unlock_irqrestore(&bitmap->lock, flags);
1035 bitmap_file_kick(bitmap);
1037 spin_unlock_irqrestore(&bitmap->lock, flags);
1040 printk("bitmap clean at page %lu\n", j);
1042 spin_lock_irqsave(&bitmap->lock, flags);
1043 clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
1045 bmc = bitmap_get_counter(bitmap, j << CHUNK_BLOCK_SHIFT(bitmap),
1049 if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc);
1052 *bmc=1; /* maybe clear the bit next time */
1053 set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
1054 } else if (*bmc == 1) {
1055 /* we can clear the bit */
1057 bitmap_count_page(bitmap, j << CHUNK_BLOCK_SHIFT(bitmap),
1061 paddr = kmap_atomic(page, KM_USER0);
1062 if (bitmap->flags & BITMAP_HOSTENDIAN)
1063 clear_bit(file_page_offset(j), paddr);
1065 ext2_clear_bit(file_page_offset(j), paddr);
1066 kunmap_atomic(paddr, KM_USER0);
1069 spin_unlock_irqrestore(&bitmap->lock, flags);
1072 /* now sync the final page */
1073 if (lastpage != NULL) {
1074 spin_lock_irqsave(&bitmap->lock, flags);
1075 if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
1076 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1077 spin_unlock_irqrestore(&bitmap->lock, flags);
1078 err = write_page(bitmap, lastpage, 0);
1079 if (err == -EAGAIN) {
1080 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1084 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1085 spin_unlock_irqrestore(&bitmap->lock, flags);
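/*
 * Sketch (not compiled) of the counter decay that bitmap_daemon_work()
 * implements: a counter of 2 ("written recently") is demoted to 1 and the
 * page flagged BITMAP_PAGE_CLEAN; only if it is still 1 on a later pass is
 * the on-disk bit cleared and the counter dropped to 0.  A bit therefore
 * stays set for at least one full daemon period after the last write.
 * Hypothetical helper, not driver code.
 */
#if 0
static int example_daemon_pass(bitmap_counter_t *bmc)
{
	if (*bmc == 2) {
		*bmc = 1;	/* candidate: may be cleared next pass */
		return 0;
	}
	if (*bmc == 1) {
		*bmc = 0;	/* quiet for a full period             */
		return 1;	/* caller clears the bit in the file   */
	}
	return 0;		/* still busy (>2) or already clean    */
}
#endif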
1092 static void bitmap_writeback(struct bitmap *bitmap)
1095 struct page_list *item;
1098 PRINTK("%s: bitmap writeback daemon woke up...\n", bmname(bitmap));
1099 /* wait on bitmap page writebacks */
1100 while ((item = dequeue_page(bitmap))) {
1102 mempool_free(item, bitmap->write_pool);
1103 PRINTK("wait on page writeback: %p\n", page);
1104 wait_on_page_writeback(page);
1105 PRINTK("finished page writeback: %p\n", page);
1107 err = PageError(page);
1109 printk(KERN_WARNING "%s: bitmap file writeback "
1110 "failed (page %lu): %d\n",
1111 bmname(bitmap), page->index, err);
1112 bitmap_file_kick(bitmap);
1118 static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
1119 sector_t offset, int *blocks, int create)
1122 /* If 'create', we might release the lock and reclaim it.
1123 * The lock must have been taken with interrupts enabled.
1124 * If !create, we don't release the lock.
1126 sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap);
1127 unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1128 unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
1131 if (bitmap_checkpage(bitmap, page, create) < 0) {
1132 csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap));
1133 *blocks = csize - (offset & (csize- 1));
1136 /* now locked ... */
1138 if (bitmap->bp[page].hijacked) { /* hijacked pointer */
1139 /* should we use the first or second counter field
1140 * of the hijacked pointer? */
1141 int hi = (pageoff > PAGE_COUNTER_MASK);
1142 csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap) +
1143 PAGE_COUNTER_SHIFT - 1);
1144 *blocks = csize - (offset & (csize- 1));
1145 return &((bitmap_counter_t *)
1146 &bitmap->bp[page].map)[hi];
1147 } else { /* page is allocated */
1148 csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap));
1149 *blocks = csize - (offset & (csize- 1));
1150 return (bitmap_counter_t *)
1151 &(bitmap->bp[page].map[pageoff]);
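/*
 * Worked example (not compiled) of the hijacked-pointer case above, assuming
 * 4 KB pages and 16-bit counters (2048 counters per page, so
 * PAGE_COUNTER_MASK == 2047 and COUNTER_BYTE_SHIFT == 1): the pointer slot
 * holds just two counters, each standing in for one half of the page's
 * 2048 chunks, which is why csize is widened by PAGE_COUNTER_SHIFT - 1.
 */
#if 0
static void example_hijacked_counter(void)
{
	unsigned long chunk_lo = 100;				/* first half of page  */
	unsigned long chunk_hi = 1500;				/* second half of page */
	unsigned long pageoff_lo = (chunk_lo & 2047) << 1;	/* 200 bytes           */
	unsigned long pageoff_hi = (chunk_hi & 2047) << 1;	/* 3000 bytes          */

	BUG_ON((pageoff_lo > 2047) != 0);	/* hi == 0: first counter  */
	BUG_ON((pageoff_hi > 2047) != 1);	/* hi == 1: second counter */
}
#endif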
1155 int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
1157 if (!bitmap) return 0;
1160 atomic_inc(&bitmap->behind_writes);
1161 PRINTK(KERN_DEBUG "inc write-behind count %d/%d\n",
1162 atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
1167 bitmap_counter_t *bmc;
1169 spin_lock_irq(&bitmap->lock);
1170 bmc = bitmap_get_counter(bitmap, offset, &blocks, 1);
1172 spin_unlock_irq(&bitmap->lock);
1178 bitmap_file_set_bit(bitmap, offset);
1179 bitmap_count_page(bitmap, offset, 1);
1180 blk_plug_device(bitmap->mddev->queue);
1185 BUG_ON((*bmc & COUNTER_MAX) == COUNTER_MAX);
1188 spin_unlock_irq(&bitmap->lock);
1191 if (sectors > blocks)
1198 void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
1199 int success, int behind)
1201 if (!bitmap) return;
1203 atomic_dec(&bitmap->behind_writes);
1204 PRINTK(KERN_DEBUG "dec write-behind count %d/%d\n",
1205 atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
1210 unsigned long flags;
1211 bitmap_counter_t *bmc;
1213 spin_lock_irqsave(&bitmap->lock, flags);
1214 bmc = bitmap_get_counter(bitmap, offset, &blocks, 0);
1216 spin_unlock_irqrestore(&bitmap->lock, flags);
1220 if (!success && ! (*bmc & NEEDED_MASK))
1221 *bmc |= NEEDED_MASK;
1225 set_page_attr(bitmap,
1226 filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)), BITMAP_PAGE_CLEAN);
1229 spin_unlock_irqrestore(&bitmap->lock, flags);
1231 if (sectors > blocks)
1237 int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int degraded)
1240 bitmap_counter_t *bmc;
1242 if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
1244 return 1; /* always resync if no bitmap */
1246 spin_lock_irq(&bitmap->lock);
1247 bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
1253 else if (NEEDED(*bmc)) {
1255 if (!degraded) { /* don't set/clear bits if degraded */
1256 *bmc |= RESYNC_MASK;
1257 *bmc &= ~NEEDED_MASK;
1261 spin_unlock_irq(&bitmap->lock);
1265 void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted)
1267 bitmap_counter_t *bmc;
1268 unsigned long flags;
1270 /* if (offset == 0) printk("bitmap_end_sync 0 (%d)\n", aborted); */
1271 if (bitmap == NULL) {
1275 spin_lock_irqsave(&bitmap->lock, flags);
1276 bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
1281 if (offset == 0) printk("bitmap_end sync found 0x%x, blocks %d\n", *bmc, *blocks);
1284 *bmc &= ~RESYNC_MASK;
1286 if (!NEEDED(*bmc) && aborted)
1287 *bmc |= NEEDED_MASK;
1290 set_page_attr(bitmap,
1291 filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)), BITMAP_PAGE_CLEAN);
1297 spin_unlock_irqrestore(&bitmap->lock, flags);
1300 void bitmap_close_sync(struct bitmap *bitmap)
1302 /* Sync has finished, and any bitmap chunks that weren't synced
1303 * properly have been aborted. It remains to us to clear the
1304 * RESYNC bit wherever it is still on
1306 sector_t sector = 0;
1308 if (!bitmap) return;
1309 while (sector < bitmap->mddev->resync_max_sectors) {
1310 bitmap_end_sync(bitmap, sector, &blocks, 0);
1312 /* if (sector < 500) printk("bitmap_close_sync: sec %llu blks %d\n",
1313 (unsigned long long)sector, blocks); */
1314 sector += blocks;
1318 static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
1320 /* For each chunk covered by any of these sectors, set the
1321 * counter to 1 and set resync_needed. They should all
1322 * be 0 at this point
1326 bitmap_counter_t *bmc;
1327 spin_lock_irq(&bitmap->lock);
1328 bmc = bitmap_get_counter(bitmap, offset, &secs, 1);
1330 spin_unlock_irq(&bitmap->lock);
1335 *bmc = 1 | (needed?NEEDED_MASK:0);
1336 bitmap_count_page(bitmap, offset, 1);
1337 page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
1338 set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
1340 spin_unlock_irq(&bitmap->lock);
1345 * flush out any pending updates
1347 void bitmap_flush(mddev_t *mddev)
1349 struct bitmap *bitmap = mddev->bitmap;
1352 if (!bitmap) /* there was no bitmap */
1355 /* run the daemon_work three times to ensure everything is flushed
1358 sleep = bitmap->daemon_sleep;
1359 bitmap->daemon_sleep = 0;
1360 bitmap_daemon_work(bitmap);
1361 bitmap_daemon_work(bitmap);
1362 bitmap_daemon_work(bitmap);
1363 bitmap->daemon_sleep = sleep;
1364 bitmap_update_sb(bitmap);
1368 * free memory that was allocated
1370 static void bitmap_free(struct bitmap *bitmap)
1372 unsigned long k, pages;
1373 struct bitmap_page *bp;
1375 if (!bitmap) /* there was no bitmap */
1378 /* release the bitmap file and kill the daemon */
1379 bitmap_file_put(bitmap);
1382 pages = bitmap->pages;
1384 /* free all allocated memory */
1386 mempool_destroy(bitmap->write_pool);
1388 if (bp) /* deallocate the page memory */
1389 for (k = 0; k < pages; k++)
1390 if (bp[k].map && !bp[k].hijacked)
1395 void bitmap_destroy(mddev_t *mddev)
1397 struct bitmap *bitmap = mddev->bitmap;
1399 if (!bitmap) /* there was no bitmap */
1402 mddev->bitmap = NULL; /* disconnect from the md device */
1404 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1406 bitmap_free(bitmap);
1410 * initialize the bitmap structure
1411 * if this returns an error, bitmap_destroy must be called to do clean up
1413 int bitmap_create(mddev_t *mddev)
1415 struct bitmap *bitmap;
1416 unsigned long blocks = mddev->resync_max_sectors;
1417 unsigned long chunks;
1418 unsigned long pages;
1419 struct file *file = mddev->bitmap_file;
1423 BUG_ON(sizeof(bitmap_super_t) != 256);
1425 if (!file && !mddev->bitmap_offset) /* bitmap disabled, nothing to do */
1428 BUG_ON(file && mddev->bitmap_offset);
1430 bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
1434 spin_lock_init(&bitmap->lock);
1435 bitmap->mddev = mddev;
1437 spin_lock_init(&bitmap->write_lock);
1438 INIT_LIST_HEAD(&bitmap->complete_pages);
1439 bitmap->write_pool = mempool_create_kmalloc_pool(WRITE_POOL_SIZE,
1440 sizeof(struct page_list));
1442 if (!bitmap->write_pool)
1445 bitmap->file = file;
1446 bitmap->offset = mddev->bitmap_offset;
1447 if (file) get_file(file);
1448 /* read superblock from bitmap file (this sets bitmap->chunksize) */
1449 err = bitmap_read_sb(bitmap);
1453 bitmap->chunkshift = find_first_bit(&bitmap->chunksize,
1454 sizeof(bitmap->chunksize));
1456 /* now that chunksize and chunkshift are set, we can use these macros */
1457 chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) /
1458 CHUNK_BLOCK_RATIO(bitmap);
1459 pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
1463 bitmap->chunks = chunks;
1464 bitmap->pages = pages;
1465 bitmap->missing_pages = pages;
1466 bitmap->counter_bits = COUNTER_BITS;
1468 bitmap->syncchunk = ~0UL;
1470 #ifdef INJECT_FATAL_FAULT_1
1473 bitmap->bp = kzalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL);
1479 /* now that we have some pages available, initialize the in-memory
1480 * bitmap from the on-disk bitmap */
1482 if (mddev->degraded == 0
1483 || bitmap->events_cleared == mddev->events)
1484 /* no need to keep dirty bits to optimise a re-add of a missing device */
1485 start = mddev->recovery_cp;
1486 err = bitmap_init_from_disk(bitmap, start);
1491 printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
1492 pages, bmname(bitmap));
1494 mddev->bitmap = bitmap;
1496 mddev->thread->timeout = bitmap->daemon_sleep * HZ;
1498 return bitmap_update_sb(bitmap);
1501 bitmap_free(bitmap);
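/*
 * Worked example (not compiled) of the sizing above for a hypothetical
 * 100 GiB array with 64 KiB bitmap chunks, assuming 4 KB pages and 16-bit
 * counters (2048 counters per counter page).
 */
#if 0
static void example_bitmap_sizing(void)
{
	unsigned long blocks = 209715200;	/* 100 GiB in 512-byte sectors */
	unsigned long ratio = 65536 / 512;	/* blocks per 64 KiB chunk     */
	unsigned long chunks = (blocks + ratio - 1) / ratio;	/* 1638400 */
	unsigned long pages = (chunks + 2048 - 1) / 2048;	/* 800     */

	printk(KERN_DEBUG "%lu chunks, %lu counter pages\n", chunks, pages);
}
#endif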
1505 /* the bitmap API -- for raid personalities */
1506 EXPORT_SYMBOL(bitmap_startwrite);
1507 EXPORT_SYMBOL(bitmap_endwrite);
1508 EXPORT_SYMBOL(bitmap_start_sync);
1509 EXPORT_SYMBOL(bitmap_end_sync);
1510 EXPORT_SYMBOL(bitmap_unplug);
1511 EXPORT_SYMBOL(bitmap_close_sync);
1512 EXPORT_SYMBOL(bitmap_daemon_work);