-rw-r--r-- | drivers/md/bitmap.c | 301
-rw-r--r-- | drivers/md/md.h     |   2
2 files changed, 136 insertions, 167 deletions
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 4518994712c7..67fb32d1124d 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
| @@ -13,7 +13,6 @@ | |||
| 13 | * Still to do: | 13 | * Still to do: |
| 14 | * | 14 | * |
| 15 | * flush after percent set rather than just time based. (maybe both). | 15 | * flush after percent set rather than just time based. (maybe both). |
| 16 | * wait if count gets too high, wake when it drops to half. | ||
| 17 | */ | 16 | */ |
| 18 | 17 | ||
| 19 | #include <linux/blkdev.h> | 18 | #include <linux/blkdev.h> |
| @@ -51,9 +50,6 @@ | |||
| 51 | #define INJECT_FATAL_FAULT_3 0 /* undef */ | 50 | #define INJECT_FATAL_FAULT_3 0 /* undef */ |
| 52 | #endif | 51 | #endif |
| 53 | 52 | ||
| 54 | //#define DPRINTK PRINTK /* set this NULL to avoid verbose debug output */ | ||
| 55 | #define DPRINTK(x...) do { } while(0) | ||
| 56 | |||
| 57 | #ifndef PRINTK | 53 | #ifndef PRINTK |
| 58 | # if DEBUG > 0 | 54 | # if DEBUG > 0 |
| 59 | # define PRINTK(x...) printk(KERN_DEBUG x) | 55 | # define PRINTK(x...) printk(KERN_DEBUG x) |
| @@ -62,12 +58,11 @@ | |||
| 62 | # endif | 58 | # endif |
| 63 | #endif | 59 | #endif |
| 64 | 60 | ||
| 65 | static inline char * bmname(struct bitmap *bitmap) | 61 | static inline char *bmname(struct bitmap *bitmap) |
| 66 | { | 62 | { |
| 67 | return bitmap->mddev ? mdname(bitmap->mddev) : "mdX"; | 63 | return bitmap->mddev ? mdname(bitmap->mddev) : "mdX"; |
| 68 | } | 64 | } |
| 69 | 65 | ||
| 70 | |||
| 71 | /* | 66 | /* |
| 72 | * just a placeholder - calls kmalloc for bitmap pages | 67 | * just a placeholder - calls kmalloc for bitmap pages |
| 73 | */ | 68 | */ |
| @@ -78,7 +73,7 @@ static unsigned char *bitmap_alloc_page(struct bitmap *bitmap) | |||
| 78 | #ifdef INJECT_FAULTS_1 | 73 | #ifdef INJECT_FAULTS_1 |
| 79 | page = NULL; | 74 | page = NULL; |
| 80 | #else | 75 | #else |
| 81 | page = kmalloc(PAGE_SIZE, GFP_NOIO); | 76 | page = kzalloc(PAGE_SIZE, GFP_NOIO); |
| 82 | #endif | 77 | #endif |
| 83 | if (!page) | 78 | if (!page) |
| 84 | printk("%s: bitmap_alloc_page FAILED\n", bmname(bitmap)); | 79 | printk("%s: bitmap_alloc_page FAILED\n", bmname(bitmap)); |
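Editor's note: the switch from kmalloc() to kzalloc() above is what lets a later hunk drop the explicit memset() before installing the page — the allocator already returns zeroed memory. A minimal userspace sketch of the same idea, with calloc() standing in for kzalloc() (names and sizes are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* kmalloc + memset style: two steps, easy to forget the memset */
static unsigned char *alloc_page_oldstyle(void)
{
    unsigned char *page = malloc(PAGE_SIZE);
    if (page)
        memset(page, 0, PAGE_SIZE);
    return page;
}

/* kzalloc style: the allocator hands back zeroed memory directly */
static unsigned char *alloc_page_newstyle(void)
{
    return calloc(1, PAGE_SIZE);
}

int main(void)
{
    unsigned char *a = alloc_page_oldstyle();
    unsigned char *b = alloc_page_newstyle();

    if (!a || !b)
        return 1;
    printf("old[0]=%d new[0]=%d\n", a[0], b[0]);  /* both print 0 */
    free(a);
    free(b);
    return 0;
}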
| @@ -107,7 +102,8 @@ static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page) | |||
| 107 | * if we find our page, we increment the page's refcount so that it stays | 102 | * if we find our page, we increment the page's refcount so that it stays |
| 108 | * allocated while we're using it | 103 | * allocated while we're using it |
| 109 | */ | 104 | */ |
| 110 | static int bitmap_checkpage(struct bitmap *bitmap, unsigned long page, int create) | 105 | static int bitmap_checkpage(struct bitmap *bitmap, |
| 106 | unsigned long page, int create) | ||
| 111 | __releases(bitmap->lock) | 107 | __releases(bitmap->lock) |
| 112 | __acquires(bitmap->lock) | 108 | __acquires(bitmap->lock) |
| 113 | { | 109 | { |
| @@ -121,7 +117,6 @@ __acquires(bitmap->lock) | |||
| 121 | return -EINVAL; | 117 | return -EINVAL; |
| 122 | } | 118 | } |
| 123 | 119 | ||
| 124 | |||
| 125 | if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */ | 120 | if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */ |
| 126 | return 0; | 121 | return 0; |
| 127 | 122 | ||
| @@ -131,43 +126,34 @@ __acquires(bitmap->lock) | |||
| 131 | if (!create) | 126 | if (!create) |
| 132 | return -ENOENT; | 127 | return -ENOENT; |
| 133 | 128 | ||
| 134 | spin_unlock_irq(&bitmap->lock); | ||
| 135 | |||
| 136 | /* this page has not been allocated yet */ | 129 | /* this page has not been allocated yet */ |
| 137 | 130 | ||
| 138 | if ((mappage = bitmap_alloc_page(bitmap)) == NULL) { | 131 | spin_unlock_irq(&bitmap->lock); |
| 132 | mappage = bitmap_alloc_page(bitmap); | ||
| 133 | spin_lock_irq(&bitmap->lock); | ||
| 134 | |||
| 135 | if (mappage == NULL) { | ||
| 139 | PRINTK("%s: bitmap map page allocation failed, hijacking\n", | 136 | PRINTK("%s: bitmap map page allocation failed, hijacking\n", |
| 140 | bmname(bitmap)); | 137 | bmname(bitmap)); |
| 141 | /* failed - set the hijacked flag so that we can use the | 138 | /* failed - set the hijacked flag so that we can use the |
| 142 | * pointer as a counter */ | 139 | * pointer as a counter */ |
| 143 | spin_lock_irq(&bitmap->lock); | ||
| 144 | if (!bitmap->bp[page].map) | 140 | if (!bitmap->bp[page].map) |
| 145 | bitmap->bp[page].hijacked = 1; | 141 | bitmap->bp[page].hijacked = 1; |
| 146 | goto out; | 142 | } else if (bitmap->bp[page].map || |
| 147 | } | 143 | bitmap->bp[page].hijacked) { |
| 148 | |||
| 149 | /* got a page */ | ||
| 150 | |||
| 151 | spin_lock_irq(&bitmap->lock); | ||
| 152 | |||
| 153 | /* recheck the page */ | ||
| 154 | |||
| 155 | if (bitmap->bp[page].map || bitmap->bp[page].hijacked) { | ||
| 156 | /* somebody beat us to getting the page */ | 144 | /* somebody beat us to getting the page */ |
| 157 | bitmap_free_page(bitmap, mappage); | 145 | bitmap_free_page(bitmap, mappage); |
| 158 | return 0; | 146 | return 0; |
| 159 | } | 147 | } else { |
| 160 | 148 | ||
| 161 | /* no page was in place and we have one, so install it */ | 149 | /* no page was in place and we have one, so install it */ |
| 162 | 150 | ||
| 163 | memset(mappage, 0, PAGE_SIZE); | 151 | bitmap->bp[page].map = mappage; |
| 164 | bitmap->bp[page].map = mappage; | 152 | bitmap->missing_pages--; |
| 165 | bitmap->missing_pages--; | 153 | } |
| 166 | out: | ||
| 167 | return 0; | 154 | return 0; |
| 168 | } | 155 | } |
| 169 | 156 | ||
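Editor's note: the rewritten bitmap_checkpage() above keeps its long-standing pattern — drop the lock around a sleeping allocation, retake it, then recheck whether another CPU installed the page first — but expresses it as one if/else chain instead of gotos. A rough userspace analogue of that pattern, with a pthread mutex standing in for the spinlock (the struct and function names are invented for illustration):

#include <pthread.h>
#include <stdlib.h>

struct slot {
    pthread_mutex_t lock;
    void *map;          /* NULL until someone installs a page */
};

/* Ensure slot->map is populated; tolerate a racing caller doing the same. */
static int ensure_page(struct slot *s, size_t size)
{
    void *page;

    pthread_mutex_lock(&s->lock);
    if (s->map) {                       /* fast path: already there */
        pthread_mutex_unlock(&s->lock);
        return 0;
    }
    pthread_mutex_unlock(&s->lock);     /* can't sleep in the allocator with it held */

    page = calloc(1, size);             /* may block for a while */

    pthread_mutex_lock(&s->lock);
    if (s->map) {                       /* somebody beat us to it */
        pthread_mutex_unlock(&s->lock);
        free(page);                     /* free(NULL) is harmless */
        return 0;
    }
    if (!page) {                        /* allocation failed, nothing installed */
        pthread_mutex_unlock(&s->lock);
        return -1;
    }
    s->map = page;                      /* install our page */
    pthread_mutex_unlock(&s->lock);
    return 0;
}

int main(void)
{
    struct slot s = { PTHREAD_MUTEX_INITIALIZER, NULL };
    /* second call takes the fast path */
    return ensure_page(&s, 4096) || ensure_page(&s, 4096);
}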
| 170 | |||
| 171 | /* if page is completely empty, put it back on the free list, or dealloc it */ | 157 | /* if page is completely empty, put it back on the free list, or dealloc it */ |
| 172 | /* if page was hijacked, unmark the flag so it might get alloced next time */ | 158 | /* if page was hijacked, unmark the flag so it might get alloced next time */ |
| 173 | /* Note: lock should be held when calling this */ | 159 | /* Note: lock should be held when calling this */ |
| @@ -183,26 +169,15 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page) | |||
| 183 | if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */ | 169 | if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */ |
| 184 | bitmap->bp[page].hijacked = 0; | 170 | bitmap->bp[page].hijacked = 0; |
| 185 | bitmap->bp[page].map = NULL; | 171 | bitmap->bp[page].map = NULL; |
| 186 | return; | 172 | } else { |
| 173 | /* normal case, free the page */ | ||
| 174 | ptr = bitmap->bp[page].map; | ||
| 175 | bitmap->bp[page].map = NULL; | ||
| 176 | bitmap->missing_pages++; | ||
| 177 | bitmap_free_page(bitmap, ptr); | ||
| 187 | } | 178 | } |
| 188 | |||
| 189 | /* normal case, free the page */ | ||
| 190 | |||
| 191 | #if 0 | ||
| 192 | /* actually ... let's not. We will probably need the page again exactly when | ||
| 193 | * memory is tight and we are flusing to disk | ||
| 194 | */ | ||
| 195 | return; | ||
| 196 | #else | ||
| 197 | ptr = bitmap->bp[page].map; | ||
| 198 | bitmap->bp[page].map = NULL; | ||
| 199 | bitmap->missing_pages++; | ||
| 200 | bitmap_free_page(bitmap, ptr); | ||
| 201 | return; | ||
| 202 | #endif | ||
| 203 | } | 179 | } |
| 204 | 180 | ||
| 205 | |||
| 206 | /* | 181 | /* |
| 207 | * bitmap file handling - read and write the bitmap file and its superblock | 182 | * bitmap file handling - read and write the bitmap file and its superblock |
| 208 | */ | 183 | */ |
| @@ -220,11 +195,14 @@ static struct page *read_sb_page(mddev_t *mddev, loff_t offset, | |||
| 220 | 195 | ||
| 221 | mdk_rdev_t *rdev; | 196 | mdk_rdev_t *rdev; |
| 222 | sector_t target; | 197 | sector_t target; |
| 198 | int did_alloc = 0; | ||
| 223 | 199 | ||
| 224 | if (!page) | 200 | if (!page) { |
| 225 | page = alloc_page(GFP_KERNEL); | 201 | page = alloc_page(GFP_KERNEL); |
| 226 | if (!page) | 202 | if (!page) |
| 227 | return ERR_PTR(-ENOMEM); | 203 | return ERR_PTR(-ENOMEM); |
| 204 | did_alloc = 1; | ||
| 205 | } | ||
| 228 | 206 | ||
| 229 | list_for_each_entry(rdev, &mddev->disks, same_set) { | 207 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
| 230 | if (! test_bit(In_sync, &rdev->flags) | 208 | if (! test_bit(In_sync, &rdev->flags) |
| @@ -242,6 +220,8 @@ static struct page *read_sb_page(mddev_t *mddev, loff_t offset, | |||
| 242 | return page; | 220 | return page; |
| 243 | } | 221 | } |
| 244 | } | 222 | } |
| 223 | if (did_alloc) | ||
| 224 | put_page(page); | ||
| 245 | return ERR_PTR(-EIO); | 225 | return ERR_PTR(-EIO); |
| 246 | 226 | ||
| 247 | } | 227 | } |
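Editor's note: the new did_alloc flag above makes the error path free the page only when read_sb_page() allocated it itself; a caller-supplied page stays owned by the caller. A small userspace sketch of that ownership rule (function and parameter names are made up for the example):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Fill 'buf' from some source; allocate it ourselves if the caller passed NULL.
 * On failure, only a buffer allocated here is freed -- a caller-supplied
 * buffer remains the caller's to manage. */
static char *read_record(char *buf, size_t size, const char *src)
{
    int did_alloc = 0;

    if (!buf) {
        buf = malloc(size);
        if (!buf)
            return NULL;
        did_alloc = 1;
    }

    if (!src || strlen(src) >= size) {  /* stand-in for an I/O error */
        if (did_alloc)
            free(buf);
        errno = EIO;
        return NULL;
    }

    strcpy(buf, src);
    return buf;
}

int main(void)
{
    char stackbuf[16];
    char *a = read_record(stackbuf, sizeof(stackbuf), "hello"); /* caller-owned */
    char *b = read_record(NULL, 16, "world");                   /* function-owned */
    int ok = a && b;
    free(b);    /* only the buffer read_record() allocated gets freed here */
    return !ok;
}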
| @@ -286,49 +266,51 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) | |||
| 286 | mddev_t *mddev = bitmap->mddev; | 266 | mddev_t *mddev = bitmap->mddev; |
| 287 | 267 | ||
| 288 | while ((rdev = next_active_rdev(rdev, mddev)) != NULL) { | 268 | while ((rdev = next_active_rdev(rdev, mddev)) != NULL) { |
| 289 | int size = PAGE_SIZE; | 269 | int size = PAGE_SIZE; |
| 290 | loff_t offset = mddev->bitmap_info.offset; | 270 | loff_t offset = mddev->bitmap_info.offset; |
| 291 | if (page->index == bitmap->file_pages-1) | 271 | if (page->index == bitmap->file_pages-1) |
| 292 | size = roundup(bitmap->last_page_size, | 272 | size = roundup(bitmap->last_page_size, |
| 293 | bdev_logical_block_size(rdev->bdev)); | 273 | bdev_logical_block_size(rdev->bdev)); |
| 294 | /* Just make sure we aren't corrupting data or | 274 | /* Just make sure we aren't corrupting data or |
| 295 | * metadata | 275 | * metadata |
| 296 | */ | 276 | */ |
| 297 | if (mddev->external) { | 277 | if (mddev->external) { |
| 298 | /* Bitmap could be anywhere. */ | 278 | /* Bitmap could be anywhere. */ |
| 299 | if (rdev->sb_start + offset + (page->index *(PAGE_SIZE/512)) > | 279 | if (rdev->sb_start + offset + (page->index |
| 300 | rdev->data_offset && | 280 | * (PAGE_SIZE/512)) |
| 301 | rdev->sb_start + offset < | 281 | > rdev->data_offset |
| 302 | rdev->data_offset + mddev->dev_sectors + | 282 | && |
| 303 | (PAGE_SIZE/512)) | 283 | rdev->sb_start + offset |
| 304 | goto bad_alignment; | 284 | < (rdev->data_offset + mddev->dev_sectors |
| 305 | } else if (offset < 0) { | 285 | + (PAGE_SIZE/512))) |
| 306 | /* DATA BITMAP METADATA */ | 286 | goto bad_alignment; |
| 307 | if (offset | 287 | } else if (offset < 0) { |
| 308 | + (long)(page->index * (PAGE_SIZE/512)) | 288 | /* DATA BITMAP METADATA */ |
| 309 | + size/512 > 0) | 289 | if (offset |
| 310 | /* bitmap runs in to metadata */ | 290 | + (long)(page->index * (PAGE_SIZE/512)) |
| 311 | goto bad_alignment; | 291 | + size/512 > 0) |
| 312 | if (rdev->data_offset + mddev->dev_sectors | 292 | /* bitmap runs in to metadata */ |
| 313 | > rdev->sb_start + offset) | 293 | goto bad_alignment; |
| 314 | /* data runs in to bitmap */ | 294 | if (rdev->data_offset + mddev->dev_sectors |
| 315 | goto bad_alignment; | 295 | > rdev->sb_start + offset) |
| 316 | } else if (rdev->sb_start < rdev->data_offset) { | 296 | /* data runs in to bitmap */ |
| 317 | /* METADATA BITMAP DATA */ | 297 | goto bad_alignment; |
| 318 | if (rdev->sb_start | 298 | } else if (rdev->sb_start < rdev->data_offset) { |
| 319 | + offset | 299 | /* METADATA BITMAP DATA */ |
| 320 | + page->index*(PAGE_SIZE/512) + size/512 | 300 | if (rdev->sb_start |
| 321 | > rdev->data_offset) | 301 | + offset |
| 322 | /* bitmap runs in to data */ | 302 | + page->index*(PAGE_SIZE/512) + size/512 |
| 323 | goto bad_alignment; | 303 | > rdev->data_offset) |
| 324 | } else { | 304 | /* bitmap runs in to data */ |
| 325 | /* DATA METADATA BITMAP - no problems */ | 305 | goto bad_alignment; |
| 326 | } | 306 | } else { |
| 327 | md_super_write(mddev, rdev, | 307 | /* DATA METADATA BITMAP - no problems */ |
| 328 | rdev->sb_start + offset | 308 | } |
| 329 | + page->index * (PAGE_SIZE/512), | 309 | md_super_write(mddev, rdev, |
| 330 | size, | 310 | rdev->sb_start + offset |
| 331 | page); | 311 | + page->index * (PAGE_SIZE/512), |
| 312 | size, | ||
| 313 | page); | ||
| 332 | } | 314 | } |
| 333 | 315 | ||
| 334 | if (wait) | 316 | if (wait) |
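Editor's note: the re-indented overlap checks above all work in 512-byte sectors — one bitmap page covers PAGE_SIZE/512 sectors, so page index N is written at sb_start + offset + N*(PAGE_SIZE/512) and must not run into the data area. A worked example of the "METADATA BITMAP DATA" case, with invented sector numbers:

#include <stdio.h>

#define PAGE_SIZE 4096
#define SECTORS_PER_PAGE (PAGE_SIZE / 512)      /* 8 sectors per bitmap page */

int main(void)
{
    long long sb_start = 8;     /* superblock at sector 8 (example) */
    long long offset = 8;       /* bitmap starts 8 sectors after the sb */
    long long data_offset = 64; /* array data starts at sector 64 */
    int size = PAGE_SIZE;       /* bytes written for this bitmap page */

    for (long long index = 0; index < 8; index++) {
        long long end = sb_start + offset
                        + index * SECTORS_PER_PAGE + size / 512;
        /* METADATA BITMAP DATA layout: the write must end before the data */
        printf("page %lld ends at sector %lld -> %s\n", index, end,
               end > data_offset ? "bad_alignment" : "ok");
    }
    return 0;
}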
| @@ -364,10 +346,9 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait) | |||
| 364 | bh = bh->b_this_page; | 346 | bh = bh->b_this_page; |
| 365 | } | 347 | } |
| 366 | 348 | ||
| 367 | if (wait) { | 349 | if (wait) |
| 368 | wait_event(bitmap->write_wait, | 350 | wait_event(bitmap->write_wait, |
| 369 | atomic_read(&bitmap->pending_writes)==0); | 351 | atomic_read(&bitmap->pending_writes)==0); |
| 370 | } | ||
| 371 | } | 352 | } |
| 372 | if (bitmap->flags & BITMAP_WRITE_ERROR) | 353 | if (bitmap->flags & BITMAP_WRITE_ERROR) |
| 373 | bitmap_file_kick(bitmap); | 354 | bitmap_file_kick(bitmap); |
| @@ -424,7 +405,7 @@ static struct page *read_page(struct file *file, unsigned long index, | |||
| 424 | struct buffer_head *bh; | 405 | struct buffer_head *bh; |
| 425 | sector_t block; | 406 | sector_t block; |
| 426 | 407 | ||
| 427 | PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_SIZE, | 408 | PRINTK("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE, |
| 428 | (unsigned long long)index << PAGE_SHIFT); | 409 | (unsigned long long)index << PAGE_SHIFT); |
| 429 | 410 | ||
| 430 | page = alloc_page(GFP_KERNEL); | 411 | page = alloc_page(GFP_KERNEL); |
| @@ -478,7 +459,7 @@ static struct page *read_page(struct file *file, unsigned long index, | |||
| 478 | } | 459 | } |
| 479 | out: | 460 | out: |
| 480 | if (IS_ERR(page)) | 461 | if (IS_ERR(page)) |
| 481 | printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n", | 462 | printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %ld\n", |
| 482 | (int)PAGE_SIZE, | 463 | (int)PAGE_SIZE, |
| 483 | (unsigned long long)index << PAGE_SHIFT, | 464 | (unsigned long long)index << PAGE_SHIFT, |
| 484 | PTR_ERR(page)); | 465 | PTR_ERR(page)); |
| @@ -664,11 +645,14 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits, | |||
| 664 | sb = kmap_atomic(bitmap->sb_page, KM_USER0); | 645 | sb = kmap_atomic(bitmap->sb_page, KM_USER0); |
| 665 | old = le32_to_cpu(sb->state) & bits; | 646 | old = le32_to_cpu(sb->state) & bits; |
| 666 | switch (op) { | 647 | switch (op) { |
| 667 | case MASK_SET: sb->state |= cpu_to_le32(bits); | 648 | case MASK_SET: |
| 668 | break; | 649 | sb->state |= cpu_to_le32(bits); |
| 669 | case MASK_UNSET: sb->state &= cpu_to_le32(~bits); | 650 | break; |
| 670 | break; | 651 | case MASK_UNSET: |
| 671 | default: BUG(); | 652 | sb->state &= cpu_to_le32(~bits); |
| 653 | break; | ||
| 654 | default: | ||
| 655 | BUG(); | ||
| 672 | } | 656 | } |
| 673 | kunmap_atomic(sb, KM_USER0); | 657 | kunmap_atomic(sb, KM_USER0); |
| 674 | return old; | 658 | return old; |
| @@ -710,12 +694,12 @@ static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned lon | |||
| 710 | static inline struct page *filemap_get_page(struct bitmap *bitmap, | 694 | static inline struct page *filemap_get_page(struct bitmap *bitmap, |
| 711 | unsigned long chunk) | 695 | unsigned long chunk) |
| 712 | { | 696 | { |
| 713 | if (file_page_index(bitmap, chunk) >= bitmap->file_pages) return NULL; | 697 | if (file_page_index(bitmap, chunk) >= bitmap->file_pages) |
| 698 | return NULL; | ||
| 714 | return bitmap->filemap[file_page_index(bitmap, chunk) | 699 | return bitmap->filemap[file_page_index(bitmap, chunk) |
| 715 | - file_page_index(bitmap, 0)]; | 700 | - file_page_index(bitmap, 0)]; |
| 716 | } | 701 | } |
| 717 | 702 | ||
| 718 | |||
| 719 | static void bitmap_file_unmap(struct bitmap *bitmap) | 703 | static void bitmap_file_unmap(struct bitmap *bitmap) |
| 720 | { | 704 | { |
| 721 | struct page **map, *sb_page; | 705 | struct page **map, *sb_page; |
| @@ -766,7 +750,6 @@ static void bitmap_file_put(struct bitmap *bitmap) | |||
| 766 | } | 750 | } |
| 767 | } | 751 | } |
| 768 | 752 | ||
| 769 | |||
| 770 | /* | 753 | /* |
| 771 | * bitmap_file_kick - if an error occurs while manipulating the bitmap file | 754 | * bitmap_file_kick - if an error occurs while manipulating the bitmap file |
| 772 | * then it is no longer reliable, so we stop using it and we mark the file | 755 | * then it is no longer reliable, so we stop using it and we mark the file |
| @@ -785,7 +768,6 @@ static void bitmap_file_kick(struct bitmap *bitmap) | |||
| 785 | ptr = d_path(&bitmap->file->f_path, path, | 768 | ptr = d_path(&bitmap->file->f_path, path, |
| 786 | PAGE_SIZE); | 769 | PAGE_SIZE); |
| 787 | 770 | ||
| 788 | |||
| 789 | printk(KERN_ALERT | 771 | printk(KERN_ALERT |
| 790 | "%s: kicking failed bitmap file %s from array!\n", | 772 | "%s: kicking failed bitmap file %s from array!\n", |
| 791 | bmname(bitmap), IS_ERR(ptr) ? "" : ptr); | 773 | bmname(bitmap), IS_ERR(ptr) ? "" : ptr); |
| @@ -803,9 +785,9 @@ static void bitmap_file_kick(struct bitmap *bitmap) | |||
| 803 | } | 785 | } |
| 804 | 786 | ||
| 805 | enum bitmap_page_attr { | 787 | enum bitmap_page_attr { |
| 806 | BITMAP_PAGE_DIRTY = 0, // there are set bits that need to be synced | 788 | BITMAP_PAGE_DIRTY = 0, /* there are set bits that need to be synced */ |
| 807 | BITMAP_PAGE_CLEAN = 1, // there are bits that might need to be cleared | 789 | BITMAP_PAGE_CLEAN = 1, /* there are bits that might need to be cleared */ |
| 808 | BITMAP_PAGE_NEEDWRITE=2, // there are cleared bits that need to be synced | 790 | BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */ |
| 809 | }; | 791 | }; |
| 810 | 792 | ||
| 811 | static inline void set_page_attr(struct bitmap *bitmap, struct page *page, | 793 | static inline void set_page_attr(struct bitmap *bitmap, struct page *page, |
| @@ -840,15 +822,15 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) | |||
| 840 | void *kaddr; | 822 | void *kaddr; |
| 841 | unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap); | 823 | unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap); |
| 842 | 824 | ||
| 843 | if (!bitmap->filemap) { | 825 | if (!bitmap->filemap) |
| 844 | return; | 826 | return; |
| 845 | } | ||
| 846 | 827 | ||
| 847 | page = filemap_get_page(bitmap, chunk); | 828 | page = filemap_get_page(bitmap, chunk); |
| 848 | if (!page) return; | 829 | if (!page) |
| 830 | return; | ||
| 849 | bit = file_page_offset(bitmap, chunk); | 831 | bit = file_page_offset(bitmap, chunk); |
| 850 | 832 | ||
| 851 | /* set the bit */ | 833 | /* set the bit */ |
| 852 | kaddr = kmap_atomic(page, KM_USER0); | 834 | kaddr = kmap_atomic(page, KM_USER0); |
| 853 | if (bitmap->flags & BITMAP_HOSTENDIAN) | 835 | if (bitmap->flags & BITMAP_HOSTENDIAN) |
| 854 | set_bit(bit, kaddr); | 836 | set_bit(bit, kaddr); |
| @@ -859,7 +841,6 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) | |||
| 859 | 841 | ||
| 860 | /* record page number so it gets flushed to disk when unplug occurs */ | 842 | /* record page number so it gets flushed to disk when unplug occurs */ |
| 861 | set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); | 843 | set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); |
| 862 | |||
| 863 | } | 844 | } |
| 864 | 845 | ||
| 865 | /* this gets called when the md device is ready to unplug its underlying | 846 | /* this gets called when the md device is ready to unplug its underlying |
| @@ -892,7 +873,7 @@ void bitmap_unplug(struct bitmap *bitmap) | |||
| 892 | wait = 1; | 873 | wait = 1; |
| 893 | spin_unlock_irqrestore(&bitmap->lock, flags); | 874 | spin_unlock_irqrestore(&bitmap->lock, flags); |
| 894 | 875 | ||
| 895 | if (dirty | need_write) | 876 | if (dirty || need_write) |
| 896 | write_page(bitmap, page, 0); | 877 | write_page(bitmap, page, 0); |
| 897 | } | 878 | } |
| 898 | if (wait) { /* if any writes were performed, we need to wait on them */ | 879 | if (wait) { /* if any writes were performed, we need to wait on them */ |
| @@ -905,6 +886,7 @@ void bitmap_unplug(struct bitmap *bitmap) | |||
| 905 | if (bitmap->flags & BITMAP_WRITE_ERROR) | 886 | if (bitmap->flags & BITMAP_WRITE_ERROR) |
| 906 | bitmap_file_kick(bitmap); | 887 | bitmap_file_kick(bitmap); |
| 907 | } | 888 | } |
| 889 | EXPORT_SYMBOL(bitmap_unplug); | ||
| 908 | 890 | ||
| 909 | static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed); | 891 | static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed); |
| 910 | /* * bitmap_init_from_disk -- called at bitmap_create time to initialize | 892 | /* * bitmap_init_from_disk -- called at bitmap_create time to initialize |
| @@ -947,7 +929,6 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) | |||
| 947 | if (!bitmap->mddev->bitmap_info.external) | 929 | if (!bitmap->mddev->bitmap_info.external) |
| 948 | bytes += sizeof(bitmap_super_t); | 930 | bytes += sizeof(bitmap_super_t); |
| 949 | 931 | ||
| 950 | |||
| 951 | num_pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE; | 932 | num_pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE; |
| 952 | 933 | ||
| 953 | if (file && i_size_read(file->f_mapping->host) < bytes) { | 934 | if (file && i_size_read(file->f_mapping->host) < bytes) { |
| @@ -966,7 +947,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) | |||
| 966 | 947 | ||
| 967 | /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */ | 948 | /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */ |
| 968 | bitmap->filemap_attr = kzalloc( | 949 | bitmap->filemap_attr = kzalloc( |
| 969 | roundup( DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)), | 950 | roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)), |
| 970 | GFP_KERNEL); | 951 | GFP_KERNEL); |
| 971 | if (!bitmap->filemap_attr) | 952 | if (!bitmap->filemap_attr) |
| 972 | goto err; | 953 | goto err; |
| @@ -1021,7 +1002,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) | |||
| 1021 | if (outofdate) { | 1002 | if (outofdate) { |
| 1022 | /* | 1003 | /* |
| 1023 | * if bitmap is out of date, dirty the | 1004 | * if bitmap is out of date, dirty the |
| 1024 | * whole page and write it out | 1005 | * whole page and write it out |
| 1025 | */ | 1006 | */ |
| 1026 | paddr = kmap_atomic(page, KM_USER0); | 1007 | paddr = kmap_atomic(page, KM_USER0); |
| 1027 | memset(paddr + offset, 0xff, | 1008 | memset(paddr + offset, 0xff, |
| @@ -1052,7 +1033,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) | |||
| 1052 | } | 1033 | } |
| 1053 | } | 1034 | } |
| 1054 | 1035 | ||
| 1055 | /* everything went OK */ | 1036 | /* everything went OK */ |
| 1056 | ret = 0; | 1037 | ret = 0; |
| 1057 | bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET); | 1038 | bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET); |
| 1058 | 1039 | ||
| @@ -1080,21 +1061,16 @@ void bitmap_write_all(struct bitmap *bitmap) | |||
| 1080 | */ | 1061 | */ |
| 1081 | int i; | 1062 | int i; |
| 1082 | 1063 | ||
| 1083 | for (i=0; i < bitmap->file_pages; i++) | 1064 | for (i = 0; i < bitmap->file_pages; i++) |
| 1084 | set_page_attr(bitmap, bitmap->filemap[i], | 1065 | set_page_attr(bitmap, bitmap->filemap[i], |
| 1085 | BITMAP_PAGE_NEEDWRITE); | 1066 | BITMAP_PAGE_NEEDWRITE); |
| 1086 | } | 1067 | } |
| 1087 | 1068 | ||
| 1088 | |||
| 1089 | static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc) | 1069 | static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc) |
| 1090 | { | 1070 | { |
| 1091 | sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap); | 1071 | sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap); |
| 1092 | unsigned long page = chunk >> PAGE_COUNTER_SHIFT; | 1072 | unsigned long page = chunk >> PAGE_COUNTER_SHIFT; |
| 1093 | bitmap->bp[page].count += inc; | 1073 | bitmap->bp[page].count += inc; |
| 1094 | /* | ||
| 1095 | if (page == 0) printk("count page 0, offset %llu: %d gives %d\n", | ||
| 1096 | (unsigned long long)offset, inc, bitmap->bp[page].count); | ||
| 1097 | */ | ||
| 1098 | bitmap_checkfree(bitmap, page); | 1074 | bitmap_checkfree(bitmap, page); |
| 1099 | } | 1075 | } |
| 1100 | static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, | 1076 | static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, |
| @@ -1197,14 +1173,11 @@ void bitmap_daemon_work(mddev_t *mddev) | |||
| 1197 | (sector_t)j << CHUNK_BLOCK_SHIFT(bitmap), | 1173 | (sector_t)j << CHUNK_BLOCK_SHIFT(bitmap), |
| 1198 | &blocks, 0); | 1174 | &blocks, 0); |
| 1199 | if (bmc) { | 1175 | if (bmc) { |
| 1200 | /* | ||
| 1201 | if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc); | ||
| 1202 | */ | ||
| 1203 | if (*bmc) | 1176 | if (*bmc) |
| 1204 | bitmap->allclean = 0; | 1177 | bitmap->allclean = 0; |
| 1205 | 1178 | ||
| 1206 | if (*bmc == 2) { | 1179 | if (*bmc == 2) { |
| 1207 | *bmc=1; /* maybe clear the bit next time */ | 1180 | *bmc = 1; /* maybe clear the bit next time */ |
| 1208 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); | 1181 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); |
| 1209 | } else if (*bmc == 1 && !bitmap->need_sync) { | 1182 | } else if (*bmc == 1 && !bitmap->need_sync) { |
| 1210 | /* we can clear the bit */ | 1183 | /* we can clear the bit */ |
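Editor's note: the daemon loop above clears bits lazily — a counter of 2 is first demoted to 1 ("maybe clear the bit next time") and only a later pass clears it, so a dirty bit survives at least one full daemon_sleep after its last write. A stripped-down sketch of that two-pass decay (the real code also consults need_sync and the page attributes):

#include <stdio.h>

/* Lazy two-pass decay, as the bitmap daemon applies to in-memory counters:
 * 2 means "recently written", 1 means "idle for one pass, clear next time". */
static void daemon_pass(int *counters, int n)
{
    for (int i = 0; i < n; i++) {
        if (counters[i] == 2)
            counters[i] = 1;    /* maybe clear the bit next time */
        else if (counters[i] == 1)
            counters[i] = 0;    /* idle for two passes: clear it */
    }
}

int main(void)
{
    int c[3] = { 2, 1, 0 };
    for (int pass = 1; pass <= 2; pass++) {
        daemon_pass(c, 3);
        printf("after pass %d: %d %d %d\n", pass, c[0], c[1], c[2]);
    }
    return 0;   /* prints "1 0 0" then "0 0 0" */
}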
| @@ -1243,7 +1216,7 @@ void bitmap_daemon_work(mddev_t *mddev) | |||
| 1243 | 1216 | ||
| 1244 | done: | 1217 | done: |
| 1245 | if (bitmap->allclean == 0) | 1218 | if (bitmap->allclean == 0) |
| 1246 | bitmap->mddev->thread->timeout = | 1219 | bitmap->mddev->thread->timeout = |
| 1247 | bitmap->mddev->bitmap_info.daemon_sleep; | 1220 | bitmap->mddev->bitmap_info.daemon_sleep; |
| 1248 | mutex_unlock(&mddev->bitmap_info.mutex); | 1221 | mutex_unlock(&mddev->bitmap_info.mutex); |
| 1249 | } | 1222 | } |
| @@ -1265,7 +1238,7 @@ __acquires(bitmap->lock) | |||
| 1265 | 1238 | ||
| 1266 | if (bitmap_checkpage(bitmap, page, create) < 0) { | 1239 | if (bitmap_checkpage(bitmap, page, create) < 0) { |
| 1267 | csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap)); | 1240 | csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap)); |
| 1268 | *blocks = csize - (offset & (csize- 1)); | 1241 | *blocks = csize - (offset & (csize - 1)); |
| 1269 | return NULL; | 1242 | return NULL; |
| 1270 | } | 1243 | } |
| 1271 | /* now locked ... */ | 1244 | /* now locked ... */ |
| @@ -1276,12 +1249,12 @@ __acquires(bitmap->lock) | |||
| 1276 | int hi = (pageoff > PAGE_COUNTER_MASK); | 1249 | int hi = (pageoff > PAGE_COUNTER_MASK); |
| 1277 | csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap) + | 1250 | csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap) + |
| 1278 | PAGE_COUNTER_SHIFT - 1); | 1251 | PAGE_COUNTER_SHIFT - 1); |
| 1279 | *blocks = csize - (offset & (csize- 1)); | 1252 | *blocks = csize - (offset & (csize - 1)); |
| 1280 | return &((bitmap_counter_t *) | 1253 | return &((bitmap_counter_t *) |
| 1281 | &bitmap->bp[page].map)[hi]; | 1254 | &bitmap->bp[page].map)[hi]; |
| 1282 | } else { /* page is allocated */ | 1255 | } else { /* page is allocated */ |
| 1283 | csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap)); | 1256 | csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap)); |
| 1284 | *blocks = csize - (offset & (csize- 1)); | 1257 | *blocks = csize - (offset & (csize - 1)); |
| 1285 | return (bitmap_counter_t *) | 1258 | return (bitmap_counter_t *) |
| 1286 | &(bitmap->bp[page].map[pageoff]); | 1259 | &(bitmap->bp[page].map[pageoff]); |
| 1287 | } | 1260 | } |
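Editor's note: the "*blocks = csize - (offset & (csize - 1))" lines above rely on csize being a power of two: the mask gives the offset within the current chunk, and subtracting it from csize gives how many blocks remain before the next chunk boundary. A quick numeric check with an invented chunk size:

#include <stdio.h>

int main(void)
{
    unsigned long long csize = 1ULL << 7;   /* 128-block chunks (example) */

    for (unsigned long long offset = 0; offset < 300; offset += 100) {
        unsigned long long blocks = csize - (offset & (csize - 1));
        printf("offset %3llu -> %3llu blocks to the chunk boundary\n",
               offset, blocks);
    }
    return 0;   /* 0 -> 128, 100 -> 28, 200 -> 56 */
}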
| @@ -1289,7 +1262,8 @@ __acquires(bitmap->lock) | |||
| 1289 | 1262 | ||
| 1290 | int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind) | 1263 | int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind) |
| 1291 | { | 1264 | { |
| 1292 | if (!bitmap) return 0; | 1265 | if (!bitmap) |
| 1266 | return 0; | ||
| 1293 | 1267 | ||
| 1294 | if (behind) { | 1268 | if (behind) { |
| 1295 | int bw; | 1269 | int bw; |
| @@ -1328,10 +1302,10 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect | |||
| 1328 | continue; | 1302 | continue; |
| 1329 | } | 1303 | } |
| 1330 | 1304 | ||
| 1331 | switch(*bmc) { | 1305 | switch (*bmc) { |
| 1332 | case 0: | 1306 | case 0: |
| 1333 | bitmap_file_set_bit(bitmap, offset); | 1307 | bitmap_file_set_bit(bitmap, offset); |
| 1334 | bitmap_count_page(bitmap,offset, 1); | 1308 | bitmap_count_page(bitmap, offset, 1); |
| 1335 | blk_plug_device_unlocked(bitmap->mddev->queue); | 1309 | blk_plug_device_unlocked(bitmap->mddev->queue); |
| 1336 | /* fall through */ | 1310 | /* fall through */ |
| 1337 | case 1: | 1311 | case 1: |
| @@ -1345,16 +1319,19 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect | |||
| 1345 | offset += blocks; | 1319 | offset += blocks; |
| 1346 | if (sectors > blocks) | 1320 | if (sectors > blocks) |
| 1347 | sectors -= blocks; | 1321 | sectors -= blocks; |
| 1348 | else sectors = 0; | 1322 | else |
| 1323 | sectors = 0; | ||
| 1349 | } | 1324 | } |
| 1350 | bitmap->allclean = 0; | 1325 | bitmap->allclean = 0; |
| 1351 | return 0; | 1326 | return 0; |
| 1352 | } | 1327 | } |
| 1328 | EXPORT_SYMBOL(bitmap_startwrite); | ||
| 1353 | 1329 | ||
| 1354 | void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, | 1330 | void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, |
| 1355 | int success, int behind) | 1331 | int success, int behind) |
| 1356 | { | 1332 | { |
| 1357 | if (!bitmap) return; | 1333 | if (!bitmap) |
| 1334 | return; | ||
| 1358 | if (behind) { | 1335 | if (behind) { |
| 1359 | if (atomic_dec_and_test(&bitmap->behind_writes)) | 1336 | if (atomic_dec_and_test(&bitmap->behind_writes)) |
| 1360 | wake_up(&bitmap->behind_wait); | 1337 | wake_up(&bitmap->behind_wait); |
| @@ -1391,18 +1368,20 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto | |||
| 1391 | wake_up(&bitmap->overflow_wait); | 1368 | wake_up(&bitmap->overflow_wait); |
| 1392 | 1369 | ||
| 1393 | (*bmc)--; | 1370 | (*bmc)--; |
| 1394 | if (*bmc <= 2) { | 1371 | if (*bmc <= 2) |
| 1395 | set_page_attr(bitmap, | 1372 | set_page_attr(bitmap, |
| 1396 | filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)), | 1373 | filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)), |
| 1397 | BITMAP_PAGE_CLEAN); | 1374 | BITMAP_PAGE_CLEAN); |
| 1398 | } | 1375 | |
| 1399 | spin_unlock_irqrestore(&bitmap->lock, flags); | 1376 | spin_unlock_irqrestore(&bitmap->lock, flags); |
| 1400 | offset += blocks; | 1377 | offset += blocks; |
| 1401 | if (sectors > blocks) | 1378 | if (sectors > blocks) |
| 1402 | sectors -= blocks; | 1379 | sectors -= blocks; |
| 1403 | else sectors = 0; | 1380 | else |
| 1381 | sectors = 0; | ||
| 1404 | } | 1382 | } |
| 1405 | } | 1383 | } |
| 1384 | EXPORT_SYMBOL(bitmap_endwrite); | ||
| 1406 | 1385 | ||
| 1407 | static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, | 1386 | static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, |
| 1408 | int degraded) | 1387 | int degraded) |
| @@ -1455,14 +1434,14 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, | |||
| 1455 | } | 1434 | } |
| 1456 | return rv; | 1435 | return rv; |
| 1457 | } | 1436 | } |
| 1437 | EXPORT_SYMBOL(bitmap_start_sync); | ||
| 1458 | 1438 | ||
| 1459 | void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted) | 1439 | void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted) |
| 1460 | { | 1440 | { |
| 1461 | bitmap_counter_t *bmc; | 1441 | bitmap_counter_t *bmc; |
| 1462 | unsigned long flags; | 1442 | unsigned long flags; |
| 1463 | /* | 1443 | |
| 1464 | if (offset == 0) printk("bitmap_end_sync 0 (%d)\n", aborted); | 1444 | if (bitmap == NULL) { |
| 1465 | */ if (bitmap == NULL) { | ||
| 1466 | *blocks = 1024; | 1445 | *blocks = 1024; |
| 1467 | return; | 1446 | return; |
| 1468 | } | 1447 | } |
| @@ -1471,26 +1450,23 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int ab | |||
| 1471 | if (bmc == NULL) | 1450 | if (bmc == NULL) |
| 1472 | goto unlock; | 1451 | goto unlock; |
| 1473 | /* locked */ | 1452 | /* locked */ |
| 1474 | /* | ||
| 1475 | if (offset == 0) printk("bitmap_end sync found 0x%x, blocks %d\n", *bmc, *blocks); | ||
| 1476 | */ | ||
| 1477 | if (RESYNC(*bmc)) { | 1453 | if (RESYNC(*bmc)) { |
| 1478 | *bmc &= ~RESYNC_MASK; | 1454 | *bmc &= ~RESYNC_MASK; |
| 1479 | 1455 | ||
| 1480 | if (!NEEDED(*bmc) && aborted) | 1456 | if (!NEEDED(*bmc) && aborted) |
| 1481 | *bmc |= NEEDED_MASK; | 1457 | *bmc |= NEEDED_MASK; |
| 1482 | else { | 1458 | else { |
| 1483 | if (*bmc <= 2) { | 1459 | if (*bmc <= 2) |
| 1484 | set_page_attr(bitmap, | 1460 | set_page_attr(bitmap, |
| 1485 | filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)), | 1461 | filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)), |
| 1486 | BITMAP_PAGE_CLEAN); | 1462 | BITMAP_PAGE_CLEAN); |
| 1487 | } | ||
| 1488 | } | 1463 | } |
| 1489 | } | 1464 | } |
| 1490 | unlock: | 1465 | unlock: |
| 1491 | spin_unlock_irqrestore(&bitmap->lock, flags); | 1466 | spin_unlock_irqrestore(&bitmap->lock, flags); |
| 1492 | bitmap->allclean = 0; | 1467 | bitmap->allclean = 0; |
| 1493 | } | 1468 | } |
| 1469 | EXPORT_SYMBOL(bitmap_end_sync); | ||
| 1494 | 1470 | ||
| 1495 | void bitmap_close_sync(struct bitmap *bitmap) | 1471 | void bitmap_close_sync(struct bitmap *bitmap) |
| 1496 | { | 1472 | { |
| @@ -1507,6 +1483,7 @@ void bitmap_close_sync(struct bitmap *bitmap) | |||
| 1507 | sector += blocks; | 1483 | sector += blocks; |
| 1508 | } | 1484 | } |
| 1509 | } | 1485 | } |
| 1486 | EXPORT_SYMBOL(bitmap_close_sync); | ||
| 1510 | 1487 | ||
| 1511 | void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector) | 1488 | void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector) |
| 1512 | { | 1489 | { |
| @@ -1537,6 +1514,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector) | |||
| 1537 | bitmap->last_end_sync = jiffies; | 1514 | bitmap->last_end_sync = jiffies; |
| 1538 | sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed"); | 1515 | sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed"); |
| 1539 | } | 1516 | } |
| 1517 | EXPORT_SYMBOL(bitmap_cond_end_sync); | ||
| 1540 | 1518 | ||
| 1541 | static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed) | 1519 | static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed) |
| 1542 | { | 1520 | { |
| @@ -1553,9 +1531,9 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n | |||
| 1553 | spin_unlock_irq(&bitmap->lock); | 1531 | spin_unlock_irq(&bitmap->lock); |
| 1554 | return; | 1532 | return; |
| 1555 | } | 1533 | } |
| 1556 | if (! *bmc) { | 1534 | if (!*bmc) { |
| 1557 | struct page *page; | 1535 | struct page *page; |
| 1558 | *bmc = 1 | (needed?NEEDED_MASK:0); | 1536 | *bmc = 1 | (needed ? NEEDED_MASK : 0); |
| 1559 | bitmap_count_page(bitmap, offset, 1); | 1537 | bitmap_count_page(bitmap, offset, 1); |
| 1560 | page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)); | 1538 | page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)); |
| 1561 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); | 1539 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); |
| @@ -1720,9 +1698,9 @@ int bitmap_create(mddev_t *mddev) | |||
| 1720 | bitmap->chunkshift = ffz(~mddev->bitmap_info.chunksize); | 1698 | bitmap->chunkshift = ffz(~mddev->bitmap_info.chunksize); |
| 1721 | 1699 | ||
| 1722 | /* now that chunksize and chunkshift are set, we can use these macros */ | 1700 | /* now that chunksize and chunkshift are set, we can use these macros */ |
| 1723 | chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >> | 1701 | chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >> |
| 1724 | CHUNK_BLOCK_SHIFT(bitmap); | 1702 | CHUNK_BLOCK_SHIFT(bitmap); |
| 1725 | pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO; | 1703 | pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO; |
| 1726 | 1704 | ||
| 1727 | BUG_ON(!pages); | 1705 | BUG_ON(!pages); |
| 1728 | 1706 | ||
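Editor's note: the chunks and pages computations above are both ceiling divisions — blocks are grouped into chunks of CHUNK_BLOCK_RATIO blocks, and the 16-bit in-memory counters are then packed PAGE_COUNTER_RATIO to a page (2048 per 4K page). A worked example with invented sizes:

#include <stdio.h>

int main(void)
{
    unsigned long long blocks = 1000000;        /* array size in 512B blocks (example) */
    int chunk_block_shift = 10;                 /* 512KB chunks -> 1024 blocks each */
    unsigned long chunk_block_ratio = 1UL << chunk_block_shift;
    unsigned long page_counter_ratio = 2048;    /* 16-bit counters per 4KB page */

    unsigned long long chunks =
        (blocks + chunk_block_ratio - 1) >> chunk_block_shift;
    unsigned long long pages =
        (chunks + page_counter_ratio - 1) / page_counter_ratio;

    printf("%llu blocks -> %llu chunks -> %llu counter pages\n",
           blocks, chunks, pages);
    return 0;   /* 1000000 -> 977 -> 1 */
}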
| @@ -1775,11 +1753,11 @@ static ssize_t | |||
| 1775 | location_show(mddev_t *mddev, char *page) | 1753 | location_show(mddev_t *mddev, char *page) |
| 1776 | { | 1754 | { |
| 1777 | ssize_t len; | 1755 | ssize_t len; |
| 1778 | if (mddev->bitmap_info.file) { | 1756 | if (mddev->bitmap_info.file) |
| 1779 | len = sprintf(page, "file"); | 1757 | len = sprintf(page, "file"); |
| 1780 | } else if (mddev->bitmap_info.offset) { | 1758 | else if (mddev->bitmap_info.offset) |
| 1781 | len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset); | 1759 | len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset); |
| 1782 | } else | 1760 | else |
| 1783 | len = sprintf(page, "none"); | 1761 | len = sprintf(page, "none"); |
| 1784 | len += sprintf(page+len, "\n"); | 1762 | len += sprintf(page+len, "\n"); |
| 1785 | return len; | 1763 | return len; |
| @@ -1868,7 +1846,7 @@ timeout_show(mddev_t *mddev, char *page) | |||
| 1868 | ssize_t len; | 1846 | ssize_t len; |
| 1869 | unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ; | 1847 | unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ; |
| 1870 | unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ; | 1848 | unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ; |
| 1871 | 1849 | ||
| 1872 | len = sprintf(page, "%lu", secs); | 1850 | len = sprintf(page, "%lu", secs); |
| 1873 | if (jifs) | 1851 | if (jifs) |
| 1874 | len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs)); | 1852 | len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs)); |
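Editor's note: daemon_sleep is stored in jiffies (which is what the md.h comment fix at the end of this patch clarifies), so timeout_show() splits it into whole seconds plus a sub-second jiffies remainder. A small standalone version of the same split, with an illustrative HZ and a hand-rolled stand-in for jiffies_to_msecs():

#include <stdio.h>

#define HZ 1000     /* example tick rate; the kernel's HZ is config-dependent */

int main(void)
{
    unsigned long daemon_sleep = 5 * HZ + 250;  /* 5.25 s expressed in jiffies */
    unsigned long secs = daemon_sleep / HZ;
    unsigned long jifs = daemon_sleep % HZ;
    unsigned long msecs = jifs * 1000 / HZ;     /* stand-in for jiffies_to_msecs() */

    printf("%lu.%03lu seconds\n", secs, msecs); /* prints 5.250 seconds */
    return 0;
}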
| @@ -2050,12 +2028,3 @@ struct attribute_group md_bitmap_group = { | |||
| 2050 | .attrs = md_bitmap_attrs, | 2028 | .attrs = md_bitmap_attrs, |
| 2051 | }; | 2029 | }; |
| 2052 | 2030 | ||
| 2053 | |||
| 2054 | /* the bitmap API -- for raid personalities */ | ||
| 2055 | EXPORT_SYMBOL(bitmap_startwrite); | ||
| 2056 | EXPORT_SYMBOL(bitmap_endwrite); | ||
| 2057 | EXPORT_SYMBOL(bitmap_start_sync); | ||
| 2058 | EXPORT_SYMBOL(bitmap_end_sync); | ||
| 2059 | EXPORT_SYMBOL(bitmap_unplug); | ||
| 2060 | EXPORT_SYMBOL(bitmap_close_sync); | ||
| 2061 | EXPORT_SYMBOL(bitmap_cond_end_sync); | ||
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 57eb864a8249..209993207a55 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
| @@ -319,7 +319,7 @@ struct mddev_s | |||
| 319 | */ | 319 | */ |
| 320 | struct mutex mutex; | 320 | struct mutex mutex; |
| 321 | unsigned long chunksize; | 321 | unsigned long chunksize; |
| 322 | unsigned long daemon_sleep; /* how many seconds between updates? */ | 322 | unsigned long daemon_sleep; /* how many jiffies between updates? */ |
| 323 | unsigned long max_write_behind; /* write-behind mode */ | 323 | unsigned long max_write_behind; /* write-behind mode */ |
| 324 | int external; | 324 | int external; |
| 325 | } bitmap_info; | 325 | } bitmap_info; |
