Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bitmap.c  12
-rw-r--r--  drivers/md/dm.c      27
-rw-r--r--  drivers/md/md.c      32
-rw-r--r--  drivers/md/raid1.c    7
-rw-r--r--  drivers/md/raid5.c    5
5 files changed, 69 insertions, 14 deletions
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 5432d07c074d..11108165e264 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -479,9 +479,12 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 	int err = -EINVAL;
 
 	/* page 0 is the superblock, read it... */
-	if (bitmap->file)
-		bitmap->sb_page = read_page(bitmap->file, 0, bitmap, PAGE_SIZE);
-	else {
+	if (bitmap->file) {
+		loff_t isize = i_size_read(bitmap->file->f_mapping->host);
+		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;
+
+		bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes);
+	} else {
 		bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, 0);
 	}
 	if (IS_ERR(bitmap->sb_page)) {
@@ -877,7 +880,8 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 			int count;
 			/* unmap the old page, we're done with it */
 			if (index == num_pages-1)
-				count = bytes - index * PAGE_SIZE;
+				count = bytes + sizeof(bitmap_super_t)
+					- index * PAGE_SIZE;
 			else
 				count = PAGE_SIZE;
 			if (index == 0) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index fe7c56e10435..3668b170ea68 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1116,7 +1116,8 @@ static int __bind(struct mapped_device *md, struct dm_table *t)
 	if (size != get_capacity(md->disk))
 		memset(&md->geometry, 0, sizeof(md->geometry));
 
-	__set_size(md, size);
+	if (md->suspended_bdev)
+		__set_size(md, size);
 	if (size == 0)
 		return 0;
 
@@ -1264,6 +1265,11 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
 	if (!dm_suspended(md))
 		goto out;
 
+	/* without bdev, the device size cannot be changed */
+	if (!md->suspended_bdev)
+		if (get_capacity(md->disk) != dm_table_get_size(table))
+			goto out;
+
 	__unbind(md);
 	r = __bind(md, table);
 
@@ -1341,11 +1347,14 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
 	/* This does not get reverted if there's an error later. */
 	dm_table_presuspend_targets(map);
 
-	md->suspended_bdev = bdget_disk(md->disk, 0);
-	if (!md->suspended_bdev) {
-		DMWARN("bdget failed in dm_suspend");
-		r = -ENOMEM;
-		goto flush_and_out;
+	/* bdget() can stall if the pending I/Os are not flushed */
+	if (!noflush) {
+		md->suspended_bdev = bdget_disk(md->disk, 0);
+		if (!md->suspended_bdev) {
+			DMWARN("bdget failed in dm_suspend");
+			r = -ENOMEM;
+			goto flush_and_out;
+		}
 	}
 
 	/*
@@ -1473,8 +1482,10 @@ int dm_resume(struct mapped_device *md)
 
 	unlock_fs(md);
 
-	bdput(md->suspended_bdev);
-	md->suspended_bdev = NULL;
+	if (md->suspended_bdev) {
+		bdput(md->suspended_bdev);
+		md->suspended_bdev = NULL;
+	}
 
 	clear_bit(DMF_SUSPENDED, &md->flags);
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d1cb45f6d6a9..e8807ea5377d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1633,7 +1633,8 @@ repeat:
 	 * and 'events' is odd, we can roll back to the previous clean state */
 	if (nospares
 	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
-	    && (mddev->events & 1))
+	    && (mddev->events & 1)
+	    && mddev->events != 1)
 		mddev->events--;
 	else {
 		/* otherwise we have to go forward and ... */
@@ -3563,6 +3564,8 @@ static int get_bitmap_file(mddev_t * mddev, void __user * arg)
 	char *ptr, *buf = NULL;
 	int err = -ENOMEM;
 
+	md_allow_write(mddev);
+
 	file = kmalloc(sizeof(*file), GFP_KERNEL);
 	if (!file)
 		goto out;
@@ -5031,6 +5034,33 @@ void md_write_end(mddev_t *mddev)
 	}
 }
 
+/* md_allow_write(mddev)
+ * Calling this ensures that the array is marked 'active' so that writes
+ * may proceed without blocking.  It is important to call this before
+ * attempting a GFP_KERNEL allocation while holding the mddev lock.
+ * Must be called with mddev_lock held.
+ */
+void md_allow_write(mddev_t *mddev)
+{
+	if (!mddev->pers)
+		return;
+	if (mddev->ro)
+		return;
+
+	spin_lock_irq(&mddev->write_lock);
+	if (mddev->in_sync) {
+		mddev->in_sync = 0;
+		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+		if (mddev->safemode_delay &&
+		    mddev->safemode == 0)
+			mddev->safemode = 1;
+		spin_unlock_irq(&mddev->write_lock);
+		md_update_sb(mddev, 0);
+	} else
+		spin_unlock_irq(&mddev->write_lock);
+}
+EXPORT_SYMBOL_GPL(md_allow_write);
+
 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
 
 #define SYNC_MARKS	10
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 164b25dca101..97ee870b265d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1266,6 +1266,11 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
 				sbio->bi_sector = r1_bio->sector +
 					conf->mirrors[i].rdev->data_offset;
 				sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
+				for (j = 0; j < vcnt ; j++)
+					memcpy(page_address(sbio->bi_io_vec[j].bv_page),
+					       page_address(pbio->bi_io_vec[j].bv_page),
+					       PAGE_SIZE);
+
 			}
 		}
 	}
@@ -2099,6 +2104,8 @@ static int raid1_reshape(mddev_t *mddev)
 		return -EINVAL;
 	}
 
+	md_allow_write(mddev);
+
 	raid_disks = mddev->raid_disks + mddev->delta_disks;
 
 	if (raid_disks < conf->raid_disks) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index be008f034ada..467c16982d02 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -405,6 +405,8 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
 	if (newsize <= conf->pool_size)
 		return 0; /* never bother to shrink */
 
+	md_allow_write(conf->mddev);
+
 	/* Step 1 */
 	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
 			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
@@ -2678,7 +2680,7 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
 	mdk_rdev_t *rdev;
 
 	if (!in_chunk_boundary(mddev, raid_bio)) {
-		printk("chunk_aligned_read : non aligned\n");
+		PRINTK("chunk_aligned_read : non aligned\n");
 		return 0;
 	}
 	/*
@@ -3250,6 +3252,7 @@ raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
 		else
 			break;
 	}
+	md_allow_write(mddev);
 	while (new > conf->max_nr_stripes) {
 		if (grow_one_stripe(conf))
 			conf->max_nr_stripes++;
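
The calling pattern the new md_allow_write() helper establishes, as seen in the get_bitmap_file(), raid1_reshape(), resize_stripes() and raid5_store_stripe_cache_size() hunks above: call it before any GFP_KERNEL allocation made while the mddev lock is held, so the array is already marked 'active' and writes may proceed without blocking during the allocation. The sketch below is illustrative only and not part of the patch; example_alloc_under_mddev_lock() and its buffer are hypothetical.

/*
 * Hypothetical caller, for illustration only (not from the patch).
 * Assumes it is entered with mddev_lock held, as md_allow_write() requires.
 */
static int example_alloc_under_mddev_lock(mddev_t *mddev)
{
	char *buf;

	/* Mark the array 'active' before a potentially blocking allocation. */
	md_allow_write(mddev);

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... use buf ... */

	kfree(buf);
	return 0;
}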