Diffstat (limited to 'fs/ext4/resize.c')
-rw-r--r--  fs/ext4/resize.c  199
1 files changed, 103 insertions(+), 96 deletions(-)
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 80bbc9c60c24..707d3f16f7ce 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -16,6 +16,35 @@
 
 #include "ext4_jbd2.h"
 
+int ext4_resize_begin(struct super_block *sb)
+{
+        int ret = 0;
+
+        if (!capable(CAP_SYS_RESOURCE))
+                return -EPERM;
+
+        /*
+         * We are not allowed to do online-resizing on a filesystem mounted
+         * with error, because it can destroy the filesystem easily.
+         */
+        if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
+                ext4_warning(sb, "There are errors in the filesystem, "
+                             "so online resizing is not allowed\n");
+                return -EPERM;
+        }
+
+        if (test_and_set_bit_lock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags))
+                ret = -EBUSY;
+
+        return ret;
+}
+
+void ext4_resize_end(struct super_block *sb)
+{
+        clear_bit_unlock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags);
+        smp_mb__after_clear_bit();
+}
+
 #define outside(b, first, last)        ((b) < (first) || (b) >= (last))
 #define inside(b, first, last)         ((b) >= (first) && (b) < (last))
 
@@ -118,10 +147,8 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
                 brelse(bh);
                 bh = ERR_PTR(err);
         } else {
-                lock_buffer(bh);
                 memset(bh->b_data, 0, sb->s_blocksize);
                 set_buffer_uptodate(bh);
-                unlock_buffer(bh);
         }
 
         return bh;
@@ -132,8 +159,7 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
  * If that fails, restart the transaction & regain write access for the
  * buffer head which is used for block_bitmap modifications.
  */
-static int extend_or_restart_transaction(handle_t *handle, int thresh,
-                                         struct buffer_head *bh)
+static int extend_or_restart_transaction(handle_t *handle, int thresh)
 {
         int err;
 
@@ -144,9 +170,8 @@ static int extend_or_restart_transaction(handle_t *handle, int thresh,
         if (err < 0)
                 return err;
         if (err) {
-                if ((err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
-                        return err;
-                if ((err = ext4_journal_get_write_access(handle, bh)))
+                err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA);
+                if (err)
                         return err;
         }
 
@@ -181,21 +206,7 @@ static int setup_new_group_blocks(struct super_block *sb,
         if (IS_ERR(handle))
                 return PTR_ERR(handle);
 
-        mutex_lock(&sbi->s_resize_lock);
-        if (input->group != sbi->s_groups_count) {
-                err = -EBUSY;
-                goto exit_journal;
-        }
-
-        if (IS_ERR(bh = bclean(handle, sb, input->block_bitmap))) {
-                err = PTR_ERR(bh);
-                goto exit_journal;
-        }
-
-        if (ext4_bg_has_super(sb, input->group)) {
-                ext4_debug("mark backup superblock %#04llx (+0)\n", start);
-                ext4_set_bit(0, bh->b_data);
-        }
+        BUG_ON(input->group != sbi->s_groups_count);
 
         /* Copy all of the GDT blocks into the backup in this group */
         for (i = 0, bit = 1, block = start + 1;
@@ -203,29 +214,26 @@ static int setup_new_group_blocks(struct super_block *sb,
                 struct buffer_head *gdb;
 
                 ext4_debug("update backup group %#04llx (+%d)\n", block, bit);
-
-                if ((err = extend_or_restart_transaction(handle, 1, bh)))
-                        goto exit_bh;
+                err = extend_or_restart_transaction(handle, 1);
+                if (err)
+                        goto exit_journal;
 
                 gdb = sb_getblk(sb, block);
                 if (!gdb) {
                         err = -EIO;
-                        goto exit_bh;
+                        goto exit_journal;
                 }
                 if ((err = ext4_journal_get_write_access(handle, gdb))) {
                         brelse(gdb);
-                        goto exit_bh;
+                        goto exit_journal;
                 }
-                lock_buffer(gdb);
                 memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
                 set_buffer_uptodate(gdb);
-                unlock_buffer(gdb);
                 err = ext4_handle_dirty_metadata(handle, NULL, gdb);
                 if (unlikely(err)) {
                         brelse(gdb);
-                        goto exit_bh;
+                        goto exit_journal;
                 }
-                ext4_set_bit(bit, bh->b_data);
                 brelse(gdb);
         }
 
@@ -235,9 +243,22 @@ static int setup_new_group_blocks(struct super_block *sb,
         err = sb_issue_zeroout(sb, gdblocks + start + 1, reserved_gdb,
                                GFP_NOFS);
         if (err)
-                goto exit_bh;
-        for (i = 0, bit = gdblocks + 1; i < reserved_gdb; i++, bit++)
-                ext4_set_bit(bit, bh->b_data);
+                goto exit_journal;
+
+        err = extend_or_restart_transaction(handle, 2);
+        if (err)
+                goto exit_journal;
+
+        bh = bclean(handle, sb, input->block_bitmap);
+        if (IS_ERR(bh)) {
+                err = PTR_ERR(bh);
+                goto exit_journal;
+        }
+
+        if (ext4_bg_has_super(sb, input->group)) {
+                ext4_debug("mark backup group tables %#04llx (+0)\n", start);
+                ext4_set_bits(bh->b_data, 0, gdblocks + reserved_gdb + 1);
+        }
 
         ext4_debug("mark block bitmap %#04llx (+%llu)\n", input->block_bitmap,
                    input->block_bitmap - start);
@@ -253,12 +274,9 @@ static int setup_new_group_blocks(struct super_block *sb,
         err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS);
         if (err)
                 goto exit_bh;
-        for (i = 0, bit = input->inode_table - start;
-             i < sbi->s_itb_per_group; i++, bit++)
-                ext4_set_bit(bit, bh->b_data);
+        ext4_set_bits(bh->b_data, input->inode_table - start,
+                      sbi->s_itb_per_group);
 
-        if ((err = extend_or_restart_transaction(handle, 2, bh)))
-                goto exit_bh;
 
         ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8,
                              bh->b_data);
@@ -285,7 +303,6 @@ exit_bh:
         brelse(bh);
 
 exit_journal:
-        mutex_unlock(&sbi->s_resize_lock);
         if ((err2 = ext4_journal_stop(handle)) && !err)
                 err = err2;
 
@@ -377,15 +394,15 @@ static int verify_reserved_gdb(struct super_block *sb,
  * fail once we start modifying the data on disk, because JBD has no rollback.
  */
 static int add_new_gdb(handle_t *handle, struct inode *inode,
-                       struct ext4_new_group_data *input,
-                       struct buffer_head **primary)
+                       ext4_group_t group)
 {
         struct super_block *sb = inode->i_sb;
         struct ext4_super_block *es = EXT4_SB(sb)->s_es;
-        unsigned long gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
+        unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
         ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
         struct buffer_head **o_group_desc, **n_group_desc;
         struct buffer_head *dind;
+        struct buffer_head *gdb_bh;
         int gdbackups;
         struct ext4_iloc iloc;
         __le32 *data;
@@ -408,11 +425,12 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
                 return -EPERM;
         }
 
-        *primary = sb_bread(sb, gdblock);
-        if (!*primary)
+        gdb_bh = sb_bread(sb, gdblock);
+        if (!gdb_bh)
                 return -EIO;
 
-        if ((gdbackups = verify_reserved_gdb(sb, *primary)) < 0) {
+        gdbackups = verify_reserved_gdb(sb, gdb_bh);
+        if (gdbackups < 0) {
                 err = gdbackups;
                 goto exit_bh;
         }
@@ -427,7 +445,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
         data = (__le32 *)dind->b_data;
         if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
                 ext4_warning(sb, "new group %u GDT block %llu not reserved",
-                             input->group, gdblock);
+                             group, gdblock);
                 err = -EINVAL;
                 goto exit_dind;
         }
@@ -436,7 +454,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
         if (unlikely(err))
                 goto exit_dind;
 
-        err = ext4_journal_get_write_access(handle, *primary);
+        err = ext4_journal_get_write_access(handle, gdb_bh);
         if (unlikely(err))
                 goto exit_sbh;
 
@@ -449,12 +467,13 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
         if (unlikely(err))
                 goto exit_dindj;
 
-        n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
-                        GFP_NOFS);
+        n_group_desc = ext4_kvmalloc((gdb_num + 1) *
+                                     sizeof(struct buffer_head *),
+                                     GFP_NOFS);
         if (!n_group_desc) {
                 err = -ENOMEM;
-                ext4_warning(sb,
-                             "not enough memory for %lu groups", gdb_num + 1);
+                ext4_warning(sb, "not enough memory for %lu groups",
+                             gdb_num + 1);
                 goto exit_inode;
         }
 
@@ -475,8 +494,8 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
         }
         inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
         ext4_mark_iloc_dirty(handle, inode, &iloc);
-        memset((*primary)->b_data, 0, sb->s_blocksize);
-        err = ext4_handle_dirty_metadata(handle, NULL, *primary);
+        memset(gdb_bh->b_data, 0, sb->s_blocksize);
+        err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
         if (unlikely(err)) {
                 ext4_std_error(sb, err);
                 goto exit_inode;
@@ -486,10 +505,10 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
         o_group_desc = EXT4_SB(sb)->s_group_desc;
         memcpy(n_group_desc, o_group_desc,
                EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
-        n_group_desc[gdb_num] = *primary;
+        n_group_desc[gdb_num] = gdb_bh;
         EXT4_SB(sb)->s_group_desc = n_group_desc;
         EXT4_SB(sb)->s_gdb_count++;
-        kfree(o_group_desc);
+        ext4_kvfree(o_group_desc);
 
         le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
         err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
@@ -499,6 +518,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
         return err;
 
 exit_inode:
+        ext4_kvfree(n_group_desc);
         /* ext4_handle_release_buffer(handle, iloc.bh); */
         brelse(iloc.bh);
 exit_dindj:
@@ -508,7 +528,7 @@ exit_sbh:
 exit_dind:
         brelse(dind);
 exit_bh:
-        brelse(*primary);
+        brelse(gdb_bh);
 
         ext4_debug("leaving with error %d\n", err);
         return err;
@@ -528,7 +548,7 @@ exit_bh:
  * backup GDT blocks are stored in their reserved primary GDT block.
  */
 static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
-                              struct ext4_new_group_data *input)
+                              ext4_group_t group)
 {
         struct super_block *sb = inode->i_sb;
         int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
@@ -599,7 +619,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
          * Finally we can add each of the reserved backup GDT blocks from
          * the new group to its reserved primary GDT block.
          */
-        blk = input->group * EXT4_BLOCKS_PER_GROUP(sb);
+        blk = group * EXT4_BLOCKS_PER_GROUP(sb);
         for (i = 0; i < reserved_gdb; i++) {
                 int err2;
                 data = (__le32 *)primary[i]->b_data;
@@ -799,13 +819,6 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
                 goto exit_put;
         }
 
-        mutex_lock(&sbi->s_resize_lock);
-        if (input->group != sbi->s_groups_count) {
-                ext4_warning(sb, "multiple resizers run on filesystem!");
-                err = -EBUSY;
-                goto exit_journal;
-        }
-
         if ((err = ext4_journal_get_write_access(handle, sbi->s_sbh)))
                 goto exit_journal;
 
@@ -820,16 +833,25 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
                 if ((err = ext4_journal_get_write_access(handle, primary)))
                         goto exit_journal;
 
-                if (reserved_gdb && ext4_bg_num_gdb(sb, input->group) &&
-                    (err = reserve_backup_gdb(handle, inode, input)))
+                if (reserved_gdb && ext4_bg_num_gdb(sb, input->group)) {
+                        err = reserve_backup_gdb(handle, inode, input->group);
+                        if (err)
+                                goto exit_journal;
+                }
+        } else {
+                /*
+                 * Note that we can access new group descriptor block safely
+                 * only if add_new_gdb() succeeds.
+                 */
+                err = add_new_gdb(handle, inode, input->group);
+                if (err)
                         goto exit_journal;
-        } else if ((err = add_new_gdb(handle, inode, input, &primary)))
-                goto exit_journal;
+                primary = sbi->s_group_desc[gdb_num];
+        }
 
         /*
          * OK, now we've set up the new group. Time to make it active.
          *
-         * We do not lock all allocations via s_resize_lock
          * so we have to be safe wrt. concurrent accesses the group
          * data. So we need to be careful to set all of the relevant
          * group descriptor data etc. *before* we enable the group.
@@ -886,13 +908,9 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
          *
          * The precise rules we use are:
          *
-         * * Writers of s_groups_count *must* hold s_resize_lock
-         * AND
          * * Writers must perform a smp_wmb() after updating all dependent
          *   data and before modifying the groups count
          *
-         * * Readers must hold s_resize_lock over the access
-         * OR
          * * Readers must perform an smp_rmb() after reading the groups count
          *   and before reading any dependent data.
          *
@@ -937,10 +955,9 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
         ext4_handle_dirty_super(handle, sb);
 
 exit_journal:
-        mutex_unlock(&sbi->s_resize_lock);
         if ((err2 = ext4_journal_stop(handle)) && !err)
                 err = err2;
-        if (!err) {
+        if (!err && primary) {
                 update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
                                sizeof(struct ext4_super_block));
                 update_backups(sb, primary->b_blocknr, primary->b_data,
@@ -969,16 +986,13 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
         ext4_grpblk_t add;
         struct buffer_head *bh;
         handle_t *handle;
-        int err;
+        int err, err2;
         ext4_group_t group;
 
-        /* We don't need to worry about locking wrt other resizers just
-         * yet: we're going to revalidate es->s_blocks_count after
-         * taking the s_resize_lock below. */
         o_blocks_count = ext4_blocks_count(es);
 
         if (test_opt(sb, DEBUG))
-                printk(KERN_DEBUG "EXT4-fs: extending last group from %llu uto %llu blocks\n",
+                printk(KERN_DEBUG "EXT4-fs: extending last group from %llu to %llu blocks\n",
                        o_blocks_count, n_blocks_count);
 
         if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
@@ -995,7 +1009,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
 
         if (n_blocks_count < o_blocks_count) {
                 ext4_warning(sb, "can't shrink FS - resize aborted");
-                return -EBUSY;
+                return -EINVAL;
         }
 
         /* Handle the remaining blocks in the last group only. */
@@ -1038,32 +1052,25 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
                 goto exit_put;
         }
 
-        mutex_lock(&EXT4_SB(sb)->s_resize_lock);
-        if (o_blocks_count != ext4_blocks_count(es)) {
-                ext4_warning(sb, "multiple resizers run on filesystem!");
-                mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
-                ext4_journal_stop(handle);
-                err = -EBUSY;
-                goto exit_put;
-        }
-
         if ((err = ext4_journal_get_write_access(handle,
                                                  EXT4_SB(sb)->s_sbh))) {
                 ext4_warning(sb, "error %d on journal write access", err);
-                mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
                 ext4_journal_stop(handle);
                 goto exit_put;
         }
         ext4_blocks_count_set(es, o_blocks_count + add);
-        mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
         ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
                    o_blocks_count + add);
         /* We add the blocks to the bitmap and set the group need init bit */
-        ext4_add_groupblocks(handle, sb, o_blocks_count, add);
+        err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
         ext4_handle_dirty_super(handle, sb);
         ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
                    o_blocks_count + add);
-        if ((err = ext4_journal_stop(handle)))
+        err2 = ext4_journal_stop(handle);
+        if (!err && err2)
+                err = err2;
+
+        if (err)
                 goto exit_put;
 
         if (test_opt(sb, DEBUG))