author		James Bottomley <jejb@mulgrave.il.steeleye.com>	2006-11-22 13:06:44 -0500
committer	James Bottomley <jejb@mulgrave.il.steeleye.com>	2006-11-22 13:06:44 -0500
commit		0bd2af46839ad6262d25714a6ec0365db9d6b98f (patch)
tree		dcced72d230d69fd0c5816ac6dd03ab84799a93e /fs/ext4/resize.c
parent		e138a5d2356729b8752e88520cc1525fae9794ac (diff)
parent		f26b90440cd74c78fe10c9bd5160809704a9627c (diff)

    Merge ../scsi-rc-fixes-2.6
Diffstat (limited to 'fs/ext4/resize.c')

 -rw-r--r--  fs/ext4/resize.c  1050
 1 file changed, 1050 insertions, 0 deletions
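The file added below is only the kernel half of online resizing: a mounted filesystem is grown from user space (normally by ext2online/resize2fs) through the ext4 resize ioctls, which land in ext4_group_extend() and ext4_group_add() further down. As a rough sketch only — the ioctl name, number and argument convention here are assumptions modelled on the ext4 headers of this era, not something this commit defines — a minimal caller that fills out the last group might look like this:

/*
 * Illustration only -- not part of this commit.  A minimal user-space
 * caller of the online-resize ioctl serviced by ext4_group_extend().
 * The ioctl name/number and argument convention are assumptions based
 * on the ext4 headers of this era; real tools (ext2online/resize2fs)
 * derive the new size from the superblock instead of taking it on the
 * command line.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define EXT4_IOC_GROUP_EXTEND	_IOW('f', 7, unsigned long)	/* assumed value */

int main(int argc, char **argv)
{
	unsigned long n_blocks_count;
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <mountpoint> <new-block-count>\n", argv[0]);
		return 1;
	}
	n_blocks_count = strtoul(argv[2], NULL, 0);

	/* Any descriptor on the mounted filesystem will do. */
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ends up in ext4_group_extend(), filling out the last group. */
	if (ioctl(fd, EXT4_IOC_GROUP_EXTEND, &n_blocks_count) < 0) {
		perror("EXT4_IOC_GROUP_EXTEND");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}

Growing by whole new groups goes through the companion EXT4_IOC_GROUP_ADD path instead, whose argument describes the new group's bitmap and inode-table placement — the same fields verify_group_input() below sanity-checks.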
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
new file mode 100644
index 000000000000..4fe49c3661b2
--- /dev/null
+++ b/fs/ext4/resize.c
@@ -0,0 +1,1050 @@
/*
 * linux/fs/ext4/resize.c
 *
 * Support for resizing an ext4 filesystem while it is mounted.
 *
 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
 *
 * This could probably be made into a module, because it is not often in use.
 */


#define EXT4FS_DEBUG

#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/ext4_jbd2.h>

#include <linux/errno.h>
#include <linux/slab.h>


#define outside(b, first, last)	((b) < (first) || (b) >= (last))
#define inside(b, first, last)	((b) >= (first) && (b) < (last))

static int verify_group_input(struct super_block *sb,
			      struct ext4_new_group_data *input)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t start = ext4_blocks_count(es);
	ext4_fsblk_t end = start + input->blocks_count;
	unsigned group = input->group;
	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
	unsigned overhead = ext4_bg_has_super(sb, group) ?
		(1 + ext4_bg_num_gdb(sb, group) +
		 le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
	ext4_fsblk_t metaend = start + overhead;
	struct buffer_head *bh = NULL;
	ext4_grpblk_t free_blocks_count, offset;
	int err = -EINVAL;

	input->free_blocks_count = free_blocks_count =
		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
		       "(%d free, %u reserved)\n",
		       ext4_bg_has_super(sb, input->group) ? "normal" :
		       "no-super", input->group, input->blocks_count,
		       free_blocks_count, input->reserved_blocks);

	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
	if (group != sbi->s_groups_count)
		ext4_warning(sb, __FUNCTION__,
			     "Cannot add at group %u (only %lu groups)",
			     input->group, sbi->s_groups_count);
	else if (offset != 0)
		ext4_warning(sb, __FUNCTION__, "Last group not full");
	else if (input->reserved_blocks > input->blocks_count / 5)
		ext4_warning(sb, __FUNCTION__, "Reserved blocks too high (%u)",
			     input->reserved_blocks);
	else if (free_blocks_count < 0)
		ext4_warning(sb, __FUNCTION__, "Bad blocks count %u",
			     input->blocks_count);
	else if (!(bh = sb_bread(sb, end - 1)))
		ext4_warning(sb, __FUNCTION__,
			     "Cannot read last block (%llu)",
			     end - 1);
	else if (outside(input->block_bitmap, start, end))
		ext4_warning(sb, __FUNCTION__,
			     "Block bitmap not in group (block %llu)",
			     (unsigned long long)input->block_bitmap);
	else if (outside(input->inode_bitmap, start, end))
		ext4_warning(sb, __FUNCTION__,
			     "Inode bitmap not in group (block %llu)",
			     (unsigned long long)input->inode_bitmap);
	else if (outside(input->inode_table, start, end) ||
		 outside(itend - 1, start, end))
		ext4_warning(sb, __FUNCTION__,
			     "Inode table not in group (blocks %llu-%llu)",
			     (unsigned long long)input->inode_table, itend - 1);
	else if (input->inode_bitmap == input->block_bitmap)
		ext4_warning(sb, __FUNCTION__,
			     "Block bitmap same as inode bitmap (%llu)",
			     (unsigned long long)input->block_bitmap);
	else if (inside(input->block_bitmap, input->inode_table, itend))
		ext4_warning(sb, __FUNCTION__,
			     "Block bitmap (%llu) in inode table (%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->inode_bitmap, input->inode_table, itend))
		ext4_warning(sb, __FUNCTION__,
			     "Inode bitmap (%llu) in inode table (%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->block_bitmap, start, metaend))
		ext4_warning(sb, __FUNCTION__,
			     "Block bitmap (%llu) in GDT table"
			     " (%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_bitmap, start, metaend))
		ext4_warning(sb, __FUNCTION__,
			     "Inode bitmap (%llu) in GDT table"
			     " (%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_table, start, metaend) ||
		 inside(itend - 1, start, metaend))
		ext4_warning(sb, __FUNCTION__,
			     "Inode table (%llu-%llu) overlaps"
			     "GDT table (%llu-%llu)",
			     (unsigned long long)input->inode_table,
			     itend - 1, start, metaend - 1);
	else
		err = 0;
	brelse(bh);

	return err;
}

static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
				  ext4_fsblk_t blk)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(sb, blk);
	if (!bh)
		return ERR_PTR(-EIO);
	if ((err = ext4_journal_get_write_access(handle, bh))) {
		brelse(bh);
		bh = ERR_PTR(err);
	} else {
		lock_buffer(bh);
		memset(bh->b_data, 0, sb->s_blocksize);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
	}

	return bh;
}

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
static void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

/*
 * Set up the block and inode bitmaps, and the inode table for the new group.
 * This doesn't need to be part of the main transaction, since we are only
 * changing blocks outside the actual filesystem.  We still do journaling to
 * ensure the recovery is correct in case of a failure just after resize.
 * If any part of this fails, we simply abort the resize.
 */
static int setup_new_group_blocks(struct super_block *sb,
				  struct ext4_new_group_data *input)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start = ext4_group_first_block_no(sb, input->group);
	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
		le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0;
	unsigned long gdblocks = ext4_bg_num_gdb(sb, input->group);
	struct buffer_head *bh;
	handle_t *handle;
	ext4_fsblk_t block;
	ext4_grpblk_t bit;
	int i;
	int err = 0, err2;

	handle = ext4_journal_start_sb(sb, reserved_gdb + gdblocks +
				       2 + sbi->s_itb_per_group);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	lock_super(sb);
	if (input->group != sbi->s_groups_count) {
		err = -EBUSY;
		goto exit_journal;
	}

	if (IS_ERR(bh = bclean(handle, sb, input->block_bitmap))) {
		err = PTR_ERR(bh);
		goto exit_journal;
	}

	if (ext4_bg_has_super(sb, input->group)) {
		ext4_debug("mark backup superblock %#04lx (+0)\n", start);
		ext4_set_bit(0, bh->b_data);
	}

	/* Copy all of the GDT blocks into the backup in this group */
	for (i = 0, bit = 1, block = start + 1;
	     i < gdblocks; i++, block++, bit++) {
		struct buffer_head *gdb;

		ext4_debug("update backup group %#04lx (+%d)\n", block, bit);

		gdb = sb_getblk(sb, block);
		if (!gdb) {
			err = -EIO;
			goto exit_bh;
		}
		if ((err = ext4_journal_get_write_access(handle, gdb))) {
			brelse(gdb);
			goto exit_bh;
		}
		lock_buffer(bh);
		memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, bh->b_size);
		set_buffer_uptodate(gdb);
		unlock_buffer(bh);
		ext4_journal_dirty_metadata(handle, gdb);
		ext4_set_bit(bit, bh->b_data);
		brelse(gdb);
	}

	/* Zero out all of the reserved backup group descriptor table blocks */
	for (i = 0, bit = gdblocks + 1, block = start + bit;
	     i < reserved_gdb; i++, block++, bit++) {
		struct buffer_head *gdb;

		ext4_debug("clear reserved block %#04lx (+%d)\n", block, bit);

		if (IS_ERR(gdb = bclean(handle, sb, block))) {
			err = PTR_ERR(bh);
			goto exit_bh;
		}
		ext4_journal_dirty_metadata(handle, gdb);
		ext4_set_bit(bit, bh->b_data);
		brelse(gdb);
	}
	ext4_debug("mark block bitmap %#04x (+%ld)\n", input->block_bitmap,
		   input->block_bitmap - start);
	ext4_set_bit(input->block_bitmap - start, bh->b_data);
	ext4_debug("mark inode bitmap %#04x (+%ld)\n", input->inode_bitmap,
		   input->inode_bitmap - start);
	ext4_set_bit(input->inode_bitmap - start, bh->b_data);

	/* Zero out all of the inode table blocks */
	for (i = 0, block = input->inode_table, bit = block - start;
	     i < sbi->s_itb_per_group; i++, bit++, block++) {
		struct buffer_head *it;

		ext4_debug("clear inode block %#04lx (+%d)\n", block, bit);
		if (IS_ERR(it = bclean(handle, sb, block))) {
			err = PTR_ERR(it);
			goto exit_bh;
		}
		ext4_journal_dirty_metadata(handle, it);
		brelse(it);
		ext4_set_bit(bit, bh->b_data);
	}
	mark_bitmap_end(input->blocks_count, EXT4_BLOCKS_PER_GROUP(sb),
			bh->b_data);
	ext4_journal_dirty_metadata(handle, bh);
	brelse(bh);

	/* Mark unused entries in inode bitmap used */
	ext4_debug("clear inode bitmap %#04x (+%ld)\n",
		   input->inode_bitmap, input->inode_bitmap - start);
	if (IS_ERR(bh = bclean(handle, sb, input->inode_bitmap))) {
		err = PTR_ERR(bh);
		goto exit_journal;
	}

	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
			bh->b_data);
	ext4_journal_dirty_metadata(handle, bh);
exit_bh:
	brelse(bh);

exit_journal:
	unlock_super(sb);
	if ((err2 = ext4_journal_stop(handle)) && !err)
		err = err2;

	return err;
}


/*
 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
 * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
 * calling this for the first time.  In a sparse filesystem it will be the
 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
 */
static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
				  unsigned *five, unsigned *seven)
{
	unsigned *min = three;
	int mult = 3;
	unsigned ret;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
		ret = *min;
		*min += 1;
		return ret;
	}

	if (*five < *min) {
		min = five;
		mult = 5;
	}
	if (*seven < *min) {
		min = seven;
		mult = 7;
	}

	ret = *min;
	*min *= mult;

	return ret;
}

/*
 * Check that all of the backup GDT blocks are held in the primary GDT block.
 * It is assumed that they are stored in group order.  Returns the number of
 * groups in current filesystem that have BACKUPS, or -ve error code.
 */
static int verify_reserved_gdb(struct super_block *sb,
			       struct buffer_head *primary)
{
	const ext4_fsblk_t blk = primary->b_blocknr;
	const unsigned long end = EXT4_SB(sb)->s_groups_count;
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	unsigned grp;
	__le32 *p = (__le32 *)primary->b_data;
	int gdbackups = 0;

	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
		if (le32_to_cpu(*p++) !=
		    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){
			ext4_warning(sb, __FUNCTION__,
				     "reserved GDT %llu"
				     " missing grp %d (%llu)",
				     blk, grp,
				     grp *
				     (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
				     blk);
			return -EINVAL;
		}
		if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
			return -EFBIG;
	}

	return gdbackups;
}

/*
 * Called when we need to bring a reserved group descriptor table block into
 * use from the resize inode.  The primary copy of the new GDT block currently
 * is an indirect block (under the double indirect block in the resize inode).
 * The new backup GDT blocks will be stored as leaf blocks in this indirect
 * block, in group order.  Even though we know all the block numbers we need,
 * we check to ensure that the resize inode has actually reserved these blocks.
 *
 * Don't need to update the block bitmaps because the blocks are still in use.
 *
 * We get all of the error cases out of the way, so that we are sure to not
 * fail once we start modifying the data on disk, because JBD has no rollback.
 */
static int add_new_gdb(handle_t *handle, struct inode *inode,
		       struct ext4_new_group_data *input,
		       struct buffer_head **primary)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	unsigned long gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
	struct buffer_head **o_group_desc, **n_group_desc;
	struct buffer_head *dind;
	int gdbackups;
	struct ext4_iloc iloc;
	__le32 *data;
	int err;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG
		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
		       gdb_num);

	/*
	 * If we are not using the primary superblock/GDT copy don't resize,
	 * because the user tools have no way of handling this.  Probably a
	 * bad time to do it anyways.
	 */
	if (EXT4_SB(sb)->s_sbh->b_blocknr !=
	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
		ext4_warning(sb, __FUNCTION__,
			"won't resize using backup superblock at %llu",
			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
		return -EPERM;
	}

	*primary = sb_bread(sb, gdblock);
	if (!*primary)
		return -EIO;

	if ((gdbackups = verify_reserved_gdb(sb, *primary)) < 0) {
		err = gdbackups;
		goto exit_bh;
	}

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = sb_bread(sb, le32_to_cpu(*data));
	if (!dind) {
		err = -EIO;
		goto exit_bh;
	}

	data = (__le32 *)dind->b_data;
	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
		ext4_warning(sb, __FUNCTION__,
			     "new group %u GDT block %llu not reserved",
			     input->group, gdblock);
		err = -EINVAL;
		goto exit_dind;
	}

	if ((err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh)))
		goto exit_dind;

	if ((err = ext4_journal_get_write_access(handle, *primary)))
		goto exit_sbh;

	if ((err = ext4_journal_get_write_access(handle, dind)))
		goto exit_primary;

	/* ext4_reserve_inode_write() gets a reference on the iloc */
	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
		goto exit_dindj;

	n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
			GFP_KERNEL);
	if (!n_group_desc) {
		err = -ENOMEM;
		ext4_warning (sb, __FUNCTION__,
			      "not enough memory for %lu groups", gdb_num + 1);
		goto exit_inode;
	}

	/*
	 * Finally, we have all of the possible failures behind us...
	 *
	 * Remove new GDT block from inode double-indirect block and clear out
	 * the new GDT block for use (which also "frees" the backup GDT blocks
	 * from the reserved inode).  We don't need to change the bitmaps for
	 * these blocks, because they are marked as in-use from being in the
	 * reserved inode, and will become GDT blocks (primary and backup).
	 */
	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
	ext4_journal_dirty_metadata(handle, dind);
	brelse(dind);
	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
	ext4_mark_iloc_dirty(handle, inode, &iloc);
	memset((*primary)->b_data, 0, sb->s_blocksize);
	ext4_journal_dirty_metadata(handle, *primary);

	o_group_desc = EXT4_SB(sb)->s_group_desc;
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	n_group_desc[gdb_num] = *primary;
	EXT4_SB(sb)->s_group_desc = n_group_desc;
	EXT4_SB(sb)->s_gdb_count++;
	kfree(o_group_desc);

	es->s_reserved_gdt_blocks =
		cpu_to_le16(le16_to_cpu(es->s_reserved_gdt_blocks) - 1);
	ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);

	return 0;

exit_inode:
	//ext4_journal_release_buffer(handle, iloc.bh);
	brelse(iloc.bh);
exit_dindj:
	//ext4_journal_release_buffer(handle, dind);
exit_primary:
	//ext4_journal_release_buffer(handle, *primary);
exit_sbh:
	//ext4_journal_release_buffer(handle, *primary);
exit_dind:
	brelse(dind);
exit_bh:
	brelse(*primary);

	ext4_debug("leaving with error %d\n", err);
	return err;
}

/*
 * Called when we are adding a new group which has a backup copy of each of
 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
 * We need to add these reserved backup GDT blocks to the resize inode, so
 * that they are kept for future resizing and not allocated to files.
 *
 * Each reserved backup GDT block will go into a different indirect block.
 * The indirect blocks are actually the primary reserved GDT blocks,
 * so we know in advance what their block numbers are.  We only get the
 * double-indirect block to verify it is pointing to the primary reserved
 * GDT blocks so we don't overwrite a data block by accident.  The reserved
 * backup GDT blocks are stored in their reserved primary GDT block.
 */
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
			      struct ext4_new_group_data *input)
{
	struct super_block *sb = inode->i_sb;
	int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	struct buffer_head **primary;
	struct buffer_head *dind;
	struct ext4_iloc iloc;
	ext4_fsblk_t blk;
	__le32 *data, *end;
	int gdbackups = 0;
	int res, i;
	int err;

	primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_KERNEL);
	if (!primary)
		return -ENOMEM;

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = sb_bread(sb, le32_to_cpu(*data));
	if (!dind) {
		err = -EIO;
		goto exit_free;
	}

	blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
	data = (__le32 *)dind->b_data + EXT4_SB(sb)->s_gdb_count;
	end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);

	/* Get each reserved primary GDT block and verify it holds backups */
	for (res = 0; res < reserved_gdb; res++, blk++) {
		if (le32_to_cpu(*data) != blk) {
			ext4_warning(sb, __FUNCTION__,
				     "reserved block %llu"
				     " not at offset %ld",
				     blk,
				     (long)(data - (__le32 *)dind->b_data));
			err = -EINVAL;
			goto exit_bh;
		}
		primary[res] = sb_bread(sb, blk);
		if (!primary[res]) {
			err = -EIO;
			goto exit_bh;
		}
		if ((gdbackups = verify_reserved_gdb(sb, primary[res])) < 0) {
			brelse(primary[res]);
			err = gdbackups;
			goto exit_bh;
		}
		if (++data >= end)
			data = (__le32 *)dind->b_data;
	}

	for (i = 0; i < reserved_gdb; i++) {
		if ((err = ext4_journal_get_write_access(handle, primary[i]))) {
			/*
			int j;
			for (j = 0; j < i; j++)
				ext4_journal_release_buffer(handle, primary[j]);
			 */
			goto exit_bh;
		}
	}

	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
		goto exit_bh;

	/*
	 * Finally we can add each of the reserved backup GDT blocks from
	 * the new group to its reserved primary GDT block.
	 */
	blk = input->group * EXT4_BLOCKS_PER_GROUP(sb);
	for (i = 0; i < reserved_gdb; i++) {
		int err2;
		data = (__le32 *)primary[i]->b_data;
		/* printk("reserving backup %lu[%u] = %lu\n",
		       primary[i]->b_blocknr, gdbackups,
		       blk + primary[i]->b_blocknr); */
		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
		err2 = ext4_journal_dirty_metadata(handle, primary[i]);
		if (!err)
			err = err2;
	}
	inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9;
	ext4_mark_iloc_dirty(handle, inode, &iloc);

exit_bh:
	while (--res >= 0)
		brelse(primary[res]);
	brelse(dind);

exit_free:
	kfree(primary);

	return err;
}

/*
 * Update the backup copies of the ext4 metadata.  These don't need to be part
 * of the main resize transaction, because e2fsck will re-write them if there
 * is a problem (basically only OOM will cause a problem).  However, we
 * _should_ update the backups if possible, in case the primary gets trashed
 * for some reason and we need to run e2fsck from a backup superblock.  The
 * important part is that the new block and inode counts are in the backup
 * superblocks, and the location of the new group metadata in the GDT backups.
 *
 * We do not need lock_super() for this, because these blocks are not
 * otherwise touched by the filesystem code when it is mounted.  We don't
 * need to worry about last changing from sbi->s_groups_count, because the
 * worst that can happen is that we do not copy the full number of backups
 * at this time.  The resize which changed s_groups_count will backup again.
 */
static void update_backups(struct super_block *sb,
			   int blk_off, char *data, int size)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	const unsigned long last = sbi->s_groups_count;
	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	unsigned group;
	int rest = sb->s_blocksize - size;
	handle_t *handle;
	int err = 0, err2;

	handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle)) {
		group = 1;
		err = PTR_ERR(handle);
		goto exit_err;
	}

	while ((group = ext4_list_backups(sb, &three, &five, &seven)) < last) {
		struct buffer_head *bh;

		/* Out of journal space, and can't get more - abort - so sad */
		if (handle->h_buffer_credits == 0 &&
		    ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA) &&
		    (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
			break;

		bh = sb_getblk(sb, group * bpg + blk_off);
		if (!bh) {
			err = -EIO;
			break;
		}
		ext4_debug("update metadata backup %#04lx\n",
			  (unsigned long)bh->b_blocknr);
		if ((err = ext4_journal_get_write_access(handle, bh)))
			break;
		lock_buffer(bh);
		memcpy(bh->b_data, data, size);
		if (rest)
			memset(bh->b_data + size, 0, rest);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		ext4_journal_dirty_metadata(handle, bh);
		brelse(bh);
	}
	if ((err2 = ext4_journal_stop(handle)) && !err)
		err = err2;

	/*
	 * Ugh! Need to have e2fsck write the backup copies.  It is too
	 * late to revert the resize, we shouldn't fail just because of
	 * the backup copies (they are only needed in case of corruption).
	 *
	 * However, if we got here we have a journal problem too, so we
	 * can't really start a transaction to mark the superblock.
	 * Chicken out and just set the flag on the hope it will be written
	 * to disk, and if not - we will simply wait until next fsck.
	 */
exit_err:
	if (err) {
		ext4_warning(sb, __FUNCTION__,
			     "can't update backup for group %d (err %d), "
			     "forcing fsck on next reboot", group, err);
		sbi->s_mount_state &= ~EXT4_VALID_FS;
		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
		mark_buffer_dirty(sbi->s_sbh);
	}
}

/* Add group descriptor data to an existing or new group descriptor block.
 * Ensure we handle all possible error conditions _before_ we start modifying
 * the filesystem, because we cannot abort the transaction and not have it
 * write the data to disk.
 *
 * If we are on a GDT block boundary, we need to get the reserved GDT block.
 * Otherwise, we may need to add backup GDT blocks for a sparse group.
 *
 * We only need to hold the superblock lock while we are actually adding
 * in the new group's counts to the superblock.  Prior to that we have
 * not really "added" the group at all.  We re-check that we are still
 * adding in the last group in case things have changed since verifying.
 */
int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
	struct buffer_head *primary = NULL;
	struct ext4_group_desc *gdp;
	struct inode *inode = NULL;
	handle_t *handle;
	int gdb_off, gdb_num;
	int err, err2;

	gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);

	if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
		ext4_warning(sb, __FUNCTION__,
			     "Can't resize non-sparse filesystem further");
		return -EPERM;
	}

	if (ext4_blocks_count(es) + input->blocks_count <
	    ext4_blocks_count(es)) {
		ext4_warning(sb, __FUNCTION__, "blocks_count overflow\n");
		return -EINVAL;
	}

	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
	    le32_to_cpu(es->s_inodes_count)) {
		ext4_warning(sb, __FUNCTION__, "inodes_count overflow\n");
		return -EINVAL;
	}

	if (reserved_gdb || gdb_off == 0) {
		if (!EXT4_HAS_COMPAT_FEATURE(sb,
					     EXT4_FEATURE_COMPAT_RESIZE_INODE)){
			ext4_warning(sb, __FUNCTION__,
				     "No reserved GDT blocks, can't resize");
			return -EPERM;
		}
		inode = iget(sb, EXT4_RESIZE_INO);
		if (!inode || is_bad_inode(inode)) {
			ext4_warning(sb, __FUNCTION__,
				     "Error opening resize inode");
			iput(inode);
			return -ENOENT;
		}
	}

	if ((err = verify_group_input(sb, input)))
		goto exit_put;

	if ((err = setup_new_group_blocks(sb, input)))
		goto exit_put;

	/*
	 * We will always be modifying at least the superblock and a GDT
	 * block.  If we are adding a group past the last current GDT block,
	 * we will also modify the inode and the dindirect block.  If we
	 * are adding a group with superblock/GDT backups we will also
	 * modify each of the reserved GDT dindirect blocks.
	 */
	handle = ext4_journal_start_sb(sb,
				       ext4_bg_has_super(sb, input->group) ?
				       3 + reserved_gdb : 4);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto exit_put;
	}

	lock_super(sb);
	if (input->group != sbi->s_groups_count) {
		ext4_warning(sb, __FUNCTION__,
			     "multiple resizers run on filesystem!");
		err = -EBUSY;
		goto exit_journal;
	}

	if ((err = ext4_journal_get_write_access(handle, sbi->s_sbh)))
		goto exit_journal;

	/*
	 * We will only either add reserved group blocks to a backup group
	 * or remove reserved blocks for the first group in a new group block.
	 * Doing both would be mean more complex code, and sane people don't
	 * use non-sparse filesystems anymore.  This is already checked above.
	 */
	if (gdb_off) {
		primary = sbi->s_group_desc[gdb_num];
		if ((err = ext4_journal_get_write_access(handle, primary)))
			goto exit_journal;

		if (reserved_gdb && ext4_bg_num_gdb(sb, input->group) &&
		    (err = reserve_backup_gdb(handle, inode, input)))
			goto exit_journal;
	} else if ((err = add_new_gdb(handle, inode, input, &primary)))
		goto exit_journal;

	/*
	 * OK, now we've set up the new group.  Time to make it active.
	 *
	 * Current kernels don't lock all allocations via lock_super(),
	 * so we have to be safe wrt. concurrent accesses the group
	 * data.  So we need to be careful to set all of the relevant
	 * group descriptor data etc. *before* we enable the group.
	 *
	 * The key field here is sbi->s_groups_count: as long as
	 * that retains its old value, nobody is going to access the new
	 * group.
	 *
	 * So first we update all the descriptor metadata for the new
	 * group; then we update the total disk blocks count; then we
	 * update the groups count to enable the group; then finally we
	 * update the free space counts so that the system can start
	 * using the new disk blocks.
	 */

	/* Update group descriptor block for new group */
	gdp = (struct ext4_group_desc *)primary->b_data + gdb_off;

	ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */
	ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */
	ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */
	gdp->bg_free_blocks_count = cpu_to_le16(input->free_blocks_count);
	gdp->bg_free_inodes_count = cpu_to_le16(EXT4_INODES_PER_GROUP(sb));

	/*
	 * Make the new blocks and inodes valid next.  We do this before
	 * increasing the group count so that once the group is enabled,
	 * all of its blocks and inodes are already valid.
	 *
	 * We always allocate group-by-group, then block-by-block or
	 * inode-by-inode within a group, so enabling these
	 * blocks/inodes before the group is live won't actually let us
	 * allocate the new space yet.
	 */
	ext4_blocks_count_set(es, ext4_blocks_count(es) +
		input->blocks_count);
	es->s_inodes_count = cpu_to_le32(le32_to_cpu(es->s_inodes_count) +
		EXT4_INODES_PER_GROUP(sb));

	/*
	 * We need to protect s_groups_count against other CPUs seeing
	 * inconsistent state in the superblock.
	 *
	 * The precise rules we use are:
	 *
	 * * Writers of s_groups_count *must* hold lock_super
	 * AND
	 * * Writers must perform a smp_wmb() after updating all dependent
	 *   data and before modifying the groups count
	 *
	 * * Readers must hold lock_super() over the access
	 * OR
	 * * Readers must perform an smp_rmb() after reading the groups count
	 *   and before reading any dependent data.
	 *
	 * NB. These rules can be relaxed when checking the group count
	 * while freeing data, as we can only allocate from a block
	 * group after serialising against the group count, and we can
	 * only then free after serialising in turn against that
	 * allocation.
	 */
	smp_wmb();

	/* Update the global fs size fields */
	sbi->s_groups_count++;

	ext4_journal_dirty_metadata(handle, primary);

	/* Update the reserved block counts only once the new group is
	 * active. */
	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
		input->reserved_blocks);

	/* Update the free space counts */
	percpu_counter_mod(&sbi->s_freeblocks_counter,
			   input->free_blocks_count);
	percpu_counter_mod(&sbi->s_freeinodes_counter,
			   EXT4_INODES_PER_GROUP(sb));

	ext4_journal_dirty_metadata(handle, sbi->s_sbh);
	sb->s_dirt = 1;

exit_journal:
	unlock_super(sb);
	if ((err2 = ext4_journal_stop(handle)) && !err)
		err = err2;
	if (!err) {
		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
			       sizeof(struct ext4_super_block));
		update_backups(sb, primary->b_blocknr, primary->b_data,
			       primary->b_size);
	}
exit_put:
	iput(inode);
	return err;
} /* ext4_group_add */

/* Extend the filesystem to the new number of blocks specified.  This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).
 *
 * If we _really_ wanted, we could use default values to call ext4_group_add()
 * allow the "remount" trick to work for arbitrary resizing, assuming enough
 * GDT blocks are reserved to grow to the desired size.
 */
int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
		      ext4_fsblk_t n_blocks_count)
{
	ext4_fsblk_t o_blocks_count;
	unsigned long o_groups_count;
	ext4_grpblk_t last;
	ext4_grpblk_t add;
	struct buffer_head * bh;
	handle_t *handle;
	int err;
	unsigned long freed_blocks;

	/* We don't need to worry about locking wrt other resizers just
	 * yet: we're going to revalidate es->s_blocks_count after
	 * taking lock_super() below. */
	o_blocks_count = ext4_blocks_count(es);
	o_groups_count = EXT4_SB(sb)->s_groups_count;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: extending last group from %llu uto %llu blocks\n",
		       o_blocks_count, n_blocks_count);

	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
		return 0;

	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
		printk(KERN_ERR "EXT4-fs: filesystem on %s:"
			" too large to resize to %llu blocks safely\n",
			sb->s_id, n_blocks_count);
		if (sizeof(sector_t) < 8)
			ext4_warning(sb, __FUNCTION__,
			"CONFIG_LBD not enabled\n");
		return -EINVAL;
	}

	if (n_blocks_count < o_blocks_count) {
		ext4_warning(sb, __FUNCTION__,
			     "can't shrink FS - resize aborted");
		return -EBUSY;
	}

	/* Handle the remaining blocks in the last group only. */
	ext4_get_group_no_and_offset(sb, o_blocks_count, NULL, &last);

	if (last == 0) {
		ext4_warning(sb, __FUNCTION__,
			     "need to use ext2online to resize further");
		return -EPERM;
	}

	add = EXT4_BLOCKS_PER_GROUP(sb) - last;

	if (o_blocks_count + add < o_blocks_count) {
		ext4_warning(sb, __FUNCTION__, "blocks_count overflow");
		return -EINVAL;
	}

	if (o_blocks_count + add > n_blocks_count)
		add = n_blocks_count - o_blocks_count;

	if (o_blocks_count + add < n_blocks_count)
		ext4_warning(sb, __FUNCTION__,
			     "will only finish group (%llu"
			     " blocks, %u new)",
			     o_blocks_count + add, add);

	/* See if the device is actually as big as what was requested */
	bh = sb_bread(sb, o_blocks_count + add -1);
	if (!bh) {
		ext4_warning(sb, __FUNCTION__,
			     "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	/* We will update the superblock, one block bitmap, and
	 * one group descriptor via ext4_free_blocks().
	 */
	handle = ext4_journal_start_sb(sb, 3);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		ext4_warning(sb, __FUNCTION__, "error %d on journal start",err);
		goto exit_put;
	}

	lock_super(sb);
	if (o_blocks_count != ext4_blocks_count(es)) {
		ext4_warning(sb, __FUNCTION__,
			     "multiple resizers run on filesystem!");
		unlock_super(sb);
		err = -EBUSY;
		goto exit_put;
	}

	if ((err = ext4_journal_get_write_access(handle,
						 EXT4_SB(sb)->s_sbh))) {
		ext4_warning(sb, __FUNCTION__,
			     "error %d on journal write access", err);
		unlock_super(sb);
		ext4_journal_stop(handle);
		goto exit_put;
	}
	ext4_blocks_count_set(es, o_blocks_count + add);
	ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
	sb->s_dirt = 1;
	unlock_super(sb);
	ext4_debug("freeing blocks %lu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
	ext4_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
	if ((err = ext4_journal_stop(handle)))
		goto exit_put;
	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: extended group to %llu blocks\n",
		       ext4_blocks_count(es));
	update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, (char *)es,
		       sizeof(struct ext4_super_block));
exit_put:
	return err;
} /* ext4_group_extend */
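For reference, the sparse-super backup placement described in the comment above ext4_list_backups() — backups in group 1 and every power of 3, 5 and 7 — is easy to reproduce outside the kernel. The following stand-alone program is an illustration only (the group count is a made-up example), not code from this commit:

/*
 * Illustration only -- enumerates the backup groups of a SPARSE_SUPER
 * filesystem the same way ext4_list_backups() does.  The group count
 * (100) is an arbitrary example.
 */
#include <stdio.h>

static unsigned list_backups(unsigned *three, unsigned *five, unsigned *seven)
{
	unsigned *min = three;
	unsigned mult = 3, ret;

	if (*five < *min) {
		min = five;
		mult = 5;
	}
	if (*seven < *min) {
		min = seven;
		mult = 7;
	}
	ret = *min;
	*min *= mult;
	return ret;
}

int main(void)
{
	unsigned three = 1, five = 5, seven = 7;
	unsigned ngroups = 100;	/* example filesystem size, in groups */
	unsigned grp;

	printf("0");	/* group 0 always carries the primary copies */
	while ((grp = list_backups(&three, &five, &seven)) < ngroups)
		printf(" %u", grp);
	printf("\n");	/* prints: 0 1 3 5 7 9 25 27 49 81 */
	return 0;
}

update_backups() and verify_reserved_gdb() above walk this same sequence, which is why both seed their three/five/seven counters to 1, 5 and 7.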