Diffstat (limited to 'fs/ext4/balloc.c')
-rw-r--r--  fs/ext4/balloc.c | 1833
1 file changed, 1833 insertions(+), 0 deletions(-)
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
new file mode 100644
index 000000000000..5d45582f9517
--- /dev/null
+++ b/fs/ext4/balloc.c
@@ -0,0 +1,1833 @@
/*
 * linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>

/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		unsigned long *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}

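/*
 * Editor's note (illustration, not part of this commit): the same
 * group/offset split in plain userspace C.  The constants below are
 * hypothetical example values, not fields read from a real superblock.
 * do_div(x, n) above divides x in place and returns the remainder.
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t blocknr = 100000;	  /* hypothetical fs-wide block */
	uint64_t first_data_block = 1;	  /* 1 for 1K block size, else 0 */
	uint32_t blocks_per_group = 8192; /* EXT4_BLOCKS_PER_GROUP(sb) */
	uint64_t rel = blocknr - first_data_block;

	printf("group %llu, offset %u\n",
	       (unsigned long long)(rel / blocks_per_group),
	       (unsigned)(rel % blocks_per_group));
	return 0;
}
#endif
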
/*
 * The free blocks are managed by bitmaps. A filesystem contains several
 * block groups. Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The filesystem contains group descriptors which are located after the
 * super block. Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block. The descriptors are loaded in memory
 * when a filesystem is mounted (see ext4_read_super).
 */


#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
					     unsigned int block_group,
					     struct buffer_head ** bh)
{
	unsigned long group_desc;
	unsigned long offset;
	struct ext4_group_desc * desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= sbi->s_groups_count) {
		ext4_error (sb, "ext4_get_group_desc",
			    "block_group >= groups_count - "
			    "block_group = %d, groups_count = %lu",
			    block_group, sbi->s_groups_count);

		return NULL;
	}
	smp_rmb();

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error (sb, "ext4_get_group_desc",
			    "Group descriptor not loaded - "
			    "block_group = %d, group_desc = %lu, desc = %lu",
			    block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}

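/*
 * Editor's note (illustration, not part of this commit): a minimal
 * caller of ext4_get_group_desc().  The helper name is hypothetical;
 * a NULL return means the group number was out of range or the
 * descriptor block was never loaded, and ext4_error() has already run.
 */
#if 0	/* illustrative sketch only */
static void example_show_free_blocks(struct super_block *sb,
				     unsigned int group)
{
	struct buffer_head *bh;
	struct ext4_group_desc *desc = ext4_get_group_desc(sb, group, &bh);

	if (!desc)
		return;
	printk(KERN_DEBUG "group %u: %u free blocks\n", group,
	       le16_to_cpu(desc->bg_free_blocks_count));
}
#endif
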
/**
 * read_block_bitmap()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the block bitmap for the given block_group.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
static struct buffer_head *
read_block_bitmap(struct super_block *sb, unsigned int block_group)
{
	struct ext4_group_desc * desc;
	struct buffer_head * bh = NULL;

	desc = ext4_get_group_desc (sb, block_group, NULL);
	if (!desc)
		goto error_out;
	bh = sb_bread(sb, ext4_block_bitmap(sb, desc));
	if (!bh)
		ext4_error (sb, "read_block_bitmap",
			    "Cannot read block bitmap - "
			    "block_group = %d, block_bitmap = %llu",
			    block_group, ext4_block_bitmap(sb, desc));
error_out:
	return bh;
}
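
/*
 * Editor's note (illustration, not part of this commit): callers own
 * the buffer_head returned by read_block_bitmap() and must brelse()
 * it when done, as ext4_free_blocks_sb() below does.  The function
 * name here is hypothetical.
 */
#if 0	/* illustrative sketch only */
static int example_touch_bitmap(struct super_block *sb, unsigned int group)
{
	struct buffer_head *bitmap_bh = read_block_bitmap(sb, group);

	if (!bitmap_bh)
		return -EIO;
	/* ... inspect bitmap_bh->b_data here ... */
	brelse(bitmap_bh);
	return 0;
}
#endif
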
/*
 * The reservation window structure operations
 * --------------------------------------------
 * Operations include:
 * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
 *
 * We use a red-black tree to represent per-filesystem reservation
 * windows.
 *
 */

/**
 * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
 * @rb_root:		root of per-filesystem reservation rb tree
 * @verbose:		verbose mode
 * @fn:			function which wishes to dump the reservation map
 *
 * If verbose is turned on, it will print the whole block reservation
 * windows (start, end). Otherwise, it will only print out the "bad" windows,
 * those windows that overlap with their immediate neighbors.
 */
#if 1
static void __rsv_window_dump(struct rb_root *root, int verbose,
			      const char *fn)
{
	struct rb_node *n;
	struct ext4_reserve_window_node *rsv, *prev;
	int bad;

restart:
	n = rb_first(root);
	bad = 0;
	prev = NULL;

	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
	while (n) {
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
		if (verbose)
			printk("reservation window 0x%p "
			       "start: %llu, end: %llu\n",
			       rsv, rsv->rsv_start, rsv->rsv_end);
		if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
			printk("Bad reservation %p (start >= end)\n",
			       rsv);
			bad = 1;
		}
		if (prev && prev->rsv_end >= rsv->rsv_start) {
			printk("Bad reservation %p (prev->end >= start)\n",
			       rsv);
			bad = 1;
		}
		if (bad) {
			if (!verbose) {
				printk("Restarting reservation walk in verbose mode\n");
				verbose = 1;
				goto restart;
			}
		}
		n = rb_next(n);
		prev = rsv;
	}
	printk("Window map complete.\n");
	if (bad)
		BUG();
}
#define rsv_window_dump(root, verbose) \
	__rsv_window_dump((root), (verbose), __FUNCTION__)
#else
#define rsv_window_dump(root, verbose) do {} while (0)
#endif

/**
 * goal_in_my_reservation()
 * @rsv:		inode's reservation window
 * @grp_goal:		given goal block relative to the allocation block group
 * @group:		the current allocation block group
 * @sb:			filesystem super block
 *
 * Test if the given goal block (group relative) is within the file's
 * own block reservation window range.
 *
 * If the reservation window is outside the goal allocation group, return 0;
 * grp_goal (the given goal block) may be -1, which means there is no
 * specific goal block; in that case, always return 1.
 * If the goal block is within the reservation window, return 1;
 * otherwise, return 0.
 */
static int
goal_in_my_reservation(struct ext4_reserve_window *rsv, ext4_grpblk_t grp_goal,
		       unsigned int group, struct super_block * sb)
{
	ext4_fsblk_t group_first_block, group_last_block;

	group_first_block = ext4_group_first_block_no(sb, group);
	group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

	if ((rsv->_rsv_start > group_last_block) ||
	    (rsv->_rsv_end < group_first_block))
		return 0;
	if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
		|| (grp_goal + group_first_block > rsv->_rsv_end)))
		return 0;
	return 1;
}

/**
 * search_reserve_window()
 * @rb_root:		root of reservation tree
 * @goal:		target allocation block
 *
 * Find the reserved window which includes the goal, or the previous one
 * if the goal is not in any window.
 * Returns NULL if there are no windows or if all windows start after the goal.
 */
static struct ext4_reserve_window_node *
search_reserve_window(struct rb_root *root, ext4_fsblk_t goal)
{
	struct rb_node *n = root->rb_node;
	struct ext4_reserve_window_node *rsv;

	if (!n)
		return NULL;

	do {
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);

		if (goal < rsv->rsv_start)
			n = n->rb_left;
		else if (goal > rsv->rsv_end)
			n = n->rb_right;
		else
			return rsv;
	} while (n);
	/*
	 * We've fallen off the end of the tree: the goal wasn't inside
	 * any particular node. OK, the previous node must be to one
	 * side of the interval containing the goal. If it's the RHS,
	 * we need to back up one.
	 */
	if (rsv->rsv_start > goal) {
		n = rb_prev(&rsv->rsv_node);
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
	}
	return rsv;
}

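/*
 * Editor's note (worked example, not part of this commit): with
 * windows [10,19] and [40,49] in the tree, search_reserve_window()
 * returns:
 *
 *	goal = 15  ->  the [10,19] node	(goal inside a window)
 *	goal = 30  ->  the [10,19] node	(nearest preceding window)
 *	goal =  5  ->  NULL		(all windows start after the goal;
 *					 rb_prev() yields NULL, and
 *					 rb_entry() of NULL is NULL here
 *					 assuming rsv_node is the first
 *					 member of the node structure)
 */
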
/**
 * ext4_rsv_window_add() -- Insert a window to the block reservation rb tree.
 * @sb:			super block
 * @rsv:		reservation window to add
 *
 * Must be called with rsv_lock held.
 */
void ext4_rsv_window_add(struct super_block *sb,
			 struct ext4_reserve_window_node *rsv)
{
	struct rb_root *root = &EXT4_SB(sb)->s_rsv_window_root;
	struct rb_node *node = &rsv->rsv_node;
	ext4_fsblk_t start = rsv->rsv_start;

	struct rb_node ** p = &root->rb_node;
	struct rb_node * parent = NULL;
	struct ext4_reserve_window_node *this;

	while (*p)
	{
		parent = *p;
		this = rb_entry(parent, struct ext4_reserve_window_node,
				rsv_node);

		if (start < this->rsv_start)
			p = &(*p)->rb_left;
		else if (start > this->rsv_end)
			p = &(*p)->rb_right;
		else {
			rsv_window_dump(root, 1);
			BUG();
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
}

/**
 * rsv_window_remove() -- unlink a window from the reservation rb tree
 * @sb:			super block
 * @rsv:		reservation window to remove
 *
 * Mark the block reservation window as not allocated, and unlink it
 * from the filesystem reservation window rb tree. Must be called with
 * rsv_lock held.
 */
static void rsv_window_remove(struct super_block *sb,
			      struct ext4_reserve_window_node *rsv)
{
	rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
	rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
	rsv->rsv_alloc_hit = 0;
	rb_erase(&rsv->rsv_node, &EXT4_SB(sb)->s_rsv_window_root);
}

/*
 * rsv_is_empty() -- Check if the reservation window is allocated.
 * @rsv:		given reservation window to check
 *
 * returns 1 if the end block is EXT4_RESERVE_WINDOW_NOT_ALLOCATED.
 */
static inline int rsv_is_empty(struct ext4_reserve_window *rsv)
{
	/* a valid reservation end block could not be 0 */
	return rsv->_rsv_end == EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
}

/**
 * ext4_init_block_alloc_info()
 * @inode:		file inode structure
 *
 * Allocate and initialize the reservation window structure, and
 * finally link the window to the ext4 inode structure.
 *
 * The reservation window structure is only dynamically allocated
 * and linked to the ext4 inode the first time the open file
 * needs a new block. So, before every ext4_new_block(s) call, for
 * regular files, we should check whether the reservation window
 * structure exists or not. In the latter case, this function is called.
 * Failure to do so will result in block reservation being turned off for
 * that open file.
 *
 * This function is called from ext4_get_blocks_handle(), and also
 * when setting the reservation window size through ioctl before the file
 * is open for write (needs block allocation).
 *
 * The caller must hold truncate_mutex when calling this function.
 */
void ext4_init_block_alloc_info(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
	struct super_block *sb = inode->i_sb;

	block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
	if (block_i) {
		struct ext4_reserve_window_node *rsv =
						&block_i->rsv_window_node;

		rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
		rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;

		/*
		 * if the filesystem is mounted with NORESERVATION, the goal
		 * reservation window size is set to zero to indicate
		 * block reservation is off
		 */
		if (!test_opt(sb, RESERVATION))
			rsv->rsv_goal_size = 0;
		else
			rsv->rsv_goal_size = EXT4_DEFAULT_RESERVE_BLOCKS;
		rsv->rsv_alloc_hit = 0;
		block_i->last_alloc_logical_block = 0;
		block_i->last_alloc_physical_block = 0;
	}
	ei->i_block_alloc_info = block_i;
}

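/*
 * Editor's note (illustration, not part of this commit): the
 * caller-side pattern described above -- allocate the structure
 * lazily, under truncate_mutex, before the first block allocation for
 * a regular file.  The surrounding context is assumed.
 */
#if 0	/* illustrative sketch only */
	mutex_lock(&EXT4_I(inode)->truncate_mutex);
	if (S_ISREG(inode->i_mode) && !EXT4_I(inode)->i_block_alloc_info)
		ext4_init_block_alloc_info(inode);
	/* ... go on to allocate blocks ... */
	mutex_unlock(&EXT4_I(inode)->truncate_mutex);
#endif
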
/**
 * ext4_discard_reservation()
 * @inode:		inode
 *
 * Discard (free) the inode's block reservation window on last file close,
 * on truncate, or at the last iput().
 *
 * It is called in three cases:
 *	ext4_release_file(): last writer closes the file
 *	ext4_clear_inode(): last iput(), when nobody links to this file.
 *	ext4_truncate(): when the block indirect map is about to change.
 *
 */
void ext4_discard_reservation(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
	struct ext4_reserve_window_node *rsv;
	spinlock_t *rsv_lock = &EXT4_SB(inode->i_sb)->s_rsv_window_lock;

	if (!block_i)
		return;

	rsv = &block_i->rsv_window_node;
	if (!rsv_is_empty(&rsv->rsv_window)) {
		spin_lock(rsv_lock);
		if (!rsv_is_empty(&rsv->rsv_window))
			rsv_window_remove(inode->i_sb, rsv);
		spin_unlock(rsv_lock);
	}
}

/**
 * ext4_free_blocks_sb() -- Free given blocks and update quota
 * @handle:			handle to this transaction
 * @sb:				super block
 * @block:			start physical block to free
 * @count:			number of blocks to free
 * @pdquot_freed_blocks:	pointer to quota
 */
void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
			 ext4_fsblk_t block, unsigned long count,
			 unsigned long *pdquot_freed_blocks)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gd_bh;
	unsigned long block_group;
	ext4_grpblk_t bit;
	unsigned long i;
	unsigned long overflow;
	struct ext4_group_desc * desc;
	struct ext4_super_block * es;
	struct ext4_sb_info *sbi;
	int err = 0, ret;
	ext4_grpblk_t group_freed;

	*pdquot_freed_blocks = 0;
	sbi = EXT4_SB(sb);
	es = sbi->s_es;
	if (block < le32_to_cpu(es->s_first_data_block) ||
	    block + count < block ||
	    block + count > ext4_blocks_count(es)) {
		ext4_error (sb, "ext4_free_blocks",
			    "Freeing blocks not in datazone - "
			    "block = %llu, count = %lu", block, count);
		goto error_return;
	}

	ext4_debug ("freeing block(s) %llu-%llu\n", block, block + count - 1);

do_more:
	overflow = 0;
	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
	 */
	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
		overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
		count -= overflow;
	}
	brelse(bitmap_bh);
	bitmap_bh = read_block_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;
	desc = ext4_get_group_desc (sb, block_group, &gd_bh);
	if (!desc)
		goto error_return;

	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
	    in_range(block + count - 1, ext4_inode_table(sb, desc),
		     sbi->s_itb_per_group))
		ext4_error (sb, "ext4_free_blocks",
			    "Freeing blocks in system zones - "
			    "Block = %llu, count = %lu",
			    block, count);

	/*
	 * We are about to start releasing blocks in the bitmap,
	 * so we need undo access.
	 */
	/* @@@ check errors */
	BUFFER_TRACE(bitmap_bh, "getting undo access");
	err = ext4_journal_get_undo_access(handle, bitmap_bh);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata. Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gd_bh);
	if (err)
		goto error_return;

	jbd_lock_bh_state(bitmap_bh);

	for (i = 0, group_freed = 0; i < count; i++) {
		/*
		 * An HJ special. This is expensive...
		 */
#ifdef CONFIG_JBD_DEBUG
		jbd_unlock_bh_state(bitmap_bh);
		{
			struct buffer_head *debug_bh;
			debug_bh = sb_find_get_block(sb, block + i);
			if (debug_bh) {
				BUFFER_TRACE(debug_bh, "Deleted!");
				if (!bh2jh(bitmap_bh)->b_committed_data)
					BUFFER_TRACE(debug_bh,
						"No committed data in bitmap");
				BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
				__brelse(debug_bh);
			}
		}
		jbd_lock_bh_state(bitmap_bh);
#endif
		if (need_resched()) {
			jbd_unlock_bh_state(bitmap_bh);
			cond_resched();
			jbd_lock_bh_state(bitmap_bh);
		}
		/* @@@ This prevents newly-allocated data from being
		 * freed and then reallocated within the same
		 * transaction.
		 *
		 * Ideally we would want to allow that to happen, but to
		 * do so requires making jbd2_journal_forget() capable of
		 * revoking the queued write of a data block, which
		 * implies blocking on the journal lock.  *forget()
		 * cannot block due to truncate races.
		 *
		 * Eventually we can fix this by making jbd2_journal_forget()
		 * return a status indicating whether or not it was able
		 * to revoke the buffer.  On successful revoke, it is
		 * safe not to set the allocation bit in the committed
		 * bitmap, because we know that there is no outstanding
		 * activity on the buffer any more and so it is safe to
		 * reallocate it.
		 */
		BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
		J_ASSERT_BH(bitmap_bh,
			    bh2jh(bitmap_bh)->b_committed_data != NULL);
		ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
				    bh2jh(bitmap_bh)->b_committed_data);

		/*
		 * We clear the bit in the bitmap after setting the committed
		 * data bit, because this is the reverse order to that which
		 * the allocator uses.
		 */
		BUFFER_TRACE(bitmap_bh, "clear bit");
		if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
					   bit + i, bitmap_bh->b_data)) {
			jbd_unlock_bh_state(bitmap_bh);
			ext4_error(sb, __FUNCTION__,
				   "bit already cleared for block %llu",
				   (ext4_fsblk_t)(block + i));
			jbd_lock_bh_state(bitmap_bh);
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
			group_freed++;
		}
	}
	jbd_unlock_bh_state(bitmap_bh);

	spin_lock(sb_bgl_lock(sbi, block_group));
	desc->bg_free_blocks_count =
		cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
			    group_freed);
	spin_unlock(sb_bgl_lock(sbi, block_group));
	percpu_counter_mod(&sbi->s_freeblocks_counter, count);

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext4_journal_dirty_metadata(handle, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext4_journal_dirty_metadata(handle, gd_bh);
	if (!err)
		err = ret;
	*pdquot_freed_blocks += group_freed;

	if (overflow && !err) {
		block += count;
		count = overflow;
		goto do_more;
	}
	sb->s_dirt = 1;
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, err);
	return;
}

/**
 * ext4_free_blocks() -- Free given blocks and update quota
 * @handle:		handle for this transaction
 * @inode:		inode
 * @block:		start physical block to free
 * @count:		number of blocks to free
 */
void ext4_free_blocks(handle_t *handle, struct inode *inode,
		      ext4_fsblk_t block, unsigned long count)
{
	struct super_block * sb;
	unsigned long dquot_freed_blocks;

	sb = inode->i_sb;
	if (!sb) {
		printk ("ext4_free_blocks: nonexistent device");
		return;
	}
	ext4_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
	if (dquot_freed_blocks)
		DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
	return;
}

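/*
 * Editor's note (illustration, not part of this commit): a typical
 * caller frees a contiguous run of blocks inside a running
 * transaction, e.g. on the truncate path.  "handle" and "inode" are
 * assumed to come from the caller, the block values are hypothetical,
 * and the handle is assumed to carry enough journal credits for the
 * bitmap and group-descriptor updates.
 */
#if 0	/* illustrative sketch only */
	ext4_fsblk_t first = 12345;	/* hypothetical starting block */
	unsigned long count = 12;	/* hypothetical run length */

	ext4_free_blocks(handle, inode, first, count);
#endif
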
/**
 * ext4_test_allocatable()
 * @nr:			given allocation block (group relative)
 * @bh:			bufferhead containing the bitmap of the given
 *			block group
 *
 * For ext4 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy.  This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 * sync-data inodes.
 */
static int ext4_test_allocatable(ext4_grpblk_t nr, struct buffer_head *bh)
{
	int ret;
	struct journal_head *jh = bh2jh(bh);

	if (ext4_test_bit(nr, bh->b_data))
		return 0;

	jbd_lock_bh_state(bh);
	if (!jh->b_committed_data)
		ret = 1;
	else
		ret = !ext4_test_bit(nr, jh->b_committed_data);
	jbd_unlock_bh_state(bh);
	return ret;
}

/**
 * bitmap_search_next_usable_block()
 * @start:		the starting block (group relative) of the search
 * @bh:			bufferhead containing the block group bitmap
 * @maxblocks:		the ending block (group relative) of the reservation
 *
 * The bitmap search --- search forward alternately through the actual
 * bitmap on disk and the last-committed copy in journal, until we find a
 * bit free in both bitmaps.
 */
static ext4_grpblk_t
bitmap_search_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
				ext4_grpblk_t maxblocks)
{
	ext4_grpblk_t next;
	struct journal_head *jh = bh2jh(bh);

	while (start < maxblocks) {
		next = ext4_find_next_zero_bit(bh->b_data, maxblocks, start);
		if (next >= maxblocks)
			return -1;
		if (ext4_test_allocatable(next, bh))
			return next;
		jbd_lock_bh_state(bh);
		if (jh->b_committed_data)
			start = ext4_find_next_zero_bit(jh->b_committed_data,
							maxblocks, next);
		jbd_unlock_bh_state(bh);
	}
	return -1;
}

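/*
 * Editor's note (illustration, not part of this commit): the same
 * two-bitmap rule in self-contained userspace C, with the journal's
 * last-committed copy modelled as a second bit array and the kernel
 * loop's find_next_zero_bit() strides replaced by a plain linear
 * scan.  A bit is usable only if it is clear in BOTH arrays.
 */
#if 0	/* illustrative sketch only */
#include <stdio.h>

static int test_bit(const unsigned char *map, int nr)
{
	return (map[nr >> 3] >> (nr & 7)) & 1;
}

static int next_usable(const unsigned char *disk,
		       const unsigned char *committed, int start, int max)
{
	int nr;

	for (nr = start; nr < max; nr++)
		if (!test_bit(disk, nr) && !test_bit(committed, nr))
			return nr;
	return -1;
}

int main(void)
{
	unsigned char disk[2] = { 0x0f, 0x00 };	     /* bits 0-3 in use */
	unsigned char committed[2] = { 0x30, 0x00 }; /* bits 4-5 committing */

	printf("first usable bit: %d\n", next_usable(disk, committed, 0, 16));
	return 0;	/* prints 6 */
}
#endif
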
/**
 * find_next_usable_block()
 * @start:		the starting block (group relative) to find the next
 *			allocatable block in the bitmap.
 * @bh:			bufferhead containing the block group bitmap
 * @maxblocks:		the ending block (group relative) for the search
 *
 * Find an allocatable block in a bitmap.  We honor both the bitmap and
 * its last-committed copy (if that exists), and perform the "most
 * appropriate allocation" algorithm of looking for a free block near
 * the initial goal; then for a free byte somewhere in the bitmap; then
 * for any free bit in the bitmap.
 */
static ext4_grpblk_t
find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
		       ext4_grpblk_t maxblocks)
{
	ext4_grpblk_t here, next;
	char *p, *r;

	if (start > 0) {
		/*
		 * The goal was occupied; search forward for a free
		 * block within the next XX blocks.
		 *
		 * end_goal is more or less random, but it has to be
		 * less than EXT4_BLOCKS_PER_GROUP. Aligning up to the
		 * next 64-bit boundary is simple..
		 */
		ext4_grpblk_t end_goal = (start + 63) & ~63;
		if (end_goal > maxblocks)
			end_goal = maxblocks;
		here = ext4_find_next_zero_bit(bh->b_data, end_goal, start);
		if (here < end_goal && ext4_test_allocatable(here, bh))
			return here;
		ext4_debug("Bit not found near goal\n");
	}

	here = start;
	if (here < 0)
		here = 0;

	p = ((char *)bh->b_data) + (here >> 3);
	r = memscan(p, 0, (maxblocks - here + 7) >> 3);
	next = (r - ((char *)bh->b_data)) << 3;

	if (next < maxblocks && next >= start &&
	    ext4_test_allocatable(next, bh))
		return next;

	/*
	 * The bitmap search --- search forward alternately through the actual
	 * bitmap and the last-committed copy until we find a bit free in
	 * both
	 */
	here = bitmap_search_next_usable_block(here, bh, maxblocks);
	return here;
}

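/*
 * Editor's note (illustration, not part of this commit): the
 * free-byte scan above, in userspace C.  memchr() stands in for the
 * kernel's memscan(); the returned pointer is converted back to a
 * bit number with (r - base) << 3.
 */
#if 0	/* illustrative sketch only */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char bitmap[4] = { 0xff, 0xff, 0x00, 0xff };
	unsigned char *r = memchr(bitmap, 0, sizeof(bitmap));
	int next = (int)(r - bitmap) << 3;

	printf("first fully-free byte starts at bit %d\n", next);
	return 0;	/* prints 16 */
}
#endif
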
/**
 * claim_block()
 * @block:		the free block (group relative) to allocate
 * @bh:			the bufferhead containing the block group bitmap
 *
 * We think we can allocate this block in this bitmap.  Try to set the bit.
 * If that succeeds then check that nobody has allocated and then freed the
 * block since we saw that it was not marked in b_committed_data.  If it
 * _was_ allocated and freed then clear the bit in the bitmap again and
 * return zero (failure).
 */
static inline int
claim_block(spinlock_t *lock, ext4_grpblk_t block, struct buffer_head *bh)
{
	struct journal_head *jh = bh2jh(bh);
	int ret;

	if (ext4_set_bit_atomic(lock, block, bh->b_data))
		return 0;
	jbd_lock_bh_state(bh);
	if (jh->b_committed_data &&
	    ext4_test_bit(block, jh->b_committed_data)) {
		ext4_clear_bit_atomic(lock, block, bh->b_data);
		ret = 0;
	} else {
		ret = 1;
	}
	jbd_unlock_bh_state(bh);
	return ret;
}

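/*
 * Editor's note (illustration, not part of this commit): the
 * claim_block() pattern -- optimistic test-and-set, then a recheck
 * against committed state with rollback on conflict -- expressed with
 * C11 atomics in userspace.  All names here are stand-ins.
 */
#if 0	/* illustrative sketch only */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong bitmap;	/* stands in for bh->b_data */
static unsigned long committed;	/* stands in for jh->b_committed_data */

static bool claim(int bit)
{
	unsigned long mask = 1UL << bit;

	if (atomic_fetch_or(&bitmap, mask) & mask)
		return false;	/* bit was already set: we lost the race */
	if (committed & mask) {	/* freed, but the free is not committed yet */
		atomic_fetch_and(&bitmap, ~mask);
		return false;
	}
	return true;
}
#endif
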
/**
 * ext4_try_to_allocate()
 * @sb:			superblock
 * @handle:		handle to this transaction
 * @group:		given allocation block group
 * @bitmap_bh:		bufferhead holding the block bitmap
 * @grp_goal:		given target block within the group
 * @count:		target number of blocks to allocate
 * @my_rsv:		reservation window
 *
 * Attempt to allocate blocks within a given range.  Set the range of the
 * allocation first, then find the first free bit(s) in the bitmap (within
 * the range), and finally allocate the blocks by claiming the found free
 * bit(s) as allocated.
 *
 * To set the range of this allocation:
 *	if there is a reservation window, only try to allocate block(s)
 *	from the file's own reservation window;
 *	otherwise, the allocation range starts at the given goal block and
 *	ends at the block group's last block.
 *
 * If we failed to allocate the desired block then we may end up crossing to a
 * new bitmap.  In that case we must release write access to the old one via
 * ext4_journal_release_buffer(), else we'll run out of credits.
 */
static ext4_grpblk_t
ext4_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
		     struct buffer_head *bitmap_bh, ext4_grpblk_t grp_goal,
		     unsigned long *count, struct ext4_reserve_window *my_rsv)
{
	ext4_fsblk_t group_first_block;
	ext4_grpblk_t start, end;
	unsigned long num = 0;

	/* we do allocation within the reservation window if we have a window */
	if (my_rsv) {
		group_first_block = ext4_group_first_block_no(sb, group);
		if (my_rsv->_rsv_start >= group_first_block)
			start = my_rsv->_rsv_start - group_first_block;
		else
			/* reservation window crosses group boundary */
			start = 0;
		end = my_rsv->_rsv_end - group_first_block + 1;
		if (end > EXT4_BLOCKS_PER_GROUP(sb))
			/* reservation window crosses group boundary */
			end = EXT4_BLOCKS_PER_GROUP(sb);
		if ((start <= grp_goal) && (grp_goal < end))
			start = grp_goal;
		else
			grp_goal = -1;
	} else {
		if (grp_goal > 0)
			start = grp_goal;
		else
			start = 0;
		end = EXT4_BLOCKS_PER_GROUP(sb);
	}

	BUG_ON(start > EXT4_BLOCKS_PER_GROUP(sb));

repeat:
	if (grp_goal < 0 || !ext4_test_allocatable(grp_goal, bitmap_bh)) {
		grp_goal = find_next_usable_block(start, bitmap_bh, end);
		if (grp_goal < 0)
			goto fail_access;
		if (!my_rsv) {
			int i;

			for (i = 0; i < 7 && grp_goal > start &&
			     ext4_test_allocatable(grp_goal - 1, bitmap_bh);
			     i++, grp_goal--)
				;
		}
	}
	start = grp_goal;

	if (!claim_block(sb_bgl_lock(EXT4_SB(sb), group),
			 grp_goal, bitmap_bh)) {
		/*
		 * The block was allocated by another thread, or it was
		 * allocated and then freed by another thread
		 */
		start++;
		grp_goal++;
		if (start >= end)
			goto fail_access;
		goto repeat;
	}
	num++;
	grp_goal++;
	while (num < *count && grp_goal < end
	       && ext4_test_allocatable(grp_goal, bitmap_bh)
	       && claim_block(sb_bgl_lock(EXT4_SB(sb), group),
			      grp_goal, bitmap_bh)) {
		num++;
		grp_goal++;
	}
	*count = num;
	return grp_goal - num;
fail_access:
	*count = num;
	return -1;
}

/**
 * find_next_reservable_window():
 *	find a reservable space within the given range.
 *	It does not allocate the reservation window for now:
 *	alloc_new_reservation() will do the work later.
 *
 *	@search_head: the head of the searching list;
 *		This is not necessarily the list head of the whole
 *		filesystem.
 *
 *		We have both head and start_block to assist the search
 *		for the reservable space. The list starts from head,
 *		but we will shift to the place where start_block is,
 *		then start from there, when looking for a reservable space.
 *
 *	@size: the target new reservation window size
 *
 *	@group_first_block: the first block we consider to start
 *			the real search from
 *
 *	@last_block:
 *		the maximum block number that our goal reservable space
 *		could start from. This is normally the last block in this
 *		group. The search will end when we find that the start of
 *		the next possible reservable space is out of this boundary.
 *		This could handle the cross-boundary reservation window
 *		request.
 *
 *	Basically we search the given range (start_block, last_block),
 *	rather than the whole reservation rb tree, to find a free region
 *	that is of my size and has not been reserved.
 *
 */
static int find_next_reservable_window(
				struct ext4_reserve_window_node *search_head,
				struct ext4_reserve_window_node *my_rsv,
				struct super_block * sb,
				ext4_fsblk_t start_block,
				ext4_fsblk_t last_block)
{
	struct rb_node *next;
	struct ext4_reserve_window_node *rsv, *prev;
	ext4_fsblk_t cur;
	int size = my_rsv->rsv_goal_size;

	/* TODO: make the start of the reservation window byte-aligned */
	/* cur = *start_block & ~7;*/
	cur = start_block;
	rsv = search_head;
	if (!rsv)
		return -1;

	while (1) {
		if (cur <= rsv->rsv_end)
			cur = rsv->rsv_end + 1;

		/* TODO?
		 * in the case we could not find a reservable space
		 * of the expected size, during the re-search, we could
		 * remember what the largest reservable space we could have
		 * is and return that one.
		 *
		 * For now it will fail if we could not find the reservable
		 * space with the expected size (or more)...
		 */
		if (cur > last_block)
			return -1;		/* fail */

		prev = rsv;
		next = rb_next(&rsv->rsv_node);
		rsv = rb_entry(next, struct ext4_reserve_window_node,
			       rsv_node);

		/*
		 * Reached the last reservation, we can just append to the
		 * previous one.
		 */
		if (!next)
			break;

		if (cur + size <= rsv->rsv_start) {
			/*
			 * Found a reservable space big enough.  We could
			 * have a reservation across the group boundary here
			 */
			break;
		}
	}
	/*
	 * We come here either when we reach the end of the whole list and
	 * there is empty reservable space after the last entry (we then
	 * append to the end of the list), or when we find a reservable
	 * space in the middle of the list (we then return the reservation
	 * window that we could append to).  Either way we succeed.
	 */

	if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
		rsv_window_remove(sb, my_rsv);

	/*
	 * Let's book the whole available window for now.  We will check the
	 * disk bitmap later and then, if there are free blocks, we adjust
	 * the window size if it's larger than requested.
	 * Otherwise, we will remove this node from the tree the next time
	 * find_next_reservable_window() is called.
	 */
	my_rsv->rsv_start = cur;
	my_rsv->rsv_end = cur + size - 1;
	my_rsv->rsv_alloc_hit = 0;

	if (prev != my_rsv)
		ext4_rsv_window_add(sb, my_rsv);

	return 0;
}

/**
 * alloc_new_reservation() -- allocate a new reservation window
 *
 * To make a new reservation, we search part of the filesystem
 * reservation list (the list inside the group).  We try to
 * allocate a new reservation window near the allocation goal,
 * or the beginning of the group, if there is no goal.
 *
 * We first find a reservable space after the goal, then from
 * there, we check the bitmap for the first free block after
 * it.  If there is no free block until the end of the group, then the
 * whole group is full, and we failed.  Otherwise, check if the free
 * block is inside the expected reservable space; if so, we
 * succeed.
 * If the first free block is outside the reservable space, then
 * starting from the first free block, we search for the next available
 * space, and go on.
 *
 * On success, a new reservation will be found and inserted into the list.
 * It contains at least one free block, and it does not overlap with other
 * reservation windows.
 *
 * On failure, we failed to find a reservation window in this group.
 *
 * @rsv: the reservation
 *
 * @grp_goal: The goal (group-relative).  It is where the search for a
 *	free reservable space should start from.
 *	If we have a grp_goal (grp_goal >= 0), then start from there;
 *	with no grp_goal (grp_goal = -1), we start from the first block
 *	of the group.
 *
 * @sb: the super block
 * @group: the group we are trying to allocate in
 * @bitmap_bh: the block group block bitmap
 *
 */
static int alloc_new_reservation(struct ext4_reserve_window_node *my_rsv,
		ext4_grpblk_t grp_goal, struct super_block *sb,
		unsigned int group, struct buffer_head *bitmap_bh)
{
	struct ext4_reserve_window_node *search_head;
	ext4_fsblk_t group_first_block, group_end_block, start_block;
	ext4_grpblk_t first_free_block;
	struct rb_root *fs_rsv_root = &EXT4_SB(sb)->s_rsv_window_root;
	unsigned long size;
	int ret;
	spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;

	group_first_block = ext4_group_first_block_no(sb, group);
	group_end_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

	if (grp_goal < 0)
		start_block = group_first_block;
	else
		start_block = grp_goal + group_first_block;

	size = my_rsv->rsv_goal_size;

	if (!rsv_is_empty(&my_rsv->rsv_window)) {
		/*
		 * if the old reservation crosses the group boundary
		 * and the goal is inside the old reservation window,
		 * we will come here when we just failed to allocate from
		 * the first part of the window.  We still have another part
		 * that belongs to the next group.  In this case, there is no
		 * point in discarding our window and trying to allocate a
		 * new one in this group (which will fail).  We should
		 * keep the reservation window and simply move on.
		 *
		 * Maybe we could shift the start block of the reservation
		 * window to the first block of the next group.
		 */

		if ((my_rsv->rsv_start <= group_end_block) &&
		    (my_rsv->rsv_end > group_end_block) &&
		    (start_block >= my_rsv->rsv_start))
			return -1;

		if ((my_rsv->rsv_alloc_hit >
		     (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
			/*
			 * if the previous allocation hit ratio is
			 * greater than 1/2, then we double the size of
			 * the reservation window the next time,
			 * otherwise we keep the same size window
			 */
			size = size * 2;
			if (size > EXT4_MAX_RESERVE_BLOCKS)
				size = EXT4_MAX_RESERVE_BLOCKS;
			my_rsv->rsv_goal_size = size;
		}
	}

	spin_lock(rsv_lock);
	/*
	 * shift the search start to the window near the goal block
	 */
	search_head = search_reserve_window(fs_rsv_root, start_block);

	/*
	 * find_next_reservable_window() simply finds a reservable window
	 * inside the given range (start_block, group_end_block).
	 *
	 * To make sure the reservation window has a free bit inside it, we
	 * need to check the bitmap after we found a reservable window.
	 */
retry:
	ret = find_next_reservable_window(search_head, my_rsv, sb,
					  start_block, group_end_block);

	if (ret == -1) {
		if (!rsv_is_empty(&my_rsv->rsv_window))
			rsv_window_remove(sb, my_rsv);
		spin_unlock(rsv_lock);
		return -1;
	}

	/*
	 * On success, find_next_reservable_window() returns the
	 * reservation window where there is a reservable space after it.
	 * Before we reserve this reservable space, we need
	 * to make sure there is at least a free block inside this region.
	 *
	 * Search the first free bit on the block bitmap and the copy of the
	 * last committed bitmap alternately, until we find an allocatable
	 * block.  Start the search from the start block of the reservable
	 * space we just found.
	 */
	spin_unlock(rsv_lock);
	first_free_block = bitmap_search_next_usable_block(
			my_rsv->rsv_start - group_first_block,
			bitmap_bh, group_end_block - group_first_block + 1);

	if (first_free_block < 0) {
		/*
		 * no free block left on the bitmap, no point
		 * in reserving the space.  return failed.
		 */
		spin_lock(rsv_lock);
		if (!rsv_is_empty(&my_rsv->rsv_window))
			rsv_window_remove(sb, my_rsv);
		spin_unlock(rsv_lock);
		return -1;		/* failed */
	}

	start_block = first_free_block + group_first_block;
	/*
	 * check if the first free block is within the
	 * free space we just reserved
	 */
	if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end)
		return 0;		/* success */
	/*
	 * if the first free bit we found is out of the reservable space,
	 * continue the search for the next reservable space,
	 * starting from where the free block is;
	 * we also shift the list head to where we stopped last time
	 */
	search_head = my_rsv;
	spin_lock(rsv_lock);
	goto retry;
}

/**
 * try_to_extend_reservation()
 * @my_rsv:		given reservation window
 * @sb:			super block
 * @size:		the delta to extend
 *
 * Attempt to expand the reservation window until it is large enough to
 * hold the required number of free blocks.
 *
 * Since ext4_try_to_allocate() will always allocate blocks within
 * the reservation window range, if the window size is too small,
 * multiple block allocation has to stop at the end of the reservation
 * window.  To make this more efficient, given the total number of
 * blocks needed and the current size of the window, we try to
 * expand the reservation window size if necessary on a best-effort
 * basis before ext4_new_blocks() tries to allocate blocks.
 */
static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv,
				      struct super_block *sb, int size)
{
	struct ext4_reserve_window_node *next_rsv;
	struct rb_node *next;
	spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;

	if (!spin_trylock(rsv_lock))
		return;

	next = rb_next(&my_rsv->rsv_node);

	if (!next)
		my_rsv->rsv_end += size;
	else {
		next_rsv = rb_entry(next, struct ext4_reserve_window_node,
				    rsv_node);

		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
			my_rsv->rsv_end += size;
		else
			my_rsv->rsv_end = next_rsv->rsv_start - 1;
	}
	spin_unlock(rsv_lock);
}

/**
 * ext4_try_to_allocate_with_rsv()
 * @sb:			superblock
 * @handle:		handle to this transaction
 * @group:		given allocation block group
 * @bitmap_bh:		bufferhead holding the block bitmap
 * @grp_goal:		given target block within the group
 * @count:		target number of blocks to allocate
 * @my_rsv:		reservation window
 * @errp:		pointer to store the error code
 *
 * This is the main function used to allocate a new block and its reservation
 * window.
 *
 * Each time a new block allocation is needed, first try to allocate from the
 * inode's own reservation.  If the inode does not have a reservation window,
 * then, instead of first looking for a free bit in the bitmap and then
 * checking the reservation list to see whether that bit is inside somebody
 * else's reservation window, we try to allocate a reservation window for the
 * inode, starting from the goal.  Then we do the block allocation within
 * the reservation window.
 *
 * This will avoid keeping on searching the reservation list again and
 * again when somebody is looking for a free block (without
 * reservation), and there are lots of free blocks, but they are all
 * being reserved.
 *
 * We use a red-black tree for the per-filesystem reservation list.
 *
 */
static ext4_grpblk_t
ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
			      unsigned int group, struct buffer_head *bitmap_bh,
			      ext4_grpblk_t grp_goal,
			      struct ext4_reserve_window_node * my_rsv,
			      unsigned long *count, int *errp)
{
	ext4_fsblk_t group_first_block, group_last_block;
	ext4_grpblk_t ret = 0;
	int fatal;
	unsigned long num = *count;

	*errp = 0;

	/*
	 * Make sure we use undo access for the bitmap, because it is critical
	 * that we do the frozen_data COW on bitmap buffers in all cases even
	 * if the buffer is in BJ_Forget state in the committing transaction.
	 */
	BUFFER_TRACE(bitmap_bh, "get undo access for new block");
	fatal = ext4_journal_get_undo_access(handle, bitmap_bh);
	if (fatal) {
		*errp = fatal;
		return -1;
	}

	/*
	 * we don't deal with reservation when
	 * the filesystem is mounted without reservation,
	 * or the file is not a regular file,
	 * or the last attempt to allocate a block with reservation turned
	 * on failed
	 */
	if (my_rsv == NULL) {
		ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
					   grp_goal, count, NULL);
		goto out;
	}
	/*
	 * grp_goal is a group relative block number (if there is a goal):
	 * 0 <= grp_goal < EXT4_BLOCKS_PER_GROUP(sb).
	 * group_first_block is the filesystem-wide block number of the
	 * first block in this group.
	 */
	group_first_block = ext4_group_first_block_no(sb, group);
	group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

	/*
	 * Basically we will allocate a new block from the inode's reservation
	 * window.
	 *
	 * We need to allocate a new reservation window, if:
	 * a) the inode does not have a reservation window; or
	 * b) the last attempt to allocate a block from the existing
	 *    reservation failed; or
	 * c) we come here with a goal and with a reservation window
	 *
	 * We do not need to allocate a new reservation window if we come here
	 * at the beginning with a goal and the goal is inside the window, or
	 * if we don't have a goal but already have a reservation window.
	 * In those cases we go on to allocate from the reservation window
	 * directly.
	 */
	while (1) {
		if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
		    !goal_in_my_reservation(&my_rsv->rsv_window,
					    grp_goal, group, sb)) {
			if (my_rsv->rsv_goal_size < *count)
				my_rsv->rsv_goal_size = *count;
			ret = alloc_new_reservation(my_rsv, grp_goal, sb,
						    group, bitmap_bh);
			if (ret < 0)
				break;			/* failed */

			if (!goal_in_my_reservation(&my_rsv->rsv_window,
						    grp_goal, group, sb))
				grp_goal = -1;
		} else if (grp_goal > 0 &&
			   (my_rsv->rsv_end - grp_goal + 1) < *count)
			try_to_extend_reservation(my_rsv, sb,
					*count - my_rsv->rsv_end + grp_goal - 1);

		if ((my_rsv->rsv_start > group_last_block) ||
		    (my_rsv->rsv_end < group_first_block)) {
			rsv_window_dump(&EXT4_SB(sb)->s_rsv_window_root, 1);
			BUG();
		}
		ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
					   grp_goal, &num,
					   &my_rsv->rsv_window);
		if (ret >= 0) {
			my_rsv->rsv_alloc_hit += num;
			*count = num;
			break;				/* succeed */
		}
		num = *count;
	}
out:
	if (ret >= 0) {
		BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
			     "bitmap block");
		fatal = ext4_journal_dirty_metadata(handle, bitmap_bh);
		if (fatal) {
			*errp = fatal;
			return -1;
		}
		return ret;
	}

	BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
	ext4_journal_release_buffer(handle, bitmap_bh);
	return ret;
}

/**
 * ext4_has_free_blocks()
 * @sbi:		in-core super block structure.
 *
 * Check if the filesystem has at least 1 free block available for allocation.
 */
static int ext4_has_free_blocks(struct ext4_sb_info *sbi)
{
	ext4_fsblk_t free_blocks, root_blocks;

	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	root_blocks = ext4_r_blocks_count(sbi->s_es);
	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
	    sbi->s_resuid != current->fsuid &&
	    (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
		return 0;
	}
	return 1;
}

/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_blocks(EXT4_SB(sb)) || (*retries)++ > 3)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}

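/*
 * Editor's note (illustration, not part of this commit): the retry
 * loop ext4_should_retry_alloc() is designed for.  The operation
 * being retried is a hypothetical stand-in for any allocating path
 * that can fail with ENOSPC.
 */
#if 0	/* illustrative sketch only */
	int err, retries = 0;

retry:
	err = do_allocating_operation(inode);	/* hypothetical helper */
	if (err == -ENOSPC &&
	    ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
#endif
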
1405 | /** | ||
1406 | * ext4_new_blocks() -- core block(s) allocation function | ||
1407 | * @handle: handle to this transaction | ||
1408 | * @inode: file inode | ||
1409 | * @goal: given target block(filesystem wide) | ||
1410 | * @count: target number of blocks to allocate | ||
1411 | * @errp: error code | ||
1412 | * | ||
1413 | * ext4_new_blocks uses a goal block to assist allocation. It tries to | ||
1414 | * allocate block(s) from the block group contains the goal block first. If that | ||
1415 | * fails, it will try to allocate block(s) from other block groups without | ||
1416 | * any specific goal block. | ||
1417 | * | ||
1418 | */ | ||
1419 | ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode, | ||
1420 | ext4_fsblk_t goal, unsigned long *count, int *errp) | ||
1421 | { | ||
1422 | struct buffer_head *bitmap_bh = NULL; | ||
1423 | struct buffer_head *gdp_bh; | ||
1424 | unsigned long group_no; | ||
1425 | int goal_group; | ||
1426 | ext4_grpblk_t grp_target_blk; /* blockgroup relative goal block */ | ||
1427 | ext4_grpblk_t grp_alloc_blk; /* blockgroup-relative allocated block*/ | ||
1428 | ext4_fsblk_t ret_block; /* filesystem-wide allocated block */ | ||
1429 | int bgi; /* blockgroup iteration index */ | ||
1430 | int fatal = 0, err; | ||
1431 | int performed_allocation = 0; | ||
1432 | ext4_grpblk_t free_blocks; /* number of free blocks in a group */ | ||
1433 | struct super_block *sb; | ||
1434 | struct ext4_group_desc *gdp; | ||
1435 | struct ext4_super_block *es; | ||
1436 | struct ext4_sb_info *sbi; | ||
1437 | struct ext4_reserve_window_node *my_rsv = NULL; | ||
1438 | struct ext4_block_alloc_info *block_i; | ||
1439 | unsigned short windowsz = 0; | ||
1440 | #ifdef EXT4FS_DEBUG | ||
1441 | static int goal_hits, goal_attempts; | ||
1442 | #endif | ||
1443 | unsigned long ngroups; | ||
1444 | unsigned long num = *count; | ||
1445 | |||
1446 | *errp = -ENOSPC; | ||
1447 | sb = inode->i_sb; | ||
1448 | if (!sb) { | ||
1449 | printk("ext4_new_block: nonexistent device\n"); | ||
1450 | return 0; | ||
1451 | } | ||
1452 | |||
1453 | /* | ||
1454 | * Check quota for allocation of these blocks. | ||
1455 | */ | ||
1456 | if (DQUOT_ALLOC_BLOCK(inode, num)) { | ||
1457 | *errp = -EDQUOT; | ||
1458 | return 0; | ||
1459 | } | ||
1460 | |||
1461 | sbi = EXT4_SB(sb); | ||
1462 | es = EXT4_SB(sb)->s_es; | ||
1463 | ext4_debug("goal=%llu.\n", goal); | ||
1464 | /* | ||
1465 | * Allocate a block from the reservation window only when the | ||
1466 | * filesystem is mounted with reservations (the default, -o reservation), | ||
1467 | * it's a regular file, and | ||
1468 | * the desired window size is greater than 0 (one could use the ioctl | ||
1469 | * command EXT4_IOC_SETRSVSZ to set the window size to 0 to turn off | ||
1470 | * reservation on that particular file). | ||
1471 | */ | ||
1472 | block_i = EXT4_I(inode)->i_block_alloc_info; | ||
1473 | if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0)) | ||
1474 | my_rsv = &block_i->rsv_window_node; | ||
1475 | |||
1476 | if (!ext4_has_free_blocks(sbi)) { | ||
1477 | *errp = -ENOSPC; | ||
1478 | goto out; | ||
1479 | } | ||
1480 | |||
1481 | /* | ||
1482 | * First, test whether the goal block is free. | ||
1483 | */ | ||
1484 | if (goal < le32_to_cpu(es->s_first_data_block) || | ||
1485 | goal >= ext4_blocks_count(es)) | ||
1486 | goal = le32_to_cpu(es->s_first_data_block); | ||
1487 | ext4_get_group_no_and_offset(sb, goal, &group_no, &grp_target_blk); | ||
1488 | goal_group = group_no; | ||
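| /* | ||
|  * For illustration, with 1k blocks (s_first_data_block == 1, 8192 | ||
|  * blocks per group): goal 20000 is 19999 blocks past the first data | ||
|  * block, i.e. group_no 2, grp_target_blk 3615 (19999 = 2*8192 + 3615). | ||
|  */ | ||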
1489 | retry_alloc: | ||
1490 | gdp = ext4_get_group_desc(sb, group_no, &gdp_bh); | ||
1491 | if (!gdp) | ||
1492 | goto io_error; | ||
1493 | |||
1494 | free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); | ||
1495 | /* | ||
1496 | * If there are not enough free blocks to make a new reservation, | ||
1497 | * turn off reservation for this allocation. | ||
1498 | */ | ||
1499 | if (my_rsv && (free_blocks < windowsz) | ||
1500 | && (rsv_is_empty(&my_rsv->rsv_window))) | ||
1501 | my_rsv = NULL; | ||
1502 | |||
1503 | if (free_blocks > 0) { | ||
1504 | bitmap_bh = read_block_bitmap(sb, group_no); | ||
1505 | if (!bitmap_bh) | ||
1506 | goto io_error; | ||
1507 | grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle, | ||
1508 | group_no, bitmap_bh, grp_target_blk, | ||
1509 | my_rsv, &num, &fatal); | ||
1510 | if (fatal) | ||
1511 | goto out; | ||
1512 | if (grp_alloc_blk >= 0) | ||
1513 | goto allocated; | ||
1514 | } | ||
1515 | |||
1516 | ngroups = EXT4_SB(sb)->s_groups_count; | ||
1517 | smp_rmb(); | ||
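| /* | ||
|  * No lock protects s_groups_count against online resize here; the | ||
|  * barrier keeps the group descriptor reads below from being reordered | ||
|  * before the count read, as resize publishes the new descriptors | ||
|  * before raising s_groups_count. | ||
|  */ | ||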
1518 | |||
1519 | /* | ||
1520 | * Now search the rest of the groups. We assume that | ||
1521 | * group_no and gdp correctly point to the last group visited. | ||
1522 | */ | ||
1523 | for (bgi = 0; bgi < ngroups; bgi++) { | ||
1524 | group_no++; | ||
1525 | if (group_no >= ngroups) | ||
1526 | group_no = 0; | ||
1527 | gdp = ext4_get_group_desc(sb, group_no, &gdp_bh); | ||
1528 | if (!gdp) { | ||
1529 | *errp = -EIO; | ||
1530 | goto out; | ||
1531 | } | ||
1532 | free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); | ||
1533 | /* | ||
1534 | * skip this group if the number of | ||
1535 | * free blocks is half of the reservation | ||
1536 | * window size or less. | ||
1537 | */ | ||
1538 | if (free_blocks <= (windowsz/2)) | ||
1539 | continue; | ||
1540 | |||
1541 | brelse(bitmap_bh); | ||
1542 | bitmap_bh = read_block_bitmap(sb, group_no); | ||
1543 | if (!bitmap_bh) | ||
1544 | goto io_error; | ||
1545 | /* | ||
1546 | * try to allocate block(s) from this group, without a goal(-1). | ||
1547 | */ | ||
1548 | grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle, | ||
1549 | group_no, bitmap_bh, -1, my_rsv, | ||
1550 | &num, &fatal); | ||
1551 | if (fatal) | ||
1552 | goto out; | ||
1553 | if (grp_alloc_blk >= 0) | ||
1554 | goto allocated; | ||
1555 | } | ||
1556 | /* | ||
1557 | * We may end up with a bogus earlier ENOSPC error here: the | ||
1558 | * filesystem can be "full" of reservations while free blocks | ||
1559 | * are in fact still available on disk. In that case we just | ||
1560 | * forget about the reservations and redo the allocation as | ||
1561 | * if reservations were disabled. | ||
1562 | */ | ||
1563 | if (my_rsv) { | ||
1564 | my_rsv = NULL; | ||
1565 | group_no = goal_group; | ||
1566 | goto retry_alloc; | ||
1567 | } | ||
1568 | /* No space left on the device */ | ||
1569 | *errp = -ENOSPC; | ||
1570 | goto out; | ||
1571 | |||
1572 | allocated: | ||
1573 | |||
1574 | ext4_debug("using block group %lu(%d)\n", | ||
1575 | group_no, le16_to_cpu(gdp->bg_free_blocks_count)); | ||
1576 | |||
1577 | BUFFER_TRACE(gdp_bh, "get_write_access"); | ||
1578 | fatal = ext4_journal_get_write_access(handle, gdp_bh); | ||
1579 | if (fatal) | ||
1580 | goto out; | ||
1581 | |||
1582 | ret_block = grp_alloc_blk + ext4_group_first_block_no(sb, group_no); | ||
1583 | |||
1584 | if (in_range(ext4_block_bitmap(sb, gdp), ret_block, num) || | ||
1585 | in_range(ext4_inode_bitmap(sb, gdp), ret_block, num) || | ||
1586 | in_range(ret_block, ext4_inode_table(sb, gdp), | ||
1587 | EXT4_SB(sb)->s_itb_per_group) || | ||
1588 | in_range(ret_block + num - 1, ext4_inode_table(sb, gdp), | ||
1589 | EXT4_SB(sb)->s_itb_per_group)) | ||
1590 | ext4_error(sb, "ext4_new_block", | ||
1591 | "Allocating block in system zone - " | ||
1592 | "blocks from %llu, length %lu", | ||
1593 | ret_block, num); | ||
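| /* | ||
|  * The check above guards against bitmap corruption, e.g. a block | ||
|  * bitmap that wrongly shows the group's own bitmaps or inode table | ||
|  * as free; allocating there would let file data overwrite metadata. | ||
|  */ | ||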
1594 | |||
1595 | performed_allocation = 1; | ||
1596 | |||
1597 | #ifdef CONFIG_JBD2_DEBUG | ||
1598 | { | ||
1599 | struct buffer_head *debug_bh; | ||
1600 | |||
1601 | /* Record bitmap buffer state in the newly allocated block */ | ||
1602 | debug_bh = sb_find_get_block(sb, ret_block); | ||
1603 | if (debug_bh) { | ||
1604 | BUFFER_TRACE(debug_bh, "state when allocated"); | ||
1605 | BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state"); | ||
1606 | brelse(debug_bh); | ||
1607 | } | ||
1608 | } | ||
1609 | jbd_lock_bh_state(bitmap_bh); | ||
1610 | spin_lock(sb_bgl_lock(sbi, group_no)); | ||
1611 | if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) { | ||
1612 | int i; | ||
1613 | |||
1614 | for (i = 0; i < num; i++) { | ||
1615 | if (ext4_test_bit(grp_alloc_blk+i, | ||
1616 | bh2jh(bitmap_bh)->b_committed_data)) { | ||
1617 | printk("%s: block was unexpectedly set in " | ||
1618 | "b_committed_data\n", __FUNCTION__); | ||
1619 | } | ||
1620 | } | ||
1621 | } | ||
1622 | ext4_debug("found bit %d\n", grp_alloc_blk); | ||
1623 | spin_unlock(sb_bgl_lock(sbi, group_no)); | ||
1624 | jbd_unlock_bh_state(bitmap_bh); | ||
1625 | #endif | ||
1626 | |||
1627 | if (ret_block + num - 1 >= ext4_blocks_count(es)) { | ||
1628 | ext4_error(sb, "ext4_new_block", | ||
1629 | "block(%llu) >= blocks count(%llu) - " | ||
1630 | "block_group = %lu, es == %p ", ret_block, | ||
1631 | ext4_blocks_count(es), group_no, es); | ||
1632 | goto out; | ||
1633 | } | ||
1634 | |||
1635 | /* | ||
1636 | * It is up to the caller to add the new buffer to a journal | ||
1637 | * list of some description. We don't know in advance whether | ||
1638 | * the caller wants to use it as metadata or data. | ||
1639 | */ | ||
1640 | ext4_debug("allocating block %llu. Goal hits %d of %d.\n", | ||
1641 | ret_block, goal_hits, goal_attempts); | ||
1642 | |||
1643 | spin_lock(sb_bgl_lock(sbi, group_no)); | ||
1644 | gdp->bg_free_blocks_count = | ||
1645 | cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num); | ||
1646 | spin_unlock(sb_bgl_lock(sbi, group_no)); | ||
1647 | percpu_counter_mod(&sbi->s_freeblocks_counter, -num); | ||
1648 | |||
1649 | BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor"); | ||
1650 | err = ext4_journal_dirty_metadata(handle, gdp_bh); | ||
1651 | if (!fatal) | ||
1652 | fatal = err; | ||
1653 | |||
1654 | sb->s_dirt = 1; | ||
1655 | if (fatal) | ||
1656 | goto out; | ||
1657 | |||
1658 | *errp = 0; | ||
1659 | brelse(bitmap_bh); | ||
1660 | DQUOT_FREE_BLOCK(inode, *count - num); | ||
1661 | *count = num; | ||
1662 | return ret_block; | ||
1663 | |||
1664 | io_error: | ||
1665 | *errp = -EIO; | ||
1666 | out: | ||
1667 | if (fatal) { | ||
1668 | *errp = fatal; | ||
1669 | ext4_std_error(sb, fatal); | ||
1670 | } | ||
1671 | /* | ||
1672 | * Undo the quota block charge if we never allocated anything | ||
1673 | */ | ||
1674 | if (!performed_allocation) | ||
1675 | DQUOT_FREE_BLOCK(inode, *count); | ||
1676 | brelse(bitmap_bh); | ||
1677 | return 0; | ||
1678 | } | ||
1679 | |||
1680 | ext4_fsblk_t ext4_new_block(handle_t *handle, struct inode *inode, | ||
1681 | ext4_fsblk_t goal, int *errp) | ||
1682 | { | ||
1683 | unsigned long count = 1; | ||
1684 | |||
1685 | return ext4_new_blocks(handle, inode, goal, &count, errp); | ||
1686 | } | ||
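| /* | ||
|  * Multi-block sketch (hypothetical helper, not part of this file): | ||
|  * ext4_new_blocks() may return fewer blocks than requested; *count is | ||
|  * updated to the number actually allocated, which are contiguous | ||
|  * starting at the returned block. | ||
|  */ | ||
| #if 0 | ||
| static void example_alloc_run(handle_t *handle, struct inode *inode, | ||
| ext4_fsblk_t goal) | ||
| { | ||
| unsigned long count = 16; | ||
| int err; | ||
| ext4_fsblk_t first = ext4_new_blocks(handle, inode, goal, &count, &err); | ||
| | ||
| if (first) | ||
| ext4_debug("got blocks %llu..%llu\n", first, first + count - 1); | ||
| } | ||
| #endif | ||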
1687 | |||
1688 | /** | ||
1689 | * ext4_count_free_blocks() -- count filesystem free blocks | ||
1690 | * @sb: superblock | ||
1691 | * | ||
1692 | * Adds up the number of free blocks from each block group. | ||
1693 | */ | ||
1694 | ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb) | ||
1695 | { | ||
1696 | ext4_fsblk_t desc_count; | ||
1697 | struct ext4_group_desc *gdp; | ||
1698 | int i; | ||
1699 | unsigned long ngroups = EXT4_SB(sb)->s_groups_count; | ||
1700 | #ifdef EXT4FS_DEBUG | ||
1701 | struct ext4_super_block *es; | ||
1702 | ext4_fsblk_t bitmap_count; | ||
1703 | unsigned long x; | ||
1704 | struct buffer_head *bitmap_bh = NULL; | ||
1705 | |||
1706 | es = EXT4_SB(sb)->s_es; | ||
1707 | desc_count = 0; | ||
1708 | bitmap_count = 0; | ||
1709 | gdp = NULL; | ||
1710 | |||
1711 | smp_rmb(); | ||
1712 | for (i = 0; i < ngroups; i++) { | ||
1713 | gdp = ext4_get_group_desc(sb, i, NULL); | ||
1714 | if (!gdp) | ||
1715 | continue; | ||
1716 | desc_count += le16_to_cpu(gdp->bg_free_blocks_count); | ||
1717 | brelse(bitmap_bh); | ||
1718 | bitmap_bh = read_block_bitmap(sb, i); | ||
1719 | if (bitmap_bh == NULL) | ||
1720 | continue; | ||
1721 | |||
1722 | x = ext4_count_free(bitmap_bh, sb->s_blocksize); | ||
1723 | printk("group %d: stored = %d, counted = %lu\n", | ||
1724 | i, le16_to_cpu(gdp->bg_free_blocks_count), x); | ||
1725 | bitmap_count += x; | ||
1726 | } | ||
1727 | brelse(bitmap_bh); | ||
1728 | printk("ext4_count_free_blocks: stored = %llu" | ||
1729 | ", computed = %llu, %llu\n", | ||
1730 | EXT4_FREE_BLOCKS_COUNT(es), | ||
1731 | desc_count, bitmap_count); | ||
1732 | return bitmap_count; | ||
1733 | #else | ||
1734 | desc_count = 0; | ||
1735 | smp_rmb(); | ||
1736 | for (i = 0; i < ngroups; i++) { | ||
1737 | gdp = ext4_get_group_desc(sb, i, NULL); | ||
1738 | if (!gdp) | ||
1739 | continue; | ||
1740 | desc_count += le16_to_cpu(gdp->bg_free_blocks_count); | ||
1741 | } | ||
1742 | |||
1743 | return desc_count; | ||
1744 | #endif | ||
1745 | } | ||
1746 | |||
1747 | static inline int | ||
1748 | block_in_use(ext4_fsblk_t block, struct super_block *sb, unsigned char *map) | ||
1749 | { | ||
1750 | ext4_grpblk_t offset; | ||
1751 | |||
1752 | ext4_get_group_no_and_offset(sb, block, NULL, &offset); | ||
1753 | return ext4_test_bit(offset, map); | ||
1754 | } | ||
1755 | |||
1756 | static inline int test_root(int a, int b) | ||
1757 | { | ||
1758 | int num = b; | ||
1759 | |||
1760 | while (a > num) | ||
1761 | num *= b; | ||
1762 | return num == a; | ||
1763 | } | ||
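| /* | ||
|  * test_root(a, b) is true when a is a positive power of b: e.g. | ||
|  * test_root(49, 7) -> 1 (7, 49), test_root(8, 3) -> 0 (3, 9 passes 8). | ||
|  */ | ||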
1764 | |||
1765 | static int ext4_group_sparse(int group) | ||
1766 | { | ||
1767 | if (group <= 1) | ||
1768 | return 1; | ||
1769 | if (!(group & 1)) | ||
1770 | return 0; | ||
1771 | return (test_root(group, 7) || test_root(group, 5) || | ||
1772 | test_root(group, 3)); | ||
1773 | } | ||
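| /* | ||
|  * So with SPARSE_SUPER, only groups 0, 1 and the powers of 3, 5 and 7 | ||
|  * carry superblock/descriptor backups: 0, 1, 3, 5, 7, 9, 25, 27, 49... | ||
|  */ | ||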
1774 | |||
1775 | /** | ||
1776 | * ext4_bg_has_super - number of blocks used by the superblock in group | ||
1777 | * @sb: superblock for filesystem | ||
1778 | * @group: group number to check | ||
1779 | * | ||
1780 | * Return the number of blocks used by the superblock (primary or backup) | ||
1781 | * in this group. Currently this will only ever be 0 or 1. | ||
1782 | */ | ||
1783 | int ext4_bg_has_super(struct super_block *sb, int group) | ||
1784 | { | ||
1785 | if (EXT4_HAS_RO_COMPAT_FEATURE(sb, | ||
1786 | EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) && | ||
1787 | !ext4_group_sparse(group)) | ||
1788 | return 0; | ||
1789 | return 1; | ||
1790 | } | ||
1791 | |||
1792 | static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb, int group) | ||
1793 | { | ||
1794 | unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb); | ||
1795 | unsigned long first = metagroup * EXT4_DESC_PER_BLOCK(sb); | ||
1796 | unsigned long last = first + EXT4_DESC_PER_BLOCK(sb) - 1; | ||
1797 | |||
1798 | if (group == first || group == first + 1 || group == last) | ||
1799 | return 1; | ||
1800 | return 0; | ||
1801 | } | ||
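| /* | ||
|  * With META_BG, each metagroup of EXT4_DESC_PER_BLOCK(sb) groups keeps | ||
|  * its single descriptor block in the metagroup's first group, with | ||
|  * backups in the second and last groups; all other groups store none. | ||
|  */ | ||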
1802 | |||
1803 | static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb, int group) | ||
1804 | { | ||
1805 | if (EXT4_HAS_RO_COMPAT_FEATURE(sb, | ||
1806 | EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) && | ||
1807 | !ext4_group_sparse(group)) | ||
1808 | return 0; | ||
1809 | return EXT4_SB(sb)->s_gdb_count; | ||
1810 | } | ||
1811 | |||
1812 | /** | ||
1813 | * ext4_bg_num_gdb - number of blocks used by the group table in group | ||
1814 | * @sb: superblock for filesystem | ||
1815 | * @group: group number to check | ||
1816 | * | ||
1817 | * Return the number of blocks used by the group descriptor table | ||
1818 | * (primary or backup) in this group. In the future there may be a | ||
1819 | * different number of descriptor blocks in each group. | ||
1820 | */ | ||
1821 | unsigned long ext4_bg_num_gdb(struct super_block *sb, int group) | ||
1822 | { | ||
1823 | unsigned long first_meta_bg = | ||
1824 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg); | ||
1825 | unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb); | ||
1826 | |||
1827 | if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) || | ||
1828 | metagroup < first_meta_bg) | ||
1829 | return ext4_bg_num_gdb_nometa(sb, group); | ||
1830 |||
1831 | return ext4_bg_num_gdb_meta(sb, group); | ||
1832 | |||
1833 | } | ||
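| /* | ||
|  * Example (no META_BG, sparse_super on): a group with a superblock | ||
|  * backup, e.g. group 3, also holds all s_gdb_count descriptor blocks, | ||
|  * while a group without one, e.g. group 2, holds no descriptor blocks. | ||
|  */ | ||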