author     Mingming Cao <cmm@us.ibm.com>            2006-10-11 04:20:53 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-10-11 14:14:15 -0400
commit     617ba13b31fbf505cc21799826639ef24ed94af0
tree       2a41e8c993f7c1eed115ad24047d546ba56cbdf5 /fs/ext4/balloc.c
parent     ac27a0ec112a089f1a5102bc8dffc79c8c815571
[PATCH] ext4: rename ext4 symbols to avoid duplication of ext3 symbols
Mingming Cao originally did this work, and Shaggy reproduced it using some
scripts from her.
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: Dave Kleikamp <shaggy@austin.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs/ext4/balloc.c')
-rw-r--r--  fs/ext4/balloc.c | 536
1 file changed, 268 insertions, 268 deletions
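
The rename itself is mechanical. As a rough illustration only (a hypothetical sketch, not the actual scripts used for this patch), a substitution along the following lines produces most of the hunks below; bare mentions of "ext3" in comments are handled separately, as several hunks show.

# Hypothetical sketch of the kind of mechanical ext3 -> ext4 substitution behind
# this patch; the real scripts referenced in the commit message are not included here.
import re
import sys

def rename_symbols(text):
    # Rewrite ext3_/EXT3_ identifier prefixes to ext4_/EXT4_ and fix the path in
    # the file header comment; everything else is left untouched.
    text = re.sub(r"\bext3_", "ext4_", text)
    text = re.sub(r"\bEXT3_", "EXT4_", text)
    text = text.replace("linux/fs/ext3/", "linux/fs/ext4/")
    return text

if __name__ == "__main__":
    for path in sys.argv[1:]:
        with open(path) as f:
            src = f.read()
        with open(path, "w") as f:
            f.write(rename_symbols(src))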
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index b41a7d7e20f0..357e4e50374a 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -1,5 +1,5 @@
 /*
- * linux/fs/ext3/balloc.c
+ * linux/fs/ext4/balloc.c
  *
  * Copyright (C) 1992, 1993, 1994, 1995
  * Remy Card (card@masi.ibp.fr)
@@ -15,8 +15,8 @@
 #include <linux/capability.h>
 #include <linux/fs.h>
 #include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
+#include <linux/ext4_fs.h>
+#include <linux/ext4_jbd.h>
 #include <linux/quotaops.h>
 #include <linux/buffer_head.h>

@@ -32,30 +32,30 @@
  * The file system contains group descriptors which are located after the
  * super block. Each descriptor contains the number of the bitmap block and
  * the free blocks count in the block. The descriptors are loaded in memory
- * when a file system is mounted (see ext3_read_super).
+ * when a file system is mounted (see ext4_read_super).
  */


 #define in_range(b, first, len)        ((b) >= (first) && (b) <= (first) + (len) - 1)

 /**
- * ext3_get_group_desc() -- load group descriptor from disk
+ * ext4_get_group_desc() -- load group descriptor from disk
  * @sb: super block
  * @block_group: given block group
  * @bh: pointer to the buffer head to store the block
  *      group descriptor
  */
-struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
+struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
                                              unsigned int block_group,
                                              struct buffer_head ** bh)
 {
        unsigned long group_desc;
        unsigned long offset;
-       struct ext3_group_desc * desc;
-       struct ext3_sb_info *sbi = EXT3_SB(sb);
+       struct ext4_group_desc * desc;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (block_group >= sbi->s_groups_count) {
-               ext3_error (sb, "ext3_get_group_desc",
+               ext4_error (sb, "ext4_get_group_desc",
                            "block_group >= groups_count - "
                            "block_group = %d, groups_count = %lu",
                            block_group, sbi->s_groups_count);
@@ -64,17 +64,17 @@ struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
        }
        smp_rmb();

-       group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
-       offset = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
+       group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
+       offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
        if (!sbi->s_group_desc[group_desc]) {
-               ext3_error (sb, "ext3_get_group_desc",
+               ext4_error (sb, "ext4_get_group_desc",
                            "Group descriptor not loaded - "
                            "block_group = %d, group_desc = %lu, desc = %lu",
                            block_group, group_desc, offset);
                return NULL;
        }

-       desc = (struct ext3_group_desc *) sbi->s_group_desc[group_desc]->b_data;
+       desc = (struct ext4_group_desc *) sbi->s_group_desc[group_desc]->b_data;
        if (bh)
                *bh = sbi->s_group_desc[group_desc];
        return desc + offset;
@@ -93,15 +93,15 @@ struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
 static struct buffer_head *
 read_block_bitmap(struct super_block *sb, unsigned int block_group)
 {
-       struct ext3_group_desc * desc;
+       struct ext4_group_desc * desc;
        struct buffer_head * bh = NULL;

-       desc = ext3_get_group_desc (sb, block_group, NULL);
+       desc = ext4_get_group_desc (sb, block_group, NULL);
        if (!desc)
                goto error_out;
        bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
        if (!bh)
-               ext3_error (sb, "read_block_bitmap",
+               ext4_error (sb, "read_block_bitmap",
                            "Cannot read block bitmap - "
                            "block_group = %d, block_bitmap = %u",
                            block_group, le32_to_cpu(desc->bg_block_bitmap));
@@ -134,7 +134,7 @@ static void __rsv_window_dump(struct rb_root *root, int verbose,
                              const char *fn)
 {
        struct rb_node *n;
-       struct ext3_reserve_window_node *rsv, *prev;
+       struct ext4_reserve_window_node *rsv, *prev;
        int bad;

 restart:
@@ -144,7 +144,7 @@ restart:

        printk("Block Allocation Reservation Windows Map (%s):\n", fn);
        while (n) {
-               rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node);
+               rsv = list_entry(n, struct ext4_reserve_window_node, rsv_node);
                if (verbose)
                        printk("reservation window 0x%p "
                               "start: %lu, end: %lu\n",
@@ -196,13 +196,13 @@ restart:
  * otherwise, return 0;
  */
 static int
-goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal,
+goal_in_my_reservation(struct ext4_reserve_window *rsv, ext4_grpblk_t grp_goal,
                        unsigned int group, struct super_block * sb)
 {
-       ext3_fsblk_t group_first_block, group_last_block;
+       ext4_fsblk_t group_first_block, group_last_block;

-       group_first_block = ext3_group_first_block_no(sb, group);
-       group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
+       group_first_block = ext4_group_first_block_no(sb, group);
+       group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

        if ((rsv->_rsv_start > group_last_block) ||
            (rsv->_rsv_end < group_first_block))
@@ -222,17 +222,17 @@ goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal,
  * if the goal is not in any window.
  * Returns NULL if there are no windows or if all windows start after the goal.
  */
-static struct ext3_reserve_window_node *
-search_reserve_window(struct rb_root *root, ext3_fsblk_t goal)
+static struct ext4_reserve_window_node *
+search_reserve_window(struct rb_root *root, ext4_fsblk_t goal)
 {
        struct rb_node *n = root->rb_node;
-       struct ext3_reserve_window_node *rsv;
+       struct ext4_reserve_window_node *rsv;

        if (!n)
                return NULL;

        do {
-               rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
+               rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);

                if (goal < rsv->rsv_start)
                        n = n->rb_left;
@@ -249,33 +249,33 @@ search_reserve_window(struct rb_root *root, ext3_fsblk_t goal)
         */
        if (rsv->rsv_start > goal) {
                n = rb_prev(&rsv->rsv_node);
-               rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
+               rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
        }
        return rsv;
 }

 /**
- * ext3_rsv_window_add() -- Insert a window to the block reservation rb tree.
+ * ext4_rsv_window_add() -- Insert a window to the block reservation rb tree.
  * @sb: super block
  * @rsv: reservation window to add
  *
  * Must be called with rsv_lock hold.
  */
-void ext3_rsv_window_add(struct super_block *sb,
-                   struct ext3_reserve_window_node *rsv)
+void ext4_rsv_window_add(struct super_block *sb,
+                   struct ext4_reserve_window_node *rsv)
 {
-       struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root;
+       struct rb_root *root = &EXT4_SB(sb)->s_rsv_window_root;
        struct rb_node *node = &rsv->rsv_node;
-       ext3_fsblk_t start = rsv->rsv_start;
+       ext4_fsblk_t start = rsv->rsv_start;

        struct rb_node ** p = &root->rb_node;
        struct rb_node * parent = NULL;
-       struct ext3_reserve_window_node *this;
+       struct ext4_reserve_window_node *this;

        while (*p)
        {
                parent = *p;
-               this = rb_entry(parent, struct ext3_reserve_window_node, rsv_node);
+               this = rb_entry(parent, struct ext4_reserve_window_node, rsv_node);

                if (start < this->rsv_start)
                        p = &(*p)->rb_left;
@@ -292,7 +292,7 @@ void ext3_rsv_window_add(struct super_block *sb,
 }

 /**
- * ext3_rsv_window_remove() -- unlink a window from the reservation rb tree
+ * ext4_rsv_window_remove() -- unlink a window from the reservation rb tree
  * @sb: super block
  * @rsv: reservation window to remove
  *
@@ -301,59 +301,59 @@ void ext3_rsv_window_add(struct super_block *sb,
  * rsv_lock hold.
  */
 static void rsv_window_remove(struct super_block *sb,
-                             struct ext3_reserve_window_node *rsv)
+                             struct ext4_reserve_window_node *rsv)
 {
-       rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-       rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
+       rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
+       rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
        rsv->rsv_alloc_hit = 0;
-       rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root);
+       rb_erase(&rsv->rsv_node, &EXT4_SB(sb)->s_rsv_window_root);
 }

 /*
  * rsv_is_empty() -- Check if the reservation window is allocated.
  * @rsv: given reservation window to check
  *
- * returns 1 if the end block is EXT3_RESERVE_WINDOW_NOT_ALLOCATED.
+ * returns 1 if the end block is EXT4_RESERVE_WINDOW_NOT_ALLOCATED.
  */
-static inline int rsv_is_empty(struct ext3_reserve_window *rsv)
+static inline int rsv_is_empty(struct ext4_reserve_window *rsv)
 {
        /* a valid reservation end block could not be 0 */
-       return rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
+       return rsv->_rsv_end == EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
 }

 /**
- * ext3_init_block_alloc_info()
+ * ext4_init_block_alloc_info()
  * @inode: file inode structure
  *
  * Allocate and initialize the reservation window structure, and
- * link the window to the ext3 inode structure at last
+ * link the window to the ext4 inode structure at last
  *
  * The reservation window structure is only dynamically allocated
- * and linked to ext3 inode the first time the open file
- * needs a new block. So, before every ext3_new_block(s) call, for
+ * and linked to ext4 inode the first time the open file
+ * needs a new block. So, before every ext4_new_block(s) call, for
  * regular files, we should check whether the reservation window
  * structure exists or not. In the latter case, this function is called.
  * Fail to do so will result in block reservation being turned off for that
  * open file.
  *
- * This function is called from ext3_get_blocks_handle(), also called
+ * This function is called from ext4_get_blocks_handle(), also called
  * when setting the reservation window size through ioctl before the file
  * is open for write (needs block allocation).
  *
  * Needs truncate_mutex protection prior to call this function.
  */
-void ext3_init_block_alloc_info(struct inode *inode)
+void ext4_init_block_alloc_info(struct inode *inode)
 {
-       struct ext3_inode_info *ei = EXT3_I(inode);
-       struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
        struct super_block *sb = inode->i_sb;

        block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
        if (block_i) {
-               struct ext3_reserve_window_node *rsv = &block_i->rsv_window_node;
+               struct ext4_reserve_window_node *rsv = &block_i->rsv_window_node;

-               rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-               rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
+               rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
+               rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;

                /*
                 * if filesystem is mounted with NORESERVATION, the goal
@@ -363,7 +363,7 @@ void ext3_init_block_alloc_info(struct inode *inode)
        if (!test_opt(sb, RESERVATION))
                rsv->rsv_goal_size = 0;
        else
-               rsv->rsv_goal_size = EXT3_DEFAULT_RESERVE_BLOCKS;
+               rsv->rsv_goal_size = EXT4_DEFAULT_RESERVE_BLOCKS;
        rsv->rsv_alloc_hit = 0;
        block_i->last_alloc_logical_block = 0;
        block_i->last_alloc_physical_block = 0;
@@ -372,24 +372,24 @@ void ext3_init_block_alloc_info(struct inode *inode)
 }

 /**
- * ext3_discard_reservation()
+ * ext4_discard_reservation()
  * @inode: inode
  *
  * Discard(free) block reservation window on last file close, or truncate
  * or at last iput().
  *
  * It is being called in three cases:
- *     ext3_release_file(): last writer close the file
- *     ext3_clear_inode(): last iput(), when nobody link to this file.
- *     ext3_truncate(): when the block indirect map is about to change.
+ *     ext4_release_file(): last writer close the file
+ *     ext4_clear_inode(): last iput(), when nobody link to this file.
+ *     ext4_truncate(): when the block indirect map is about to change.
  *
  */
-void ext3_discard_reservation(struct inode *inode)
+void ext4_discard_reservation(struct inode *inode)
 {
-       struct ext3_inode_info *ei = EXT3_I(inode);
-       struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
-       struct ext3_reserve_window_node *rsv;
-       spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock;
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
+       struct ext4_reserve_window_node *rsv;
+       spinlock_t *rsv_lock = &EXT4_SB(inode->i_sb)->s_rsv_window_lock;

        if (!block_i)
                return;
@@ -404,62 +404,62 @@ void ext3_discard_reservation(struct inode *inode)
 }

 /**
- * ext3_free_blocks_sb() -- Free given blocks and update quota
+ * ext4_free_blocks_sb() -- Free given blocks and update quota
  * @handle: handle to this transaction
  * @sb: super block
  * @block: start physcial block to free
  * @count: number of blocks to free
  * @pdquot_freed_blocks: pointer to quota
  */
-void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
-                        ext3_fsblk_t block, unsigned long count,
+void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
+                        ext4_fsblk_t block, unsigned long count,
                         unsigned long *pdquot_freed_blocks)
 {
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *gd_bh;
        unsigned long block_group;
-       ext3_grpblk_t bit;
+       ext4_grpblk_t bit;
        unsigned long i;
        unsigned long overflow;
-       struct ext3_group_desc * desc;
-       struct ext3_super_block * es;
-       struct ext3_sb_info *sbi;
+       struct ext4_group_desc * desc;
+       struct ext4_super_block * es;
+       struct ext4_sb_info *sbi;
        int err = 0, ret;
-       ext3_grpblk_t group_freed;
+       ext4_grpblk_t group_freed;

        *pdquot_freed_blocks = 0;
-       sbi = EXT3_SB(sb);
+       sbi = EXT4_SB(sb);
        es = sbi->s_es;
        if (block < le32_to_cpu(es->s_first_data_block) ||
            block + count < block ||
            block + count > le32_to_cpu(es->s_blocks_count)) {
-               ext3_error (sb, "ext3_free_blocks",
+               ext4_error (sb, "ext4_free_blocks",
                            "Freeing blocks not in datazone - "
                            "block = "E3FSBLK", count = %lu", block, count);
                goto error_return;
        }

-       ext3_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);
+       ext4_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);

 do_more:
        overflow = 0;
        block_group = (block - le32_to_cpu(es->s_first_data_block)) /
-                     EXT3_BLOCKS_PER_GROUP(sb);
+                     EXT4_BLOCKS_PER_GROUP(sb);
        bit = (block - le32_to_cpu(es->s_first_data_block)) %
-             EXT3_BLOCKS_PER_GROUP(sb);
+             EXT4_BLOCKS_PER_GROUP(sb);
        /*
         * Check to see if we are freeing blocks across a group
         * boundary.
         */
-       if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
-               overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
+       if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
+               overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
                count -= overflow;
        }
        brelse(bitmap_bh);
        bitmap_bh = read_block_bitmap(sb, block_group);
        if (!bitmap_bh)
                goto error_return;
-       desc = ext3_get_group_desc (sb, block_group, &gd_bh);
+       desc = ext4_get_group_desc (sb, block_group, &gd_bh);
        if (!desc)
                goto error_return;

@@ -469,7 +469,7 @@ do_more:
                      sbi->s_itb_per_group) ||
            in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
                      sbi->s_itb_per_group))
-               ext3_error (sb, "ext3_free_blocks",
+               ext4_error (sb, "ext4_free_blocks",
                            "Freeing blocks in system zones - "
                            "Block = "E3FSBLK", count = %lu",
                            block, count);
@@ -480,7 +480,7 @@ do_more:
         */
        /* @@@ check errors */
        BUFFER_TRACE(bitmap_bh, "getting undo access");
-       err = ext3_journal_get_undo_access(handle, bitmap_bh);
+       err = ext4_journal_get_undo_access(handle, bitmap_bh);
        if (err)
                goto error_return;

@@ -490,7 +490,7 @@ do_more:
         * using it
         */
        BUFFER_TRACE(gd_bh, "get_write_access");
-       err = ext3_journal_get_write_access(handle, gd_bh);
+       err = ext4_journal_get_write_access(handle, gd_bh);
        if (err)
                goto error_return;

@@ -542,7 +542,7 @@ do_more:
                BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
                J_ASSERT_BH(bitmap_bh,
                                bh2jh(bitmap_bh)->b_committed_data != NULL);
-               ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
+               ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
                                bh2jh(bitmap_bh)->b_committed_data);

                /*
@@ -551,10 +551,10 @@ do_more:
                 * the allocator uses.
                 */
                BUFFER_TRACE(bitmap_bh, "clear bit");
-               if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
+               if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
                                bit + i, bitmap_bh->b_data)) {
                        jbd_unlock_bh_state(bitmap_bh);
-                       ext3_error(sb, __FUNCTION__,
+                       ext4_error(sb, __FUNCTION__,
                                "bit already cleared for block "E3FSBLK,
                                block + i);
                        jbd_lock_bh_state(bitmap_bh);
@@ -574,11 +574,11 @@ do_more:

        /* We dirtied the bitmap block */
        BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
-       err = ext3_journal_dirty_metadata(handle, bitmap_bh);
+       err = ext4_journal_dirty_metadata(handle, bitmap_bh);

        /* And the group descriptor block */
        BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
-       ret = ext3_journal_dirty_metadata(handle, gd_bh);
+       ret = ext4_journal_dirty_metadata(handle, gd_bh);
        if (!err) err = ret;
        *pdquot_freed_blocks += group_freed;

@@ -590,40 +590,40 @@ do_more:
        sb->s_dirt = 1;
 error_return:
        brelse(bitmap_bh);
-       ext3_std_error(sb, err);
+       ext4_std_error(sb, err);
        return;
 }

 /**
- * ext3_free_blocks() -- Free given blocks and update quota
+ * ext4_free_blocks() -- Free given blocks and update quota
  * @handle: handle for this transaction
  * @inode: inode
  * @block: start physical block to free
  * @count: number of blocks to count
  */
-void ext3_free_blocks(handle_t *handle, struct inode *inode,
-                       ext3_fsblk_t block, unsigned long count)
+void ext4_free_blocks(handle_t *handle, struct inode *inode,
+                       ext4_fsblk_t block, unsigned long count)
 {
        struct super_block * sb;
        unsigned long dquot_freed_blocks;

        sb = inode->i_sb;
        if (!sb) {
-               printk ("ext3_free_blocks: nonexistent device");
+               printk ("ext4_free_blocks: nonexistent device");
                return;
        }
-       ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
+       ext4_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
        if (dquot_freed_blocks)
                DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
        return;
 }

 /**
- * ext3_test_allocatable()
+ * ext4_test_allocatable()
  * @nr: given allocation block group
  * @bh: bufferhead contains the bitmap of the given block group
  *
- * For ext3 allocations, we must not reuse any blocks which are
+ * For ext4 allocations, we must not reuse any blocks which are
  * allocated in the bitmap buffer's "last committed data" copy. This
  * prevents deletes from freeing up the page for reuse until we have
  * committed the delete transaction.
@@ -638,19 +638,19 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
  * data-writes at some point, and disable it for metadata allocations or
  * sync-data inodes.
  */
-static int ext3_test_allocatable(ext3_grpblk_t nr, struct buffer_head *bh)
+static int ext4_test_allocatable(ext4_grpblk_t nr, struct buffer_head *bh)
 {
        int ret;
        struct journal_head *jh = bh2jh(bh);

-       if (ext3_test_bit(nr, bh->b_data))
+       if (ext4_test_bit(nr, bh->b_data))
                return 0;

        jbd_lock_bh_state(bh);
        if (!jh->b_committed_data)
                ret = 1;
        else
-               ret = !ext3_test_bit(nr, jh->b_committed_data);
+               ret = !ext4_test_bit(nr, jh->b_committed_data);
        jbd_unlock_bh_state(bh);
        return ret;
 }
@@ -665,22 +665,22 @@ static int ext3_test_allocatable(ext3_grpblk_t nr, struct buffer_head *bh)
  * bitmap on disk and the last-committed copy in journal, until we find a
  * bit free in both bitmaps.
  */
-static ext3_grpblk_t
-bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
-                       ext3_grpblk_t maxblocks)
+static ext4_grpblk_t
+bitmap_search_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
+                       ext4_grpblk_t maxblocks)
 {
-       ext3_grpblk_t next;
+       ext4_grpblk_t next;
        struct journal_head *jh = bh2jh(bh);

        while (start < maxblocks) {
-               next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start);
+               next = ext4_find_next_zero_bit(bh->b_data, maxblocks, start);
                if (next >= maxblocks)
                        return -1;
-               if (ext3_test_allocatable(next, bh))
+               if (ext4_test_allocatable(next, bh))
                        return next;
                jbd_lock_bh_state(bh);
                if (jh->b_committed_data)
-                       start = ext3_find_next_zero_bit(jh->b_committed_data,
+                       start = ext4_find_next_zero_bit(jh->b_committed_data,
                                                        maxblocks, next);
                jbd_unlock_bh_state(bh);
        }
@@ -700,11 +700,11 @@ bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
  * the initial goal; then for a free byte somewhere in the bitmap; then
  * for any free bit in the bitmap.
  */
-static ext3_grpblk_t
-find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
-                       ext3_grpblk_t maxblocks)
+static ext4_grpblk_t
+find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
+                       ext4_grpblk_t maxblocks)
 {
-       ext3_grpblk_t here, next;
+       ext4_grpblk_t here, next;
        char *p, *r;

        if (start > 0) {
@@ -713,16 +713,16 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
                 * block within the next XX blocks.
                 *
                 * end_goal is more or less random, but it has to be
-                * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the
+                * less than EXT4_BLOCKS_PER_GROUP. Aligning up to the
                 * next 64-bit boundary is simple..
                 */
-               ext3_grpblk_t end_goal = (start + 63) & ~63;
+               ext4_grpblk_t end_goal = (start + 63) & ~63;
                if (end_goal > maxblocks)
                        end_goal = maxblocks;
-               here = ext3_find_next_zero_bit(bh->b_data, end_goal, start);
-               if (here < end_goal && ext3_test_allocatable(here, bh))
+               here = ext4_find_next_zero_bit(bh->b_data, end_goal, start);
+               if (here < end_goal && ext4_test_allocatable(here, bh))
                        return here;
-               ext3_debug("Bit not found near goal\n");
+               ext4_debug("Bit not found near goal\n");
        }

        here = start;
@@ -733,7 +733,7 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
        r = memscan(p, 0, (maxblocks - here + 7) >> 3);
        next = (r - ((char *)bh->b_data)) << 3;

-       if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
+       if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh))
                return next;

        /*
@@ -757,16 +757,16 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
  * zero (failure).
  */
 static inline int
-claim_block(spinlock_t *lock, ext3_grpblk_t block, struct buffer_head *bh)
+claim_block(spinlock_t *lock, ext4_grpblk_t block, struct buffer_head *bh)
 {
        struct journal_head *jh = bh2jh(bh);
        int ret;

-       if (ext3_set_bit_atomic(lock, block, bh->b_data))
+       if (ext4_set_bit_atomic(lock, block, bh->b_data))
                return 0;
        jbd_lock_bh_state(bh);
-       if (jh->b_committed_data && ext3_test_bit(block,jh->b_committed_data)) {
-               ext3_clear_bit_atomic(lock, block, bh->b_data);
+       if (jh->b_committed_data && ext4_test_bit(block,jh->b_committed_data)) {
+               ext4_clear_bit_atomic(lock, block, bh->b_data);
                ret = 0;
        } else {
                ret = 1;
@@ -776,7 +776,7 @@ claim_block(spinlock_t *lock, ext3_grpblk_t block, struct buffer_head *bh)
 }

 /**
- * ext3_try_to_allocate()
+ * ext4_try_to_allocate()
  * @sb: superblock
  * @handle: handle to this transaction
  * @group: given allocation block group
@@ -797,29 +797,29 @@ claim_block(spinlock_t *lock, ext3_grpblk_t block, struct buffer_head *bh)
  *
  * If we failed to allocate the desired block then we may end up crossing to a
  * new bitmap. In that case we must release write access to the old one via
- * ext3_journal_release_buffer(), else we'll run out of credits.
+ * ext4_journal_release_buffer(), else we'll run out of credits.
  */
-static ext3_grpblk_t
-ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
-                       struct buffer_head *bitmap_bh, ext3_grpblk_t grp_goal,
-                       unsigned long *count, struct ext3_reserve_window *my_rsv)
+static ext4_grpblk_t
+ext4_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
+                       struct buffer_head *bitmap_bh, ext4_grpblk_t grp_goal,
+                       unsigned long *count, struct ext4_reserve_window *my_rsv)
 {
-       ext3_fsblk_t group_first_block;
-       ext3_grpblk_t start, end;
+       ext4_fsblk_t group_first_block;
+       ext4_grpblk_t start, end;
        unsigned long num = 0;

        /* we do allocation within the reservation window if we have a window */
        if (my_rsv) {
-               group_first_block = ext3_group_first_block_no(sb, group);
+               group_first_block = ext4_group_first_block_no(sb, group);
                if (my_rsv->_rsv_start >= group_first_block)
                        start = my_rsv->_rsv_start - group_first_block;
                else
                        /* reservation window cross group boundary */
                        start = 0;
                end = my_rsv->_rsv_end - group_first_block + 1;
-               if (end > EXT3_BLOCKS_PER_GROUP(sb))
+               if (end > EXT4_BLOCKS_PER_GROUP(sb))
                        /* reservation window crosses group boundary */
-                       end = EXT3_BLOCKS_PER_GROUP(sb);
+                       end = EXT4_BLOCKS_PER_GROUP(sb);
                if ((start <= grp_goal) && (grp_goal < end))
                        start = grp_goal;
                else
@@ -829,13 +829,13 @@ ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
                        start = grp_goal;
                else
                        start = 0;
-               end = EXT3_BLOCKS_PER_GROUP(sb);
+               end = EXT4_BLOCKS_PER_GROUP(sb);
        }

-       BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb));
+       BUG_ON(start > EXT4_BLOCKS_PER_GROUP(sb));

 repeat:
-       if (grp_goal < 0 || !ext3_test_allocatable(grp_goal, bitmap_bh)) {
+       if (grp_goal < 0 || !ext4_test_allocatable(grp_goal, bitmap_bh)) {
                grp_goal = find_next_usable_block(start, bitmap_bh, end);
                if (grp_goal < 0)
                        goto fail_access;
@@ -843,7 +843,7 @@ repeat:
                        int i;

                        for (i = 0; i < 7 && grp_goal > start &&
-                                       ext3_test_allocatable(grp_goal - 1,
+                                       ext4_test_allocatable(grp_goal - 1,
                                                                bitmap_bh);
                             i++, grp_goal--)
                                ;
@@ -851,7 +851,7 @@ repeat:
        }
        start = grp_goal;

-       if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group),
+       if (!claim_block(sb_bgl_lock(EXT4_SB(sb), group),
                grp_goal, bitmap_bh)) {
                /*
                 * The block was allocated by another thread, or it was
@@ -866,8 +866,8 @@ repeat:
        num++;
        grp_goal++;
        while (num < *count && grp_goal < end
-               && ext3_test_allocatable(grp_goal, bitmap_bh)
-               && claim_block(sb_bgl_lock(EXT3_SB(sb), group),
+               && ext4_test_allocatable(grp_goal, bitmap_bh)
+               && claim_block(sb_bgl_lock(EXT4_SB(sb), group),
                                grp_goal, bitmap_bh)) {
                num++;
                grp_goal++;
@@ -913,15 +913,15 @@ fail_access:
  *
  */
 static int find_next_reservable_window(
-                               struct ext3_reserve_window_node *search_head,
-                               struct ext3_reserve_window_node *my_rsv,
+                               struct ext4_reserve_window_node *search_head,
+                               struct ext4_reserve_window_node *my_rsv,
                                struct super_block * sb,
-                               ext3_fsblk_t start_block,
-                               ext3_fsblk_t last_block)
+                               ext4_fsblk_t start_block,
+                               ext4_fsblk_t last_block)
 {
        struct rb_node *next;
-       struct ext3_reserve_window_node *rsv, *prev;
-       ext3_fsblk_t cur;
+       struct ext4_reserve_window_node *rsv, *prev;
+       ext4_fsblk_t cur;
        int size = my_rsv->rsv_goal_size;

        /* TODO: make the start of the reservation window byte-aligned */
@@ -949,7 +949,7 @@ static int find_next_reservable_window(

                prev = rsv;
                next = rb_next(&rsv->rsv_node);
-               rsv = list_entry(next,struct ext3_reserve_window_node,rsv_node);
+               rsv = list_entry(next,struct ext4_reserve_window_node,rsv_node);

                /*
                 * Reached the last reservation, we can just append to the
@@ -992,7 +992,7 @@ static int find_next_reservable_window(
        my_rsv->rsv_alloc_hit = 0;

        if (prev != my_rsv)
-               ext3_rsv_window_add(sb, my_rsv);
+               ext4_rsv_window_add(sb, my_rsv);

        return 0;
 }
@@ -1034,20 +1034,20 @@ static int find_next_reservable_window(
  *     @bitmap_bh: the block group block bitmap
  *
  */
-static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
-               ext3_grpblk_t grp_goal, struct super_block *sb,
+static int alloc_new_reservation(struct ext4_reserve_window_node *my_rsv,
+               ext4_grpblk_t grp_goal, struct super_block *sb,
                unsigned int group, struct buffer_head *bitmap_bh)
 {
-       struct ext3_reserve_window_node *search_head;
-       ext3_fsblk_t group_first_block, group_end_block, start_block;
-       ext3_grpblk_t first_free_block;
-       struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root;
+       struct ext4_reserve_window_node *search_head;
+       ext4_fsblk_t group_first_block, group_end_block, start_block;
+       ext4_grpblk_t first_free_block;
+       struct rb_root *fs_rsv_root = &EXT4_SB(sb)->s_rsv_window_root;
        unsigned long size;
        int ret;
-       spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
+       spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;

-       group_first_block = ext3_group_first_block_no(sb, group);
-       group_end_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
+       group_first_block = ext4_group_first_block_no(sb, group);
+       group_end_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

        if (grp_goal < 0)
                start_block = group_first_block;
@@ -1085,8 +1085,8 @@ static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
                 * otherwise we keep the same size window
                 */
                size = size * 2;
-               if (size > EXT3_MAX_RESERVE_BLOCKS)
-                       size = EXT3_MAX_RESERVE_BLOCKS;
+               if (size > EXT4_MAX_RESERVE_BLOCKS)
+                       size = EXT4_MAX_RESERVE_BLOCKS;
                my_rsv->rsv_goal_size= size;
        }
 }
@@ -1170,20 +1170,20 @@ retry:
  * Attempt to expand the reservation window large enough to have
  * required number of free blocks
  *
- * Since ext3_try_to_allocate() will always allocate blocks within
+ * Since ext4_try_to_allocate() will always allocate blocks within
  * the reservation window range, if the window size is too small,
  * multiple blocks allocation has to stop at the end of the reservation
  * window. To make this more efficient, given the total number of
  * blocks needed and the current size of the window, we try to
  * expand the reservation window size if necessary on a best-effort
- * basis before ext3_new_blocks() tries to allocate blocks,
+ * basis before ext4_new_blocks() tries to allocate blocks,
  */
-static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
+static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv,
                        struct super_block *sb, int size)
 {
-       struct ext3_reserve_window_node *next_rsv;
+       struct ext4_reserve_window_node *next_rsv;
        struct rb_node *next;
-       spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
+       spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;

        if (!spin_trylock(rsv_lock))
                return;
@@ -1193,7 +1193,7 @@ static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
        if (!next)
                my_rsv->rsv_end += size;
        else {
-               next_rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node);
+               next_rsv = list_entry(next, struct ext4_reserve_window_node, rsv_node);

                if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
                        my_rsv->rsv_end += size;
@@ -1204,7 +1204,7 @@ static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
 }

 /**
- * ext3_try_to_allocate_with_rsv()
+ * ext4_try_to_allocate_with_rsv()
  * @sb: superblock
  * @handle: handle to this transaction
  * @group: given allocation block group
@@ -1232,15 +1232,15 @@ static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
  * We use a red-black tree for the per-filesystem reservation list.
  *
  */
-static ext3_grpblk_t
-ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
+static ext4_grpblk_t
+ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
                        unsigned int group, struct buffer_head *bitmap_bh,
-                       ext3_grpblk_t grp_goal,
-                       struct ext3_reserve_window_node * my_rsv,
+                       ext4_grpblk_t grp_goal,
+                       struct ext4_reserve_window_node * my_rsv,
                        unsigned long *count, int *errp)
 {
-       ext3_fsblk_t group_first_block, group_last_block;
-       ext3_grpblk_t ret = 0;
+       ext4_fsblk_t group_first_block, group_last_block;
+       ext4_grpblk_t ret = 0;
        int fatal;
        unsigned long num = *count;

@@ -1252,7 +1252,7 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
         * if the buffer is in BJ_Forget state in the committing transaction.
         */
        BUFFER_TRACE(bitmap_bh, "get undo access for new block");
-       fatal = ext3_journal_get_undo_access(handle, bitmap_bh);
+       fatal = ext4_journal_get_undo_access(handle, bitmap_bh);
        if (fatal) {
                *errp = fatal;
                return -1;
@@ -1265,18 +1265,18 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
         * or last attempt to allocate a block with reservation turned on failed
         */
        if (my_rsv == NULL ) {
-               ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
+               ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
                                                grp_goal, count, NULL);
                goto out;
        }
        /*
         * grp_goal is a group relative block number (if there is a goal)
-        * 0 < grp_goal < EXT3_BLOCKS_PER_GROUP(sb)
+        * 0 < grp_goal < EXT4_BLOCKS_PER_GROUP(sb)
         * first block is a filesystem wide block number
         * first block is the block number of the first block in this group
         */
-       group_first_block = ext3_group_first_block_no(sb, group);
-       group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
+       group_first_block = ext4_group_first_block_no(sb, group);
+       group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

        /*
         * Basically we will allocate a new block from inode's reservation
@@ -1314,10 +1314,10 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, | |||
1314 | 1314 | ||
1315 | if ((my_rsv->rsv_start > group_last_block) || | 1315 | if ((my_rsv->rsv_start > group_last_block) || |
1316 | (my_rsv->rsv_end < group_first_block)) { | 1316 | (my_rsv->rsv_end < group_first_block)) { |
1317 | rsv_window_dump(&EXT3_SB(sb)->s_rsv_window_root, 1); | 1317 | rsv_window_dump(&EXT4_SB(sb)->s_rsv_window_root, 1); |
1318 | BUG(); | 1318 | BUG(); |
1319 | } | 1319 | } |
1320 | ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, | 1320 | ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh, |
1321 | grp_goal, &num, &my_rsv->rsv_window); | 1321 | grp_goal, &num, &my_rsv->rsv_window); |
1322 | if (ret >= 0) { | 1322 | if (ret >= 0) { |
1323 | my_rsv->rsv_alloc_hit += num; | 1323 | my_rsv->rsv_alloc_hit += num; |
@@ -1330,7 +1330,7 @@ out: | |||
1330 | if (ret >= 0) { | 1330 | if (ret >= 0) { |
1331 | BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for " | 1331 | BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for " |
1332 | "bitmap block"); | 1332 | "bitmap block"); |
1333 | fatal = ext3_journal_dirty_metadata(handle, bitmap_bh); | 1333 | fatal = ext4_journal_dirty_metadata(handle, bitmap_bh); |
1334 | if (fatal) { | 1334 | if (fatal) { |
1335 | *errp = fatal; | 1335 | *errp = fatal; |
1336 | return -1; | 1336 | return -1; |
@@ -1339,19 +1339,19 @@ out: | |||
1339 | } | 1339 | } |
1340 | 1340 | ||
1341 | BUFFER_TRACE(bitmap_bh, "journal_release_buffer"); | 1341 | BUFFER_TRACE(bitmap_bh, "journal_release_buffer"); |
1342 | ext3_journal_release_buffer(handle, bitmap_bh); | 1342 | ext4_journal_release_buffer(handle, bitmap_bh); |
1343 | return ret; | 1343 | return ret; |
1344 | } | 1344 | } |
1345 | 1345 | ||
1346 | /** | 1346 | /** |
1347 | * ext3_has_free_blocks() | 1347 | * ext4_has_free_blocks() |
1348 | * @sbi: in-core super block structure. | 1348 | * @sbi: in-core super block structure. |
1349 | * | 1349 | * |
1350 | * Check if filesystem has at least 1 free block available for allocation. | 1350 | * Check if filesystem has at least 1 free block available for allocation. |
1351 | */ | 1351 | */ |
1352 | static int ext3_has_free_blocks(struct ext3_sb_info *sbi) | 1352 | static int ext4_has_free_blocks(struct ext4_sb_info *sbi) |
1353 | { | 1353 | { |
1354 | ext3_fsblk_t free_blocks, root_blocks; | 1354 | ext4_fsblk_t free_blocks, root_blocks; |
1355 | 1355 | ||
1356 | free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); | 1356 | free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); |
1357 | root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); | 1357 | root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); |
@@ -1364,63 +1364,63 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi) | |||
1364 | } | 1364 | } |
1365 | 1365 | ||
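The hunk elides the body of the check; a minimal sketch of the idea, assuming only that reserved blocks must not be handed out to ordinary allocations (this is not the elided kernel code verbatim):

	if (free_blocks <= root_blocks)
		return 0;	/* only the reserved blocks remain */
	return 1;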
1366 | /** | 1366 | /** |
1367 | * ext3_should_retry_alloc() | 1367 | * ext4_should_retry_alloc() |
1368 | * @sb: super block | 1368 | * @sb: super block |
1369 | * @retries: number of attempts that have been made | 1369 | * @retries: number of attempts that have been made |
1370 | * | 1370 | * |
1371 | * ext3_should_retry_alloc() is called when ENOSPC is returned, and if | 1371 | * ext4_should_retry_alloc() is called when ENOSPC is returned, and if |
1372 | * it is profitable to retry the operation, this function will wait | 1372 | * it is profitable to retry the operation, this function will wait |
1373 | * for the current or committing transaction to complete, and then | 1373 | * for the current or committing transaction to complete, and then |
1374 | * return TRUE. | 1374 | * return TRUE. |
1375 | * | 1375 | * |
1376 | * If the total number of retries exceeds three, return FALSE. | 1376 | * If the total number of retries exceeds three, return FALSE. |
1377 | */ | 1377 | */ |
1378 | int ext3_should_retry_alloc(struct super_block *sb, int *retries) | 1378 | int ext4_should_retry_alloc(struct super_block *sb, int *retries) |
1379 | { | 1379 | { |
1380 | if (!ext3_has_free_blocks(EXT3_SB(sb)) || (*retries)++ > 3) | 1380 | if (!ext4_has_free_blocks(EXT4_SB(sb)) || (*retries)++ > 3) |
1381 | return 0; | 1381 | return 0; |
1382 | 1382 | ||
1383 | jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id); | 1383 | jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id); |
1384 | 1384 | ||
1385 | return journal_force_commit_nested(EXT3_SB(sb)->s_journal); | 1385 | return journal_force_commit_nested(EXT4_SB(sb)->s_journal); |
1386 | } | 1386 | } |
1387 | 1387 | ||
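A hedged sketch of how callers are expected to use ext4_should_retry_alloc(); some_alloc_operation() is a hypothetical placeholder, not a function in this patch:

	int retries = 0;
	int err;
retry:
	err = some_alloc_operation(inode);	/* hypothetical helper */
	if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;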
1388 | /** | 1388 | /** |
1389 | * ext3_new_blocks() -- core block(s) allocation function | 1389 | * ext4_new_blocks() -- core block(s) allocation function |
1390 | * @handle: handle to this transaction | 1390 | * @handle: handle to this transaction |
1391 | * @inode: file inode | 1391 | * @inode: file inode |
1392 | * @goal: given target block (filesystem-wide) | 1392 | * @goal: given target block (filesystem-wide) |
1393 | * @count: target number of blocks to allocate | 1393 | * @count: target number of blocks to allocate |
1394 | * @errp: error code | 1394 | * @errp: error code |
1395 | * | 1395 | * |
1396 | * ext3_new_blocks uses a goal block to assist allocation. It tries to | 1396 | * ext4_new_blocks uses a goal block to assist allocation. It tries to |
1397 | * allocate block(s) from the block group that contains the goal block first. If that | 1397 | * allocate block(s) from the block group that contains the goal block first. If that |
1398 | * fails, it will try to allocate block(s) from other block groups without | 1398 | * fails, it will try to allocate block(s) from other block groups without |
1399 | * any specific goal block. | 1399 | * any specific goal block. |
1400 | * | 1400 | * |
1401 | */ | 1401 | */ |
1402 | ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, | 1402 | ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode, |
1403 | ext3_fsblk_t goal, unsigned long *count, int *errp) | 1403 | ext4_fsblk_t goal, unsigned long *count, int *errp) |
1404 | { | 1404 | { |
1405 | struct buffer_head *bitmap_bh = NULL; | 1405 | struct buffer_head *bitmap_bh = NULL; |
1406 | struct buffer_head *gdp_bh; | 1406 | struct buffer_head *gdp_bh; |
1407 | int group_no; | 1407 | int group_no; |
1408 | int goal_group; | 1408 | int goal_group; |
1409 | ext3_grpblk_t grp_target_blk; /* blockgroup-relative goal block */ | 1409 | ext4_grpblk_t grp_target_blk; /* blockgroup-relative goal block */ |
1410 | ext3_grpblk_t grp_alloc_blk; /* blockgroup-relative allocated block*/ | 1410 | ext4_grpblk_t grp_alloc_blk; /* blockgroup-relative allocated block*/ |
1411 | ext3_fsblk_t ret_block; /* filesystem-wide allocated block */ | 1411 | ext4_fsblk_t ret_block; /* filesystem-wide allocated block */ |
1412 | int bgi; /* blockgroup iteration index */ | 1412 | int bgi; /* blockgroup iteration index */ |
1413 | int fatal = 0, err; | 1413 | int fatal = 0, err; |
1414 | int performed_allocation = 0; | 1414 | int performed_allocation = 0; |
1415 | ext3_grpblk_t free_blocks; /* number of free blocks in a group */ | 1415 | ext4_grpblk_t free_blocks; /* number of free blocks in a group */ |
1416 | struct super_block *sb; | 1416 | struct super_block *sb; |
1417 | struct ext3_group_desc *gdp; | 1417 | struct ext4_group_desc *gdp; |
1418 | struct ext3_super_block *es; | 1418 | struct ext4_super_block *es; |
1419 | struct ext3_sb_info *sbi; | 1419 | struct ext4_sb_info *sbi; |
1420 | struct ext3_reserve_window_node *my_rsv = NULL; | 1420 | struct ext4_reserve_window_node *my_rsv = NULL; |
1421 | struct ext3_block_alloc_info *block_i; | 1421 | struct ext4_block_alloc_info *block_i; |
1422 | unsigned short windowsz = 0; | 1422 | unsigned short windowsz = 0; |
1423 | #ifdef EXT3FS_DEBUG | 1423 | #ifdef EXT4FS_DEBUG |
1424 | static int goal_hits, goal_attempts; | 1424 | static int goal_hits, goal_attempts; |
1425 | #endif | 1425 | #endif |
1426 | unsigned long ngroups; | 1426 | unsigned long ngroups; |
@@ -1429,7 +1429,7 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, | |||
1429 | *errp = -ENOSPC; | 1429 | *errp = -ENOSPC; |
1430 | sb = inode->i_sb; | 1430 | sb = inode->i_sb; |
1431 | if (!sb) { | 1431 | if (!sb) { |
1432 | printk("ext3_new_block: nonexistent device"); | 1432 | printk("ext4_new_block: nonexistent device"); |
1433 | return 0; | 1433 | return 0; |
1434 | } | 1434 | } |
1435 | 1435 | ||
@@ -1441,22 +1441,22 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, | |||
1441 | return 0; | 1441 | return 0; |
1442 | } | 1442 | } |
1443 | 1443 | ||
1444 | sbi = EXT3_SB(sb); | 1444 | sbi = EXT4_SB(sb); |
1445 | es = EXT3_SB(sb)->s_es; | 1445 | es = EXT4_SB(sb)->s_es; |
1446 | ext3_debug("goal=%lu.\n", goal); | 1446 | ext4_debug("goal=%lu.\n", goal); |
1447 | /* | 1447 | /* |
1448 | * Allocate a block from reservation only when | 1448 | * Allocate a block from reservation only when |
1449 | * filesystem is mounted with reservation (the default, -o reservation), and | 1449 | * filesystem is mounted with reservation (the default, -o reservation), and |
1450 | * it's a regular file, and | 1450 | * it's a regular file, and |
1451 | * the desired window size is greater than 0 (one could use ioctl | 1451 | * the desired window size is greater than 0 (one could use ioctl |
1452 | * command EXT3_IOC_SETRSVSZ to set the window size to 0 to turn off | 1452 | * command EXT4_IOC_SETRSVSZ to set the window size to 0 to turn off |
1453 | * reservation on that particular file) | 1453 | * reservation on that particular file) |
1454 | */ | 1454 | */ |
1455 | block_i = EXT3_I(inode)->i_block_alloc_info; | 1455 | block_i = EXT4_I(inode)->i_block_alloc_info; |
1456 | if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0)) | 1456 | if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0)) |
1457 | my_rsv = &block_i->rsv_window_node; | 1457 | my_rsv = &block_i->rsv_window_node; |
1458 | 1458 | ||
1459 | if (!ext3_has_free_blocks(sbi)) { | 1459 | if (!ext4_has_free_blocks(sbi)) { |
1460 | *errp = -ENOSPC; | 1460 | *errp = -ENOSPC; |
1461 | goto out; | 1461 | goto out; |
1462 | } | 1462 | } |
@@ -1468,10 +1468,10 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, | |||
1468 | goal >= le32_to_cpu(es->s_blocks_count)) | 1468 | goal >= le32_to_cpu(es->s_blocks_count)) |
1469 | goal = le32_to_cpu(es->s_first_data_block); | 1469 | goal = le32_to_cpu(es->s_first_data_block); |
1470 | group_no = (goal - le32_to_cpu(es->s_first_data_block)) / | 1470 | group_no = (goal - le32_to_cpu(es->s_first_data_block)) / |
1471 | EXT3_BLOCKS_PER_GROUP(sb); | 1471 | EXT4_BLOCKS_PER_GROUP(sb); |
1472 | goal_group = group_no; | 1472 | goal_group = group_no; |
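A worked example of the goal-to-group mapping above, with the same assumed geometry as earlier (32768 blocks per group, s_first_data_block == 0):

	/* goal = 100000  =>  group_no = 100000 / 32768 = 3
	 * and, in the hunk below, grp_target_blk = 100000 % 32768 = 1696 */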
1473 | retry_alloc: | 1473 | retry_alloc: |
1474 | gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); | 1474 | gdp = ext4_get_group_desc(sb, group_no, &gdp_bh); |
1475 | if (!gdp) | 1475 | if (!gdp) |
1476 | goto io_error; | 1476 | goto io_error; |
1477 | 1477 | ||
@@ -1486,11 +1486,11 @@ retry_alloc: | |||
1486 | 1486 | ||
1487 | if (free_blocks > 0) { | 1487 | if (free_blocks > 0) { |
1488 | grp_target_blk = ((goal - le32_to_cpu(es->s_first_data_block)) % | 1488 | grp_target_blk = ((goal - le32_to_cpu(es->s_first_data_block)) % |
1489 | EXT3_BLOCKS_PER_GROUP(sb)); | 1489 | EXT4_BLOCKS_PER_GROUP(sb)); |
1490 | bitmap_bh = read_block_bitmap(sb, group_no); | 1490 | bitmap_bh = read_block_bitmap(sb, group_no); |
1491 | if (!bitmap_bh) | 1491 | if (!bitmap_bh) |
1492 | goto io_error; | 1492 | goto io_error; |
1493 | grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle, | 1493 | grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle, |
1494 | group_no, bitmap_bh, grp_target_blk, | 1494 | group_no, bitmap_bh, grp_target_blk, |
1495 | my_rsv, &num, &fatal); | 1495 | my_rsv, &num, &fatal); |
1496 | if (fatal) | 1496 | if (fatal) |
@@ -1499,7 +1499,7 @@ retry_alloc: | |||
1499 | goto allocated; | 1499 | goto allocated; |
1500 | } | 1500 | } |
1501 | 1501 | ||
1502 | ngroups = EXT3_SB(sb)->s_groups_count; | 1502 | ngroups = EXT4_SB(sb)->s_groups_count; |
1503 | smp_rmb(); | 1503 | smp_rmb(); |
1504 | 1504 | ||
1505 | /* | 1505 | /* |
@@ -1510,7 +1510,7 @@ retry_alloc: | |||
1510 | group_no++; | 1510 | group_no++; |
1511 | if (group_no >= ngroups) | 1511 | if (group_no >= ngroups) |
1512 | group_no = 0; | 1512 | group_no = 0; |
1513 | gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); | 1513 | gdp = ext4_get_group_desc(sb, group_no, &gdp_bh); |
1514 | if (!gdp) { | 1514 | if (!gdp) { |
1515 | *errp = -EIO; | 1515 | *errp = -EIO; |
1516 | goto out; | 1516 | goto out; |
@@ -1531,7 +1531,7 @@ retry_alloc: | |||
1531 | /* | 1531 | /* |
1532 | * try to allocate block(s) from this group, without a goal(-1). | 1532 | * try to allocate block(s) from this group, without a goal(-1). |
1533 | */ | 1533 | */ |
1534 | grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle, | 1534 | grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle, |
1535 | group_no, bitmap_bh, -1, my_rsv, | 1535 | group_no, bitmap_bh, -1, my_rsv, |
1536 | &num, &fatal); | 1536 | &num, &fatal); |
1537 | if (fatal) | 1537 | if (fatal) |
@@ -1557,23 +1557,23 @@ retry_alloc: | |||
1557 | 1557 | ||
1558 | allocated: | 1558 | allocated: |
1559 | 1559 | ||
1560 | ext3_debug("using block group %d(%d)\n", | 1560 | ext4_debug("using block group %d(%d)\n", |
1561 | group_no, gdp->bg_free_blocks_count); | 1561 | group_no, gdp->bg_free_blocks_count); |
1562 | 1562 | ||
1563 | BUFFER_TRACE(gdp_bh, "get_write_access"); | 1563 | BUFFER_TRACE(gdp_bh, "get_write_access"); |
1564 | fatal = ext3_journal_get_write_access(handle, gdp_bh); | 1564 | fatal = ext4_journal_get_write_access(handle, gdp_bh); |
1565 | if (fatal) | 1565 | if (fatal) |
1566 | goto out; | 1566 | goto out; |
1567 | 1567 | ||
1568 | ret_block = grp_alloc_blk + ext3_group_first_block_no(sb, group_no); | 1568 | ret_block = grp_alloc_blk + ext4_group_first_block_no(sb, group_no); |
1569 | 1569 | ||
1570 | if (in_range(le32_to_cpu(gdp->bg_block_bitmap), ret_block, num) || | 1570 | if (in_range(le32_to_cpu(gdp->bg_block_bitmap), ret_block, num) || |
1571 | in_range(le32_to_cpu(gdp->bg_inode_bitmap), ret_block, num) || | 1571 | in_range(le32_to_cpu(gdp->bg_inode_bitmap), ret_block, num) || |
1572 | in_range(ret_block, le32_to_cpu(gdp->bg_inode_table), | 1572 | in_range(ret_block, le32_to_cpu(gdp->bg_inode_table), |
1573 | EXT3_SB(sb)->s_itb_per_group) || | 1573 | EXT4_SB(sb)->s_itb_per_group) || |
1574 | in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table), | 1574 | in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table), |
1575 | EXT3_SB(sb)->s_itb_per_group)) | 1575 | EXT4_SB(sb)->s_itb_per_group)) |
1576 | ext3_error(sb, "ext3_new_block", | 1576 | ext4_error(sb, "ext4_new_block", |
1577 | "Allocating block in system zone - " | 1577 | "Allocating block in system zone - " |
1578 | "blocks from "E3FSBLK", length %lu", | 1578 | "blocks from "E3FSBLK", length %lu", |
1579 | ret_block, num); | 1579 | ret_block, num); |
@@ -1598,20 +1598,20 @@ allocated: | |||
1598 | int i; | 1598 | int i; |
1599 | 1599 | ||
1600 | for (i = 0; i < num; i++) { | 1600 | for (i = 0; i < num; i++) { |
1601 | if (ext3_test_bit(grp_alloc_blk+i, | 1601 | if (ext4_test_bit(grp_alloc_blk+i, |
1602 | bh2jh(bitmap_bh)->b_committed_data)) { | 1602 | bh2jh(bitmap_bh)->b_committed_data)) { |
1603 | printk("%s: block was unexpectedly set in " | 1603 | printk("%s: block was unexpectedly set in " |
1604 | "b_committed_data\n", __FUNCTION__); | 1604 | "b_committed_data\n", __FUNCTION__); |
1605 | } | 1605 | } |
1606 | } | 1606 | } |
1607 | } | 1607 | } |
1608 | ext3_debug("found bit %d\n", grp_alloc_blk); | 1608 | ext4_debug("found bit %d\n", grp_alloc_blk); |
1609 | spin_unlock(sb_bgl_lock(sbi, group_no)); | 1609 | spin_unlock(sb_bgl_lock(sbi, group_no)); |
1610 | jbd_unlock_bh_state(bitmap_bh); | 1610 | jbd_unlock_bh_state(bitmap_bh); |
1611 | #endif | 1611 | #endif |
1612 | 1612 | ||
1613 | if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) { | 1613 | if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) { |
1614 | ext3_error(sb, "ext3_new_block", | 1614 | ext4_error(sb, "ext4_new_block", |
1615 | "block("E3FSBLK") >= blocks count(%d) - " | 1615 | "block("E3FSBLK") >= blocks count(%d) - " |
1616 | "block_group = %d, es == %p ", ret_block, | 1616 | "block_group = %d, es == %p ", ret_block, |
1617 | le32_to_cpu(es->s_blocks_count), group_no, es); | 1617 | le32_to_cpu(es->s_blocks_count), group_no, es); |
@@ -1623,7 +1623,7 @@ allocated: | |||
1623 | * list of some description. We don't know in advance whether | 1623 | * list of some description. We don't know in advance whether |
1624 | * the caller wants to use it as metadata or data. | 1624 | * the caller wants to use it as metadata or data. |
1625 | */ | 1625 | */ |
1626 | ext3_debug("allocating block %lu. Goal hits %d of %d.\n", | 1626 | ext4_debug("allocating block %lu. Goal hits %d of %d.\n", |
1627 | ret_block, goal_hits, goal_attempts); | 1627 | ret_block, goal_hits, goal_attempts); |
1628 | 1628 | ||
1629 | spin_lock(sb_bgl_lock(sbi, group_no)); | 1629 | spin_lock(sb_bgl_lock(sbi, group_no)); |
@@ -1633,7 +1633,7 @@ allocated: | |||
1633 | percpu_counter_mod(&sbi->s_freeblocks_counter, -num); | 1633 | percpu_counter_mod(&sbi->s_freeblocks_counter, -num); |
1634 | 1634 | ||
1635 | BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor"); | 1635 | BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor"); |
1636 | err = ext3_journal_dirty_metadata(handle, gdp_bh); | 1636 | err = ext4_journal_dirty_metadata(handle, gdp_bh); |
1637 | if (!fatal) | 1637 | if (!fatal) |
1638 | fatal = err; | 1638 | fatal = err; |
1639 | 1639 | ||
@@ -1652,7 +1652,7 @@ io_error: | |||
1652 | out: | 1652 | out: |
1653 | if (fatal) { | 1653 | if (fatal) { |
1654 | *errp = fatal; | 1654 | *errp = fatal; |
1655 | ext3_std_error(sb, fatal); | 1655 | ext4_std_error(sb, fatal); |
1656 | } | 1656 | } |
1657 | /* | 1657 | /* |
1658 | * Undo the block allocation | 1658 | * Undo the block allocation |
@@ -1663,40 +1663,40 @@ out: | |||
1663 | return 0; | 1663 | return 0; |
1664 | } | 1664 | } |
1665 | 1665 | ||
1666 | ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode, | 1666 | ext4_fsblk_t ext4_new_block(handle_t *handle, struct inode *inode, |
1667 | ext3_fsblk_t goal, int *errp) | 1667 | ext4_fsblk_t goal, int *errp) |
1668 | { | 1668 | { |
1669 | unsigned long count = 1; | 1669 | unsigned long count = 1; |
1670 | 1670 | ||
1671 | return ext3_new_blocks(handle, inode, goal, &count, errp); | 1671 | return ext4_new_blocks(handle, inode, goal, &count, errp); |
1672 | } | 1672 | } |
1673 | 1673 | ||
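A hedged usage sketch of the two entry points defined above; handle, inode and goal stand in for whatever the caller already holds:

	int err;
	unsigned long count = 8;
	ext4_fsblk_t blk;

	/* single block near goal */
	blk = ext4_new_block(handle, inode, goal, &err);
	if (!blk)
		return err;			/* -ENOSPC, -EIO, ... */

	/* up to 8 contiguous blocks; count is updated to what was obtained */
	blk = ext4_new_blocks(handle, inode, goal, &count, &err);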
1674 | /** | 1674 | /** |
1675 | * ext3_count_free_blocks() -- count filesystem free blocks | 1675 | * ext4_count_free_blocks() -- count filesystem free blocks |
1676 | * @sb: superblock | 1676 | * @sb: superblock |
1677 | * | 1677 | * |
1678 | * Adds up the number of free blocks from each block group. | 1678 | * Adds up the number of free blocks from each block group. |
1679 | */ | 1679 | */ |
1680 | ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb) | 1680 | ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb) |
1681 | { | 1681 | { |
1682 | ext3_fsblk_t desc_count; | 1682 | ext4_fsblk_t desc_count; |
1683 | struct ext3_group_desc *gdp; | 1683 | struct ext4_group_desc *gdp; |
1684 | int i; | 1684 | int i; |
1685 | unsigned long ngroups = EXT3_SB(sb)->s_groups_count; | 1685 | unsigned long ngroups = EXT4_SB(sb)->s_groups_count; |
1686 | #ifdef EXT3FS_DEBUG | 1686 | #ifdef EXT4FS_DEBUG |
1687 | struct ext3_super_block *es; | 1687 | struct ext4_super_block *es; |
1688 | ext3_fsblk_t bitmap_count; | 1688 | ext4_fsblk_t bitmap_count; |
1689 | unsigned long x; | 1689 | unsigned long x; |
1690 | struct buffer_head *bitmap_bh = NULL; | 1690 | struct buffer_head *bitmap_bh = NULL; |
1691 | 1691 | ||
1692 | es = EXT3_SB(sb)->s_es; | 1692 | es = EXT4_SB(sb)->s_es; |
1693 | desc_count = 0; | 1693 | desc_count = 0; |
1694 | bitmap_count = 0; | 1694 | bitmap_count = 0; |
1695 | gdp = NULL; | 1695 | gdp = NULL; |
1696 | 1696 | ||
1697 | smp_rmb(); | 1697 | smp_rmb(); |
1698 | for (i = 0; i < ngroups; i++) { | 1698 | for (i = 0; i < ngroups; i++) { |
1699 | gdp = ext3_get_group_desc(sb, i, NULL); | 1699 | gdp = ext4_get_group_desc(sb, i, NULL); |
1700 | if (!gdp) | 1700 | if (!gdp) |
1701 | continue; | 1701 | continue; |
1702 | desc_count += le16_to_cpu(gdp->bg_free_blocks_count); | 1702 | desc_count += le16_to_cpu(gdp->bg_free_blocks_count); |
@@ -1705,13 +1705,13 @@ ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb) | |||
1705 | if (bitmap_bh == NULL) | 1705 | if (bitmap_bh == NULL) |
1706 | continue; | 1706 | continue; |
1707 | 1707 | ||
1708 | x = ext3_count_free(bitmap_bh, sb->s_blocksize); | 1708 | x = ext4_count_free(bitmap_bh, sb->s_blocksize); |
1709 | printk("group %d: stored = %d, counted = %lu\n", | 1709 | printk("group %d: stored = %d, counted = %lu\n", |
1710 | i, le16_to_cpu(gdp->bg_free_blocks_count), x); | 1710 | i, le16_to_cpu(gdp->bg_free_blocks_count), x); |
1711 | bitmap_count += x; | 1711 | bitmap_count += x; |
1712 | } | 1712 | } |
1713 | brelse(bitmap_bh); | 1713 | brelse(bitmap_bh); |
1714 | printk("ext3_count_free_blocks: stored = "E3FSBLK | 1714 | printk("ext4_count_free_blocks: stored = "E3FSBLK |
1715 | ", computed = "E3FSBLK", "E3FSBLK"\n", | 1715 | ", computed = "E3FSBLK", "E3FSBLK"\n", |
1716 | le32_to_cpu(es->s_free_blocks_count), | 1716 | le32_to_cpu(es->s_free_blocks_count), |
1717 | desc_count, bitmap_count); | 1717 | desc_count, bitmap_count); |
@@ -1720,7 +1720,7 @@ ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb) | |||
1720 | desc_count = 0; | 1720 | desc_count = 0; |
1721 | smp_rmb(); | 1721 | smp_rmb(); |
1722 | for (i = 0; i < ngroups; i++) { | 1722 | for (i = 0; i < ngroups; i++) { |
1723 | gdp = ext3_get_group_desc(sb, i, NULL); | 1723 | gdp = ext4_get_group_desc(sb, i, NULL); |
1724 | if (!gdp) | 1724 | if (!gdp) |
1725 | continue; | 1725 | continue; |
1726 | desc_count += le16_to_cpu(gdp->bg_free_blocks_count); | 1726 | desc_count += le16_to_cpu(gdp->bg_free_blocks_count); |
@@ -1731,11 +1731,11 @@ ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb) | |||
1731 | } | 1731 | } |
1732 | 1732 | ||
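In the non-debug path the free-block count is simply the per-group sum; illustrative figures only:

	/* three groups with 1200, 0 and 31050 free blocks give
	 * desc_count = 1200 + 0 + 31050 = 32250, which is what
	 * ext4_count_free_blocks() returns when EXT4FS_DEBUG is off */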
1733 | static inline int | 1733 | static inline int |
1734 | block_in_use(ext3_fsblk_t block, struct super_block *sb, unsigned char *map) | 1734 | block_in_use(ext4_fsblk_t block, struct super_block *sb, unsigned char *map) |
1735 | { | 1735 | { |
1736 | return ext3_test_bit ((block - | 1736 | return ext4_test_bit ((block - |
1737 | le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) % | 1737 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) % |
1738 | EXT3_BLOCKS_PER_GROUP(sb), map); | 1738 | EXT4_BLOCKS_PER_GROUP(sb), map); |
1739 | } | 1739 | } |
1740 | 1740 | ||
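A worked example of the bit index block_in_use() tests, with the same assumed geometry as before (32768 blocks per group, s_first_data_block == 0):

	/* block 100000 lives in group 3 and maps to bit
	 * (100000 - 0) % 32768 = 1696 of the bitmap passed in as map */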
1741 | static inline int test_root(int a, int b) | 1741 | static inline int test_root(int a, int b) |
@@ -1747,7 +1747,7 @@ static inline int test_root(int a, int b) | |||
1747 | return num == a; | 1747 | return num == a; |
1748 | } | 1748 | } |
1749 | 1749 | ||
1750 | static int ext3_group_sparse(int group) | 1750 | static int ext4_group_sparse(int group) |
1751 | { | 1751 | { |
1752 | if (group <= 1) | 1752 | if (group <= 1) |
1753 | return 1; | 1753 | return 1; |
@@ -1758,44 +1758,44 @@ static int ext3_group_sparse(int group) | |||
1758 | } | 1758 | } |
1759 | 1759 | ||
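With sparse_super the backups land in only a thin set of groups; the part of ext4_group_sparse() elided by the hunk above checks powers via test_root(), so the accepted groups are (illustrative listing of the small ones):

	/* groups holding superblock/GDT backups under sparse_super:
	 * 0, 1, and every power of 3, 5 or 7:
	 * 3, 5, 7, 9, 25, 27, 49, 81, 125, 243, ... */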
1760 | /** | 1760 | /** |
1761 | * ext3_bg_has_super - number of blocks used by the superblock in group | 1761 | * ext4_bg_has_super - number of blocks used by the superblock in group |
1762 | * @sb: superblock for filesystem | 1762 | * @sb: superblock for filesystem |
1763 | * @group: group number to check | 1763 | * @group: group number to check |
1764 | * | 1764 | * |
1765 | * Return the number of blocks used by the superblock (primary or backup) | 1765 | * Return the number of blocks used by the superblock (primary or backup) |
1766 | * in this group. Currently this will be only 0 or 1. | 1766 | * in this group. Currently this will be only 0 or 1. |
1767 | */ | 1767 | */ |
1768 | int ext3_bg_has_super(struct super_block *sb, int group) | 1768 | int ext4_bg_has_super(struct super_block *sb, int group) |
1769 | { | 1769 | { |
1770 | if (EXT3_HAS_RO_COMPAT_FEATURE(sb, | 1770 | if (EXT4_HAS_RO_COMPAT_FEATURE(sb, |
1771 | EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) && | 1771 | EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) && |
1772 | !ext3_group_sparse(group)) | 1772 | !ext4_group_sparse(group)) |
1773 | return 0; | 1773 | return 0; |
1774 | return 1; | 1774 | return 1; |
1775 | } | 1775 | } |
1776 | 1776 | ||
1777 | static unsigned long ext3_bg_num_gdb_meta(struct super_block *sb, int group) | 1777 | static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb, int group) |
1778 | { | 1778 | { |
1779 | unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb); | 1779 | unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb); |
1780 | unsigned long first = metagroup * EXT3_DESC_PER_BLOCK(sb); | 1780 | unsigned long first = metagroup * EXT4_DESC_PER_BLOCK(sb); |
1781 | unsigned long last = first + EXT3_DESC_PER_BLOCK(sb) - 1; | 1781 | unsigned long last = first + EXT4_DESC_PER_BLOCK(sb) - 1; |
1782 | 1782 | ||
1783 | if (group == first || group == first + 1 || group == last) | 1783 | if (group == first || group == first + 1 || group == last) |
1784 | return 1; | 1784 | return 1; |
1785 | return 0; | 1785 | return 0; |
1786 | } | 1786 | } |
1787 | 1787 | ||
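A worked example of the META_BG layout above, assuming 4 KiB blocks and 32-byte group descriptors, so EXT4_DESC_PER_BLOCK(sb) is 128 (the figures are assumptions, not taken from this patch):

	/* metagroup 2 spans groups 256..383; only groups 256, 257 and 383
	 * hold one backup descriptor block each, the rest hold none */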
1788 | static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group) | 1788 | static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb, int group) |
1789 | { | 1789 | { |
1790 | if (EXT3_HAS_RO_COMPAT_FEATURE(sb, | 1790 | if (EXT4_HAS_RO_COMPAT_FEATURE(sb, |
1791 | EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) && | 1791 | EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) && |
1792 | !ext3_group_sparse(group)) | 1792 | !ext4_group_sparse(group)) |
1793 | return 0; | 1793 | return 0; |
1794 | return EXT3_SB(sb)->s_gdb_count; | 1794 | return EXT4_SB(sb)->s_gdb_count; |
1795 | } | 1795 | } |
1796 | 1796 | ||
1797 | /** | 1797 | /** |
1798 | * ext3_bg_num_gdb - number of blocks used by the group table in group | 1798 | * ext4_bg_num_gdb - number of blocks used by the group table in group |
1799 | * @sb: superblock for filesystem | 1799 | * @sb: superblock for filesystem |
1800 | * @group: group number to check | 1800 | * @group: group number to check |
1801 | * | 1801 | * |
@@ -1803,16 +1803,16 @@ static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group) | |||
1803 | * (primary or backup) in this group. In the future there may be a | 1803 | * (primary or backup) in this group. In the future there may be a |
1804 | * different number of descriptor blocks in each group. | 1804 | * different number of descriptor blocks in each group. |
1805 | */ | 1805 | */ |
1806 | unsigned long ext3_bg_num_gdb(struct super_block *sb, int group) | 1806 | unsigned long ext4_bg_num_gdb(struct super_block *sb, int group) |
1807 | { | 1807 | { |
1808 | unsigned long first_meta_bg = | 1808 | unsigned long first_meta_bg = |
1809 | le32_to_cpu(EXT3_SB(sb)->s_es->s_first_meta_bg); | 1809 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg); |
1810 | unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb); | 1810 | unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb); |
1811 | 1811 | ||
1812 | if (!EXT3_HAS_INCOMPAT_FEATURE(sb,EXT3_FEATURE_INCOMPAT_META_BG) || | 1812 | if (!EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG) || |
1813 | metagroup < first_meta_bg) | 1813 | metagroup < first_meta_bg) |
1814 | return ext3_bg_num_gdb_nometa(sb,group); | 1814 | return ext4_bg_num_gdb_nometa(sb,group); |
1815 | 1815 | ||
1816 | return ext3_bg_num_gdb_meta(sb,group); | 1816 | return ext4_bg_num_gdb_meta(sb,group); |
1817 | 1817 | ||
1818 | } | 1818 | } |
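A hedged summary of the dispatch above, tying the two helpers together:

	/* without META_BG, or for groups below s_first_meta_bg:
	 *   a group that carries backups holds the whole descriptor table,
	 *   i.e. EXT4_SB(sb)->s_gdb_count blocks (ext4_bg_num_gdb_nometa)
	 * with META_BG, past that boundary:
	 *   at most one descriptor block per group (ext4_bg_num_gdb_meta) */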