Diffstat (limited to 'fs/ext3')
-rw-r--r--   fs/ext3/acl.c    |   6
-rw-r--r--   fs/ext3/balloc.c | 350
-rw-r--r--   fs/ext3/bitmap.c |   2
-rw-r--r--   fs/ext3/dir.c    |  19
-rw-r--r--   fs/ext3/file.c   |   2
-rw-r--r--   fs/ext3/fsync.c  |   6
-rw-r--r--   fs/ext3/hash.c   |   8
-rw-r--r--   fs/ext3/ialloc.c |  55
-rw-r--r--   fs/ext3/inode.c  |  77
-rw-r--r--   fs/ext3/namei.c  |  50
-rw-r--r--   fs/ext3/resize.c |  42
-rw-r--r--   fs/ext3/super.c  | 110
-rw-r--r--   fs/ext3/xattr.c  |  16
13 files changed, 482 insertions, 261 deletions
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index 0d21d558b87a..1e5038d9a01b 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -90,8 +90,8 @@ ext3_acl_to_disk(const struct posix_acl *acl, size_t *size) | |||
90 | size_t n; | 90 | size_t n; |
91 | 91 | ||
92 | *size = ext3_acl_size(acl->a_count); | 92 | *size = ext3_acl_size(acl->a_count); |
93 | ext_acl = (ext3_acl_header *)kmalloc(sizeof(ext3_acl_header) + | 93 | ext_acl = kmalloc(sizeof(ext3_acl_header) + acl->a_count * |
94 | acl->a_count * sizeof(ext3_acl_entry), GFP_KERNEL); | 94 | sizeof(ext3_acl_entry), GFP_KERNEL); |
95 | if (!ext_acl) | 95 | if (!ext_acl) |
96 | return ERR_PTR(-ENOMEM); | 96 | return ERR_PTR(-ENOMEM); |
97 | ext_acl->a_version = cpu_to_le32(EXT3_ACL_VERSION); | 97 | ext_acl->a_version = cpu_to_le32(EXT3_ACL_VERSION); |
@@ -258,7 +258,7 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type, | |||
258 | default: | 258 | default: |
259 | return -EINVAL; | 259 | return -EINVAL; |
260 | } | 260 | } |
261 | if (acl) { | 261 | if (acl) { |
262 | value = ext3_acl_to_disk(acl, &size); | 262 | value = ext3_acl_to_disk(acl, &size); |
263 | if (IS_ERR(value)) | 263 | if (IS_ERR(value)) |
264 | return (int)PTR_ERR(value); | 264 | return (int)PTR_ERR(value); |
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 063d994bda0b..b41a7d7e20f0 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -38,6 +38,13 @@ | |||
38 | 38 | ||
39 | #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) | 39 | #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) |
40 | 40 | ||
41 | /** | ||
42 | * ext3_get_group_desc() -- load group descriptor from disk | ||
43 | * @sb: super block | ||
44 | * @block_group: given block group | ||
45 | * @bh: pointer to the buffer head to store the block | ||
46 | * group descriptor | ||
47 | */ | ||
41 | struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb, | 48 | struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb, |
42 | unsigned int block_group, | 49 | unsigned int block_group, |
43 | struct buffer_head ** bh) | 50 | struct buffer_head ** bh) |
@@ -73,8 +80,12 @@ struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb, | |||
73 | return desc + offset; | 80 | return desc + offset; |
74 | } | 81 | } |
75 | 82 | ||
76 | /* | 83 | /** |
77 | * Read the bitmap for a given block_group, reading into the specified | 84 | * read_block_bitmap() |
85 | * @sb: super block | ||
86 | * @block_group: given block group | ||
87 | * | ||
88 | * Read the bitmap for a given block_group, reading into the specified | ||
78 | * slot in the superblock's bitmap cache. | 89 | * slot in the superblock's bitmap cache. |
79 | * | 90 | * |
80 | * Return buffer_head on success or NULL in case of failure. | 91 | * Return buffer_head on success or NULL in case of failure. |
@@ -103,15 +114,22 @@ error_out: | |||
103 | * Operations include: | 114 | * Operations include: |
104 | * dump, find, add, remove, is_empty, find_next_reservable_window, etc. | 115 | * dump, find, add, remove, is_empty, find_next_reservable_window, etc. |
105 | * | 116 | * |
106 | * We use sorted double linked list for the per-filesystem reservation | 117 | * We use a red-black tree to represent per-filesystem reservation |
107 | * window list. (like in vm_region). | 118 | * windows. |
119 | * | ||
120 | */ | ||
121 | |||
122 | /** | ||
123 | * __rsv_window_dump() -- Dump the filesystem block allocation reservation map | ||
124 | * @rb_root: root of per-filesystem reservation rb tree | ||
125 | * @verbose: verbose mode | ||
126 | * @fn: function which wishes to dump the reservation map | ||
108 | * | 127 | * |
109 | * Initially, we keep those small operations in the abstract functions, | 128 | * If verbose is turned on, it will print the whole block reservation |
110 | * so later if we need a better searching tree than double linked-list, | 129 | * windows(start, end). Otherwise, it will only print out the "bad" windows, |
111 | * we could easily switch to that without changing too much | 130 | * those windows that overlap with their immediate neighbors. |
112 | * code. | ||
113 | */ | 131 | */ |
114 | #if 0 | 132 | #if 1 |
115 | static void __rsv_window_dump(struct rb_root *root, int verbose, | 133 | static void __rsv_window_dump(struct rb_root *root, int verbose, |
116 | const char *fn) | 134 | const char *fn) |
117 | { | 135 | { |
@@ -129,7 +147,7 @@ restart: | |||
129 | rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node); | 147 | rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node); |
130 | if (verbose) | 148 | if (verbose) |
131 | printk("reservation window 0x%p " | 149 | printk("reservation window 0x%p " |
132 | "start: %d, end: %d\n", | 150 | "start: %lu, end: %lu\n", |
133 | rsv, rsv->rsv_start, rsv->rsv_end); | 151 | rsv, rsv->rsv_start, rsv->rsv_end); |
134 | if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) { | 152 | if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) { |
135 | printk("Bad reservation %p (start >= end)\n", | 153 | printk("Bad reservation %p (start >= end)\n", |
@@ -161,6 +179,22 @@ restart: | |||
161 | #define rsv_window_dump(root, verbose) do {} while (0) | 179 | #define rsv_window_dump(root, verbose) do {} while (0) |
162 | #endif | 180 | #endif |
163 | 181 | ||
182 | /** | ||
183 | * goal_in_my_reservation() | ||
184 | * @rsv: inode's reservation window | ||
185 | * @grp_goal: given goal block relative to the allocation block group | ||
186 | * @group: the current allocation block group | ||
187 | * @sb: filesystem super block | ||
188 | * | ||
189 | * Test if the given goal block (group relative) is within the file's | ||
190 | * own block reservation window range. | ||
191 | * | ||
192 | * If the reservation window is outside the goal allocation group, return 0; | ||
193 | * grp_goal (given goal block) could be -1, which means no specific | ||
194 | * goal block. In this case, always return 1. | ||
195 | * If the goal block is within the reservation window, return 1; | ||
196 | * otherwise, return 0; | ||
197 | */ | ||
164 | static int | 198 | static int |
165 | goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal, | 199 | goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal, |
166 | unsigned int group, struct super_block * sb) | 200 | unsigned int group, struct super_block * sb) |
@@ -168,7 +202,7 @@ goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal, | |||
168 | ext3_fsblk_t group_first_block, group_last_block; | 202 | ext3_fsblk_t group_first_block, group_last_block; |
169 | 203 | ||
170 | group_first_block = ext3_group_first_block_no(sb, group); | 204 | group_first_block = ext3_group_first_block_no(sb, group); |
171 | group_last_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1; | 205 | group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1); |
172 | 206 | ||
173 | if ((rsv->_rsv_start > group_last_block) || | 207 | if ((rsv->_rsv_start > group_last_block) || |
174 | (rsv->_rsv_end < group_first_block)) | 208 | (rsv->_rsv_end < group_first_block)) |
@@ -179,7 +213,11 @@ goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal, | |||
179 | return 1; | 213 | return 1; |
180 | } | 214 | } |
181 | 215 | ||
182 | /* | 216 | /** |
217 | * search_reserve_window() | ||
218 | * @rb_root: root of reservation tree | ||
219 | * @goal: target allocation block | ||
220 | * | ||
183 | * Find the reserved window which includes the goal, or the previous one | 221 | * Find the reserved window which includes the goal, or the previous one |
184 | * if the goal is not in any window. | 222 | * if the goal is not in any window. |
185 | * Returns NULL if there are no windows or if all windows start after the goal. | 223 | * Returns NULL if there are no windows or if all windows start after the goal. |
@@ -216,6 +254,13 @@ search_reserve_window(struct rb_root *root, ext3_fsblk_t goal) | |||
216 | return rsv; | 254 | return rsv; |
217 | } | 255 | } |
218 | 256 | ||
257 | /** | ||
258 | * ext3_rsv_window_add() -- Insert a window to the block reservation rb tree. | ||
259 | * @sb: super block | ||
260 | * @rsv: reservation window to add | ||
261 | * | ||
262 | * Must be called with rsv_lock hold. | ||
263 | */ | ||
219 | void ext3_rsv_window_add(struct super_block *sb, | 264 | void ext3_rsv_window_add(struct super_block *sb, |
220 | struct ext3_reserve_window_node *rsv) | 265 | struct ext3_reserve_window_node *rsv) |
221 | { | 266 | { |
@@ -236,14 +281,25 @@ void ext3_rsv_window_add(struct super_block *sb, | |||
236 | p = &(*p)->rb_left; | 281 | p = &(*p)->rb_left; |
237 | else if (start > this->rsv_end) | 282 | else if (start > this->rsv_end) |
238 | p = &(*p)->rb_right; | 283 | p = &(*p)->rb_right; |
239 | else | 284 | else { |
285 | rsv_window_dump(root, 1); | ||
240 | BUG(); | 286 | BUG(); |
287 | } | ||
241 | } | 288 | } |
242 | 289 | ||
243 | rb_link_node(node, parent, p); | 290 | rb_link_node(node, parent, p); |
244 | rb_insert_color(node, root); | 291 | rb_insert_color(node, root); |
245 | } | 292 | } |
246 | 293 | ||
294 | /** | ||
295 | * ext3_rsv_window_remove() -- unlink a window from the reservation rb tree | ||
296 | * @sb: super block | ||
297 | * @rsv: reservation window to remove | ||
298 | * | ||
299 | * Mark the block reservation window as not allocated, and unlink it | ||
300 | * from the filesystem reservation window rb tree. Must be called with | ||
301 | * rsv_lock hold. | ||
302 | */ | ||
247 | static void rsv_window_remove(struct super_block *sb, | 303 | static void rsv_window_remove(struct super_block *sb, |
248 | struct ext3_reserve_window_node *rsv) | 304 | struct ext3_reserve_window_node *rsv) |
249 | { | 305 | { |
@@ -253,11 +309,39 @@ static void rsv_window_remove(struct super_block *sb, | |||
253 | rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root); | 309 | rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root); |
254 | } | 310 | } |
255 | 311 | ||
312 | /* | ||
313 | * rsv_is_empty() -- Check if the reservation window is allocated. | ||
314 | * @rsv: given reservation window to check | ||
315 | * | ||
316 | * returns 1 if the end block is EXT3_RESERVE_WINDOW_NOT_ALLOCATED. | ||
317 | */ | ||
256 | static inline int rsv_is_empty(struct ext3_reserve_window *rsv) | 318 | static inline int rsv_is_empty(struct ext3_reserve_window *rsv) |
257 | { | 319 | { |
258 | /* a valid reservation end block could not be 0 */ | 320 | /* a valid reservation end block could not be 0 */ |
259 | return (rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED); | 321 | return rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED; |
260 | } | 322 | } |
323 | |||
324 | /** | ||
325 | * ext3_init_block_alloc_info() | ||
326 | * @inode: file inode structure | ||
327 | * | ||
328 | * Allocate and initialize the reservation window structure, and | ||
329 | * link the window to the ext3 inode structure at last | ||
330 | * | ||
331 | * The reservation window structure is only dynamically allocated | ||
332 | * and linked to ext3 inode the first time the open file | ||
333 | * needs a new block. So, before every ext3_new_block(s) call, for | ||
334 | * regular files, we should check whether the reservation window | ||
335 | * structure exists or not. In the latter case, this function is called. | ||
336 | * Fail to do so will result in block reservation being turned off for that | ||
337 | * open file. | ||
338 | * | ||
339 | * This function is called from ext3_get_blocks_handle(), also called | ||
340 | * when setting the reservation window size through ioctl before the file | ||
341 | * is open for write (needs block allocation). | ||
342 | * | ||
343 | * Needs truncate_mutex protection prior to call this function. | ||
344 | */ | ||
261 | void ext3_init_block_alloc_info(struct inode *inode) | 345 | void ext3_init_block_alloc_info(struct inode *inode) |
262 | { | 346 | { |
263 | struct ext3_inode_info *ei = EXT3_I(inode); | 347 | struct ext3_inode_info *ei = EXT3_I(inode); |
@@ -271,7 +355,7 @@ void ext3_init_block_alloc_info(struct inode *inode) | |||
271 | rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; | 355 | rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; |
272 | rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; | 356 | rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; |
273 | 357 | ||
274 | /* | 358 | /* |
275 | * if filesystem is mounted with NORESERVATION, the goal | 359 | * if filesystem is mounted with NORESERVATION, the goal |
276 | * reservation window size is set to zero to indicate | 360 | * reservation window size is set to zero to indicate |
277 | * block reservation is off | 361 | * block reservation is off |
@@ -287,6 +371,19 @@ void ext3_init_block_alloc_info(struct inode *inode) | |||
287 | ei->i_block_alloc_info = block_i; | 371 | ei->i_block_alloc_info = block_i; |
288 | } | 372 | } |
289 | 373 | ||
374 | /** | ||
375 | * ext3_discard_reservation() | ||
376 | * @inode: inode | ||
377 | * | ||
378 | * Discard(free) block reservation window on last file close, or truncate | ||
379 | * or at last iput(). | ||
380 | * | ||
381 | * It is being called in three cases: | ||
382 | * ext3_release_file(): last writer close the file | ||
383 | * ext3_clear_inode(): last iput(), when nobody link to this file. | ||
384 | * ext3_truncate(): when the block indirect map is about to change. | ||
385 | * | ||
386 | */ | ||
290 | void ext3_discard_reservation(struct inode *inode) | 387 | void ext3_discard_reservation(struct inode *inode) |
291 | { | 388 | { |
292 | struct ext3_inode_info *ei = EXT3_I(inode); | 389 | struct ext3_inode_info *ei = EXT3_I(inode); |
@@ -306,7 +403,14 @@ void ext3_discard_reservation(struct inode *inode) | |||
306 | } | 403 | } |
307 | } | 404 | } |
308 | 405 | ||
309 | /* Free given blocks, update quota and i_blocks field */ | 406 | /** |
407 | * ext3_free_blocks_sb() -- Free given blocks and update quota | ||
408 | * @handle: handle to this transaction | ||
409 | * @sb: super block | ||
410 | * @block: start physcial block to free | ||
411 | * @count: number of blocks to free | ||
412 | * @pdquot_freed_blocks: pointer to quota | ||
413 | */ | ||
310 | void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb, | 414 | void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb, |
311 | ext3_fsblk_t block, unsigned long count, | 415 | ext3_fsblk_t block, unsigned long count, |
312 | unsigned long *pdquot_freed_blocks) | 416 | unsigned long *pdquot_freed_blocks) |
@@ -419,8 +523,8 @@ do_more: | |||
419 | } | 523 | } |
420 | /* @@@ This prevents newly-allocated data from being | 524 | /* @@@ This prevents newly-allocated data from being |
421 | * freed and then reallocated within the same | 525 | * freed and then reallocated within the same |
422 | * transaction. | 526 | * transaction. |
423 | * | 527 | * |
424 | * Ideally we would want to allow that to happen, but to | 528 | * Ideally we would want to allow that to happen, but to |
425 | * do so requires making journal_forget() capable of | 529 | * do so requires making journal_forget() capable of |
426 | * revoking the queued write of a data block, which | 530 | * revoking the queued write of a data block, which |
@@ -433,7 +537,7 @@ do_more: | |||
433 | * safe not to set the allocation bit in the committed | 537 | * safe not to set the allocation bit in the committed |
434 | * bitmap, because we know that there is no outstanding | 538 | * bitmap, because we know that there is no outstanding |
435 | * activity on the buffer any more and so it is safe to | 539 | * activity on the buffer any more and so it is safe to |
436 | * reallocate it. | 540 | * reallocate it. |
437 | */ | 541 | */ |
438 | BUFFER_TRACE(bitmap_bh, "set in b_committed_data"); | 542 | BUFFER_TRACE(bitmap_bh, "set in b_committed_data"); |
439 | J_ASSERT_BH(bitmap_bh, | 543 | J_ASSERT_BH(bitmap_bh, |
@@ -490,7 +594,13 @@ error_return: | |||
490 | return; | 594 | return; |
491 | } | 595 | } |
492 | 596 | ||
493 | /* Free given blocks, update quota and i_blocks field */ | 597 | /** |
598 | * ext3_free_blocks() -- Free given blocks and update quota | ||
599 | * @handle: handle for this transaction | ||
600 | * @inode: inode | ||
601 | * @block: start physical block to free | ||
602 | * @count: number of blocks to count | ||
603 | */ | ||
494 | void ext3_free_blocks(handle_t *handle, struct inode *inode, | 604 | void ext3_free_blocks(handle_t *handle, struct inode *inode, |
495 | ext3_fsblk_t block, unsigned long count) | 605 | ext3_fsblk_t block, unsigned long count) |
496 | { | 606 | { |
@@ -508,7 +618,11 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode, | |||
508 | return; | 618 | return; |
509 | } | 619 | } |
510 | 620 | ||
511 | /* | 621 | /** |
622 | * ext3_test_allocatable() | ||
623 | * @nr: given allocation block group | ||
624 | * @bh: bufferhead contains the bitmap of the given block group | ||
625 | * | ||
512 | * For ext3 allocations, we must not reuse any blocks which are | 626 | * For ext3 allocations, we must not reuse any blocks which are |
513 | * allocated in the bitmap buffer's "last committed data" copy. This | 627 | * allocated in the bitmap buffer's "last committed data" copy. This |
514 | * prevents deletes from freeing up the page for reuse until we have | 628 | * prevents deletes from freeing up the page for reuse until we have |
@@ -518,7 +632,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode, | |||
518 | * data would allow the old block to be overwritten before the | 632 | * data would allow the old block to be overwritten before the |
519 | * transaction committed (because we force data to disk before commit). | 633 | * transaction committed (because we force data to disk before commit). |
520 | * This would lead to corruption if we crashed between overwriting the | 634 | * This would lead to corruption if we crashed between overwriting the |
521 | * data and committing the delete. | 635 | * data and committing the delete. |
522 | * | 636 | * |
523 | * @@@ We may want to make this allocation behaviour conditional on | 637 | * @@@ We may want to make this allocation behaviour conditional on |
524 | * data-writes at some point, and disable it for metadata allocations or | 638 | * data-writes at some point, and disable it for metadata allocations or |
@@ -541,6 +655,16 @@ static int ext3_test_allocatable(ext3_grpblk_t nr, struct buffer_head *bh) | |||
541 | return ret; | 655 | return ret; |
542 | } | 656 | } |
543 | 657 | ||
658 | /** | ||
659 | * bitmap_search_next_usable_block() | ||
660 | * @start: the starting block (group relative) of the search | ||
661 | * @bh: bufferhead contains the block group bitmap | ||
662 | * @maxblocks: the ending block (group relative) of the reservation | ||
663 | * | ||
664 | * The bitmap search --- search forward alternately through the actual | ||
665 | * bitmap on disk and the last-committed copy in journal, until we find a | ||
666 | * bit free in both bitmaps. | ||
667 | */ | ||
544 | static ext3_grpblk_t | 668 | static ext3_grpblk_t |
545 | bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh, | 669 | bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh, |
546 | ext3_grpblk_t maxblocks) | 670 | ext3_grpblk_t maxblocks) |
@@ -548,11 +672,6 @@ bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh, | |||
548 | ext3_grpblk_t next; | 672 | ext3_grpblk_t next; |
549 | struct journal_head *jh = bh2jh(bh); | 673 | struct journal_head *jh = bh2jh(bh); |
550 | 674 | ||
551 | /* | ||
552 | * The bitmap search --- search forward alternately through the actual | ||
553 | * bitmap and the last-committed copy until we find a bit free in | ||
554 | * both | ||
555 | */ | ||
556 | while (start < maxblocks) { | 675 | while (start < maxblocks) { |
557 | next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start); | 676 | next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start); |
558 | if (next >= maxblocks) | 677 | if (next >= maxblocks) |
@@ -562,14 +681,20 @@ bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh, | |||
562 | jbd_lock_bh_state(bh); | 681 | jbd_lock_bh_state(bh); |
563 | if (jh->b_committed_data) | 682 | if (jh->b_committed_data) |
564 | start = ext3_find_next_zero_bit(jh->b_committed_data, | 683 | start = ext3_find_next_zero_bit(jh->b_committed_data, |
565 | maxblocks, next); | 684 | maxblocks, next); |
566 | jbd_unlock_bh_state(bh); | 685 | jbd_unlock_bh_state(bh); |
567 | } | 686 | } |
568 | return -1; | 687 | return -1; |
569 | } | 688 | } |
570 | 689 | ||
571 | /* | 690 | /** |
572 | * Find an allocatable block in a bitmap. We honour both the bitmap and | 691 | * find_next_usable_block() |
692 | * @start: the starting block (group relative) to find next | ||
693 | * allocatable block in bitmap. | ||
694 | * @bh: bufferhead contains the block group bitmap | ||
695 | * @maxblocks: the ending block (group relative) for the search | ||
696 | * | ||
697 | * Find an allocatable block in a bitmap. We honor both the bitmap and | ||
573 | * its last-committed copy (if that exists), and perform the "most | 698 | * its last-committed copy (if that exists), and perform the "most |
574 | * appropriate allocation" algorithm of looking for a free block near | 699 | * appropriate allocation" algorithm of looking for a free block near |
575 | * the initial goal; then for a free byte somewhere in the bitmap; then | 700 | * the initial goal; then for a free byte somewhere in the bitmap; then |
@@ -584,7 +709,7 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh, | |||
584 | 709 | ||
585 | if (start > 0) { | 710 | if (start > 0) { |
586 | /* | 711 | /* |
587 | * The goal was occupied; search forward for a free | 712 | * The goal was occupied; search forward for a free |
588 | * block within the next XX blocks. | 713 | * block within the next XX blocks. |
589 | * | 714 | * |
590 | * end_goal is more or less random, but it has to be | 715 | * end_goal is more or less random, but it has to be |
@@ -620,7 +745,11 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh, | |||
620 | return here; | 745 | return here; |
621 | } | 746 | } |
622 | 747 | ||
623 | /* | 748 | /** |
749 | * claim_block() | ||
750 | * @block: the free block (group relative) to allocate | ||
751 | * @bh: the bufferhead containts the block group bitmap | ||
752 | * | ||
624 | * We think we can allocate this block in this bitmap. Try to set the bit. | 753 | * We think we can allocate this block in this bitmap. Try to set the bit. |
625 | * If that succeeds then check that nobody has allocated and then freed the | 754 | * If that succeeds then check that nobody has allocated and then freed the |
626 | * block since we saw that is was not marked in b_committed_data. If it _was_ | 755 | * block since we saw that is was not marked in b_committed_data. If it _was_ |
@@ -646,7 +775,26 @@ claim_block(spinlock_t *lock, ext3_grpblk_t block, struct buffer_head *bh) | |||
646 | return ret; | 775 | return ret; |
647 | } | 776 | } |
648 | 777 | ||
649 | /* | 778 | /** |
779 | * ext3_try_to_allocate() | ||
780 | * @sb: superblock | ||
781 | * @handle: handle to this transaction | ||
782 | * @group: given allocation block group | ||
783 | * @bitmap_bh: bufferhead holds the block bitmap | ||
784 | * @grp_goal: given target block within the group | ||
785 | * @count: target number of blocks to allocate | ||
786 | * @my_rsv: reservation window | ||
787 | * | ||
788 | * Attempt to allocate blocks within a give range. Set the range of allocation | ||
789 | * first, then find the first free bit(s) from the bitmap (within the range), | ||
790 | * and at last, allocate the blocks by claiming the found free bit as allocated. | ||
791 | * | ||
792 | * To set the range of this allocation: | ||
793 | * if there is a reservation window, only try to allocate block(s) from the | ||
794 | * file's own reservation window; | ||
795 | * Otherwise, the allocation range starts from the give goal block, ends at | ||
796 | * the block group's last block. | ||
797 | * | ||
650 | * If we failed to allocate the desired block then we may end up crossing to a | 798 | * If we failed to allocate the desired block then we may end up crossing to a |
651 | * new bitmap. In that case we must release write access to the old one via | 799 | * new bitmap. In that case we must release write access to the old one via |
652 | * ext3_journal_release_buffer(), else we'll run out of credits. | 800 | * ext3_journal_release_buffer(), else we'll run out of credits. |
@@ -703,7 +851,8 @@ repeat: | |||
703 | } | 851 | } |
704 | start = grp_goal; | 852 | start = grp_goal; |
705 | 853 | ||
706 | if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), grp_goal, bitmap_bh)) { | 854 | if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), |
855 | grp_goal, bitmap_bh)) { | ||
707 | /* | 856 | /* |
708 | * The block was allocated by another thread, or it was | 857 | * The block was allocated by another thread, or it was |
709 | * allocated and then freed by another thread | 858 | * allocated and then freed by another thread |
@@ -718,7 +867,8 @@ repeat: | |||
718 | grp_goal++; | 867 | grp_goal++; |
719 | while (num < *count && grp_goal < end | 868 | while (num < *count && grp_goal < end |
720 | && ext3_test_allocatable(grp_goal, bitmap_bh) | 869 | && ext3_test_allocatable(grp_goal, bitmap_bh) |
721 | && claim_block(sb_bgl_lock(EXT3_SB(sb), group), grp_goal, bitmap_bh)) { | 870 | && claim_block(sb_bgl_lock(EXT3_SB(sb), group), |
871 | grp_goal, bitmap_bh)) { | ||
722 | num++; | 872 | num++; |
723 | grp_goal++; | 873 | grp_goal++; |
724 | } | 874 | } |
@@ -730,12 +880,12 @@ fail_access: | |||
730 | } | 880 | } |
731 | 881 | ||
732 | /** | 882 | /** |
733 | * find_next_reservable_window(): | 883 | * find_next_reservable_window(): |
734 | * find a reservable space within the given range. | 884 | * find a reservable space within the given range. |
735 | * It does not allocate the reservation window for now: | 885 | * It does not allocate the reservation window for now: |
736 | * alloc_new_reservation() will do the work later. | 886 | * alloc_new_reservation() will do the work later. |
737 | * | 887 | * |
738 | * @search_head: the head of the searching list; | 888 | * @search_head: the head of the searching list; |
739 | * This is not necessarily the list head of the whole filesystem | 889 | * This is not necessarily the list head of the whole filesystem |
740 | * | 890 | * |
741 | * We have both head and start_block to assist the search | 891 | * We have both head and start_block to assist the search |
@@ -743,12 +893,12 @@ fail_access: | |||
743 | * but we will shift to the place where start_block is, | 893 | * but we will shift to the place where start_block is, |
744 | * then start from there, when looking for a reservable space. | 894 | * then start from there, when looking for a reservable space. |
745 | * | 895 | * |
746 | * @size: the target new reservation window size | 896 | * @size: the target new reservation window size |
747 | * | 897 | * |
748 | * @group_first_block: the first block we consider to start | 898 | * @group_first_block: the first block we consider to start |
749 | * the real search from | 899 | * the real search from |
750 | * | 900 | * |
751 | * @last_block: | 901 | * @last_block: |
752 | * the maximum block number that our goal reservable space | 902 | * the maximum block number that our goal reservable space |
753 | * could start from. This is normally the last block in this | 903 | * could start from. This is normally the last block in this |
754 | * group. The search will end when we found the start of next | 904 | * group. The search will end when we found the start of next |
@@ -756,10 +906,10 @@ fail_access: | |||
756 | * This could handle the cross boundary reservation window | 906 | * This could handle the cross boundary reservation window |
757 | * request. | 907 | * request. |
758 | * | 908 | * |
759 | * basically we search from the given range, rather than the whole | 909 | * basically we search from the given range, rather than the whole |
760 | * reservation double linked list, (start_block, last_block) | 910 | * reservation double linked list, (start_block, last_block) |
761 | * to find a free region that is of my size and has not | 911 | * to find a free region that is of my size and has not |
762 | * been reserved. | 912 | * been reserved. |
763 | * | 913 | * |
764 | */ | 914 | */ |
765 | static int find_next_reservable_window( | 915 | static int find_next_reservable_window( |
@@ -812,7 +962,7 @@ static int find_next_reservable_window( | |||
812 | /* | 962 | /* |
813 | * Found a reserveable space big enough. We could | 963 | * Found a reserveable space big enough. We could |
814 | * have a reservation across the group boundary here | 964 | * have a reservation across the group boundary here |
815 | */ | 965 | */ |
816 | break; | 966 | break; |
817 | } | 967 | } |
818 | } | 968 | } |
@@ -848,7 +998,7 @@ static int find_next_reservable_window( | |||
848 | } | 998 | } |
849 | 999 | ||
850 | /** | 1000 | /** |
851 | * alloc_new_reservation()--allocate a new reservation window | 1001 | * alloc_new_reservation()--allocate a new reservation window |
852 | * | 1002 | * |
853 | * To make a new reservation, we search part of the filesystem | 1003 | * To make a new reservation, we search part of the filesystem |
854 | * reservation list (the list that inside the group). We try to | 1004 | * reservation list (the list that inside the group). We try to |
@@ -897,7 +1047,7 @@ static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv, | |||
897 | spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock; | 1047 | spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock; |
898 | 1048 | ||
899 | group_first_block = ext3_group_first_block_no(sb, group); | 1049 | group_first_block = ext3_group_first_block_no(sb, group); |
900 | group_end_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1; | 1050 | group_end_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1); |
901 | 1051 | ||
902 | if (grp_goal < 0) | 1052 | if (grp_goal < 0) |
903 | start_block = group_first_block; | 1053 | start_block = group_first_block; |
@@ -929,9 +1079,10 @@ static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv, | |||
929 | if ((my_rsv->rsv_alloc_hit > | 1079 | if ((my_rsv->rsv_alloc_hit > |
930 | (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) { | 1080 | (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) { |
931 | /* | 1081 | /* |
932 | * if we previously allocation hit ration is greater than half | 1082 | * if the previously allocation hit ratio is |
933 | * we double the size of reservation window next time | 1083 | * greater than 1/2, then we double the size of |
934 | * otherwise keep the same | 1084 | * the reservation window the next time, |
1085 | * otherwise we keep the same size window | ||
935 | */ | 1086 | */ |
936 | size = size * 2; | 1087 | size = size * 2; |
937 | if (size > EXT3_MAX_RESERVE_BLOCKS) | 1088 | if (size > EXT3_MAX_RESERVE_BLOCKS) |
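
The comment rewritten in the hunk above describes the sizing policy of alloc_new_reservation(): when more than half of the previous reservation window was actually used, the next window is doubled, capped at EXT3_MAX_RESERVE_BLOCKS. A minimal sketch of that policy follows; the helper name and its parameters are invented for illustration, and only EXT3_MAX_RESERVE_BLOCKS and the behaviour come from the surrounding code.

/*
 * Illustration only, not code from this patch.  Given how many blocks of
 * the previous window were consumed (alloc_hit) and how large that window
 * was, compute the goal size for the next reservation window.
 */
static unsigned long next_rsv_goal_size(unsigned long size,
                                        unsigned long alloc_hit,
                                        unsigned long window_blocks)
{
        /* hit ratio above 1/2: double the window, otherwise keep it */
        if (alloc_hit > window_blocks / 2)
                size *= 2;
        if (size > EXT3_MAX_RESERVE_BLOCKS)
                size = EXT3_MAX_RESERVE_BLOCKS;
        return size;
}
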
@@ -1010,6 +1161,23 @@ retry: | |||
1010 | goto retry; | 1161 | goto retry; |
1011 | } | 1162 | } |
1012 | 1163 | ||
1164 | /** | ||
1165 | * try_to_extend_reservation() | ||
1166 | * @my_rsv: given reservation window | ||
1167 | * @sb: super block | ||
1168 | * @size: the delta to extend | ||
1169 | * | ||
1170 | * Attempt to expand the reservation window large enough to have | ||
1171 | * required number of free blocks | ||
1172 | * | ||
1173 | * Since ext3_try_to_allocate() will always allocate blocks within | ||
1174 | * the reservation window range, if the window size is too small, | ||
1175 | * multiple blocks allocation has to stop at the end of the reservation | ||
1176 | * window. To make this more efficient, given the total number of | ||
1177 | * blocks needed and the current size of the window, we try to | ||
1178 | * expand the reservation window size if necessary on a best-effort | ||
1179 | * basis before ext3_new_blocks() tries to allocate blocks, | ||
1180 | */ | ||
1013 | static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv, | 1181 | static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv, |
1014 | struct super_block *sb, int size) | 1182 | struct super_block *sb, int size) |
1015 | { | 1183 | { |
@@ -1035,7 +1203,17 @@ static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv, | |||
1035 | spin_unlock(rsv_lock); | 1203 | spin_unlock(rsv_lock); |
1036 | } | 1204 | } |
1037 | 1205 | ||
1038 | /* | 1206 | /** |
1207 | * ext3_try_to_allocate_with_rsv() | ||
1208 | * @sb: superblock | ||
1209 | * @handle: handle to this transaction | ||
1210 | * @group: given allocation block group | ||
1211 | * @bitmap_bh: bufferhead holds the block bitmap | ||
1212 | * @grp_goal: given target block within the group | ||
1213 | * @count: target number of blocks to allocate | ||
1214 | * @my_rsv: reservation window | ||
1215 | * @errp: pointer to store the error code | ||
1216 | * | ||
1039 | * This is the main function used to allocate a new block and its reservation | 1217 | * This is the main function used to allocate a new block and its reservation |
1040 | * window. | 1218 | * window. |
1041 | * | 1219 | * |
@@ -1051,9 +1229,7 @@ static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv, | |||
1051 | * reservation), and there are lots of free blocks, but they are all | 1229 | * reservation), and there are lots of free blocks, but they are all |
1052 | * being reserved. | 1230 | * being reserved. |
1053 | * | 1231 | * |
1054 | * We use a sorted double linked list for the per-filesystem reservation list. | 1232 | * We use a red-black tree for the per-filesystem reservation list. |
1055 | * The insert, remove and find a free space(non-reserved) operations for the | ||
1056 | * sorted double linked list should be fast. | ||
1057 | * | 1233 | * |
1058 | */ | 1234 | */ |
1059 | static ext3_grpblk_t | 1235 | static ext3_grpblk_t |
@@ -1063,7 +1239,7 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, | |||
1063 | struct ext3_reserve_window_node * my_rsv, | 1239 | struct ext3_reserve_window_node * my_rsv, |
1064 | unsigned long *count, int *errp) | 1240 | unsigned long *count, int *errp) |
1065 | { | 1241 | { |
1066 | ext3_fsblk_t group_first_block; | 1242 | ext3_fsblk_t group_first_block, group_last_block; |
1067 | ext3_grpblk_t ret = 0; | 1243 | ext3_grpblk_t ret = 0; |
1068 | int fatal; | 1244 | int fatal; |
1069 | unsigned long num = *count; | 1245 | unsigned long num = *count; |
@@ -1100,6 +1276,7 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, | |||
1100 | * first block is the block number of the first block in this group | 1276 | * first block is the block number of the first block in this group |
1101 | */ | 1277 | */ |
1102 | group_first_block = ext3_group_first_block_no(sb, group); | 1278 | group_first_block = ext3_group_first_block_no(sb, group); |
1279 | group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1); | ||
1103 | 1280 | ||
1104 | /* | 1281 | /* |
1105 | * Basically we will allocate a new block from inode's reservation | 1282 | * Basically we will allocate a new block from inode's reservation |
@@ -1118,7 +1295,8 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, | |||
1118 | */ | 1295 | */ |
1119 | while (1) { | 1296 | while (1) { |
1120 | if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) || | 1297 | if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) || |
1121 | !goal_in_my_reservation(&my_rsv->rsv_window, grp_goal, group, sb)) { | 1298 | !goal_in_my_reservation(&my_rsv->rsv_window, |
1299 | grp_goal, group, sb)) { | ||
1122 | if (my_rsv->rsv_goal_size < *count) | 1300 | if (my_rsv->rsv_goal_size < *count) |
1123 | my_rsv->rsv_goal_size = *count; | 1301 | my_rsv->rsv_goal_size = *count; |
1124 | ret = alloc_new_reservation(my_rsv, grp_goal, sb, | 1302 | ret = alloc_new_reservation(my_rsv, grp_goal, sb, |
@@ -1126,17 +1304,21 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, | |||
1126 | if (ret < 0) | 1304 | if (ret < 0) |
1127 | break; /* failed */ | 1305 | break; /* failed */ |
1128 | 1306 | ||
1129 | if (!goal_in_my_reservation(&my_rsv->rsv_window, grp_goal, group, sb)) | 1307 | if (!goal_in_my_reservation(&my_rsv->rsv_window, |
1308 | grp_goal, group, sb)) | ||
1130 | grp_goal = -1; | 1309 | grp_goal = -1; |
1131 | } else if (grp_goal > 0 && (my_rsv->rsv_end-grp_goal+1) < *count) | 1310 | } else if (grp_goal > 0 && |
1311 | (my_rsv->rsv_end-grp_goal+1) < *count) | ||
1132 | try_to_extend_reservation(my_rsv, sb, | 1312 | try_to_extend_reservation(my_rsv, sb, |
1133 | *count-my_rsv->rsv_end + grp_goal - 1); | 1313 | *count-my_rsv->rsv_end + grp_goal - 1); |
1134 | 1314 | ||
1135 | if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb)) | 1315 | if ((my_rsv->rsv_start > group_last_block) || |
1136 | || (my_rsv->rsv_end < group_first_block)) | 1316 | (my_rsv->rsv_end < group_first_block)) { |
1317 | rsv_window_dump(&EXT3_SB(sb)->s_rsv_window_root, 1); | ||
1137 | BUG(); | 1318 | BUG(); |
1138 | ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, grp_goal, | 1319 | } |
1139 | &num, &my_rsv->rsv_window); | 1320 | ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, |
1321 | grp_goal, &num, &my_rsv->rsv_window); | ||
1140 | if (ret >= 0) { | 1322 | if (ret >= 0) { |
1141 | my_rsv->rsv_alloc_hit += num; | 1323 | my_rsv->rsv_alloc_hit += num; |
1142 | *count = num; | 1324 | *count = num; |
@@ -1161,6 +1343,12 @@ out: | |||
1161 | return ret; | 1343 | return ret; |
1162 | } | 1344 | } |
1163 | 1345 | ||
1346 | /** | ||
1347 | * ext3_has_free_blocks() | ||
1348 | * @sbi: in-core super block structure. | ||
1349 | * | ||
1350 | * Check if filesystem has at least 1 free block available for allocation. | ||
1351 | */ | ||
1164 | static int ext3_has_free_blocks(struct ext3_sb_info *sbi) | 1352 | static int ext3_has_free_blocks(struct ext3_sb_info *sbi) |
1165 | { | 1353 | { |
1166 | ext3_fsblk_t free_blocks, root_blocks; | 1354 | ext3_fsblk_t free_blocks, root_blocks; |
@@ -1175,11 +1363,17 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi) | |||
1175 | return 1; | 1363 | return 1; |
1176 | } | 1364 | } |
1177 | 1365 | ||
1178 | /* | 1366 | /** |
1367 | * ext3_should_retry_alloc() | ||
1368 | * @sb: super block | ||
1369 | * @retries number of attemps has been made | ||
1370 | * | ||
1179 | * ext3_should_retry_alloc() is called when ENOSPC is returned, and if | 1371 | * ext3_should_retry_alloc() is called when ENOSPC is returned, and if |
1180 | * it is profitable to retry the operation, this function will wait | 1372 | * it is profitable to retry the operation, this function will wait |
1181 | * for the current or commiting transaction to complete, and then | 1373 | * for the current or commiting transaction to complete, and then |
1182 | * return TRUE. | 1374 | * return TRUE. |
1375 | * | ||
1376 | * if the total number of retries exceed three times, return FALSE. | ||
1183 | */ | 1377 | */ |
1184 | int ext3_should_retry_alloc(struct super_block *sb, int *retries) | 1378 | int ext3_should_retry_alloc(struct super_block *sb, int *retries) |
1185 | { | 1379 | { |
@@ -1191,13 +1385,19 @@ int ext3_should_retry_alloc(struct super_block *sb, int *retries) | |||
1191 | return journal_force_commit_nested(EXT3_SB(sb)->s_journal); | 1385 | return journal_force_commit_nested(EXT3_SB(sb)->s_journal); |
1192 | } | 1386 | } |
1193 | 1387 | ||
1194 | /* | 1388 | /** |
1195 | * ext3_new_block uses a goal block to assist allocation. If the goal is | 1389 | * ext3_new_blocks() -- core block(s) allocation function |
1196 | * free, or there is a free block within 32 blocks of the goal, that block | 1390 | * @handle: handle to this transaction |
1197 | * is allocated. Otherwise a forward search is made for a free block; within | 1391 | * @inode: file inode |
1198 | * each block group the search first looks for an entire free byte in the block | 1392 | * @goal: given target block(filesystem wide) |
1199 | * bitmap, and then for any free bit if that fails. | 1393 | * @count: target number of blocks to allocate |
1200 | * This function also updates quota and i_blocks field. | 1394 | * @errp: error code |
1395 | * | ||
1396 | * ext3_new_blocks uses a goal block to assist allocation. It tries to | ||
1397 | * allocate block(s) from the block group contains the goal block first. If that | ||
1398 | * fails, it will try to allocate block(s) from other block groups without | ||
1399 | * any specific goal block. | ||
1400 | * | ||
1201 | */ | 1401 | */ |
1202 | ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, | 1402 | ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, |
1203 | ext3_fsblk_t goal, unsigned long *count, int *errp) | 1403 | ext3_fsblk_t goal, unsigned long *count, int *errp) |
@@ -1303,7 +1503,7 @@ retry_alloc: | |||
1303 | smp_rmb(); | 1503 | smp_rmb(); |
1304 | 1504 | ||
1305 | /* | 1505 | /* |
1306 | * Now search the rest of the groups. We assume that | 1506 | * Now search the rest of the groups. We assume that |
1307 | * i and gdp correctly point to the last group visited. | 1507 | * i and gdp correctly point to the last group visited. |
1308 | */ | 1508 | */ |
1309 | for (bgi = 0; bgi < ngroups; bgi++) { | 1509 | for (bgi = 0; bgi < ngroups; bgi++) { |
@@ -1428,7 +1628,7 @@ allocated: | |||
1428 | 1628 | ||
1429 | spin_lock(sb_bgl_lock(sbi, group_no)); | 1629 | spin_lock(sb_bgl_lock(sbi, group_no)); |
1430 | gdp->bg_free_blocks_count = | 1630 | gdp->bg_free_blocks_count = |
1431 | cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - num); | 1631 | cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num); |
1432 | spin_unlock(sb_bgl_lock(sbi, group_no)); | 1632 | spin_unlock(sb_bgl_lock(sbi, group_no)); |
1433 | percpu_counter_mod(&sbi->s_freeblocks_counter, -num); | 1633 | percpu_counter_mod(&sbi->s_freeblocks_counter, -num); |
1434 | 1634 | ||
@@ -1471,6 +1671,12 @@ ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode, | |||
1471 | return ext3_new_blocks(handle, inode, goal, &count, errp); | 1671 | return ext3_new_blocks(handle, inode, goal, &count, errp); |
1472 | } | 1672 | } |
1473 | 1673 | ||
1674 | /** | ||
1675 | * ext3_count_free_blocks() -- count filesystem free blocks | ||
1676 | * @sb: superblock | ||
1677 | * | ||
1678 | * Adds up the number of free blocks from each block group. | ||
1679 | */ | ||
1474 | ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb) | 1680 | ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb) |
1475 | { | 1681 | { |
1476 | ext3_fsblk_t desc_count; | 1682 | ext3_fsblk_t desc_count; |
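
The kernel-doc added throughout balloc.c above documents the per-filesystem reservation windows kept in a red-black tree; search_reserve_window() is described as returning the window that contains the goal block, or the preceding window, or NULL when every window starts after the goal. Below is a sketch of such an interval lookup with the kernel rbtree API. The struct and field names (rsv_node, rsv_start, rsv_end) are taken from the hunks above, but the body is an illustration consistent with that description, not a copy of the file's code.

#include <linux/rbtree.h>

static struct ext3_reserve_window_node *
search_reserve_window(struct rb_root *root, ext3_fsblk_t goal)
{
        struct rb_node *n = root->rb_node;
        struct ext3_reserve_window_node *rsv;

        if (!n)
                return NULL;                    /* no windows at all */

        do {
                rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
                if (goal < rsv->rsv_start)
                        n = n->rb_left;
                else if (goal > rsv->rsv_end)
                        n = n->rb_right;
                else
                        return rsv;             /* goal inside this window */
        } while (n);

        /*
         * Fell off the tree: the last node visited is adjacent to the goal.
         * If it starts after the goal, step back to its predecessor so the
         * caller gets the window just before the goal (or NULL if none).
         */
        if (rsv->rsv_start > goal) {
                n = rb_prev(&rsv->rsv_node);
                if (!n)
                        return NULL;
                rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
        }
        return rsv;
}
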
diff --git a/fs/ext3/bitmap.c b/fs/ext3/bitmap.c
index ce4f82b9e528..b9176eed98d1 100644
--- a/fs/ext3/bitmap.c
+++ b/fs/ext3/bitmap.c
@@ -20,7 +20,7 @@ unsigned long ext3_count_free (struct buffer_head * map, unsigned int numchars) | |||
20 | unsigned int i; | 20 | unsigned int i; |
21 | unsigned long sum = 0; | 21 | unsigned long sum = 0; |
22 | 22 | ||
23 | if (!map) | 23 | if (!map) |
24 | return (0); | 24 | return (0); |
25 | for (i = 0; i < numchars; i++) | 25 | for (i = 0; i < numchars; i++) |
26 | sum += nibblemap[map->b_data[i] & 0xf] + | 26 | sum += nibblemap[map->b_data[i] & 0xf] + |
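
ext3_count_free(), touched above only for whitespace, counts free bits through a 16-entry table indexed by each half-byte of the bitmap. The sketch below shows the technique: the table is recomputed here (entry i holds the number of zero bits in i) and the function name is invented, so treat it as an illustration rather than the file's code.

/*
 * Each table entry holds the count of zero bits in its 4-bit index, so
 * one byte of bitmap costs two lookups instead of eight bit tests.
 */
static const int free_per_nibble[16] = {
        4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0
};

static unsigned long count_free_bits(const unsigned char *data,
                                     unsigned int nbytes)
{
        unsigned long sum = 0;
        unsigned int i;

        for (i = 0; i < nbytes; i++)
                sum += free_per_nibble[data[i] & 0xf] +
                       free_per_nibble[(data[i] >> 4) & 0xf];
        return sum;
}
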
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index fbb0d4ed07d4..429acbb4e064 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -59,7 +59,7 @@ static unsigned char get_dtype(struct super_block *sb, int filetype) | |||
59 | 59 | ||
60 | return (ext3_filetype_table[filetype]); | 60 | return (ext3_filetype_table[filetype]); |
61 | } | 61 | } |
62 | 62 | ||
63 | 63 | ||
64 | int ext3_check_dir_entry (const char * function, struct inode * dir, | 64 | int ext3_check_dir_entry (const char * function, struct inode * dir, |
65 | struct ext3_dir_entry_2 * de, | 65 | struct ext3_dir_entry_2 * de, |
@@ -67,7 +67,7 @@ int ext3_check_dir_entry (const char * function, struct inode * dir, | |||
67 | unsigned long offset) | 67 | unsigned long offset) |
68 | { | 68 | { |
69 | const char * error_msg = NULL; | 69 | const char * error_msg = NULL; |
70 | const int rlen = le16_to_cpu(de->rec_len); | 70 | const int rlen = le16_to_cpu(de->rec_len); |
71 | 71 | ||
72 | if (rlen < EXT3_DIR_REC_LEN(1)) | 72 | if (rlen < EXT3_DIR_REC_LEN(1)) |
73 | error_msg = "rec_len is smaller than minimal"; | 73 | error_msg = "rec_len is smaller than minimal"; |
@@ -162,7 +162,7 @@ revalidate: | |||
162 | * to make sure. */ | 162 | * to make sure. */ |
163 | if (filp->f_version != inode->i_version) { | 163 | if (filp->f_version != inode->i_version) { |
164 | for (i = 0; i < sb->s_blocksize && i < offset; ) { | 164 | for (i = 0; i < sb->s_blocksize && i < offset; ) { |
165 | de = (struct ext3_dir_entry_2 *) | 165 | de = (struct ext3_dir_entry_2 *) |
166 | (bh->b_data + i); | 166 | (bh->b_data + i); |
167 | /* It's too expensive to do a full | 167 | /* It's too expensive to do a full |
168 | * dirent test each time round this | 168 | * dirent test each time round this |
@@ -181,7 +181,7 @@ revalidate: | |||
181 | filp->f_version = inode->i_version; | 181 | filp->f_version = inode->i_version; |
182 | } | 182 | } |
183 | 183 | ||
184 | while (!error && filp->f_pos < inode->i_size | 184 | while (!error && filp->f_pos < inode->i_size |
185 | && offset < sb->s_blocksize) { | 185 | && offset < sb->s_blocksize) { |
186 | de = (struct ext3_dir_entry_2 *) (bh->b_data + offset); | 186 | de = (struct ext3_dir_entry_2 *) (bh->b_data + offset); |
187 | if (!ext3_check_dir_entry ("ext3_readdir", inode, de, | 187 | if (!ext3_check_dir_entry ("ext3_readdir", inode, de, |
@@ -229,7 +229,7 @@ out: | |||
229 | /* | 229 | /* |
230 | * These functions convert from the major/minor hash to an f_pos | 230 | * These functions convert from the major/minor hash to an f_pos |
231 | * value. | 231 | * value. |
232 | * | 232 | * |
233 | * Currently we only use major hash numer. This is unfortunate, but | 233 | * Currently we only use major hash numer. This is unfortunate, but |
234 | * on 32-bit machines, the same VFS interface is used for lseek and | 234 | * on 32-bit machines, the same VFS interface is used for lseek and |
235 | * llseek, so if we use the 64 bit offset, then the 32-bit versions of | 235 | * llseek, so if we use the 64 bit offset, then the 32-bit versions of |
@@ -250,7 +250,7 @@ out: | |||
250 | struct fname { | 250 | struct fname { |
251 | __u32 hash; | 251 | __u32 hash; |
252 | __u32 minor_hash; | 252 | __u32 minor_hash; |
253 | struct rb_node rb_hash; | 253 | struct rb_node rb_hash; |
254 | struct fname *next; | 254 | struct fname *next; |
255 | __u32 inode; | 255 | __u32 inode; |
256 | __u8 name_len; | 256 | __u8 name_len; |
@@ -343,10 +343,9 @@ int ext3_htree_store_dirent(struct file *dir_file, __u32 hash, | |||
343 | 343 | ||
344 | /* Create and allocate the fname structure */ | 344 | /* Create and allocate the fname structure */ |
345 | len = sizeof(struct fname) + dirent->name_len + 1; | 345 | len = sizeof(struct fname) + dirent->name_len + 1; |
346 | new_fn = kmalloc(len, GFP_KERNEL); | 346 | new_fn = kzalloc(len, GFP_KERNEL); |
347 | if (!new_fn) | 347 | if (!new_fn) |
348 | return -ENOMEM; | 348 | return -ENOMEM; |
349 | memset(new_fn, 0, len); | ||
350 | new_fn->hash = hash; | 349 | new_fn->hash = hash; |
351 | new_fn->minor_hash = minor_hash; | 350 | new_fn->minor_hash = minor_hash; |
352 | new_fn->inode = le32_to_cpu(dirent->inode); | 351 | new_fn->inode = le32_to_cpu(dirent->inode); |
@@ -410,7 +409,7 @@ static int call_filldir(struct file * filp, void * dirent, | |||
410 | curr_pos = hash2pos(fname->hash, fname->minor_hash); | 409 | curr_pos = hash2pos(fname->hash, fname->minor_hash); |
411 | while (fname) { | 410 | while (fname) { |
412 | error = filldir(dirent, fname->name, | 411 | error = filldir(dirent, fname->name, |
413 | fname->name_len, curr_pos, | 412 | fname->name_len, curr_pos, |
414 | fname->inode, | 413 | fname->inode, |
415 | get_dtype(sb, fname->file_type)); | 414 | get_dtype(sb, fname->file_type)); |
416 | if (error) { | 415 | if (error) { |
@@ -465,7 +464,7 @@ static int ext3_dx_readdir(struct file * filp, | |||
465 | /* | 464 | /* |
466 | * Fill the rbtree if we have no more entries, | 465 | * Fill the rbtree if we have no more entries, |
467 | * or the inode has changed since we last read in the | 466 | * or the inode has changed since we last read in the |
468 | * cached entries. | 467 | * cached entries. |
469 | */ | 468 | */ |
470 | if ((!info->curr_node) || | 469 | if ((!info->curr_node) || |
471 | (filp->f_version != inode->i_version)) { | 470 | (filp->f_version != inode->i_version)) { |
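
In the dir.c hunk above, ext3_htree_store_dirent() switches from kmalloc() followed by memset() to kzalloc(), which returns already-zeroed memory. A minimal sketch of the same conversion is below; the wrapper name is invented, while struct fname and the length calculation mirror the hunk.

#include <linux/slab.h>

/* Illustration of the kmalloc()+memset() -> kzalloc() conversion above. */
static struct fname *fname_alloc(struct ext3_dir_entry_2 *dirent)
{
        int len = sizeof(struct fname) + dirent->name_len + 1;
        struct fname *new_fn;

        /* old form:
         *      new_fn = kmalloc(len, GFP_KERNEL);
         *      ...
         *      memset(new_fn, 0, len);
         */
        new_fn = kzalloc(len, GFP_KERNEL);      /* allocate and zero at once */
        if (!new_fn)
                return NULL;                    /* caller maps this to -ENOMEM */
        return new_fn;
}
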
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
index 1efefb630ea9..994efd189f4e 100644
--- a/fs/ext3/file.c
+++ b/fs/ext3/file.c
@@ -100,7 +100,7 @@ ext3_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t | |||
100 | 100 | ||
101 | force_commit: | 101 | force_commit: |
102 | err = ext3_force_commit(inode->i_sb); | 102 | err = ext3_force_commit(inode->i_sb); |
103 | if (err) | 103 | if (err) |
104 | return err; | 104 | return err; |
105 | return ret; | 105 | return ret; |
106 | } | 106 | } |
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
index 49382a208e05..dd1fd3c0fc05 100644
--- a/fs/ext3/fsync.c
+++ b/fs/ext3/fsync.c
@@ -8,14 +8,14 @@ | |||
8 | * Universite Pierre et Marie Curie (Paris VI) | 8 | * Universite Pierre et Marie Curie (Paris VI) |
9 | * from | 9 | * from |
10 | * linux/fs/minix/truncate.c Copyright (C) 1991, 1992 Linus Torvalds | 10 | * linux/fs/minix/truncate.c Copyright (C) 1991, 1992 Linus Torvalds |
11 | * | 11 | * |
12 | * ext3fs fsync primitive | 12 | * ext3fs fsync primitive |
13 | * | 13 | * |
14 | * Big-endian to little-endian byte-swapping/bitmaps by | 14 | * Big-endian to little-endian byte-swapping/bitmaps by |
15 | * David S. Miller (davem@caip.rutgers.edu), 1995 | 15 | * David S. Miller (davem@caip.rutgers.edu), 1995 |
16 | * | 16 | * |
17 | * Removed unnecessary code duplication for little endian machines | 17 | * Removed unnecessary code duplication for little endian machines |
18 | * and excessive __inline__s. | 18 | * and excessive __inline__s. |
19 | * Andi Kleen, 1997 | 19 | * Andi Kleen, 1997 |
20 | * | 20 | * |
21 | * Major simplications and cleanup - we only need to do the metadata, because | 21 | * Major simplications and cleanup - we only need to do the metadata, because |
diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
index 5a2d1235ead0..deeb27b5ba83 100644
--- a/fs/ext3/hash.c
+++ b/fs/ext3/hash.c
@@ -4,7 +4,7 @@ | |||
4 | * Copyright (C) 2002 by Theodore Ts'o | 4 | * Copyright (C) 2002 by Theodore Ts'o |
5 | * | 5 | * |
6 | * This file is released under the GPL v2. | 6 | * This file is released under the GPL v2. |
7 | * | 7 | * |
8 | * This file may be redistributed under the terms of the GNU Public | 8 | * This file may be redistributed under the terms of the GNU Public |
9 | * License. | 9 | * License. |
10 | */ | 10 | */ |
@@ -80,11 +80,11 @@ static void str2hashbuf(const char *msg, int len, __u32 *buf, int num) | |||
80 | * Returns the hash of a filename. If len is 0 and name is NULL, then | 80 | * Returns the hash of a filename. If len is 0 and name is NULL, then |
81 | * this function can be used to test whether or not a hash version is | 81 | * this function can be used to test whether or not a hash version is |
82 | * supported. | 82 | * supported. |
83 | * | 83 | * |
84 | * The seed is an 4 longword (32 bits) "secret" which can be used to | 84 | * The seed is an 4 longword (32 bits) "secret" which can be used to |
85 | * uniquify a hash. If the seed is all zero's, then some default seed | 85 | * uniquify a hash. If the seed is all zero's, then some default seed |
86 | * may be used. | 86 | * may be used. |
87 | * | 87 | * |
88 | * A particular hash version specifies whether or not the seed is | 88 | * A particular hash version specifies whether or not the seed is |
89 | * represented, and whether or not the returned hash is 32 bits or 64 | 89 | * represented, and whether or not the returned hash is 32 bits or 64 |
90 | * bits. 32 bit hashes will return 0 for the minor hash. | 90 | * bits. 32 bit hashes will return 0 for the minor hash. |
@@ -95,7 +95,7 @@ int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo) | |||
95 | __u32 minor_hash = 0; | 95 | __u32 minor_hash = 0; |
96 | const char *p; | 96 | const char *p; |
97 | int i; | 97 | int i; |
98 | __u32 in[8], buf[4]; | 98 | __u32 in[8], buf[4]; |
99 | 99 | ||
100 | /* Initialize the default seed for the hash checksum functions */ | 100 | /* Initialize the default seed for the hash checksum functions */ |
101 | buf[0] = 0x67452301; | 101 | buf[0] = 0x67452301; |
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 36546ed36a14..e45dbd651736 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -202,7 +202,7 @@ error_return: | |||
202 | static int find_group_dir(struct super_block *sb, struct inode *parent) | 202 | static int find_group_dir(struct super_block *sb, struct inode *parent) |
203 | { | 203 | { |
204 | int ngroups = EXT3_SB(sb)->s_groups_count; | 204 | int ngroups = EXT3_SB(sb)->s_groups_count; |
205 | int freei, avefreei; | 205 | unsigned int freei, avefreei; |
206 | struct ext3_group_desc *desc, *best_desc = NULL; | 206 | struct ext3_group_desc *desc, *best_desc = NULL; |
207 | struct buffer_head *bh; | 207 | struct buffer_head *bh; |
208 | int group, best_group = -1; | 208 | int group, best_group = -1; |
@@ -216,7 +216,7 @@ static int find_group_dir(struct super_block *sb, struct inode *parent) | |||
216 | continue; | 216 | continue; |
217 | if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei) | 217 | if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei) |
218 | continue; | 218 | continue; |
219 | if (!best_desc || | 219 | if (!best_desc || |
220 | (le16_to_cpu(desc->bg_free_blocks_count) > | 220 | (le16_to_cpu(desc->bg_free_blocks_count) > |
221 | le16_to_cpu(best_desc->bg_free_blocks_count))) { | 221 | le16_to_cpu(best_desc->bg_free_blocks_count))) { |
222 | best_group = group; | 222 | best_group = group; |
@@ -226,30 +226,30 @@ static int find_group_dir(struct super_block *sb, struct inode *parent) | |||
226 | return best_group; | 226 | return best_group; |
227 | } | 227 | } |
228 | 228 | ||
229 | /* | 229 | /* |
230 | * Orlov's allocator for directories. | 230 | * Orlov's allocator for directories. |
231 | * | 231 | * |
232 | * We always try to spread first-level directories. | 232 | * We always try to spread first-level directories. |
233 | * | 233 | * |
234 | * If there are blockgroups with both free inodes and free blocks counts | 234 | * If there are blockgroups with both free inodes and free blocks counts |
235 | * not worse than average we return one with smallest directory count. | 235 | * not worse than average we return one with smallest directory count. |
236 | * Otherwise we simply return a random group. | 236 | * Otherwise we simply return a random group. |
237 | * | 237 | * |
238 | * For the rest rules look so: | 238 | * For the rest rules look so: |
239 | * | 239 | * |
240 | * It's OK to put directory into a group unless | 240 | * It's OK to put directory into a group unless |
241 | * it has too many directories already (max_dirs) or | 241 | * it has too many directories already (max_dirs) or |
242 | * it has too few free inodes left (min_inodes) or | 242 | * it has too few free inodes left (min_inodes) or |
243 | * it has too few free blocks left (min_blocks) or | 243 | * it has too few free blocks left (min_blocks) or |
244 | * it's already running too large debt (max_debt). | 244 | * it's already running too large debt (max_debt). |
245 | * Parent's group is prefered, if it doesn't satisfy these | 245 | * Parent's group is prefered, if it doesn't satisfy these |
246 | * conditions we search cyclically through the rest. If none | 246 | * conditions we search cyclically through the rest. If none |
247 | * of the groups look good we just look for a group with more | 247 | * of the groups look good we just look for a group with more |
248 | * free inodes than average (starting at parent's group). | 248 | * free inodes than average (starting at parent's group). |
249 | * | 249 | * |
250 | * Debt is incremented each time we allocate a directory and decremented | 250 | * Debt is incremented each time we allocate a directory and decremented |
251 | * when we allocate an inode, within 0--255. | 251 | * when we allocate an inode, within 0--255. |
252 | */ | 252 | */ |
253 | 253 | ||
254 | #define INODE_COST 64 | 254 | #define INODE_COST 64 |
255 | #define BLOCK_COST 256 | 255 | #define BLOCK_COST 256 |
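The rules above reduce to a per-group admission test against four thresholds (max_dirs, min_inodes, min_blocks, max_debt). A standalone sketch of that test follows, with a hypothetical group_stats struct and plain unsigned types rather than the kernel's own structures:

	#include <stdbool.h>

	/* Hypothetical per-group statistics, for illustration only. */
	struct group_stats {
		unsigned int used_dirs;    /* directories already placed here */
		unsigned int free_inodes;  /* free inodes left in the group */
		unsigned int free_blocks;  /* free blocks left in the group */
		unsigned int debt;         /* directory-allocation debt, 0..255 */
	};

	/* Apply the four limits described in the comment above. */
	static bool group_accepts_dir(const struct group_stats *g,
				      unsigned int max_dirs, unsigned int min_inodes,
				      unsigned int min_blocks, unsigned int max_debt)
	{
		if (g->used_dirs >= max_dirs)
			return false;           /* too many directories already */
		if (g->free_inodes < min_inodes)
			return false;           /* too few free inodes left */
		if (g->free_blocks < min_blocks)
			return false;           /* too few free blocks left */
		if (g->debt >= max_debt)
			return false;           /* running too large a debt */
		return true;
	}

The parent's group is tested first with this predicate; only if it fails does the search continue cyclically through the remaining groups, as the comment describes.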
@@ -261,10 +261,10 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent) | |||
261 | struct ext3_super_block *es = sbi->s_es; | 261 | struct ext3_super_block *es = sbi->s_es; |
262 | int ngroups = sbi->s_groups_count; | 262 | int ngroups = sbi->s_groups_count; |
263 | int inodes_per_group = EXT3_INODES_PER_GROUP(sb); | 263 | int inodes_per_group = EXT3_INODES_PER_GROUP(sb); |
264 | int freei, avefreei; | 264 | unsigned int freei, avefreei; |
265 | ext3_fsblk_t freeb, avefreeb; | 265 | ext3_fsblk_t freeb, avefreeb; |
266 | ext3_fsblk_t blocks_per_dir; | 266 | ext3_fsblk_t blocks_per_dir; |
267 | int ndirs; | 267 | unsigned int ndirs; |
268 | int max_debt, max_dirs, min_inodes; | 268 | int max_debt, max_dirs, min_inodes; |
269 | ext3_grpblk_t min_blocks; | 269 | ext3_grpblk_t min_blocks; |
270 | int group = -1, i; | 270 | int group = -1, i; |
@@ -454,7 +454,7 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode) | |||
454 | group = find_group_dir(sb, dir); | 454 | group = find_group_dir(sb, dir); |
455 | else | 455 | else |
456 | group = find_group_orlov(sb, dir); | 456 | group = find_group_orlov(sb, dir); |
457 | } else | 457 | } else |
458 | group = find_group_other(sb, dir); | 458 | group = find_group_other(sb, dir); |
459 | 459 | ||
460 | err = -ENOSPC; | 460 | err = -ENOSPC; |
@@ -559,7 +559,6 @@ got: | |||
559 | 559 | ||
560 | inode->i_ino = ino; | 560 | inode->i_ino = ino; |
561 | /* This is the optimal IO size (for stat), not the fs block size */ | 561 | /* This is the optimal IO size (for stat), not the fs block size */ |
562 | inode->i_blksize = PAGE_SIZE; | ||
563 | inode->i_blocks = 0; | 562 | inode->i_blocks = 0; |
564 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; | 563 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; |
565 | 564 | ||
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 84be02e93652..dcf4f1dd108b 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c | |||
@@ -13,11 +13,11 @@ | |||
13 | * Copyright (C) 1991, 1992 Linus Torvalds | 13 | * Copyright (C) 1991, 1992 Linus Torvalds |
14 | * | 14 | * |
15 | * Goal-directed block allocation by Stephen Tweedie | 15 | * Goal-directed block allocation by Stephen Tweedie |
16 | * (sct@redhat.com), 1993, 1998 | 16 | * (sct@redhat.com), 1993, 1998 |
17 | * Big-endian to little-endian byte-swapping/bitmaps by | 17 | * Big-endian to little-endian byte-swapping/bitmaps by |
18 | * David S. Miller (davem@caip.rutgers.edu), 1995 | 18 | * David S. Miller (davem@caip.rutgers.edu), 1995 |
19 | * 64-bit file support on 64-bit platforms by Jakub Jelinek | 19 | * 64-bit file support on 64-bit platforms by Jakub Jelinek |
20 | * (jj@sunsite.ms.mff.cuni.cz) | 20 | * (jj@sunsite.ms.mff.cuni.cz) |
21 | * | 21 | * |
22 | * Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000 | 22 | * Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000 |
23 | */ | 23 | */ |
@@ -55,7 +55,7 @@ static int ext3_inode_is_fast_symlink(struct inode *inode) | |||
55 | /* | 55 | /* |
56 | * The ext3 forget function must perform a revoke if we are freeing data | 56 | * The ext3 forget function must perform a revoke if we are freeing data |
57 | * which has been journaled. Metadata (e.g. indirect blocks) must be | 57 | * which has been journaled. Metadata (e.g. indirect blocks) must be |
58 | * revoked in all cases. | 58 | * revoked in all cases. |
59 | * | 59 | * |
60 | * "bh" may be NULL: a metadata block may have been freed from memory | 60 | * "bh" may be NULL: a metadata block may have been freed from memory |
61 | * but there may still be a record of it in the journal, and that record | 61 | * but there may still be a record of it in the journal, and that record |
@@ -105,7 +105,7 @@ int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode, | |||
105 | * Work out how many blocks we need to proceed with the next chunk of a | 105 | * Work out how many blocks we need to proceed with the next chunk of a |
106 | * truncate transaction. | 106 | * truncate transaction. |
107 | */ | 107 | */ |
108 | static unsigned long blocks_for_truncate(struct inode *inode) | 108 | static unsigned long blocks_for_truncate(struct inode *inode) |
109 | { | 109 | { |
110 | unsigned long needed; | 110 | unsigned long needed; |
111 | 111 | ||
@@ -122,13 +122,13 @@ static unsigned long blocks_for_truncate(struct inode *inode) | |||
122 | 122 | ||
123 | /* But we need to bound the transaction so we don't overflow the | 123 | /* But we need to bound the transaction so we don't overflow the |
124 | * journal. */ | 124 | * journal. */ |
125 | if (needed > EXT3_MAX_TRANS_DATA) | 125 | if (needed > EXT3_MAX_TRANS_DATA) |
126 | needed = EXT3_MAX_TRANS_DATA; | 126 | needed = EXT3_MAX_TRANS_DATA; |
127 | 127 | ||
128 | return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed; | 128 | return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed; |
129 | } | 129 | } |
130 | 130 | ||
131 | /* | 131 | /* |
132 | * Truncate transactions can be complex and absolutely huge. So we need to | 132 | * Truncate transactions can be complex and absolutely huge. So we need to |
133 | * be able to restart the transaction at a convenient checkpoint to make | 133 | * be able to restart the transaction at a convenient checkpoint to make |
134 | * sure we don't overflow the journal. | 134 | * sure we don't overflow the journal. |
@@ -136,9 +136,9 @@ static unsigned long blocks_for_truncate(struct inode *inode) | |||
136 | * start_transaction gets us a new handle for a truncate transaction, | 136 | * start_transaction gets us a new handle for a truncate transaction, |
137 | * and extend_transaction tries to extend the existing one a bit. If | 137 | * and extend_transaction tries to extend the existing one a bit. If |
138 | * extend fails, we need to propagate the failure up and restart the | 138 | * extend fails, we need to propagate the failure up and restart the |
139 | * transaction in the top-level truncate loop. --sct | 139 | * transaction in the top-level truncate loop. --sct |
140 | */ | 140 | */ |
141 | static handle_t *start_transaction(struct inode *inode) | 141 | static handle_t *start_transaction(struct inode *inode) |
142 | { | 142 | { |
143 | handle_t *result; | 143 | handle_t *result; |
144 | 144 | ||
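The checkpoint/restart behaviour described in the comment above follows a simple pattern: ask the journal for more credits on the running handle, and if that fails, restart the handle at a point where the on-disk state is consistent. A hedged, standalone sketch of that control flow; handle_credits(), try_extend() and restart_handle() are hypothetical stand-ins, not the actual JBD entry points:

	struct handle;                                 /* opaque transaction handle */
	int handle_credits(struct handle *h);          /* hypothetical: credits left */
	int try_extend(struct handle *h, int n);       /* hypothetical: extend in place */
	int restart_handle(struct handle *h, int n);   /* hypothetical: commit and restart */

	static int ensure_credits(struct handle *h, int needed)
	{
		if (handle_credits(h) >= needed)
			return 0;              /* enough room in this transaction */
		if (try_extend(h, needed) == 0)
			return 0;              /* cheap path: grew the handle in place */
		/*
		 * The running transaction is too full to extend.  Restart the
		 * handle: everything logged so far commits at a consistent
		 * checkpoint, and we continue with a fresh credit allocation.
		 */
		return restart_handle(h, needed);
	}

In the truncate path the "needed" value comes from blocks_for_truncate() above, capped at EXT3_MAX_TRANS_DATA so a single transaction can never outgrow the journal.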
@@ -215,12 +215,12 @@ void ext3_delete_inode (struct inode * inode) | |||
215 | ext3_orphan_del(handle, inode); | 215 | ext3_orphan_del(handle, inode); |
216 | EXT3_I(inode)->i_dtime = get_seconds(); | 216 | EXT3_I(inode)->i_dtime = get_seconds(); |
217 | 217 | ||
218 | /* | 218 | /* |
219 | * One subtle ordering requirement: if anything has gone wrong | 219 | * One subtle ordering requirement: if anything has gone wrong |
220 | * (transaction abort, IO errors, whatever), then we can still | 220 | * (transaction abort, IO errors, whatever), then we can still |
221 | * do these next steps (the fs will already have been marked as | 221 | * do these next steps (the fs will already have been marked as |
222 | * having errors), but we can't free the inode if the mark_dirty | 222 | * having errors), but we can't free the inode if the mark_dirty |
223 | * fails. | 223 | * fails. |
224 | */ | 224 | */ |
225 | if (ext3_mark_inode_dirty(handle, inode)) | 225 | if (ext3_mark_inode_dirty(handle, inode)) |
226 | /* If that failed, just do the required in-core inode clear. */ | 226 | /* If that failed, just do the required in-core inode clear. */ |
@@ -398,7 +398,7 @@ no_block: | |||
398 | * + if there is a block to the left of our position - allocate near it. | 398 | * + if there is a block to the left of our position - allocate near it. |
399 | * + if pointer will live in indirect block - allocate near that block. | 399 | * + if pointer will live in indirect block - allocate near that block. |
400 | * + if pointer will live in inode - allocate in the same | 400 | * + if pointer will live in inode - allocate in the same |
401 | * cylinder group. | 401 | * cylinder group. |
402 | * | 402 | * |
403 | * In the latter case we colour the starting block by the caller's PID to | 403 | * In the latter case we colour the starting block by the caller's PID to |
404 | * prevent it from clashing with concurrent allocations for a different inode | 404 | * prevent it from clashing with concurrent allocations for a different inode |
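The PID colouring mentioned above is just a per-caller offset into the block group, so that processes allocating into different inodes start their searches at different places. A simplified standalone illustration, assuming 16 colour slots per group (the exact divisor used by the kernel may differ):

	/* Spread concurrent allocators across a block group by caller PID. */
	static unsigned long colour_goal(unsigned long group_first_block,
					 unsigned long blocks_per_group,
					 unsigned long caller_pid)
	{
		unsigned long colour = (caller_pid % 16) * (blocks_per_group / 16);

		return group_first_block + colour;
	}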
@@ -470,7 +470,7 @@ static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block, | |||
470 | * ext3_blks_to_allocate: Look up the block map and count the number | 470 | * ext3_blks_to_allocate: Look up the block map and count the number |
471 | * of direct blocks that need to be allocated for the given branch. | 471 | * of direct blocks that need to be allocated for the given branch. |
472 | * | 472 | * |
473 | * @branch: chain of indirect blocks | 473 | * @branch: chain of indirect blocks |
474 | * @k: number of blocks needed for indirect blocks | 474 | * @k: number of blocks needed for indirect blocks |
475 | * @blks: number of data blocks to be mapped. | 475 | * @blks: number of data blocks to be mapped. |
476 | * @blocks_to_boundary: the offset in the indirect block | 476 | * @blocks_to_boundary: the offset in the indirect block |
@@ -744,7 +744,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, | |||
744 | jbd_debug(5, "splicing indirect only\n"); | 744 | jbd_debug(5, "splicing indirect only\n"); |
745 | BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata"); | 745 | BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata"); |
746 | err = ext3_journal_dirty_metadata(handle, where->bh); | 746 | err = ext3_journal_dirty_metadata(handle, where->bh); |
747 | if (err) | 747 | if (err) |
748 | goto err_out; | 748 | goto err_out; |
749 | } else { | 749 | } else { |
750 | /* | 750 | /* |
@@ -1098,7 +1098,7 @@ static int walk_page_buffers( handle_t *handle, | |||
1098 | 1098 | ||
1099 | for ( bh = head, block_start = 0; | 1099 | for ( bh = head, block_start = 0; |
1100 | ret == 0 && (bh != head || !block_start); | 1100 | ret == 0 && (bh != head || !block_start); |
1101 | block_start = block_end, bh = next) | 1101 | block_start = block_end, bh = next) |
1102 | { | 1102 | { |
1103 | next = bh->b_this_page; | 1103 | next = bh->b_this_page; |
1104 | block_end = block_start + blocksize; | 1104 | block_end = block_start + blocksize; |
@@ -1137,7 +1137,7 @@ static int walk_page_buffers( handle_t *handle, | |||
1137 | * So what we do is to rely on the fact that journal_stop/journal_start | 1137 | * So what we do is to rely on the fact that journal_stop/journal_start |
1138 | * will _not_ run commit under these circumstances because handle->h_ref | 1138 | * will _not_ run commit under these circumstances because handle->h_ref |
1139 | * is elevated. We'll still have enough credits for the tiny quotafile | 1139 | * is elevated. We'll still have enough credits for the tiny quotafile |
1140 | * write. | 1140 | * write. |
1141 | */ | 1141 | */ |
1142 | static int do_journal_get_write_access(handle_t *handle, | 1142 | static int do_journal_get_write_access(handle_t *handle, |
1143 | struct buffer_head *bh) | 1143 | struct buffer_head *bh) |
@@ -1282,7 +1282,7 @@ static int ext3_journalled_commit_write(struct file *file, | |||
1282 | if (inode->i_size > EXT3_I(inode)->i_disksize) { | 1282 | if (inode->i_size > EXT3_I(inode)->i_disksize) { |
1283 | EXT3_I(inode)->i_disksize = inode->i_size; | 1283 | EXT3_I(inode)->i_disksize = inode->i_size; |
1284 | ret2 = ext3_mark_inode_dirty(handle, inode); | 1284 | ret2 = ext3_mark_inode_dirty(handle, inode); |
1285 | if (!ret) | 1285 | if (!ret) |
1286 | ret = ret2; | 1286 | ret = ret2; |
1287 | } | 1287 | } |
1288 | ret2 = ext3_journal_stop(handle); | 1288 | ret2 = ext3_journal_stop(handle); |
@@ -1291,7 +1291,7 @@ static int ext3_journalled_commit_write(struct file *file, | |||
1291 | return ret; | 1291 | return ret; |
1292 | } | 1292 | } |
1293 | 1293 | ||
1294 | /* | 1294 | /* |
1295 | * bmap() is special. It gets used by applications such as lilo and by | 1295 | * bmap() is special. It gets used by applications such as lilo and by |
1296 | * the swapper to find the on-disk block of a specific piece of data. | 1296 | * the swapper to find the on-disk block of a specific piece of data. |
1297 | * | 1297 | * |
@@ -1300,10 +1300,10 @@ static int ext3_journalled_commit_write(struct file *file, | |||
1300 | * filesystem and enables swap, then they may get a nasty shock when the | 1300 | * filesystem and enables swap, then they may get a nasty shock when the |
1301 | * data getting swapped to that swapfile suddenly gets overwritten by | 1301 | * data getting swapped to that swapfile suddenly gets overwritten by |
1302 | * the original zeros written out previously to the journal and | 1302 | * the original zeros written out previously to the journal and |
1303 | * awaiting writeback in the kernel's buffer cache. | 1303 | * awaiting writeback in the kernel's buffer cache. |
1304 | * | 1304 | * |
1305 | * So, if we see any bmap calls here on a modified, data-journaled file, | 1305 | * So, if we see any bmap calls here on a modified, data-journaled file, |
1306 | * take extra steps to flush any blocks which might be in the cache. | 1306 | * take extra steps to flush any blocks which might be in the cache. |
1307 | */ | 1307 | */ |
1308 | static sector_t ext3_bmap(struct address_space *mapping, sector_t block) | 1308 | static sector_t ext3_bmap(struct address_space *mapping, sector_t block) |
1309 | { | 1309 | { |
@@ -1312,16 +1312,16 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block) | |||
1312 | int err; | 1312 | int err; |
1313 | 1313 | ||
1314 | if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) { | 1314 | if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) { |
1315 | /* | 1315 | /* |
1316 | * This is a REALLY heavyweight approach, but the use of | 1316 | * This is a REALLY heavyweight approach, but the use of |
1317 | * bmap on dirty files is expected to be extremely rare: | 1317 | * bmap on dirty files is expected to be extremely rare: |
1318 | * only if we run lilo or swapon on a freshly made file | 1318 | * only if we run lilo or swapon on a freshly made file |
1319 | * do we expect this to happen. | 1319 | * do we expect this to happen. |
1320 | * | 1320 | * |
1321 | * (bmap requires CAP_SYS_RAWIO so this does not | 1321 | * (bmap requires CAP_SYS_RAWIO so this does not |
1322 | * represent an unprivileged user DoS attack --- we'd be | 1322 | * represent an unprivileged user DoS attack --- we'd be |
1323 | * in trouble if mortal users could trigger this path at | 1323 | * in trouble if mortal users could trigger this path at |
1324 | * will.) | 1324 | * will.) |
1325 | * | 1325 | * |
1326 | * NB. EXT3_STATE_JDATA is not set on files other than | 1326 | * NB. EXT3_STATE_JDATA is not set on files other than |
1327 | * regular files. If somebody wants to bmap a directory | 1327 | * regular files. If somebody wants to bmap a directory |
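The "extra steps" the comment promises amount to forcing the journaled data out to its final location before the block number is handed back. A rough outline only, with hypothetical types and helpers standing in for the inode state flag and the journal flush primitive:

	struct my_journal;                                  /* opaque, hypothetical */
	struct my_inode {
		int has_pending_journaled_data;             /* analogue of the JDATA state bit */
		struct my_journal *journal;
	};
	void flush_journal(struct my_journal *j);           /* hypothetical stand-in */
	long lookup_block(struct my_inode *i, long block);  /* hypothetical stand-in */

	static long bmap_outline(struct my_inode *inode, long logical_block)
	{
		if (inode->has_pending_journaled_data) {
			/*
			 * Push journaled data to its final on-disk location first,
			 * otherwise the returned block number may still point at
			 * contents that so far exist only in the journal.
			 */
			inode->has_pending_journaled_data = 0;
			flush_journal(inode->journal);
		}
		return lookup_block(inode, logical_block);
	}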
@@ -1457,7 +1457,7 @@ static int ext3_ordered_writepage(struct page *page, | |||
1457 | */ | 1457 | */ |
1458 | 1458 | ||
1459 | /* | 1459 | /* |
1460 | * And attach them to the current transaction. But only if | 1460 | * And attach them to the current transaction. But only if |
1461 | * block_write_full_page() succeeded. Otherwise they are unmapped, | 1461 | * block_write_full_page() succeeded. Otherwise they are unmapped, |
1462 | * and generally junk. | 1462 | * and generally junk. |
1463 | */ | 1463 | */ |
@@ -1644,7 +1644,7 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, | |||
1644 | } | 1644 | } |
1645 | } | 1645 | } |
1646 | 1646 | ||
1647 | ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, | 1647 | ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, |
1648 | offset, nr_segs, | 1648 | offset, nr_segs, |
1649 | ext3_get_block, NULL); | 1649 | ext3_get_block, NULL); |
1650 | 1650 | ||
@@ -2025,7 +2025,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode, | |||
2025 | __le32 *first, __le32 *last) | 2025 | __le32 *first, __le32 *last) |
2026 | { | 2026 | { |
2027 | ext3_fsblk_t block_to_free = 0; /* Starting block # of a run */ | 2027 | ext3_fsblk_t block_to_free = 0; /* Starting block # of a run */ |
2028 | unsigned long count = 0; /* Number of blocks in the run */ | 2028 | unsigned long count = 0; /* Number of blocks in the run */ |
2029 | __le32 *block_to_free_p = NULL; /* Pointer into inode/ind | 2029 | __le32 *block_to_free_p = NULL; /* Pointer into inode/ind |
2030 | corresponding to | 2030 | corresponding to |
2031 | block_to_free */ | 2031 | block_to_free */ |
@@ -2054,7 +2054,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode, | |||
2054 | } else if (nr == block_to_free + count) { | 2054 | } else if (nr == block_to_free + count) { |
2055 | count++; | 2055 | count++; |
2056 | } else { | 2056 | } else { |
2057 | ext3_clear_blocks(handle, inode, this_bh, | 2057 | ext3_clear_blocks(handle, inode, this_bh, |
2058 | block_to_free, | 2058 | block_to_free, |
2059 | count, block_to_free_p, p); | 2059 | count, block_to_free_p, p); |
2060 | block_to_free = nr; | 2060 | block_to_free = nr; |
@@ -2115,7 +2115,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode, | |||
2115 | */ | 2115 | */ |
2116 | if (!bh) { | 2116 | if (!bh) { |
2117 | ext3_error(inode->i_sb, "ext3_free_branches", | 2117 | ext3_error(inode->i_sb, "ext3_free_branches", |
2118 | "Read failure, inode=%ld, block="E3FSBLK, | 2118 | "Read failure, inode=%lu, block="E3FSBLK, |
2119 | inode->i_ino, nr); | 2119 | inode->i_ino, nr); |
2120 | continue; | 2120 | continue; |
2121 | } | 2121 | } |
@@ -2184,7 +2184,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode, | |||
2184 | *p = 0; | 2184 | *p = 0; |
2185 | BUFFER_TRACE(parent_bh, | 2185 | BUFFER_TRACE(parent_bh, |
2186 | "call ext3_journal_dirty_metadata"); | 2186 | "call ext3_journal_dirty_metadata"); |
2187 | ext3_journal_dirty_metadata(handle, | 2187 | ext3_journal_dirty_metadata(handle, |
2188 | parent_bh); | 2188 | parent_bh); |
2189 | } | 2189 | } |
2190 | } | 2190 | } |
@@ -2632,9 +2632,6 @@ void ext3_read_inode(struct inode * inode) | |||
2632 | * recovery code: that's fine, we're about to complete | 2632 | * recovery code: that's fine, we're about to complete |
2633 | * the process of deleting those. */ | 2633 | * the process of deleting those. */ |
2634 | } | 2634 | } |
2635 | inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size | ||
2636 | * (for stat), not the fs block | ||
2637 | * size */ | ||
2638 | inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); | 2635 | inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); |
2639 | ei->i_flags = le32_to_cpu(raw_inode->i_flags); | 2636 | ei->i_flags = le32_to_cpu(raw_inode->i_flags); |
2640 | #ifdef EXT3_FRAGMENTS | 2637 | #ifdef EXT3_FRAGMENTS |
@@ -2704,7 +2701,7 @@ void ext3_read_inode(struct inode * inode) | |||
2704 | if (raw_inode->i_block[0]) | 2701 | if (raw_inode->i_block[0]) |
2705 | init_special_inode(inode, inode->i_mode, | 2702 | init_special_inode(inode, inode->i_mode, |
2706 | old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); | 2703 | old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); |
2707 | else | 2704 | else |
2708 | init_special_inode(inode, inode->i_mode, | 2705 | init_special_inode(inode, inode->i_mode, |
2709 | new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); | 2706 | new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); |
2710 | } | 2707 | } |
@@ -2724,8 +2721,8 @@ bad_inode: | |||
2724 | * | 2721 | * |
2725 | * The caller must have write access to iloc->bh. | 2722 | * The caller must have write access to iloc->bh. |
2726 | */ | 2723 | */ |
2727 | static int ext3_do_update_inode(handle_t *handle, | 2724 | static int ext3_do_update_inode(handle_t *handle, |
2728 | struct inode *inode, | 2725 | struct inode *inode, |
2729 | struct ext3_iloc *iloc) | 2726 | struct ext3_iloc *iloc) |
2730 | { | 2727 | { |
2731 | struct ext3_inode *raw_inode = ext3_raw_inode(iloc); | 2728 | struct ext3_inode *raw_inode = ext3_raw_inode(iloc); |
@@ -2900,7 +2897,7 @@ int ext3_write_inode(struct inode *inode, int wait) | |||
2900 | * commit will leave the blocks being flushed in an unused state on | 2897 | * commit will leave the blocks being flushed in an unused state on |
2901 | * disk. (On recovery, the inode will get truncated and the blocks will | 2898 | * disk. (On recovery, the inode will get truncated and the blocks will |
2902 | * be freed, so we have a strong guarantee that no future commit will | 2899 | * be freed, so we have a strong guarantee that no future commit will |
2903 | * leave these blocks visible to the user.) | 2900 | * leave these blocks visible to the user.) |
2904 | * | 2901 | * |
2905 | * Called with inode->sem down. | 2902 | * Called with inode->sem down. |
2906 | */ | 2903 | */ |
@@ -3043,13 +3040,13 @@ int ext3_mark_iloc_dirty(handle_t *handle, | |||
3043 | return err; | 3040 | return err; |
3044 | } | 3041 | } |
3045 | 3042 | ||
3046 | /* | 3043 | /* |
3047 | * On success, we end up with an outstanding reference count against | 3044 | * On success, we end up with an outstanding reference count against |
3048 | * iloc->bh. This _must_ be cleaned up later. | 3045 | * iloc->bh. This _must_ be cleaned up later. |
3049 | */ | 3046 | */ |
3050 | 3047 | ||
3051 | int | 3048 | int |
3052 | ext3_reserve_inode_write(handle_t *handle, struct inode *inode, | 3049 | ext3_reserve_inode_write(handle_t *handle, struct inode *inode, |
3053 | struct ext3_iloc *iloc) | 3050 | struct ext3_iloc *iloc) |
3054 | { | 3051 | { |
3055 | int err = 0; | 3052 | int err = 0; |
@@ -3139,7 +3136,7 @@ out: | |||
3139 | } | 3136 | } |
3140 | 3137 | ||
3141 | #if 0 | 3138 | #if 0 |
3142 | /* | 3139 | /* |
3143 | * Bind an inode's backing buffer_head into this transaction, to prevent | 3140 | * Bind an inode's backing buffer_head into this transaction, to prevent |
3144 | * it from being flushed to disk early. Unlike | 3141 | * it from being flushed to disk early. Unlike |
3145 | * ext3_reserve_inode_write, this leaves behind no bh reference and | 3142 | * ext3_reserve_inode_write, this leaves behind no bh reference and |
@@ -3157,7 +3154,7 @@ static int ext3_pin_inode(handle_t *handle, struct inode *inode) | |||
3157 | BUFFER_TRACE(iloc.bh, "get_write_access"); | 3154 | BUFFER_TRACE(iloc.bh, "get_write_access"); |
3158 | err = journal_get_write_access(handle, iloc.bh); | 3155 | err = journal_get_write_access(handle, iloc.bh); |
3159 | if (!err) | 3156 | if (!err) |
3160 | err = ext3_journal_dirty_metadata(handle, | 3157 | err = ext3_journal_dirty_metadata(handle, |
3161 | iloc.bh); | 3158 | iloc.bh); |
3162 | brelse(iloc.bh); | 3159 | brelse(iloc.bh); |
3163 | } | 3160 | } |
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 2aa7101b27cd..85d132c37ee0 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c | |||
@@ -15,13 +15,13 @@ | |||
15 | * Big-endian to little-endian byte-swapping/bitmaps by | 15 | * Big-endian to little-endian byte-swapping/bitmaps by |
16 | * David S. Miller (davem@caip.rutgers.edu), 1995 | 16 | * David S. Miller (davem@caip.rutgers.edu), 1995 |
17 | * Directory entry file type support and forward compatibility hooks | 17 | * Directory entry file type support and forward compatibility hooks |
18 | * for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998 | 18 | * for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998 |
19 | * Hash Tree Directory indexing (c) | 19 | * Hash Tree Directory indexing (c) |
20 | * Daniel Phillips, 2001 | 20 | * Daniel Phillips, 2001 |
21 | * Hash Tree Directory indexing porting | 21 | * Hash Tree Directory indexing porting |
22 | * Christopher Li, 2002 | 22 | * Christopher Li, 2002 |
23 | * Hash Tree Directory indexing cleanup | 23 | * Hash Tree Directory indexing cleanup |
24 | * Theodore Ts'o, 2002 | 24 | * Theodore Ts'o, 2002 |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/fs.h> | 27 | #include <linux/fs.h> |
@@ -76,7 +76,7 @@ static struct buffer_head *ext3_append(handle_t *handle, | |||
76 | #ifdef DX_DEBUG | 76 | #ifdef DX_DEBUG |
77 | #define dxtrace(command) command | 77 | #define dxtrace(command) command |
78 | #else | 78 | #else |
79 | #define dxtrace(command) | 79 | #define dxtrace(command) |
80 | #endif | 80 | #endif |
81 | 81 | ||
82 | struct fake_dirent | 82 | struct fake_dirent |
@@ -169,7 +169,7 @@ static struct ext3_dir_entry_2* dx_pack_dirents (char *base, int size); | |||
169 | static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block); | 169 | static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block); |
170 | static int ext3_htree_next_block(struct inode *dir, __u32 hash, | 170 | static int ext3_htree_next_block(struct inode *dir, __u32 hash, |
171 | struct dx_frame *frame, | 171 | struct dx_frame *frame, |
172 | struct dx_frame *frames, | 172 | struct dx_frame *frames, |
173 | __u32 *start_hash); | 173 | __u32 *start_hash); |
174 | static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry, | 174 | static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry, |
175 | struct ext3_dir_entry_2 **res_dir, int *err); | 175 | struct ext3_dir_entry_2 **res_dir, int *err); |
@@ -250,7 +250,7 @@ static void dx_show_index (char * label, struct dx_entry *entries) | |||
250 | } | 250 | } |
251 | 251 | ||
252 | struct stats | 252 | struct stats |
253 | { | 253 | { |
254 | unsigned names; | 254 | unsigned names; |
255 | unsigned space; | 255 | unsigned space; |
256 | unsigned bcount; | 256 | unsigned bcount; |
@@ -278,7 +278,7 @@ static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext3_dir_ent | |||
278 | ((char *) de - base)); | 278 | ((char *) de - base)); |
279 | } | 279 | } |
280 | space += EXT3_DIR_REC_LEN(de->name_len); | 280 | space += EXT3_DIR_REC_LEN(de->name_len); |
281 | names++; | 281 | names++; |
282 | } | 282 | } |
283 | de = (struct ext3_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len)); | 283 | de = (struct ext3_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len)); |
284 | } | 284 | } |
@@ -464,7 +464,7 @@ static void dx_release (struct dx_frame *frames) | |||
464 | */ | 464 | */ |
465 | static int ext3_htree_next_block(struct inode *dir, __u32 hash, | 465 | static int ext3_htree_next_block(struct inode *dir, __u32 hash, |
466 | struct dx_frame *frame, | 466 | struct dx_frame *frame, |
467 | struct dx_frame *frames, | 467 | struct dx_frame *frames, |
468 | __u32 *start_hash) | 468 | __u32 *start_hash) |
469 | { | 469 | { |
470 | struct dx_frame *p; | 470 | struct dx_frame *p; |
@@ -632,7 +632,7 @@ int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash, | |||
632 | } | 632 | } |
633 | count += ret; | 633 | count += ret; |
634 | hashval = ~0; | 634 | hashval = ~0; |
635 | ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS, | 635 | ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS, |
636 | frame, frames, &hashval); | 636 | frame, frames, &hashval); |
637 | *next_hash = hashval; | 637 | *next_hash = hashval; |
638 | if (ret < 0) { | 638 | if (ret < 0) { |
@@ -649,7 +649,7 @@ int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash, | |||
649 | break; | 649 | break; |
650 | } | 650 | } |
651 | dx_release(frames); | 651 | dx_release(frames); |
652 | dxtrace(printk("Fill tree: returned %d entries, next hash: %x\n", | 652 | dxtrace(printk("Fill tree: returned %d entries, next hash: %x\n", |
653 | count, *next_hash)); | 653 | count, *next_hash)); |
654 | return count; | 654 | return count; |
655 | errout: | 655 | errout: |
@@ -1050,7 +1050,7 @@ struct dentry *ext3_get_parent(struct dentry *child) | |||
1050 | parent = ERR_PTR(-ENOMEM); | 1050 | parent = ERR_PTR(-ENOMEM); |
1051 | } | 1051 | } |
1052 | return parent; | 1052 | return parent; |
1053 | } | 1053 | } |
1054 | 1054 | ||
1055 | #define S_SHIFT 12 | 1055 | #define S_SHIFT 12 |
1056 | static unsigned char ext3_type_by_mode[S_IFMT >> S_SHIFT] = { | 1056 | static unsigned char ext3_type_by_mode[S_IFMT >> S_SHIFT] = { |
@@ -1198,7 +1198,7 @@ errout: | |||
1198 | * add_dirent_to_buf will attempt to search the directory block for | 1198 | * add_dirent_to_buf will attempt to search the directory block for |
1199 | * space. It will return -ENOSPC if no space is available, and -EIO | 1199 | * space. It will return -ENOSPC if no space is available, and -EIO |
1200 | * and -EEXIST if directory entry already exists. | 1200 | * and -EEXIST if directory entry already exists. |
1201 | * | 1201 | * |
1202 | * NOTE! bh is NOT released in the case where ENOSPC is returned. In | 1202 | * NOTE! bh is NOT released in the case where ENOSPC is returned. In |
1203 | * all other cases bh is released. | 1203 | * all other cases bh is released. |
1204 | */ | 1204 | */ |
@@ -1572,7 +1572,7 @@ cleanup: | |||
1572 | * ext3_delete_entry deletes a directory entry by merging it with the | 1572 | * ext3_delete_entry deletes a directory entry by merging it with the |
1573 | * previous entry | 1573 | * previous entry |
1574 | */ | 1574 | */ |
1575 | static int ext3_delete_entry (handle_t *handle, | 1575 | static int ext3_delete_entry (handle_t *handle, |
1576 | struct inode * dir, | 1576 | struct inode * dir, |
1577 | struct ext3_dir_entry_2 * de_del, | 1577 | struct ext3_dir_entry_2 * de_del, |
1578 | struct buffer_head * bh) | 1578 | struct buffer_head * bh) |
@@ -1643,12 +1643,12 @@ static int ext3_add_nondir(handle_t *handle, | |||
1643 | * is so far negative - it has no inode. | 1643 | * is so far negative - it has no inode. |
1644 | * | 1644 | * |
1645 | * If the create succeeds, we fill in the inode information | 1645 | * If the create succeeds, we fill in the inode information |
1646 | * with d_instantiate(). | 1646 | * with d_instantiate(). |
1647 | */ | 1647 | */ |
1648 | static int ext3_create (struct inode * dir, struct dentry * dentry, int mode, | 1648 | static int ext3_create (struct inode * dir, struct dentry * dentry, int mode, |
1649 | struct nameidata *nd) | 1649 | struct nameidata *nd) |
1650 | { | 1650 | { |
1651 | handle_t *handle; | 1651 | handle_t *handle; |
1652 | struct inode * inode; | 1652 | struct inode * inode; |
1653 | int err, retries = 0; | 1653 | int err, retries = 0; |
1654 | 1654 | ||
@@ -1688,7 +1688,7 @@ static int ext3_mknod (struct inode * dir, struct dentry *dentry, | |||
1688 | 1688 | ||
1689 | retry: | 1689 | retry: |
1690 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + | 1690 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + |
1691 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + | 1691 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
1692 | 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); | 1692 | 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); |
1693 | if (IS_ERR(handle)) | 1693 | if (IS_ERR(handle)) |
1694 | return PTR_ERR(handle); | 1694 | return PTR_ERR(handle); |
@@ -1813,10 +1813,10 @@ static int empty_dir (struct inode * inode) | |||
1813 | de1 = (struct ext3_dir_entry_2 *) | 1813 | de1 = (struct ext3_dir_entry_2 *) |
1814 | ((char *) de + le16_to_cpu(de->rec_len)); | 1814 | ((char *) de + le16_to_cpu(de->rec_len)); |
1815 | if (le32_to_cpu(de->inode) != inode->i_ino || | 1815 | if (le32_to_cpu(de->inode) != inode->i_ino || |
1816 | !le32_to_cpu(de1->inode) || | 1816 | !le32_to_cpu(de1->inode) || |
1817 | strcmp (".", de->name) || | 1817 | strcmp (".", de->name) || |
1818 | strcmp ("..", de1->name)) { | 1818 | strcmp ("..", de1->name)) { |
1819 | ext3_warning (inode->i_sb, "empty_dir", | 1819 | ext3_warning (inode->i_sb, "empty_dir", |
1820 | "bad directory (dir #%lu) - no `.' or `..'", | 1820 | "bad directory (dir #%lu) - no `.' or `..'", |
1821 | inode->i_ino); | 1821 | inode->i_ino); |
1822 | brelse (bh); | 1822 | brelse (bh); |
@@ -1883,7 +1883,7 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode) | |||
1883 | * being truncated, or files being unlinked. */ | 1883 | * being truncated, or files being unlinked. */ |
1884 | 1884 | ||
1885 | /* @@@ FIXME: Observation from aviro: | 1885 | /* @@@ FIXME: Observation from aviro: |
1886 | * I think I can trigger J_ASSERT in ext3_orphan_add(). We block | 1886 | * I think I can trigger J_ASSERT in ext3_orphan_add(). We block |
1887 | * here (on lock_super()), so race with ext3_link() which might bump | 1887 | * here (on lock_super()), so race with ext3_link() which might bump |
1888 | * ->i_nlink. For, say, a character device. Not a regular file, | 1888 | * ->i_nlink. For, say, a character device. Not a regular file, |
1889 | * not a directory, not a symlink and ->i_nlink > 0. | 1889 | * not a directory, not a symlink and ->i_nlink > 0. |
@@ -1919,8 +1919,8 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode) | |||
1919 | if (!err) | 1919 | if (!err) |
1920 | list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan); | 1920 | list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan); |
1921 | 1921 | ||
1922 | jbd_debug(4, "superblock will point to %ld\n", inode->i_ino); | 1922 | jbd_debug(4, "superblock will point to %lu\n", inode->i_ino); |
1923 | jbd_debug(4, "orphan inode %ld will point to %d\n", | 1923 | jbd_debug(4, "orphan inode %lu will point to %d\n", |
1924 | inode->i_ino, NEXT_ORPHAN(inode)); | 1924 | inode->i_ino, NEXT_ORPHAN(inode)); |
1925 | out_unlock: | 1925 | out_unlock: |
1926 | unlock_super(sb); | 1926 | unlock_super(sb); |
@@ -2129,7 +2129,7 @@ static int ext3_symlink (struct inode * dir, | |||
2129 | 2129 | ||
2130 | retry: | 2130 | retry: |
2131 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + | 2131 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + |
2132 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 + | 2132 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 + |
2133 | 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); | 2133 | 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); |
2134 | if (IS_ERR(handle)) | 2134 | if (IS_ERR(handle)) |
2135 | return PTR_ERR(handle); | 2135 | return PTR_ERR(handle); |
@@ -2227,7 +2227,7 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry, | |||
2227 | DQUOT_INIT(new_dentry->d_inode); | 2227 | DQUOT_INIT(new_dentry->d_inode); |
2228 | handle = ext3_journal_start(old_dir, 2 * | 2228 | handle = ext3_journal_start(old_dir, 2 * |
2229 | EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) + | 2229 | EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) + |
2230 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2); | 2230 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2); |
2231 | if (IS_ERR(handle)) | 2231 | if (IS_ERR(handle)) |
2232 | return PTR_ERR(handle); | 2232 | return PTR_ERR(handle); |
2233 | 2233 | ||
@@ -2393,4 +2393,4 @@ struct inode_operations ext3_special_inode_operations = { | |||
2393 | .removexattr = generic_removexattr, | 2393 | .removexattr = generic_removexattr, |
2394 | #endif | 2394 | #endif |
2395 | .permission = ext3_permission, | 2395 | .permission = ext3_permission, |
2396 | }; | 2396 | }; |
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c index 5e1337fd878a..b73cba12f79c 100644 --- a/fs/ext3/resize.c +++ b/fs/ext3/resize.c | |||
@@ -336,7 +336,7 @@ static int verify_reserved_gdb(struct super_block *sb, | |||
336 | unsigned five = 5; | 336 | unsigned five = 5; |
337 | unsigned seven = 7; | 337 | unsigned seven = 7; |
338 | unsigned grp; | 338 | unsigned grp; |
339 | __u32 *p = (__u32 *)primary->b_data; | 339 | __le32 *p = (__le32 *)primary->b_data; |
340 | int gdbackups = 0; | 340 | int gdbackups = 0; |
341 | 341 | ||
342 | while ((grp = ext3_list_backups(sb, &three, &five, &seven)) < end) { | 342 | while ((grp = ext3_list_backups(sb, &three, &five, &seven)) < end) { |
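The __u32 to __le32 conversions in this file are endianness annotations: __le32 marks a value as on-disk little-endian so that tools such as sparse can insist every access goes through a byte-swapping accessor. A small userspace-flavoured illustration of the same discipline, using a plain typedef and a simplified swap helper instead of the kernel's annotated types and le32_to_cpu():

	#include <stdint.h>

	typedef uint32_t disk_le32;   /* "stored little-endian on disk" marker */

	/* Convert an on-disk little-endian value to host byte order. */
	static uint32_t disk_le32_to_host(disk_le32 v)
	{
	#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		return __builtin_bswap32(v);   /* big-endian host: swap */
	#else
		return v;                      /* little-endian host: already in order */
	#endif
	}

	/* Every read of an on-disk field is funnelled through the helper. */
	static uint32_t read_group_count(const disk_le32 *on_disk_field)
	{
		return disk_le32_to_host(*on_disk_field);
	}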
@@ -380,7 +380,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, | |||
380 | struct buffer_head *dind; | 380 | struct buffer_head *dind; |
381 | int gdbackups; | 381 | int gdbackups; |
382 | struct ext3_iloc iloc; | 382 | struct ext3_iloc iloc; |
383 | __u32 *data; | 383 | __le32 *data; |
384 | int err; | 384 | int err; |
385 | 385 | ||
386 | if (test_opt(sb, DEBUG)) | 386 | if (test_opt(sb, DEBUG)) |
@@ -417,7 +417,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, | |||
417 | goto exit_bh; | 417 | goto exit_bh; |
418 | } | 418 | } |
419 | 419 | ||
420 | data = (__u32 *)dind->b_data; | 420 | data = (__le32 *)dind->b_data; |
421 | if (le32_to_cpu(data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)]) != gdblock) { | 421 | if (le32_to_cpu(data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)]) != gdblock) { |
422 | ext3_warning(sb, __FUNCTION__, | 422 | ext3_warning(sb, __FUNCTION__, |
423 | "new group %u GDT block "E3FSBLK" not reserved", | 423 | "new group %u GDT block "E3FSBLK" not reserved", |
@@ -439,8 +439,8 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, | |||
439 | if ((err = ext3_reserve_inode_write(handle, inode, &iloc))) | 439 | if ((err = ext3_reserve_inode_write(handle, inode, &iloc))) |
440 | goto exit_dindj; | 440 | goto exit_dindj; |
441 | 441 | ||
442 | n_group_desc = (struct buffer_head **)kmalloc((gdb_num + 1) * | 442 | n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *), |
443 | sizeof(struct buffer_head *), GFP_KERNEL); | 443 | GFP_KERNEL); |
444 | if (!n_group_desc) { | 444 | if (!n_group_desc) { |
445 | err = -ENOMEM; | 445 | err = -ENOMEM; |
446 | ext3_warning (sb, __FUNCTION__, | 446 | ext3_warning (sb, __FUNCTION__, |
@@ -519,7 +519,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode, | |||
519 | struct buffer_head *dind; | 519 | struct buffer_head *dind; |
520 | struct ext3_iloc iloc; | 520 | struct ext3_iloc iloc; |
521 | ext3_fsblk_t blk; | 521 | ext3_fsblk_t blk; |
522 | __u32 *data, *end; | 522 | __le32 *data, *end; |
523 | int gdbackups = 0; | 523 | int gdbackups = 0; |
524 | int res, i; | 524 | int res, i; |
525 | int err; | 525 | int err; |
@@ -536,8 +536,8 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode, | |||
536 | } | 536 | } |
537 | 537 | ||
538 | blk = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + EXT3_SB(sb)->s_gdb_count; | 538 | blk = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + EXT3_SB(sb)->s_gdb_count; |
539 | data = (__u32 *)dind->b_data + EXT3_SB(sb)->s_gdb_count; | 539 | data = (__le32 *)dind->b_data + EXT3_SB(sb)->s_gdb_count; |
540 | end = (__u32 *)dind->b_data + EXT3_ADDR_PER_BLOCK(sb); | 540 | end = (__le32 *)dind->b_data + EXT3_ADDR_PER_BLOCK(sb); |
541 | 541 | ||
542 | /* Get each reserved primary GDT block and verify it holds backups */ | 542 | /* Get each reserved primary GDT block and verify it holds backups */ |
543 | for (res = 0; res < reserved_gdb; res++, blk++) { | 543 | for (res = 0; res < reserved_gdb; res++, blk++) { |
@@ -545,7 +545,8 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode, | |||
545 | ext3_warning(sb, __FUNCTION__, | 545 | ext3_warning(sb, __FUNCTION__, |
546 | "reserved block "E3FSBLK | 546 | "reserved block "E3FSBLK |
547 | " not at offset %ld", | 547 | " not at offset %ld", |
548 | blk, (long)(data - (__u32 *)dind->b_data)); | 548 | blk, |
549 | (long)(data - (__le32 *)dind->b_data)); | ||
549 | err = -EINVAL; | 550 | err = -EINVAL; |
550 | goto exit_bh; | 551 | goto exit_bh; |
551 | } | 552 | } |
@@ -560,7 +561,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode, | |||
560 | goto exit_bh; | 561 | goto exit_bh; |
561 | } | 562 | } |
562 | if (++data >= end) | 563 | if (++data >= end) |
563 | data = (__u32 *)dind->b_data; | 564 | data = (__le32 *)dind->b_data; |
564 | } | 565 | } |
565 | 566 | ||
566 | for (i = 0; i < reserved_gdb; i++) { | 567 | for (i = 0; i < reserved_gdb; i++) { |
@@ -584,7 +585,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode, | |||
584 | blk = input->group * EXT3_BLOCKS_PER_GROUP(sb); | 585 | blk = input->group * EXT3_BLOCKS_PER_GROUP(sb); |
585 | for (i = 0; i < reserved_gdb; i++) { | 586 | for (i = 0; i < reserved_gdb; i++) { |
586 | int err2; | 587 | int err2; |
587 | data = (__u32 *)primary[i]->b_data; | 588 | data = (__le32 *)primary[i]->b_data; |
588 | /* printk("reserving backup %lu[%u] = %lu\n", | 589 | /* printk("reserving backup %lu[%u] = %lu\n", |
589 | primary[i]->b_blocknr, gdbackups, | 590 | primary[i]->b_blocknr, gdbackups, |
590 | blk + primary[i]->b_blocknr); */ | 591 | blk + primary[i]->b_blocknr); */ |
@@ -689,7 +690,7 @@ exit_err: | |||
689 | "can't update backup for group %d (err %d), " | 690 | "can't update backup for group %d (err %d), " |
690 | "forcing fsck on next reboot", group, err); | 691 | "forcing fsck on next reboot", group, err); |
691 | sbi->s_mount_state &= ~EXT3_VALID_FS; | 692 | sbi->s_mount_state &= ~EXT3_VALID_FS; |
692 | sbi->s_es->s_state &= ~cpu_to_le16(EXT3_VALID_FS); | 693 | sbi->s_es->s_state &= cpu_to_le16(~EXT3_VALID_FS); |
693 | mark_buffer_dirty(sbi->s_sbh); | 694 | mark_buffer_dirty(sbi->s_sbh); |
694 | } | 695 | } |
695 | } | 696 | } |
@@ -730,6 +731,18 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) | |||
730 | return -EPERM; | 731 | return -EPERM; |
731 | } | 732 | } |
732 | 733 | ||
734 | if (le32_to_cpu(es->s_blocks_count) + input->blocks_count < | ||
735 | le32_to_cpu(es->s_blocks_count)) { | ||
736 | ext3_warning(sb, __FUNCTION__, "blocks_count overflow\n"); | ||
737 | return -EINVAL; | ||
738 | } | ||
739 | |||
740 | if (le32_to_cpu(es->s_inodes_count) + EXT3_INODES_PER_GROUP(sb) < | ||
741 | le32_to_cpu(es->s_inodes_count)) { | ||
742 | ext3_warning(sb, __FUNCTION__, "inodes_count overflow\n"); | ||
743 | return -EINVAL; | ||
744 | } | ||
745 | |||
733 | if (reserved_gdb || gdb_off == 0) { | 746 | if (reserved_gdb || gdb_off == 0) { |
734 | if (!EXT3_HAS_COMPAT_FEATURE(sb, | 747 | if (!EXT3_HAS_COMPAT_FEATURE(sb, |
735 | EXT3_FEATURE_COMPAT_RESIZE_INODE)){ | 748 | EXT3_FEATURE_COMPAT_RESIZE_INODE)){ |
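The two new guards in this hunk use the standard unsigned wraparound test: for unsigned values, a + b < a holds exactly when the addition overflowed. A minimal standalone version of the same check:

	#include <stdbool.h>
	#include <stdint.h>

	/* True if base + add would wrap past UINT32_MAX. */
	static bool u32_add_overflows(uint32_t base, uint32_t add)
	{
		return (uint32_t)(base + add) < base;   /* unsigned wraparound is well defined */
	}

	/* Usage, mirroring the resize checks: refuse a grow request that wraps. */
	static int check_grow(uint32_t blocks_count, uint32_t new_blocks)
	{
		if (u32_add_overflows(blocks_count, new_blocks))
			return -1;      /* would overflow the 32-bit block count */
		return 0;
	}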
@@ -958,6 +971,11 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es, | |||
958 | 971 | ||
959 | add = EXT3_BLOCKS_PER_GROUP(sb) - last; | 972 | add = EXT3_BLOCKS_PER_GROUP(sb) - last; |
960 | 973 | ||
974 | if (o_blocks_count + add < o_blocks_count) { | ||
975 | ext3_warning(sb, __FUNCTION__, "blocks_count overflow"); | ||
976 | return -EINVAL; | ||
977 | } | ||
978 | |||
961 | if (o_blocks_count + add > n_blocks_count) | 979 | if (o_blocks_count + add > n_blocks_count) |
962 | add = n_blocks_count - o_blocks_count; | 980 | add = n_blocks_count - o_blocks_count; |
963 | 981 | ||
diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 3559086eee5f..8bfd56ef18ca 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c | |||
@@ -45,7 +45,7 @@ | |||
45 | static int ext3_load_journal(struct super_block *, struct ext3_super_block *, | 45 | static int ext3_load_journal(struct super_block *, struct ext3_super_block *, |
46 | unsigned long journal_devnum); | 46 | unsigned long journal_devnum); |
47 | static int ext3_create_journal(struct super_block *, struct ext3_super_block *, | 47 | static int ext3_create_journal(struct super_block *, struct ext3_super_block *, |
48 | int); | 48 | unsigned int); |
49 | static void ext3_commit_super (struct super_block * sb, | 49 | static void ext3_commit_super (struct super_block * sb, |
50 | struct ext3_super_block * es, | 50 | struct ext3_super_block * es, |
51 | int sync); | 51 | int sync); |
@@ -62,13 +62,13 @@ static void ext3_unlockfs(struct super_block *sb); | |||
62 | static void ext3_write_super (struct super_block * sb); | 62 | static void ext3_write_super (struct super_block * sb); |
63 | static void ext3_write_super_lockfs(struct super_block *sb); | 63 | static void ext3_write_super_lockfs(struct super_block *sb); |
64 | 64 | ||
65 | /* | 65 | /* |
66 | * Wrappers for journal_start/end. | 66 | * Wrappers for journal_start/end. |
67 | * | 67 | * |
68 | * The only special thing we need to do here is to make sure that all | 68 | * The only special thing we need to do here is to make sure that all |
69 | * journal_end calls result in the superblock being marked dirty, so | 69 | * journal_end calls result in the superblock being marked dirty, so |
70 | * that sync() will call the filesystem's write_super callback if | 70 | * that sync() will call the filesystem's write_super callback if |
71 | * appropriate. | 71 | * appropriate. |
72 | */ | 72 | */ |
73 | handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks) | 73 | handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks) |
74 | { | 74 | { |
@@ -90,11 +90,11 @@ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks) | |||
90 | return journal_start(journal, nblocks); | 90 | return journal_start(journal, nblocks); |
91 | } | 91 | } |
92 | 92 | ||
93 | /* | 93 | /* |
94 | * The only special thing we need to do here is to make sure that all | 94 | * The only special thing we need to do here is to make sure that all |
95 | * journal_stop calls result in the superblock being marked dirty, so | 95 | * journal_stop calls result in the superblock being marked dirty, so |
96 | * that sync() will call the filesystem's write_super callback if | 96 | * that sync() will call the filesystem's write_super callback if |
97 | * appropriate. | 97 | * appropriate. |
98 | */ | 98 | */ |
99 | int __ext3_journal_stop(const char *where, handle_t *handle) | 99 | int __ext3_journal_stop(const char *where, handle_t *handle) |
100 | { | 100 | { |
@@ -159,20 +159,21 @@ static void ext3_handle_error(struct super_block *sb) | |||
159 | if (sb->s_flags & MS_RDONLY) | 159 | if (sb->s_flags & MS_RDONLY) |
160 | return; | 160 | return; |
161 | 161 | ||
162 | if (test_opt (sb, ERRORS_RO)) { | 162 | if (!test_opt (sb, ERRORS_CONT)) { |
163 | printk (KERN_CRIT "Remounting filesystem read-only\n"); | ||
164 | sb->s_flags |= MS_RDONLY; | ||
165 | } else { | ||
166 | journal_t *journal = EXT3_SB(sb)->s_journal; | 163 | journal_t *journal = EXT3_SB(sb)->s_journal; |
167 | 164 | ||
168 | EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT; | 165 | EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT; |
169 | if (journal) | 166 | if (journal) |
170 | journal_abort(journal, -EIO); | 167 | journal_abort(journal, -EIO); |
171 | } | 168 | } |
169 | if (test_opt (sb, ERRORS_RO)) { | ||
170 | printk (KERN_CRIT "Remounting filesystem read-only\n"); | ||
171 | sb->s_flags |= MS_RDONLY; | ||
172 | } | ||
173 | ext3_commit_super(sb, es, 1); | ||
172 | if (test_opt(sb, ERRORS_PANIC)) | 174 | if (test_opt(sb, ERRORS_PANIC)) |
173 | panic("EXT3-fs (device %s): panic forced after error\n", | 175 | panic("EXT3-fs (device %s): panic forced after error\n", |
174 | sb->s_id); | 176 | sb->s_id); |
175 | ext3_commit_super(sb, es, 1); | ||
176 | } | 177 | } |
177 | 178 | ||
178 | void ext3_error (struct super_block * sb, const char * function, | 179 | void ext3_error (struct super_block * sb, const char * function, |
@@ -369,16 +370,16 @@ static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi) | |||
369 | { | 370 | { |
370 | struct list_head *l; | 371 | struct list_head *l; |
371 | 372 | ||
372 | printk(KERN_ERR "sb orphan head is %d\n", | 373 | printk(KERN_ERR "sb orphan head is %d\n", |
373 | le32_to_cpu(sbi->s_es->s_last_orphan)); | 374 | le32_to_cpu(sbi->s_es->s_last_orphan)); |
374 | 375 | ||
375 | printk(KERN_ERR "sb_info orphan list:\n"); | 376 | printk(KERN_ERR "sb_info orphan list:\n"); |
376 | list_for_each(l, &sbi->s_orphan) { | 377 | list_for_each(l, &sbi->s_orphan) { |
377 | struct inode *inode = orphan_list_entry(l); | 378 | struct inode *inode = orphan_list_entry(l); |
378 | printk(KERN_ERR " " | 379 | printk(KERN_ERR " " |
379 | "inode %s:%ld at %p: mode %o, nlink %d, next %d\n", | 380 | "inode %s:%lu at %p: mode %o, nlink %d, next %d\n", |
380 | inode->i_sb->s_id, inode->i_ino, inode, | 381 | inode->i_sb->s_id, inode->i_ino, inode, |
381 | inode->i_mode, inode->i_nlink, | 382 | inode->i_mode, inode->i_nlink, |
382 | NEXT_ORPHAN(inode)); | 383 | NEXT_ORPHAN(inode)); |
383 | } | 384 | } |
384 | } | 385 | } |
@@ -475,7 +476,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) | |||
475 | inode_init_once(&ei->vfs_inode); | 476 | inode_init_once(&ei->vfs_inode); |
476 | } | 477 | } |
477 | } | 478 | } |
478 | 479 | ||
479 | static int init_inodecache(void) | 480 | static int init_inodecache(void) |
480 | { | 481 | { |
481 | ext3_inode_cachep = kmem_cache_create("ext3_inode_cache", | 482 | ext3_inode_cachep = kmem_cache_create("ext3_inode_cache", |
@@ -490,8 +491,7 @@ static int init_inodecache(void) | |||
490 | 491 | ||
491 | static void destroy_inodecache(void) | 492 | static void destroy_inodecache(void) |
492 | { | 493 | { |
493 | if (kmem_cache_destroy(ext3_inode_cachep)) | 494 | kmem_cache_destroy(ext3_inode_cachep); |
494 | printk(KERN_INFO "ext3_inode_cache: not all structures were freed\n"); | ||
495 | } | 495 | } |
496 | 496 | ||
497 | static void ext3_clear_inode(struct inode *inode) | 497 | static void ext3_clear_inode(struct inode *inode) |
@@ -733,8 +733,8 @@ static match_table_t tokens = { | |||
733 | 733 | ||
734 | static ext3_fsblk_t get_sb_block(void **data) | 734 | static ext3_fsblk_t get_sb_block(void **data) |
735 | { | 735 | { |
736 | ext3_fsblk_t sb_block; | 736 | ext3_fsblk_t sb_block; |
737 | char *options = (char *) *data; | 737 | char *options = (char *) *data; |
738 | 738 | ||
739 | if (!options || strncmp(options, "sb=", 3) != 0) | 739 | if (!options || strncmp(options, "sb=", 3) != 0) |
740 | return 1; /* Default location */ | 740 | return 1; /* Default location */ |
@@ -753,7 +753,7 @@ static ext3_fsblk_t get_sb_block(void **data) | |||
753 | } | 753 | } |
754 | 754 | ||
755 | static int parse_options (char *options, struct super_block *sb, | 755 | static int parse_options (char *options, struct super_block *sb, |
756 | unsigned long *inum, unsigned long *journal_devnum, | 756 | unsigned int *inum, unsigned long *journal_devnum, |
757 | ext3_fsblk_t *n_blocks_count, int is_remount) | 757 | ext3_fsblk_t *n_blocks_count, int is_remount) |
758 | { | 758 | { |
759 | struct ext3_sb_info *sbi = EXT3_SB(sb); | 759 | struct ext3_sb_info *sbi = EXT3_SB(sb); |
@@ -1174,7 +1174,8 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es, | |||
1174 | static int ext3_check_descriptors (struct super_block * sb) | 1174 | static int ext3_check_descriptors (struct super_block * sb) |
1175 | { | 1175 | { |
1176 | struct ext3_sb_info *sbi = EXT3_SB(sb); | 1176 | struct ext3_sb_info *sbi = EXT3_SB(sb); |
1177 | ext3_fsblk_t block = le32_to_cpu(sbi->s_es->s_first_data_block); | 1177 | ext3_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); |
1178 | ext3_fsblk_t last_block; | ||
1178 | struct ext3_group_desc * gdp = NULL; | 1179 | struct ext3_group_desc * gdp = NULL; |
1179 | int desc_block = 0; | 1180 | int desc_block = 0; |
1180 | int i; | 1181 | int i; |
@@ -1183,12 +1184,17 @@ static int ext3_check_descriptors (struct super_block * sb) | |||
1183 | 1184 | ||
1184 | for (i = 0; i < sbi->s_groups_count; i++) | 1185 | for (i = 0; i < sbi->s_groups_count; i++) |
1185 | { | 1186 | { |
1187 | if (i == sbi->s_groups_count - 1) | ||
1188 | last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1; | ||
1189 | else | ||
1190 | last_block = first_block + | ||
1191 | (EXT3_BLOCKS_PER_GROUP(sb) - 1); | ||
1192 | |||
1186 | if ((i % EXT3_DESC_PER_BLOCK(sb)) == 0) | 1193 | if ((i % EXT3_DESC_PER_BLOCK(sb)) == 0) |
1187 | gdp = (struct ext3_group_desc *) | 1194 | gdp = (struct ext3_group_desc *) |
1188 | sbi->s_group_desc[desc_block++]->b_data; | 1195 | sbi->s_group_desc[desc_block++]->b_data; |
1189 | if (le32_to_cpu(gdp->bg_block_bitmap) < block || | 1196 | if (le32_to_cpu(gdp->bg_block_bitmap) < first_block || |
1190 | le32_to_cpu(gdp->bg_block_bitmap) >= | 1197 | le32_to_cpu(gdp->bg_block_bitmap) > last_block) |
1191 | block + EXT3_BLOCKS_PER_GROUP(sb)) | ||
1192 | { | 1198 | { |
1193 | ext3_error (sb, "ext3_check_descriptors", | 1199 | ext3_error (sb, "ext3_check_descriptors", |
1194 | "Block bitmap for group %d" | 1200 | "Block bitmap for group %d" |
@@ -1197,9 +1203,8 @@ static int ext3_check_descriptors (struct super_block * sb) | |||
1197 | le32_to_cpu(gdp->bg_block_bitmap)); | 1203 | le32_to_cpu(gdp->bg_block_bitmap)); |
1198 | return 0; | 1204 | return 0; |
1199 | } | 1205 | } |
1200 | if (le32_to_cpu(gdp->bg_inode_bitmap) < block || | 1206 | if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block || |
1201 | le32_to_cpu(gdp->bg_inode_bitmap) >= | 1207 | le32_to_cpu(gdp->bg_inode_bitmap) > last_block) |
1202 | block + EXT3_BLOCKS_PER_GROUP(sb)) | ||
1203 | { | 1208 | { |
1204 | ext3_error (sb, "ext3_check_descriptors", | 1209 | ext3_error (sb, "ext3_check_descriptors", |
1205 | "Inode bitmap for group %d" | 1210 | "Inode bitmap for group %d" |
@@ -1208,9 +1213,9 @@ static int ext3_check_descriptors (struct super_block * sb) | |||
1208 | le32_to_cpu(gdp->bg_inode_bitmap)); | 1213 | le32_to_cpu(gdp->bg_inode_bitmap)); |
1209 | return 0; | 1214 | return 0; |
1210 | } | 1215 | } |
1211 | if (le32_to_cpu(gdp->bg_inode_table) < block || | 1216 | if (le32_to_cpu(gdp->bg_inode_table) < first_block || |
1212 | le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >= | 1217 | le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group > |
1213 | block + EXT3_BLOCKS_PER_GROUP(sb)) | 1218 | last_block) |
1214 | { | 1219 | { |
1215 | ext3_error (sb, "ext3_check_descriptors", | 1220 | ext3_error (sb, "ext3_check_descriptors", |
1216 | "Inode table for group %d" | 1221 | "Inode table for group %d" |
@@ -1219,7 +1224,7 @@ static int ext3_check_descriptors (struct super_block * sb) | |||
1219 | le32_to_cpu(gdp->bg_inode_table)); | 1224 | le32_to_cpu(gdp->bg_inode_table)); |
1220 | return 0; | 1225 | return 0; |
1221 | } | 1226 | } |
1222 | block += EXT3_BLOCKS_PER_GROUP(sb); | 1227 | first_block += EXT3_BLOCKS_PER_GROUP(sb); |
1223 | gdp++; | 1228 | gdp++; |
1224 | } | 1229 | } |
1225 | 1230 | ||
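The rewritten descriptor checks compute an explicit last_block so that the final, possibly shorter, group is bounded by s_blocks_count rather than by a full group's worth of blocks, and the comparisons become inclusive. A standalone sketch of the per-group validation, with a hypothetical group_layout struct in place of the on-disk descriptor:

	#include <stdbool.h>
	#include <stdint.h>

	struct group_layout {             /* hypothetical, for illustration only */
		uint32_t block_bitmap;
		uint32_t inode_bitmap;
		uint32_t inode_table;     /* first block of the inode table */
		uint32_t itb_per_group;   /* inode-table blocks per group */
	};

	/* All of the group's metadata must land inside [first_block, last_block]. */
	static bool group_desc_in_range(const struct group_layout *g,
					uint32_t first_block, uint32_t last_block)
	{
		if (g->block_bitmap < first_block || g->block_bitmap > last_block)
			return false;
		if (g->inode_bitmap < first_block || g->inode_bitmap > last_block)
			return false;
		if (g->inode_table < first_block ||
		    g->inode_table + g->itb_per_group > last_block)
			return false;
		return true;
	}

For every group except the last, last_block is first_block + blocks_per_group - 1; for the last group it is s_blocks_count - 1, exactly as the new code above computes it.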
@@ -1301,17 +1306,17 @@ static void ext3_orphan_cleanup (struct super_block * sb, | |||
1301 | DQUOT_INIT(inode); | 1306 | DQUOT_INIT(inode); |
1302 | if (inode->i_nlink) { | 1307 | if (inode->i_nlink) { |
1303 | printk(KERN_DEBUG | 1308 | printk(KERN_DEBUG |
1304 | "%s: truncating inode %ld to %Ld bytes\n", | 1309 | "%s: truncating inode %lu to %Ld bytes\n", |
1305 | __FUNCTION__, inode->i_ino, inode->i_size); | 1310 | __FUNCTION__, inode->i_ino, inode->i_size); |
1306 | jbd_debug(2, "truncating inode %ld to %Ld bytes\n", | 1311 | jbd_debug(2, "truncating inode %lu to %Ld bytes\n", |
1307 | inode->i_ino, inode->i_size); | 1312 | inode->i_ino, inode->i_size); |
1308 | ext3_truncate(inode); | 1313 | ext3_truncate(inode); |
1309 | nr_truncates++; | 1314 | nr_truncates++; |
1310 | } else { | 1315 | } else { |
1311 | printk(KERN_DEBUG | 1316 | printk(KERN_DEBUG |
1312 | "%s: deleting unreferenced inode %ld\n", | 1317 | "%s: deleting unreferenced inode %lu\n", |
1313 | __FUNCTION__, inode->i_ino); | 1318 | __FUNCTION__, inode->i_ino); |
1314 | jbd_debug(2, "deleting unreferenced inode %ld\n", | 1319 | jbd_debug(2, "deleting unreferenced inode %lu\n", |
1315 | inode->i_ino); | 1320 | inode->i_ino); |
1316 | nr_orphans++; | 1321 | nr_orphans++; |
1317 | } | 1322 | } |
@@ -1390,7 +1395,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
1390 | ext3_fsblk_t sb_block = get_sb_block(&data); | 1395 | ext3_fsblk_t sb_block = get_sb_block(&data); |
1391 | ext3_fsblk_t logic_sb_block; | 1396 | ext3_fsblk_t logic_sb_block; |
1392 | unsigned long offset = 0; | 1397 | unsigned long offset = 0; |
1393 | unsigned long journal_inum = 0; | 1398 | unsigned int journal_inum = 0; |
1394 | unsigned long journal_devnum = 0; | 1399 | unsigned long journal_devnum = 0; |
1395 | unsigned long def_mount_opts; | 1400 | unsigned long def_mount_opts; |
1396 | struct inode *root; | 1401 | struct inode *root; |
@@ -1401,11 +1406,10 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
1401 | int needs_recovery; | 1406 | int needs_recovery; |
1402 | __le32 features; | 1407 | __le32 features; |
1403 | 1408 | ||
1404 | sbi = kmalloc(sizeof(*sbi), GFP_KERNEL); | 1409 | sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); |
1405 | if (!sbi) | 1410 | if (!sbi) |
1406 | return -ENOMEM; | 1411 | return -ENOMEM; |
1407 | sb->s_fs_info = sbi; | 1412 | sb->s_fs_info = sbi; |
1408 | memset(sbi, 0, sizeof(*sbi)); | ||
1409 | sbi->s_mount_opt = 0; | 1413 | sbi->s_mount_opt = 0; |
1410 | sbi->s_resuid = EXT3_DEF_RESUID; | 1414 | sbi->s_resuid = EXT3_DEF_RESUID; |
1411 | sbi->s_resgid = EXT3_DEF_RESGID; | 1415 | sbi->s_resgid = EXT3_DEF_RESGID; |
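The hunk above folds the kmalloc() of the ext3 superblock info plus the separate memset() into one kzalloc(), which hands back memory that is already zeroed. A user-space analogue of the before/after pattern, with calloc() standing in for kzalloc() and a made-up struct:

#include <stdlib.h>
#include <string.h>

struct sb_info { int mount_opt; unsigned int resuid, resgid; };

int main(void)
{
        /* Old shape: allocate, then clear by hand. */
        struct sb_info *a = malloc(sizeof(*a));
        if (a)
                memset(a, 0, sizeof(*a));

        /* New shape: one call that returns zeroed memory. */
        struct sb_info *b = calloc(1, sizeof(*b));

        free(a);
        free(b);
        return 0;
}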
@@ -1483,7 +1487,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
1483 | (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) || | 1487 | (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) || |
1484 | EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) || | 1488 | EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) || |
1485 | EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U))) | 1489 | EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U))) |
1486 | printk(KERN_WARNING | 1490 | printk(KERN_WARNING |
1487 | "EXT3-fs warning: feature flags set on rev 0 fs, " | 1491 | "EXT3-fs warning: feature flags set on rev 0 fs, " |
1488 | "running e2fsck is recommended\n"); | 1492 | "running e2fsck is recommended\n"); |
1489 | /* | 1493 | /* |
@@ -1509,7 +1513,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
1509 | 1513 | ||
1510 | if (blocksize < EXT3_MIN_BLOCK_SIZE || | 1514 | if (blocksize < EXT3_MIN_BLOCK_SIZE || |
1511 | blocksize > EXT3_MAX_BLOCK_SIZE) { | 1515 | blocksize > EXT3_MAX_BLOCK_SIZE) { |
1512 | printk(KERN_ERR | 1516 | printk(KERN_ERR |
1513 | "EXT3-fs: Unsupported filesystem blocksize %d on %s.\n", | 1517 | "EXT3-fs: Unsupported filesystem blocksize %d on %s.\n", |
1514 | blocksize, sb->s_id); | 1518 | blocksize, sb->s_id); |
1515 | goto failed_mount; | 1519 | goto failed_mount; |
@@ -1533,14 +1537,14 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
1533 | offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize; | 1537 | offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize; |
1534 | bh = sb_bread(sb, logic_sb_block); | 1538 | bh = sb_bread(sb, logic_sb_block); |
1535 | if (!bh) { | 1539 | if (!bh) { |
1536 | printk(KERN_ERR | 1540 | printk(KERN_ERR |
1537 | "EXT3-fs: Can't read superblock on 2nd try.\n"); | 1541 | "EXT3-fs: Can't read superblock on 2nd try.\n"); |
1538 | goto failed_mount; | 1542 | goto failed_mount; |
1539 | } | 1543 | } |
1540 | es = (struct ext3_super_block *)(((char *)bh->b_data) + offset); | 1544 | es = (struct ext3_super_block *)(((char *)bh->b_data) + offset); |
1541 | sbi->s_es = es; | 1545 | sbi->s_es = es; |
1542 | if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) { | 1546 | if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) { |
1543 | printk (KERN_ERR | 1547 | printk (KERN_ERR |
1544 | "EXT3-fs: Magic mismatch, very weird !\n"); | 1548 | "EXT3-fs: Magic mismatch, very weird !\n"); |
1545 | goto failed_mount; | 1549 | goto failed_mount; |
1546 | } | 1550 | } |
@@ -1622,10 +1626,9 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
1622 | 1626 | ||
1623 | if (EXT3_BLOCKS_PER_GROUP(sb) == 0) | 1627 | if (EXT3_BLOCKS_PER_GROUP(sb) == 0) |
1624 | goto cantfind_ext3; | 1628 | goto cantfind_ext3; |
1625 | sbi->s_groups_count = (le32_to_cpu(es->s_blocks_count) - | 1629 | sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) - |
1626 | le32_to_cpu(es->s_first_data_block) + | 1630 | le32_to_cpu(es->s_first_data_block) - 1) |
1627 | EXT3_BLOCKS_PER_GROUP(sb) - 1) / | 1631 | / EXT3_BLOCKS_PER_GROUP(sb)) + 1; |
1628 | EXT3_BLOCKS_PER_GROUP(sb); | ||
1629 | db_count = (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) / | 1632 | db_count = (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) / |
1630 | EXT3_DESC_PER_BLOCK(sb); | 1633 | EXT3_DESC_PER_BLOCK(sb); |
1631 | sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *), | 1634 | sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *), |
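The s_groups_count rewrite above still computes a rounded-up division, but it no longer forms blocks + EXT3_BLOCKS_PER_GROUP - 1; my reading (not stated in the diff) is that this avoids wraparound in the 32-bit arithmetic of the on-disk block count when a filesystem approaches 2^32 blocks. A small stand-alone comparison with invented numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t blocks      = 4294963200u;     /* close to 2^32 */
        uint32_t first_block = 0;
        uint32_t per_group   = 32768;
        uint32_t n = blocks - first_block;

        /* Both expressions are ceil(n / per_group) for n >= 1, but only the
         * first can overflow while forming the intermediate sum. */
        uint32_t old_way = (n + per_group - 1) / per_group;     /* wraps: prints 0 */
        uint32_t new_way = ((n - 1) / per_group) + 1;           /* prints 131072 */

        printf("old: %u  new: %u\n", old_way, new_way);
        return 0;
}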
@@ -1820,7 +1823,7 @@ out_fail: | |||
1820 | /* | 1823 | /* |
1821 | * Setup any per-fs journal parameters now. We'll do this both on | 1824 | * Setup any per-fs journal parameters now. We'll do this both on |
1822 | * initial mount, once the journal has been initialised but before we've | 1825 | * initial mount, once the journal has been initialised but before we've |
1823 | * done any recovery; and again on any subsequent remount. | 1826 | * done any recovery; and again on any subsequent remount. |
1824 | */ | 1827 | */ |
1825 | static void ext3_init_journal_params(struct super_block *sb, journal_t *journal) | 1828 | static void ext3_init_journal_params(struct super_block *sb, journal_t *journal) |
1826 | { | 1829 | { |
@@ -1840,7 +1843,8 @@ static void ext3_init_journal_params(struct super_block *sb, journal_t *journal) | |||
1840 | spin_unlock(&journal->j_state_lock); | 1843 | spin_unlock(&journal->j_state_lock); |
1841 | } | 1844 | } |
1842 | 1845 | ||
1843 | static journal_t *ext3_get_journal(struct super_block *sb, int journal_inum) | 1846 | static journal_t *ext3_get_journal(struct super_block *sb, |
1847 | unsigned int journal_inum) | ||
1844 | { | 1848 | { |
1845 | struct inode *journal_inode; | 1849 | struct inode *journal_inode; |
1846 | journal_t *journal; | 1850 | journal_t *journal; |
@@ -1975,7 +1979,7 @@ static int ext3_load_journal(struct super_block *sb, | |||
1975 | unsigned long journal_devnum) | 1979 | unsigned long journal_devnum) |
1976 | { | 1980 | { |
1977 | journal_t *journal; | 1981 | journal_t *journal; |
1978 | int journal_inum = le32_to_cpu(es->s_journal_inum); | 1982 | unsigned int journal_inum = le32_to_cpu(es->s_journal_inum); |
1979 | dev_t journal_dev; | 1983 | dev_t journal_dev; |
1980 | int err = 0; | 1984 | int err = 0; |
1981 | int really_read_only; | 1985 | int really_read_only; |
@@ -2061,7 +2065,7 @@ static int ext3_load_journal(struct super_block *sb, | |||
2061 | 2065 | ||
2062 | static int ext3_create_journal(struct super_block * sb, | 2066 | static int ext3_create_journal(struct super_block * sb, |
2063 | struct ext3_super_block * es, | 2067 | struct ext3_super_block * es, |
2064 | int journal_inum) | 2068 | unsigned int journal_inum) |
2065 | { | 2069 | { |
2066 | journal_t *journal; | 2070 | journal_t *journal; |
2067 | 2071 | ||
@@ -2074,7 +2078,7 @@ static int ext3_create_journal(struct super_block * sb, | |||
2074 | if (!(journal = ext3_get_journal(sb, journal_inum))) | 2078 | if (!(journal = ext3_get_journal(sb, journal_inum))) |
2075 | return -EINVAL; | 2079 | return -EINVAL; |
2076 | 2080 | ||
2077 | printk(KERN_INFO "EXT3-fs: creating new journal on inode %d\n", | 2081 | printk(KERN_INFO "EXT3-fs: creating new journal on inode %u\n", |
2078 | journal_inum); | 2082 | journal_inum); |
2079 | 2083 | ||
2080 | if (journal_create(journal)) { | 2084 | if (journal_create(journal)) { |
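The journal hunks change journal_inum from a long type to unsigned int and print it with %u. The value originates in the 32-bit on-disk s_journal_inum field read through le32_to_cpu(), so a 32-bit unsigned type and the matching format specifier line up with the data's source. A user-space sketch of that decode-and-print step; the byte-assembly helper is only a stand-in for le32_to_cpu(), and the sample bytes spell inode 8, the usual ext3 journal inode:

#include <stdint.h>
#include <stdio.h>

/* Assemble a 32-bit value from little-endian bytes, whatever the host order. */
static uint32_t le32_to_host(const unsigned char b[4])
{
        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
        unsigned char raw[4] = { 0x08, 0x00, 0x00, 0x00 };
        unsigned int journal_inum = le32_to_host(raw);

        /* %u matches unsigned int; %d or %lu would mismatch the type. */
        printf("creating new journal on inode %u\n", journal_inum);
        return 0;
}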
@@ -2342,10 +2346,8 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data) | |||
2342 | */ | 2346 | */ |
2343 | ext3_clear_journal_err(sb, es); | 2347 | ext3_clear_journal_err(sb, es); |
2344 | sbi->s_mount_state = le16_to_cpu(es->s_state); | 2348 | sbi->s_mount_state = le16_to_cpu(es->s_state); |
2345 | if ((ret = ext3_group_extend(sb, es, n_blocks_count))) { | 2349 | if ((err = ext3_group_extend(sb, es, n_blocks_count))) |
2346 | err = ret; | ||
2347 | goto restore_opts; | 2350 | goto restore_opts; |
2348 | } | ||
2349 | if (!ext3_setup_super (sb, es, 0)) | 2351 | if (!ext3_setup_super (sb, es, 0)) |
2350 | sb->s_flags &= ~MS_RDONLY; | 2352 | sb->s_flags &= ~MS_RDONLY; |
2351 | } | 2353 | } |
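The ext3_remount hunk drops the intermediate ret variable and assigns the result of ext3_group_extend() straight into err before jumping to restore_opts. A minimal sketch of that single-error-variable goto pattern, with placeholder function names:

#include <stdio.h>

static int do_extend_step(void)
{
        return -1;      /* stub that always "fails" */
}

static int remount_sketch(void)
{
        int err;

        /* One error variable, assigned directly from the callee. */
        if ((err = do_extend_step()))
                goto restore_opts;
        return 0;

restore_opts:
        /* roll back any option changes here, then report the failure */
        return err;
}

int main(void)
{
        printf("%d\n", remount_sketch());
        return 0;
}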
@@ -2734,7 +2736,7 @@ static int __init init_ext3_fs(void) | |||
2734 | out: | 2736 | out: |
2735 | destroy_inodecache(); | 2737 | destroy_inodecache(); |
2736 | out1: | 2738 | out1: |
2737 | exit_ext3_xattr(); | 2739 | exit_ext3_xattr(); |
2738 | return err; | 2740 | return err; |
2739 | } | 2741 | } |
2740 | 2742 | ||
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index a44a0562203a..f86f2482f01d 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -75,7 +75,7 @@ | |||
75 | 75 | ||
76 | #ifdef EXT3_XATTR_DEBUG | 76 | #ifdef EXT3_XATTR_DEBUG |
77 | # define ea_idebug(inode, f...) do { \ | 77 | # define ea_idebug(inode, f...) do { \ |
78 | printk(KERN_DEBUG "inode %s:%ld: ", \ | 78 | printk(KERN_DEBUG "inode %s:%lu: ", \ |
79 | inode->i_sb->s_id, inode->i_ino); \ | 79 | inode->i_sb->s_id, inode->i_ino); \ |
80 | printk(f); \ | 80 | printk(f); \ |
81 | printk("\n"); \ | 81 | printk("\n"); \ |
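The ea_idebug hunk only touches the inode format specifier, but the macro it edits is a compact printf-style debug wrapper: a fixed prefix identifying the object, then the caller's format and arguments, then a newline. A user-space sketch in the same shape; the names are illustrative, and the ## variadic pasting is the gcc extension the kernel already relies on:

#include <stdio.h>

#define obj_debug(id, fmt, ...) do {                            \
                printf("object %lu: ", (unsigned long)(id));    \
                printf(fmt, ##__VA_ARGS__);                     \
                printf("\n");                                   \
} while (0)

int main(void)
{
        obj_debug(42UL, "listing %d attributes", 3);
        return 0;
}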
@@ -233,7 +233,7 @@ ext3_xattr_block_get(struct inode *inode, int name_index, const char *name, | |||
233 | atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); | 233 | atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); |
234 | if (ext3_xattr_check_block(bh)) { | 234 | if (ext3_xattr_check_block(bh)) { |
235 | bad_block: ext3_error(inode->i_sb, __FUNCTION__, | 235 | bad_block: ext3_error(inode->i_sb, __FUNCTION__, |
236 | "inode %ld: bad block "E3FSBLK, inode->i_ino, | 236 | "inode %lu: bad block "E3FSBLK, inode->i_ino, |
237 | EXT3_I(inode)->i_file_acl); | 237 | EXT3_I(inode)->i_file_acl); |
238 | error = -EIO; | 238 | error = -EIO; |
239 | goto cleanup; | 239 | goto cleanup; |
@@ -375,7 +375,7 @@ ext3_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size) | |||
375 | atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); | 375 | atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); |
376 | if (ext3_xattr_check_block(bh)) { | 376 | if (ext3_xattr_check_block(bh)) { |
377 | ext3_error(inode->i_sb, __FUNCTION__, | 377 | ext3_error(inode->i_sb, __FUNCTION__, |
378 | "inode %ld: bad block "E3FSBLK, inode->i_ino, | 378 | "inode %lu: bad block "E3FSBLK, inode->i_ino, |
379 | EXT3_I(inode)->i_file_acl); | 379 | EXT3_I(inode)->i_file_acl); |
380 | error = -EIO; | 380 | error = -EIO; |
381 | goto cleanup; | 381 | goto cleanup; |
@@ -647,7 +647,7 @@ ext3_xattr_block_find(struct inode *inode, struct ext3_xattr_info *i, | |||
647 | le32_to_cpu(BHDR(bs->bh)->h_refcount)); | 647 | le32_to_cpu(BHDR(bs->bh)->h_refcount)); |
648 | if (ext3_xattr_check_block(bs->bh)) { | 648 | if (ext3_xattr_check_block(bs->bh)) { |
649 | ext3_error(sb, __FUNCTION__, | 649 | ext3_error(sb, __FUNCTION__, |
650 | "inode %ld: bad block "E3FSBLK, inode->i_ino, | 650 | "inode %lu: bad block "E3FSBLK, inode->i_ino, |
651 | EXT3_I(inode)->i_file_acl); | 651 | EXT3_I(inode)->i_file_acl); |
652 | error = -EIO; | 652 | error = -EIO; |
653 | goto cleanup; | 653 | goto cleanup; |
@@ -848,7 +848,7 @@ cleanup_dquot: | |||
848 | 848 | ||
849 | bad_block: | 849 | bad_block: |
850 | ext3_error(inode->i_sb, __FUNCTION__, | 850 | ext3_error(inode->i_sb, __FUNCTION__, |
851 | "inode %ld: bad block "E3FSBLK, inode->i_ino, | 851 | "inode %lu: bad block "E3FSBLK, inode->i_ino, |
852 | EXT3_I(inode)->i_file_acl); | 852 | EXT3_I(inode)->i_file_acl); |
853 | goto cleanup; | 853 | goto cleanup; |
854 | 854 | ||
@@ -1077,14 +1077,14 @@ ext3_xattr_delete_inode(handle_t *handle, struct inode *inode) | |||
1077 | bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl); | 1077 | bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl); |
1078 | if (!bh) { | 1078 | if (!bh) { |
1079 | ext3_error(inode->i_sb, __FUNCTION__, | 1079 | ext3_error(inode->i_sb, __FUNCTION__, |
1080 | "inode %ld: block "E3FSBLK" read error", inode->i_ino, | 1080 | "inode %lu: block "E3FSBLK" read error", inode->i_ino, |
1081 | EXT3_I(inode)->i_file_acl); | 1081 | EXT3_I(inode)->i_file_acl); |
1082 | goto cleanup; | 1082 | goto cleanup; |
1083 | } | 1083 | } |
1084 | if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) || | 1084 | if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) || |
1085 | BHDR(bh)->h_blocks != cpu_to_le32(1)) { | 1085 | BHDR(bh)->h_blocks != cpu_to_le32(1)) { |
1086 | ext3_error(inode->i_sb, __FUNCTION__, | 1086 | ext3_error(inode->i_sb, __FUNCTION__, |
1087 | "inode %ld: bad block "E3FSBLK, inode->i_ino, | 1087 | "inode %lu: bad block "E3FSBLK, inode->i_ino, |
1088 | EXT3_I(inode)->i_file_acl); | 1088 | EXT3_I(inode)->i_file_acl); |
1089 | goto cleanup; | 1089 | goto cleanup; |
1090 | } | 1090 | } |
@@ -1211,7 +1211,7 @@ again: | |||
1211 | bh = sb_bread(inode->i_sb, ce->e_block); | 1211 | bh = sb_bread(inode->i_sb, ce->e_block); |
1212 | if (!bh) { | 1212 | if (!bh) { |
1213 | ext3_error(inode->i_sb, __FUNCTION__, | 1213 | ext3_error(inode->i_sb, __FUNCTION__, |
1214 | "inode %ld: block %lu read error", | 1214 | "inode %lu: block %lu read error", |
1215 | inode->i_ino, (unsigned long) ce->e_block); | 1215 | inode->i_ino, (unsigned long) ce->e_block); |
1216 | } else if (le32_to_cpu(BHDR(bh)->h_refcount) >= | 1216 | } else if (le32_to_cpu(BHDR(bh)->h_refcount) >= |
1217 | EXT3_XATTR_REFCOUNT_MAX) { | 1217 | EXT3_XATTR_REFCOUNT_MAX) { |