Diffstat (limited to 'fs')
-rw-r--r--   fs/binfmt_flat.c              17
-rw-r--r--   fs/btrfs/extent-tree.c        21
-rw-r--r--   fs/btrfs/free-space-cache.c   73
-rw-r--r--   fs/btrfs/inode.c               3
-rw-r--r--   fs/btrfs/relocation.c          9
-rw-r--r--   fs/btrfs/zlib.c                6
-rw-r--r--   fs/compat_ioctl.c              1
-rw-r--r--   fs/inode.c                    40
-rw-r--r--   fs/jffs2/file.c                2
-rw-r--r--   fs/namespace.c                 3
-rw-r--r--   fs/nfs/direct.c               20
-rw-r--r--   fs/nfs/read.c                  6
-rw-r--r--   fs/nfs/write.c                 6
-rw-r--r--   fs/proc/base.c                27
-rw-r--r--   fs/proc/task_mmu.c             1
-rw-r--r--   fs/proc/task_nommu.c           1
-rw-r--r--   fs/xfs/xfs_iget.c            142
-rw-r--r--   fs/xfs/xfs_inode.h            17
18 files changed, 233 insertions(+), 162 deletions(-)
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 697f6b5f1313..e92f229e3c6e 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -828,15 +828,22 @@ static int load_flat_shared_library(int id, struct lib_info *libs)
 	if (IS_ERR(bprm.file))
 		return res;
 
+	bprm.cred = prepare_exec_creds();
+	res = -ENOMEM;
+	if (!bprm.cred)
+		goto out;
+
 	res = prepare_binprm(&bprm);
 
 	if (res <= (unsigned long)-4096)
 		res = load_flat_file(&bprm, libs, id, NULL);
-	if (bprm.file) {
-		allow_write_access(bprm.file);
-		fput(bprm.file);
-		bprm.file = NULL;
-	}
+
+	abort_creds(bprm.cred);
+
+out:
+	allow_write_access(bprm.file);
+	fput(bprm.file);
+
 	return(res);
 }
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index dc84daee6bc4..72a2b9c28e9f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -265,10 +265,6 @@ static int caching_kthread(void *data)
 
 	atomic_inc(&block_group->space_info->caching_threads);
 	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
-again:
-	/* need to make sure the commit_root doesn't disappear */
-	down_read(&fs_info->extent_commit_sem);
-
 	/*
 	 * We don't want to deadlock with somebody trying to allocate a new
 	 * extent for the extent root while also trying to search the extent
@@ -282,6 +278,10 @@ again:
 	key.objectid = last;
 	key.offset = 0;
 	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
+again:
+	/* need to make sure the commit_root doesn't disappear */
+	down_read(&fs_info->extent_commit_sem);
+
 	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
 	if (ret < 0)
 		goto err;
@@ -304,6 +304,19 @@ again:
 
 		if (need_resched() ||
 		    btrfs_transaction_in_commit(fs_info)) {
+			leaf = path->nodes[0];
+
+			/* this shouldn't happen, but if the
+			 * leaf is empty just move on.
+			 */
+			if (btrfs_header_nritems(leaf) == 0)
+				break;
+			/*
+			 * we need to copy the key out so that
+			 * we are sure the next search advances
+			 * us forward in the btree.
+			 */
+			btrfs_item_key_to_cpu(leaf, &key, 0);
 			btrfs_release_path(fs_info->extent_root, path);
 			up_read(&fs_info->extent_commit_sem);
 			schedule_timeout(1);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index af99b78b288e..5edcee3a617f 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -414,11 +414,29 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro
 			  u64 *offset, u64 *bytes)
 {
 	u64 end;
+	u64 search_start, search_bytes;
+	int ret;
 
 again:
 	end = bitmap_info->offset +
 		(u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1;
 
+	/*
+	 * XXX - this can go away after a few releases.
+	 *
+	 * since the only user of btrfs_remove_free_space is the tree logging
+	 * stuff, and the only way to test that is under crash conditions, we
+	 * want to have this debug stuff here just in case somethings not
+	 * working.  Search the bitmap for the space we are trying to use to
+	 * make sure its actually there.  If its not there then we need to stop
+	 * because something has gone wrong.
+	 */
+	search_start = *offset;
+	search_bytes = *bytes;
+	ret = search_bitmap(block_group, bitmap_info, &search_start,
+			    &search_bytes);
+	BUG_ON(ret < 0 || search_start != *offset);
+
 	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
 		bitmap_clear_bits(block_group, bitmap_info, *offset,
 				  end - *offset + 1);
@@ -430,6 +448,7 @@ again:
 	}
 
 	if (*bytes) {
+		struct rb_node *next = rb_next(&bitmap_info->offset_index);
 		if (!bitmap_info->bytes) {
 			unlink_free_space(block_group, bitmap_info);
 			kfree(bitmap_info->bitmap);
@@ -438,16 +457,36 @@ again:
 			recalculate_thresholds(block_group);
 		}
 
-		bitmap_info = tree_search_offset(block_group,
-						 offset_to_bitmap(block_group,
-								  *offset),
-						 1, 0);
-		if (!bitmap_info)
+		/*
+		 * no entry after this bitmap, but we still have bytes to
+		 * remove, so something has gone wrong.
+		 */
+		if (!next)
 			return -EINVAL;
 
+		bitmap_info = rb_entry(next, struct btrfs_free_space,
+				       offset_index);
+
+		/*
+		 * if the next entry isn't a bitmap we need to return to let the
+		 * extent stuff do its work.
+		 */
 		if (!bitmap_info->bitmap)
 			return -EAGAIN;
 
+		/*
+		 * Ok the next item is a bitmap, but it may not actually hold
+		 * the information for the rest of this free space stuff, so
+		 * look for it, and if we don't find it return so we can try
+		 * everything over again.
+		 */
+		search_start = *offset;
+		search_bytes = *bytes;
+		ret = search_bitmap(block_group, bitmap_info, &search_start,
+				    &search_bytes);
+		if (ret < 0 || search_start != *offset)
+			return -EAGAIN;
+
 		goto again;
 	} else if (!bitmap_info->bytes) {
 		unlink_free_space(block_group, bitmap_info);
@@ -644,8 +683,17 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 again:
 	info = tree_search_offset(block_group, offset, 0, 0);
 	if (!info) {
-		WARN_ON(1);
-		goto out_lock;
+		/*
+		 * oops didn't find an extent that matched the space we wanted
+		 * to remove, look for a bitmap instead
+		 */
+		info = tree_search_offset(block_group,
+					  offset_to_bitmap(block_group, offset),
+					  1, 0);
+		if (!info) {
+			WARN_ON(1);
+			goto out_lock;
+		}
 	}
 
 	if (info->bytes < bytes && rb_next(&info->offset_index)) {
@@ -957,8 +1005,15 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
 	if (cluster->block_group != block_group)
 		goto out;
 
-	entry = tree_search_offset(block_group, search_start, 0, 0);
-
+	/*
+	 * search_start is the beginning of the bitmap, but at some point it may
+	 * be a good idea to point to the actual start of the free area in the
+	 * bitmap, so do the offset_to_bitmap trick anyway, and set bitmap_only
+	 * to 1 to make sure we get the bitmap entry
+	 */
+	entry = tree_search_offset(block_group,
+				   offset_to_bitmap(block_group, search_start),
+				   1, 0);
 	if (!entry || !entry->bitmap)
 		goto out;
 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 56fe83fa60c4..272b9b2bea86 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4785,8 +4785,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	 * and the replacement file is large.  Start IO on it now so
 	 * we don't add too much work to the end of the transaction
 	 */
-	if (new_inode && old_inode && S_ISREG(old_inode->i_mode) &&
-	    new_inode->i_size &&
+	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
 	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
 		filemap_flush(old_inode->i_mapping);
 
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index e71264d1c2c9..c04f7f212602 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2553,8 +2553,13 @@ int relocate_inode_pages(struct inode *inode, u64 start, u64 len)
 	last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
 
 	/* make sure the dirty trick played by the caller work */
-	ret = invalidate_inode_pages2_range(inode->i_mapping,
-					    first_index, last_index);
+	while (1) {
+		ret = invalidate_inode_pages2_range(inode->i_mapping,
+						    first_index, last_index);
+		if (ret != -EBUSY)
+			break;
+		schedule_timeout(HZ/10);
+	}
 	if (ret)
 		goto out_unlock;
 
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index ecfbce836d32..3e2b90eaa239 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -208,7 +208,7 @@ int btrfs_zlib_compress_pages(struct address_space *mapping,
 	*total_in = 0;
 
 	workspace = find_zlib_workspace();
-	if (!workspace)
+	if (IS_ERR(workspace))
 		return -1;
 
 	if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) {
@@ -366,7 +366,7 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
 	char *kaddr;
 
 	workspace = find_zlib_workspace();
-	if (!workspace)
+	if (IS_ERR(workspace))
 		return -ENOMEM;
 
 	data_in = kmap(pages_in[page_in_index]);
@@ -547,7 +547,7 @@ int btrfs_zlib_decompress(unsigned char *data_in,
 		return -ENOMEM;
 
 	workspace = find_zlib_workspace();
-	if (!workspace)
+	if (IS_ERR(workspace))
 		return -ENOMEM;
 
 	workspace->inf_strm.next_in = data_in;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index f28f070a60fc..f91fd51b32e3 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -1905,6 +1905,7 @@ COMPATIBLE_IOCTL(FIONCLEX)
 COMPATIBLE_IOCTL(FIOASYNC)
 COMPATIBLE_IOCTL(FIONBIO)
 COMPATIBLE_IOCTL(FIONREAD)  /* This is also TIOCINQ */
+COMPATIBLE_IOCTL(FS_IOC_FIEMAP)
 /* 0x00 */
 COMPATIBLE_IOCTL(FIBMAP)
 COMPATIBLE_IOCTL(FIGETBSZ)
diff --git a/fs/inode.c b/fs/inode.c
index 901bad1e5f12..ae7b67e48661 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -120,12 +120,11 @@ static void wake_up_inode(struct inode *inode)
  * These are initializations that need to be done on every inode
  * allocation as the fields are not initialised by slab allocation.
  */
-struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
+int inode_init_always(struct super_block *sb, struct inode *inode)
 {
 	static const struct address_space_operations empty_aops;
 	static struct inode_operations empty_iops;
 	static const struct file_operations empty_fops;
-
 	struct address_space *const mapping = &inode->i_data;
 
 	inode->i_sb = sb;
@@ -152,7 +151,7 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
 	inode->dirtied_when = 0;
 
 	if (security_inode_alloc(inode))
-		goto out_free_inode;
+		goto out;
 
 	/* allocate and initialize an i_integrity */
 	if (ima_inode_alloc(inode))
@@ -198,16 +197,12 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
 	inode->i_fsnotify_mask = 0;
 #endif
 
-	return inode;
+	return 0;
 
 out_free_security:
 	security_inode_free(inode);
-out_free_inode:
-	if (inode->i_sb->s_op->destroy_inode)
-		inode->i_sb->s_op->destroy_inode(inode);
-	else
-		kmem_cache_free(inode_cachep, (inode));
-	return NULL;
+out:
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(inode_init_always);
 
@@ -220,12 +215,21 @@ static struct inode *alloc_inode(struct super_block *sb)
 	else
 		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
 
-	if (inode)
-		return inode_init_always(sb, inode);
-	return NULL;
+	if (!inode)
+		return NULL;
+
+	if (unlikely(inode_init_always(sb, inode))) {
+		if (inode->i_sb->s_op->destroy_inode)
+			inode->i_sb->s_op->destroy_inode(inode);
+		else
+			kmem_cache_free(inode_cachep, inode);
+		return NULL;
+	}
+
+	return inode;
 }
 
-void destroy_inode(struct inode *inode)
+void __destroy_inode(struct inode *inode)
 {
 	BUG_ON(inode_has_buffers(inode));
 	ima_inode_free(inode);
@@ -237,13 +241,17 @@ void destroy_inode(struct inode *inode)
 	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
 		posix_acl_release(inode->i_default_acl);
 #endif
+}
+EXPORT_SYMBOL(__destroy_inode);
+
+void destroy_inode(struct inode *inode)
+{
+	__destroy_inode(inode);
 	if (inode->i_sb->s_op->destroy_inode)
 		inode->i_sb->s_op->destroy_inode(inode);
 	else
 		kmem_cache_free(inode_cachep, (inode));
 }
-EXPORT_SYMBOL(destroy_inode);
-
 
 /*
  * These are initializations that only need to be done
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 5edc2bf20581..23c947539864 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -99,7 +99,7 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
 	kunmap(pg);
 
 	D2(printk(KERN_DEBUG "readpage finished\n"));
-	return 0;
+	return ret;
 }
 
 int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg)
diff --git a/fs/namespace.c b/fs/namespace.c
index 277c28a63ead..7230787d18b0 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -316,7 +316,8 @@ EXPORT_SYMBOL_GPL(mnt_clone_write);
  */
 int mnt_want_write_file(struct file *file)
 {
-	if (!(file->f_mode & FMODE_WRITE))
+	struct inode *inode = file->f_dentry->d_inode;
+	if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
 		return mnt_want_write(file->f_path.mnt);
 	else
 		return mnt_clone_write(file->f_path.mnt);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 489fc01a3204..e4e089a8f294 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -255,7 +255,7 @@ static void nfs_direct_read_release(void *calldata)
 
 	if (put_dreq(dreq))
 		nfs_direct_complete(dreq);
-	nfs_readdata_release(calldata);
+	nfs_readdata_free(data);
 }
 
 static const struct rpc_call_ops nfs_read_direct_ops = {
@@ -314,14 +314,14 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
 					data->npages, 1, 0, data->pagevec, NULL);
 		up_read(&current->mm->mmap_sem);
 		if (result < 0) {
-			nfs_readdata_release(data);
+			nfs_readdata_free(data);
 			break;
 		}
 		if ((unsigned)result < data->npages) {
 			bytes = result * PAGE_SIZE;
 			if (bytes <= pgbase) {
 				nfs_direct_release_pages(data->pagevec, result);
-				nfs_readdata_release(data);
+				nfs_readdata_free(data);
 				break;
 			}
 			bytes -= pgbase;
@@ -334,7 +334,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
 		data->inode = inode;
 		data->cred = msg.rpc_cred;
 		data->args.fh = NFS_FH(inode);
-		data->args.context = get_nfs_open_context(ctx);
+		data->args.context = ctx;
 		data->args.offset = pos;
 		data->args.pgbase = pgbase;
 		data->args.pages = data->pagevec;
@@ -441,7 +441,7 @@ static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
 		struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
 		list_del(&data->pages);
 		nfs_direct_release_pages(data->pagevec, data->npages);
-		nfs_writedata_release(data);
+		nfs_writedata_free(data);
 	}
 }
 
@@ -534,7 +534,7 @@ static void nfs_direct_commit_release(void *calldata)
 
 	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
 	nfs_direct_write_complete(dreq, data->inode);
-	nfs_commitdata_release(calldata);
+	nfs_commit_free(data);
 }
 
 static const struct rpc_call_ops nfs_commit_direct_ops = {
@@ -570,7 +570,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 	data->args.fh = NFS_FH(data->inode);
 	data->args.offset = 0;
 	data->args.count = 0;
-	data->args.context = get_nfs_open_context(dreq->ctx);
+	data->args.context = dreq->ctx;
 	data->res.count = 0;
 	data->res.fattr = &data->fattr;
 	data->res.verf = &data->verf;
@@ -734,14 +734,14 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
 					data->npages, 0, 0, data->pagevec, NULL);
 		up_read(&current->mm->mmap_sem);
 		if (result < 0) {
-			nfs_writedata_release(data);
+			nfs_writedata_free(data);
 			break;
 		}
 		if ((unsigned)result < data->npages) {
 			bytes = result * PAGE_SIZE;
 			if (bytes <= pgbase) {
 				nfs_direct_release_pages(data->pagevec, result);
-				nfs_writedata_release(data);
+				nfs_writedata_free(data);
 				break;
 			}
 			bytes -= pgbase;
@@ -756,7 +756,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
 		data->inode = inode;
 		data->cred = msg.rpc_cred;
 		data->args.fh = NFS_FH(inode);
-		data->args.context = get_nfs_open_context(ctx);
+		data->args.context = ctx;
 		data->args.offset = pos;
 		data->args.pgbase = pgbase;
 		data->args.pages = data->pagevec;
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 73ea5e8d66ce..12c9e66d3f1d 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -60,17 +60,15 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
 	return p;
 }
 
-static void nfs_readdata_free(struct nfs_read_data *p)
+void nfs_readdata_free(struct nfs_read_data *p)
 {
 	if (p && (p->pagevec != &p->page_array[0]))
 		kfree(p->pagevec);
 	mempool_free(p, nfs_rdata_mempool);
 }
 
-void nfs_readdata_release(void *data)
+static void nfs_readdata_release(struct nfs_read_data *rdata)
 {
-	struct nfs_read_data *rdata = data;
-
 	put_nfs_open_context(rdata->args.context);
 	nfs_readdata_free(rdata);
 }
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 0a0a2ff767c3..a34fae21fe10 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -87,17 +87,15 @@ struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
 	return p;
 }
 
-static void nfs_writedata_free(struct nfs_write_data *p)
+void nfs_writedata_free(struct nfs_write_data *p)
 {
 	if (p && (p->pagevec != &p->page_array[0]))
 		kfree(p->pagevec);
 	mempool_free(p, nfs_wdata_mempool);
 }
 
-void nfs_writedata_release(void *data)
+static void nfs_writedata_release(struct nfs_write_data *wdata)
 {
-	struct nfs_write_data *wdata = data;
-
 	put_nfs_open_context(wdata->args.context);
 	nfs_writedata_free(wdata);
 }
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 3ce5ae9e3d2d..175db258942f 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -234,23 +234,20 @@ static int check_mem_permission(struct task_struct *task)
 
 struct mm_struct *mm_for_maps(struct task_struct *task)
 {
-	struct mm_struct *mm = get_task_mm(task);
-	if (!mm)
+	struct mm_struct *mm;
+
+	if (mutex_lock_killable(&task->cred_guard_mutex))
 		return NULL;
-	down_read(&mm->mmap_sem);
-	task_lock(task);
-	if (task->mm != mm)
-		goto out;
-	if (task->mm != current->mm &&
-	    __ptrace_may_access(task, PTRACE_MODE_READ) < 0)
-		goto out;
-	task_unlock(task);
+
+	mm = get_task_mm(task);
+	if (mm && mm != current->mm &&
+			!ptrace_may_access(task, PTRACE_MODE_READ)) {
+		mmput(mm);
+		mm = NULL;
+	}
+	mutex_unlock(&task->cred_guard_mutex);
+
 	return mm;
-out:
-	task_unlock(task);
-	up_read(&mm->mmap_sem);
-	mmput(mm);
-	return NULL;
 }
 
 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 6f61b7cc32e0..9bd8be1d235c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -119,6 +119,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 	mm = mm_for_maps(priv->task);
 	if (!mm)
 		return NULL;
+	down_read(&mm->mmap_sem);
 
 	tail_vma = get_gate_vma(priv->task);
 	priv->tail_vma = tail_vma;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 64a72e2e7650..8f5c05d3dbd3 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -189,6 +189,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 		priv->task = NULL;
 		return NULL;
 	}
+	down_read(&mm->mmap_sem);
 
 	/* start from the Nth VMA */
 	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 5fcec6f020a7..34ec86923f7e 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -64,6 +64,10 @@ xfs_inode_alloc(
 	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
 	if (!ip)
 		return NULL;
+	if (inode_init_always(mp->m_super, VFS_I(ip))) {
+		kmem_zone_free(xfs_inode_zone, ip);
+		return NULL;
+	}
 
 	ASSERT(atomic_read(&ip->i_iocount) == 0);
 	ASSERT(atomic_read(&ip->i_pincount) == 0);
@@ -105,17 +109,6 @@ xfs_inode_alloc(
 #ifdef XFS_DIR2_TRACE
 	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
 #endif
-	/*
-	 * Now initialise the VFS inode. We do this after the xfs_inode
-	 * initialisation as internal failures will result in ->destroy_inode
-	 * being called and that will pass down through the reclaim path and
-	 * free the XFS inode. This path requires the XFS inode to already be
-	 * initialised. Hence if this call fails, the xfs_inode has already
-	 * been freed and we should not reference it at all in the error
-	 * handling.
-	 */
-	if (!inode_init_always(mp->m_super, VFS_I(ip)))
-		return NULL;
 
 	/* prevent anyone from using this yet */
 	VFS_I(ip)->i_state = I_NEW|I_LOCK;
@@ -123,6 +116,71 @@ xfs_inode_alloc(
 	return ip;
 }
 
+STATIC void
+xfs_inode_free(
+	struct xfs_inode	*ip)
+{
+	switch (ip->i_d.di_mode & S_IFMT) {
+	case S_IFREG:
+	case S_IFDIR:
+	case S_IFLNK:
+		xfs_idestroy_fork(ip, XFS_DATA_FORK);
+		break;
+	}
+
+	if (ip->i_afp)
+		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
+
+#ifdef XFS_INODE_TRACE
+	ktrace_free(ip->i_trace);
+#endif
+#ifdef XFS_BMAP_TRACE
+	ktrace_free(ip->i_xtrace);
+#endif
+#ifdef XFS_BTREE_TRACE
+	ktrace_free(ip->i_btrace);
+#endif
+#ifdef XFS_RW_TRACE
+	ktrace_free(ip->i_rwtrace);
+#endif
+#ifdef XFS_ILOCK_TRACE
+	ktrace_free(ip->i_lock_trace);
+#endif
+#ifdef XFS_DIR2_TRACE
+	ktrace_free(ip->i_dir_trace);
+#endif
+
+	if (ip->i_itemp) {
+		/*
+		 * Only if we are shutting down the fs will we see an
+		 * inode still in the AIL. If it is there, we should remove
+		 * it to prevent a use-after-free from occurring.
+		 */
+		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
+		struct xfs_ail	*ailp = lip->li_ailp;
+
+		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
+		       XFS_FORCED_SHUTDOWN(ip->i_mount));
+		if (lip->li_flags & XFS_LI_IN_AIL) {
+			spin_lock(&ailp->xa_lock);
+			if (lip->li_flags & XFS_LI_IN_AIL)
+				xfs_trans_ail_delete(ailp, lip);
+			else
+				spin_unlock(&ailp->xa_lock);
+		}
+		xfs_inode_item_destroy(ip);
+		ip->i_itemp = NULL;
+	}
+
+	/* asserts to verify all state is correct here */
+	ASSERT(atomic_read(&ip->i_iocount) == 0);
+	ASSERT(atomic_read(&ip->i_pincount) == 0);
+	ASSERT(!spin_is_locked(&ip->i_flags_lock));
+	ASSERT(completion_done(&ip->i_flush));
+
+	kmem_zone_free(xfs_inode_zone, ip);
+}
+
 /*
  * Check the validity of the inode we just found it the cache
  */
@@ -167,7 +225,7 @@ xfs_iget_cache_hit(
 	 * errors cleanly, then tag it so it can be set up correctly
 	 * later.
 	 */
-	if (!inode_init_always(mp->m_super, VFS_I(ip))) {
+	if (inode_init_always(mp->m_super, VFS_I(ip))) {
 		error = ENOMEM;
 		goto out_error;
 	}
@@ -299,7 +357,8 @@ out_preload_end:
 	if (lock_flags)
 		xfs_iunlock(ip, lock_flags);
 out_destroy:
-	xfs_destroy_inode(ip);
+	__destroy_inode(VFS_I(ip));
+	xfs_inode_free(ip);
 	return error;
 }
 
@@ -504,62 +563,7 @@ xfs_ireclaim(
 	xfs_qm_dqdetach(ip);
 	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
 
-	switch (ip->i_d.di_mode & S_IFMT) {
-	case S_IFREG:
-	case S_IFDIR:
-	case S_IFLNK:
-		xfs_idestroy_fork(ip, XFS_DATA_FORK);
-		break;
-	}
-
-	if (ip->i_afp)
-		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
-
-#ifdef XFS_INODE_TRACE
-	ktrace_free(ip->i_trace);
-#endif
-#ifdef XFS_BMAP_TRACE
-	ktrace_free(ip->i_xtrace);
-#endif
-#ifdef XFS_BTREE_TRACE
-	ktrace_free(ip->i_btrace);
-#endif
-#ifdef XFS_RW_TRACE
-	ktrace_free(ip->i_rwtrace);
-#endif
-#ifdef XFS_ILOCK_TRACE
-	ktrace_free(ip->i_lock_trace);
-#endif
-#ifdef XFS_DIR2_TRACE
-	ktrace_free(ip->i_dir_trace);
-#endif
-	if (ip->i_itemp) {
-		/*
-		 * Only if we are shutting down the fs will we see an
-		 * inode still in the AIL. If it is there, we should remove
-		 * it to prevent a use-after-free from occurring.
-		 */
-		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
-		struct xfs_ail	*ailp = lip->li_ailp;
-
-		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
-		       XFS_FORCED_SHUTDOWN(ip->i_mount));
-		if (lip->li_flags & XFS_LI_IN_AIL) {
-			spin_lock(&ailp->xa_lock);
-			if (lip->li_flags & XFS_LI_IN_AIL)
-				xfs_trans_ail_delete(ailp, lip);
-			else
-				spin_unlock(&ailp->xa_lock);
-		}
-		xfs_inode_item_destroy(ip);
-		ip->i_itemp = NULL;
-	}
-	/* asserts to verify all state is correct here */
-	ASSERT(atomic_read(&ip->i_iocount) == 0);
-	ASSERT(atomic_read(&ip->i_pincount) == 0);
-	ASSERT(!spin_is_locked(&ip->i_flags_lock));
-	ASSERT(completion_done(&ip->i_flush));
-	kmem_zone_free(xfs_inode_zone, ip);
+	xfs_inode_free(ip);
 }
 
 /*
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 1804f866a71d..65f24a3cc992 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -310,23 +310,6 @@ static inline struct inode *VFS_I(struct xfs_inode *ip)
 }
 
 /*
- * Get rid of a partially initialized inode.
- *
- * We have to go through destroy_inode to make sure allocations
- * from init_inode_always like the security data are undone.
- *
- * We mark the inode bad so that it takes the short cut in
- * the reclaim path instead of going through the flush path
- * which doesn't make sense for an inode that has never seen the
- * light of day.
- */
-static inline void xfs_destroy_inode(struct xfs_inode *ip)
-{
-	make_bad_inode(VFS_I(ip));
-	return destroy_inode(VFS_I(ip));
-}
-
-/*
  * i_flags helper functions
  */
 static inline void
