Diffstat (limited to 'fs')
-rw-r--r--   fs/autofs4/waitq.c       |  22
-rw-r--r--   fs/btrfs/extent-tree.c   |  51
-rw-r--r--   fs/btrfs/volumes.c       |   1
-rw-r--r--   fs/buffer.c              |   2
-rw-r--r--   fs/inode.c               |  32
-rw-r--r--   fs/jbd/commit.c          |   6
-rw-r--r--   fs/nilfs2/cpfile.c       |   6
-rw-r--r--   fs/xfs/linux-2.6/kmem.h  |   2
-rw-r--r--   fs/xfs/xfs_dfrag.c       |   8
-rw-r--r--   fs/xfs/xfs_fsops.c       |   2
10 files changed, 100 insertions, 32 deletions
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index eeb246845909..2341375386f8 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -297,20 +297,14 @@ static int validate_request(struct autofs_wait_queue **wait,
          */
         if (notify == NFY_MOUNT) {
                 /*
-                 * If the dentry isn't hashed just go ahead and try the
-                 * mount again with a new wait (not much else we can do).
-                 */
-                if (!d_unhashed(dentry)) {
-                        /*
-                         * But if the dentry is hashed, that means that we
-                         * got here through the revalidate path.  Thus, we
-                         * need to check if the dentry has been mounted
-                         * while we waited on the wq_mutex.  If it has,
-                         * simply return success.
-                         */
-                        if (d_mountpoint(dentry))
-                                return 0;
-                }
+                 * If the dentry was successfully mounted while we slept
+                 * on the wait queue mutex we can return success. If it
+                 * isn't mounted (doesn't have submounts for the case of
+                 * a multi-mount with no mount at it's base) we can
+                 * continue on and create a new request.
+                 */
+                if (have_submounts(dentry))
+                        return 0;
         }
 
         return 1;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 3e2c7c738f23..35af93355063 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2622,7 +2622,18 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                                                      search_start);
                 if (block_group && block_group_bits(block_group, data)) {
                         down_read(&space_info->groups_sem);
-                        goto have_block_group;
+                        if (list_empty(&block_group->list) ||
+                            block_group->ro) {
+                                /*
+                                 * someone is removing this block group,
+                                 * we can't jump into the have_block_group
+                                 * target because our list pointers are not
+                                 * valid
+                                 */
+                                btrfs_put_block_group(block_group);
+                                up_read(&space_info->groups_sem);
+                        } else
+                                goto have_block_group;
                 } else if (block_group) {
                         btrfs_put_block_group(block_group);
                 }
@@ -2656,6 +2667,13 @@ have_block_group:
                          * people trying to start a new cluster
                          */
                         spin_lock(&last_ptr->refill_lock);
+                        if (last_ptr->block_group &&
+                            (last_ptr->block_group->ro ||
+                            !block_group_bits(last_ptr->block_group, data))) {
+                                offset = 0;
+                                goto refill_cluster;
+                        }
+
                         offset = btrfs_alloc_from_cluster(block_group, last_ptr,
                                                  num_bytes, search_start);
                         if (offset) {
@@ -2681,10 +2699,17 @@ have_block_group:
 
                                 last_ptr_loop = 1;
                                 search_start = block_group->key.objectid;
+                                /*
+                                 * we know this block group is properly
+                                 * in the list because
+                                 * btrfs_remove_block_group, drops the
+                                 * cluster before it removes the block
+                                 * group from the list
+                                 */
                                 goto have_block_group;
                         }
                         spin_unlock(&last_ptr->lock);
-
+refill_cluster:
                         /*
                          * this cluster didn't work out, free it and
                          * start over
@@ -5968,6 +5993,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 {
         struct btrfs_path *path;
         struct btrfs_block_group_cache *block_group;
+        struct btrfs_free_cluster *cluster;
         struct btrfs_key key;
         int ret;
 
@@ -5979,6 +6005,21 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
         memcpy(&key, &block_group->key, sizeof(key));
 
+        /* make sure this block group isn't part of an allocation cluster */
+        cluster = &root->fs_info->data_alloc_cluster;
+        spin_lock(&cluster->refill_lock);
+        btrfs_return_cluster_to_free_space(block_group, cluster);
+        spin_unlock(&cluster->refill_lock);
+
+        /*
+         * make sure this block group isn't part of a metadata
+         * allocation cluster
+         */
+        cluster = &root->fs_info->meta_alloc_cluster;
+        spin_lock(&cluster->refill_lock);
+        btrfs_return_cluster_to_free_space(block_group, cluster);
+        spin_unlock(&cluster->refill_lock);
+
         path = btrfs_alloc_path();
         BUG_ON(!path);
 
@@ -5988,7 +6029,11 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
         spin_unlock(&root->fs_info->block_group_cache_lock);
         btrfs_remove_free_space_cache(block_group);
         down_write(&block_group->space_info->groups_sem);
-        list_del(&block_group->list);
+        /*
+         * we must use list_del_init so people can check to see if they
+         * are still on the list after taking the semaphore
+         */
+        list_del_init(&block_group->list);
         up_write(&block_group->space_info->groups_sem);
 
         spin_lock(&block_group->space_info->lock);
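[Editor's note] The switch to list_del_init() above works because, with the kernel's circular list primitives, list_del_init() leaves the node pointing back at itself, so a later list_empty(&node) doubles as an "am I still linked?" test -- exactly the list_empty(&block_group->list) check that find_free_extent() now performs under groups_sem. The following standalone userspace sketch (hypothetical reimplementation, not kernel code) shows just that property:

    #include <stdio.h>
    #include <stdbool.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_add(struct list_head *n, struct list_head *head)
    {
            n->next = head->next;
            n->prev = head;
            head->next->prev = n;
            head->next = n;
    }

    /* list_del_init(): unlink the node and re-point it at itself */
    static void list_del_init(struct list_head *n)
    {
            n->prev->next = n->next;
            n->next->prev = n->prev;
            INIT_LIST_HEAD(n);
    }

    static bool list_empty(const struct list_head *h) { return h->next == h; }

    int main(void)
    {
            struct list_head groups, bg;

            INIT_LIST_HEAD(&groups);
            INIT_LIST_HEAD(&bg);
            list_add(&bg, &groups);

            printf("linked:  list_empty(&bg) = %d\n", list_empty(&bg)); /* 0 */
            list_del_init(&bg);            /* what the removal path now does */
            printf("removed: list_empty(&bg) = %d\n", list_empty(&bg)); /* 1 */
            return 0;
    }

A plain list_del() would have left the removed node with dangling pointers, and the allocator's check would have had nothing reliable to test.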
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 5f01dad4b696..a6d35b0054ca 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1440,6 +1440,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
         device->io_align = root->sectorsize;
         device->sector_size = root->sectorsize;
         device->total_bytes = i_size_read(bdev->bd_inode);
+        device->disk_total_bytes = device->total_bytes;
         device->dev_root = root->fs_info->dev_root;
         device->bdev = bdev;
         device->in_fs_metadata = 1;
diff --git a/fs/buffer.c b/fs/buffer.c
index aed297739eb0..49106127a4aa 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2736,6 +2736,8 @@ has_buffers:
                 pos += blocksize;
         }
 
+        map_bh.b_size = blocksize;
+        map_bh.b_state = 0;
         err = get_block(inode, iblock, &map_bh, 0);
         if (err)
                 goto unlock;
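[Editor's note] get_block-style callbacks treat b_size as an input (how much to map) and b_state as an output (mapped/new flags), so a buffer_head reused on the stack has to be re-initialised before each call or stale flags from an earlier lookup leak into the next one -- which appears to be what the two added lines guard against. A minimal userspace analogue of that in/out contract, with all names hypothetical:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical stand-in for struct buffer_head: size is an input
     * ("map this many bytes"), state carries output flags (bit 0 = mapped). */
    struct map_request {
            uint64_t block;     /* in:  logical block to look up     */
            size_t   size;      /* in:  how many bytes are wanted    */
            unsigned state;     /* out: result flags                 */
            uint64_t phys;      /* out: physical block, if mapped    */
    };

    /* Toy get_block(): pretends only even logical blocks are allocated. */
    static int toy_get_block(uint64_t block, struct map_request *req)
    {
            if (block % 2 == 0) {
                    req->phys   = 1000 + block;
                    req->state |= 1;            /* report "mapped" */
            }
            /* odd blocks are holes: the callback leaves state untouched */
            return 0;
    }

    int main(void)
    {
            struct map_request req;

            for (uint64_t blk = 0; blk < 4; blk++) {
                    /* Reset the reused request, as the patch does with
                     * map_bh.b_size / map_bh.b_state, so the "mapped" bit
                     * from block 0 is not mistaken for a mapping of block 1. */
                    memset(&req, 0, sizeof(req));
                    req.block = blk;
                    req.size  = 4096;
                    toy_get_block(blk, &req);
                    printf("block %llu: %s\n", (unsigned long long)blk,
                           (req.state & 1) ? "mapped" : "hole");
            }
            return 0;
    }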
diff --git a/fs/inode.c b/fs/inode.c
index 0571983755dc..bca0c618fdb3 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -219,6 +219,7 @@ static struct inode *alloc_inode(struct super_block *sb)
 void destroy_inode(struct inode *inode)
 {
         BUG_ON(inode_has_buffers(inode));
+        ima_inode_free(inode);
         security_inode_free(inode);
         if (inode->i_sb->s_op->destroy_inode)
                 inode->i_sb->s_op->destroy_inode(inode);
@@ -1053,13 +1054,22 @@ int insert_inode_locked(struct inode *inode)
         struct super_block *sb = inode->i_sb;
         ino_t ino = inode->i_ino;
         struct hlist_head *head = inode_hashtable + hash(sb, ino);
-        struct inode *old;
 
         inode->i_state |= I_LOCK|I_NEW;
         while (1) {
+                struct hlist_node *node;
+                struct inode *old = NULL;
                 spin_lock(&inode_lock);
-                old = find_inode_fast(sb, head, ino);
-                if (likely(!old)) {
+                hlist_for_each_entry(old, node, head, i_hash) {
+                        if (old->i_ino != ino)
+                                continue;
+                        if (old->i_sb != sb)
+                                continue;
+                        if (old->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
+                                continue;
+                        break;
+                }
+                if (likely(!node)) {
                         hlist_add_head(&inode->i_hash, head);
                         spin_unlock(&inode_lock);
                         return 0;
@@ -1081,14 +1091,24 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
 {
         struct super_block *sb = inode->i_sb;
         struct hlist_head *head = inode_hashtable + hash(sb, hashval);
-        struct inode *old;
 
         inode->i_state |= I_LOCK|I_NEW;
 
         while (1) {
+                struct hlist_node *node;
+                struct inode *old = NULL;
+
                 spin_lock(&inode_lock);
-                old = find_inode(sb, head, test, data);
-                if (likely(!old)) {
+                hlist_for_each_entry(old, node, head, i_hash) {
+                        if (old->i_sb != sb)
+                                continue;
+                        if (!test(old, data))
+                                continue;
+                        if (old->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
+                                continue;
+                        break;
+                }
+                if (likely(!node)) {
                         hlist_add_head(&inode->i_hash, head);
                         spin_unlock(&inode_lock);
                         return 0;
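[Editor's note] Two things carry the weight in the insert_inode_locked() hunks above. First, open-coding the hash walk lets the added I_FREEING|I_CLEAR|I_WILL_FREE checks skip inodes that are being torn down instead of treating them as collisions, which find_inode_fast()/find_inode() did not allow here. Second, the success test becomes "if (likely(!node))": with the 2.6-era hlist_for_each_entry(entry, node, head, member) signature, node is the iteration cursor, and only a walk that falls off the end without a break leaves it NULL. A small runnable sketch of that "cursor is NULL means no hit" idiom (ordinary singly linked list, made-up names):

    #include <stdio.h>
    #include <string.h>

    struct node {
            const char *name;
            struct node *next;
    };

    /* A 'break' leaves the cursor pointing at the hit; falling off the end
     * leaves it NULL -- the same test the patch does after the loop. */
    static struct node *find(struct node *head, const char *name)
    {
            struct node *cur;

            for (cur = head; cur; cur = cur->next) {
                    if (strcmp(cur->name, name) != 0)
                            continue;   /* skip non-matching entries */
                    break;              /* found: stop with cur != NULL */
            }
            return cur;                 /* NULL means "not in the list" */
    }

    int main(void)
    {
            struct node c = { "c", NULL }, b = { "b", &c }, a = { "a", &b };

            printf("find(\"b\") -> %s\n", find(&a, "b") ? "hit" : "miss");
            printf("find(\"x\") -> %s\n", find(&a, "x") ? "hit" : "miss");
            return 0;
    }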
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 06560c520f49..618e21c0b7a3 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -241,7 +241,7 @@ write_out_data:
                                 spin_lock(&journal->j_list_lock);
                         }
                         /* Someone already cleaned up the buffer? */
-                        if (!buffer_jbd(bh)
+                        if (!buffer_jbd(bh) || bh2jh(bh) != jh
                                 || jh->b_transaction != commit_transaction
                                 || jh->b_jlist != BJ_SyncData) {
                                 jbd_unlock_bh_state(bh);
@@ -478,7 +478,9 @@ void journal_commit_transaction(journal_t *journal)
                         spin_lock(&journal->j_list_lock);
                         continue;
                 }
-                if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
+                if (buffer_jbd(bh) && bh2jh(bh) == jh &&
+                    jh->b_transaction == commit_transaction &&
+                    jh->b_jlist == BJ_Locked) {
                         __journal_unfile_buffer(jh);
                         jbd_unlock_bh_state(bh);
                         journal_remove_journal_head(bh);
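[Editor's note] The added bh2jh(bh) == jh and b_transaction/b_jlist comparisons re-validate a journal head that was looked up before the code dropped and re-took the buffer-state locks: in that window the jh the commit code cached may have been unfiled or reused, so every field read earlier is treated as stale until confirmed again under the lock. A deterministic userspace miniature of that "drop lock, relock, re-check before use" pattern (all names hypothetical, the lock calls are placeholders):

    #include <stdio.h>

    struct jhead { int jlist; };
    struct buf   { struct jhead *private; };

    static void lock(void)   { /* stands in for jbd_lock_bh_state()   */ }
    static void unlock(void) { /* stands in for jbd_unlock_bh_state() */ }

    /* What another CPU might do in the window where the lock is dropped. */
    static void concurrent_detach(struct buf *bh)
    {
            bh->private = NULL;   /* journal head unfiled elsewhere */
    }

    int main(void)
    {
            struct jhead storage = { 0 };
            struct buf bh = { &storage };
            struct jhead *jh;

            lock();
            jh = bh.private;          /* snapshot taken under the lock */
            unlock();

            concurrent_detach(&bh);   /* races in while the lock is not held */

            lock();
            /* Re-validate before use, as the patch does with bh2jh(bh) == jh
             * plus the b_transaction / b_jlist re-checks. */
            if (bh.private == jh)
                    printf("journal head still attached, safe to use\n");
            else
                    printf("journal head went away, skip this buffer\n");
            unlock();
            return 0;
    }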
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index e90b60dfced9..300f1cdfa862 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -311,7 +311,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
                 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
                 if (ret < 0) {
                         if (ret != -ENOENT)
-                                goto out_sem;
+                                goto out_header;
                         /* skip hole */
                         ret = 0;
                         continue;
@@ -344,7 +344,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
                                         continue;
                                 printk(KERN_ERR "%s: cannot delete block\n",
                                        __func__);
-                                goto out_sem;
+                                goto out_header;
                         }
                 }
 
@@ -361,6 +361,8 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
                 nilfs_mdt_mark_dirty(cpfile);
                 kunmap_atomic(kaddr, KM_USER0);
         }
+
+ out_header:
         brelse(header_bh);
 
  out_sem:
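[Editor's note] The new out_header label gives the error paths inside the loop a landing point that still executes brelse(header_bh) before falling through to out_sem; jumping straight to out_sem, as before, apparently skipped that release and leaked the buffer-head reference. It is the standard kernel "goto ladder", where each label unwinds exactly what was acquired before the failure point. A runnable userspace sketch of the idiom (made-up names):

    #include <stdio.h>
    #include <stdlib.h>

    /* An error in the middle (step B) must still free resource A. */
    static int do_work(int fail_b)
    {
            char *res_a = NULL, *res_b = NULL;
            int ret = -1;

            res_a = malloc(16);              /* "header_bh" analogue */
            if (!res_a)
                    goto out;

            res_b = fail_b ? NULL : malloc(16);
            if (!res_b)
                    goto out_a;              /* must still release res_a */

            ret = 0;                         /* success path */
            free(res_b);
    out_a:
            free(res_a);                     /* the role the new out_header: plays */
    out:
            return ret;
    }

    int main(void)
    {
            printf("success run: %d\n", do_work(0));
            printf("failure run: %d (resource A still released)\n", do_work(1));
            return 0;
    }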
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index af6843c7ee4b..179cbd630f69 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -103,7 +103,7 @@ extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
 static inline int
 kmem_shake_allow(gfp_t gfp_mask)
 {
-        return (gfp_mask & __GFP_WAIT) != 0;
+        return ((gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS));
 }
 
 #endif /* __XFS_SUPPORT_KMEM_H__ */
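[Editor's note] kmem_shake_allow() gates whether a cache shaker may run for a given allocation. Requiring __GFP_FS in addition to __GFP_WAIT means reclaim triggered from a filesystem-restricted context (GFP_NOFS-style allocations, which by definition clear __GFP_FS) is refused, which is presumably intended to stop the shaker from re-entering the filesystem while locks are already held. The behavioural difference, sketched with illustrative flag values (the real __GFP_* constants differ):

    #include <stdio.h>

    /* Illustrative values only -- not the kernel's real bit assignments. */
    #define DEMO_GFP_WAIT 0x01u
    #define DEMO_GFP_IO   0x02u
    #define DEMO_GFP_FS   0x04u

    #define DEMO_GFP_KERNEL (DEMO_GFP_WAIT | DEMO_GFP_IO | DEMO_GFP_FS)
    #define DEMO_GFP_NOFS   (DEMO_GFP_WAIT | DEMO_GFP_IO)   /* no FS re-entry */

    static int shake_allow_old(unsigned mask)
    {
            return (mask & DEMO_GFP_WAIT) != 0;
    }

    static int shake_allow_new(unsigned mask)
    {
            return (mask & DEMO_GFP_WAIT) && (mask & DEMO_GFP_FS);
    }

    int main(void)
    {
            printf("GFP_KERNEL: old=%d new=%d\n",
                   shake_allow_old(DEMO_GFP_KERNEL),
                   shake_allow_new(DEMO_GFP_KERNEL));
            /* The change: a NOFS allocation used to be allowed to shake
             * (and re-enter the filesystem); now it is refused. */
            printf("GFP_NOFS:   old=%d new=%d\n",
                   shake_allow_old(DEMO_GFP_NOFS),
                   shake_allow_new(DEMO_GFP_NOFS));
            return 0;
    }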
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index e6d839bddbf0..7465f9ee125f 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -347,13 +347,15 @@ xfs_swap_extents(
 
         error = xfs_trans_commit(tp, XFS_TRANS_SWAPEXT);
 
-out_unlock:
-        xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
-        xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
 out:
         kmem_free(tempifp);
         return error;
 
+out_unlock:
+        xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+        xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+        goto out;
+
 out_trans_cancel:
         xfs_trans_cancel(tp, 0);
         goto out_unlock;
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 8379e3bca26c..cbd451bb4848 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -160,7 +160,7 @@ xfs_growfs_data_private(
         nagcount = new + (nb_mod != 0);
         if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
                 nagcount--;
-                nb = nagcount * mp->m_sb.sb_agblocks;
+                nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
                 if (nb < mp->m_sb.sb_dblocks)
                         return XFS_ERROR(EINVAL);
         }
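[Editor's note] The cast in the xfs_fsops.c hunk widens the multiplication itself, not just the assignment: both operands are 32-bit values, so without it the product is computed in 32-bit arithmetic and truncated before being stored in the 64-bit nb, which matters once the grown filesystem crosses 2^32 blocks. A runnable demonstration of the difference (the sample numbers are made up but growfs-plausible):

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    int main(void)
    {
            /* ~17k allocation groups of 256k blocks each puts the product
             * just past 2^32. */
            uint32_t nagcount = 17000;
            uint32_t agblocks = 262144;

            uint64_t truncated = nagcount * agblocks;            /* 32-bit multiply, widened too late */
            uint64_t correct   = (uint64_t)nagcount * agblocks;  /* widened first, as the patch does  */

            printf("32-bit multiply: %" PRIu64 "\n", truncated);  /* wraps to a small value */
            printf("64-bit multiply: %" PRIu64 "\n", correct);    /* 4456448000             */
            return 0;
    }

With the truncated value, the following "nb < sb_dblocks" sanity check would compare against a bogus, far-too-small block count.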