Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/backref.c            109
-rw-r--r--  fs/btrfs/ctree.c               60
-rw-r--r--  fs/btrfs/ctree.h                9
-rw-r--r--  fs/btrfs/disk-io.c             35
-rw-r--r--  fs/btrfs/extent-tree.c         11
-rw-r--r--  fs/btrfs/extent_io.c           14
-rw-r--r--  fs/btrfs/file.c                13
-rw-r--r--  fs/btrfs/free-space-cache.c   145
-rw-r--r--  fs/btrfs/inode.c               61
-rw-r--r--  fs/btrfs/ioctl.h                2
-rw-r--r--  fs/btrfs/super.c                4
-rw-r--r--  fs/btrfs/tree-log.c             6
-rw-r--r--  fs/btrfs/volumes.c             95
-rw-r--r--  fs/btrfs/volumes.h              3
-rw-r--r--  fs/ceph/addr.c                 21
-rw-r--r--  fs/cifs/connect.c              41
-rw-r--r--  fs/ecryptfs/kthread.c           2
-rw-r--r--  fs/ecryptfs/miscdev.c          48
-rw-r--r--  fs/exec.c                       2
-rw-r--r--  fs/hfsplus/ioctl.c              9
-rw-r--r--  fs/hfsplus/wrapper.c            2
-rw-r--r--  fs/nfs/client.c                 1
-rw-r--r--  fs/nfs/direct.c                 1
-rw-r--r--  fs/nfs/idmap.c                 13
-rw-r--r--  fs/nfs/inode.c                  2
-rw-r--r--  fs/nfs/pnfs.c                  13
-rw-r--r--  fs/nilfs2/gcinode.c             2
-rw-r--r--  fs/nilfs2/segment.c             2
-rw-r--r--  fs/ocfs2/dlmglue.c             33
-rw-r--r--  fs/ocfs2/extent_map.c           2
-rw-r--r--  fs/ocfs2/file.c                 4
-rw-r--r--  fs/ocfs2/quota_global.c         2
-rw-r--r--  fs/open.c                       6
-rw-r--r--  fs/pstore/inode.c               2
-rw-r--r--  fs/pstore/platform.c           34
-rw-r--r--  fs/pstore/ram.c                 3
-rw-r--r--  fs/pstore/ram_core.c           27
-rw-r--r--  fs/splice.c                    35
-rw-r--r--  fs/ubifs/debug.c                8
-rw-r--r--  fs/ubifs/find.c                 4
-rw-r--r--  fs/udf/super.c                102
-rw-r--r--  fs/xfs/xfs_alloc.c              3
-rw-r--r--  fs/xfs/xfs_aops.c              11
-rw-r--r--  fs/xfs/xfs_buf.c               16
-rw-r--r--  fs/xfs/xfs_inode_item.c        17
-rw-r--r--  fs/xfs/xfs_log.c               77
-rw-r--r--  fs/xfs/xfs_log_cil.c           22
-rw-r--r--  fs/xfs/xfs_log_priv.h          46
-rw-r--r--  fs/xfs/xfs_log_recover.c       38
-rw-r--r--  fs/xfs/xfs_mount.h              4
-rw-r--r--  fs/xfs/xfs_sync.c              32
-rw-r--r--  fs/xfs/xfs_trace.h             18
52 files changed, 716 insertions(+), 556 deletions(-)
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 8f7d1237b7a0..a383c18e74e8 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -179,61 +179,74 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id,
 
 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
				struct ulist *parents, int level,
-				struct btrfs_key *key, u64 time_seq,
+				struct btrfs_key *key_for_search, u64 time_seq,
				u64 wanted_disk_byte,
				const u64 *extent_item_pos)
 {
-	int ret;
-	int slot = path->slots[level];
-	struct extent_buffer *eb = path->nodes[level];
+	int ret = 0;
+	int slot;
+	struct extent_buffer *eb;
+	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL;
	u64 disk_byte;
-	u64 wanted_objectid = key->objectid;
 
-add_parent:
-	if (level == 0 && extent_item_pos) {
-		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
-		ret = check_extent_in_eb(key, eb, fi, *extent_item_pos, &eie);
+	if (level != 0) {
+		eb = path->nodes[level];
+		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
-	}
-	ret = ulist_add(parents, eb->start, (unsigned long)eie, GFP_NOFS);
-	if (ret < 0)
-		return ret;
-
-	if (level != 0)
		return 0;
+	}
 
	/*
-	 * if the current leaf is full with EXTENT_DATA items, we must
-	 * check the next one if that holds a reference as well.
-	 * ref->count cannot be used to skip this check.
-	 * repeat this until we don't find any additional EXTENT_DATA items.
+	 * We normally enter this function with the path already pointing to
+	 * the first item to check. But sometimes, we may enter it with
+	 * slot==nritems. In that case, go to the next leaf before we continue.
	 */
-	while (1) {
-		eie = NULL;
		ret = btrfs_next_old_leaf(root, path, time_seq);
-		if (ret < 0)
-			return ret;
-		if (ret)
-			return 0;
 
+	while (!ret) {
		eb = path->nodes[0];
-		for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) {
-			btrfs_item_key_to_cpu(eb, key, slot);
-			if (key->objectid != wanted_objectid ||
-			    key->type != BTRFS_EXTENT_DATA_KEY)
-				return 0;
-			fi = btrfs_item_ptr(eb, slot,
-					    struct btrfs_file_extent_item);
-			disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
-			if (disk_byte == wanted_disk_byte)
-				goto add_parent;
+		slot = path->slots[0];
+
+		btrfs_item_key_to_cpu(eb, &key, slot);
+
+		if (key.objectid != key_for_search->objectid ||
+		    key.type != BTRFS_EXTENT_DATA_KEY)
+			break;
+
+		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+
+		if (disk_byte == wanted_disk_byte) {
+			eie = NULL;
+			if (extent_item_pos) {
+				ret = check_extent_in_eb(&key, eb, fi,
+							*extent_item_pos,
+							&eie);
+				if (ret < 0)
+					break;
+			}
+			if (!ret) {
+				ret = ulist_add(parents, eb->start,
+						(unsigned long)eie, GFP_NOFS);
+				if (ret < 0)
+					break;
+				if (!extent_item_pos) {
+					ret = btrfs_next_old_leaf(root, path,
							time_seq);
+					continue;
+				}
+			}
		}
+		ret = btrfs_next_old_item(root, path, time_seq);
	}
 
-	return 0;
+	if (ret > 0)
+		ret = 0;
+	return ret;
 }
 
 /*
@@ -250,7 +263,6 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_key root_key;
-	struct btrfs_key key = {0};
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
@@ -289,17 +301,19 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
		goto out;
 
	eb = path->nodes[level];
-	if (!eb) {
-		WARN_ON(1);
-		ret = 1;
-		goto out;
+	while (!eb) {
+		if (!level) {
+			WARN_ON(1);
+			ret = 1;
+			goto out;
+		}
+		level--;
+		eb = path->nodes[level];
	}
 
-	if (level == 0)
-		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
-
-	ret = add_all_parents(root, path, parents, level, &key, time_seq,
-				ref->wanted_disk_byte, extent_item_pos);
+	ret = add_all_parents(root, path, parents, level, &ref->key_for_search,
+				time_seq, ref->wanted_disk_byte,
+				extent_item_pos);
 out:
	btrfs_free_path(path);
	return ret;
@@ -825,6 +839,7 @@ again:
		}
		ret = __add_delayed_refs(head, delayed_ref_seq,
					 &prefs_delayed);
+		mutex_unlock(&head->mutex);
		if (ret) {
			spin_unlock(&delayed_refs->lock);
			goto out;
@@ -918,8 +933,6 @@ again:
	}
 
 out:
-	if (head)
-		mutex_unlock(&head->mutex);
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 15cbc2bf4ff0..8206b3900587 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1024,11 +1024,18 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
		if (!looped && !tm)
			return 0;
		/*
-		 * we must have key remove operations in the log before the
-		 * replace operation.
+		 * if there are no tree operation for the oldest root, we simply
+		 * return it. this should only happen if that (old) root is at
+		 * level 0.
		 */
-		BUG_ON(!tm);
+		if (!tm)
+			break;
 
+		/*
+		 * if there's an operation that's not a root replacement, we
+		 * found the oldest version of our root. normally, we'll find a
+		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
+		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;
 
@@ -1087,11 +1094,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
					      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
-			if (tm->slot != n - 1) {
-				o_dst = btrfs_node_key_ptr_offset(tm->slot);
-				o_src = btrfs_node_key_ptr_offset(tm->slot + 1);
-				memmove_extent_buffer(eb, o_dst, o_src, p_size);
-			}
+			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
@@ -1192,16 +1195,8 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
	}
 
	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
-	/*
-	 * there was an item in the log when __tree_mod_log_oldest_root
-	 * returned. this one must not go away, because the time_seq passed to
-	 * us must be blocking its removal.
-	 */
-	BUG_ON(!tm);
-
	if (old_root)
-		eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT,
-					       root->nodesize);
+		eb = alloc_dummy_extent_buffer(logical, root->nodesize);
	else
		eb = btrfs_clone_extent_buffer(root->node);
	btrfs_tree_read_unlock(root->node);
@@ -1216,7 +1211,10 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
-	__tree_mod_log_rewind(eb, time_seq, tm);
+	if (tm)
+		__tree_mod_log_rewind(eb, time_seq, tm);
+	else
+		WARN_ON(btrfs_header_level(eb) != 0);
	extent_buffer_get(eb);
 
	return eb;
@@ -2995,7 +2993,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 static void insert_ptr(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       struct btrfs_disk_key *key, u64 bytenr,
-		       int slot, int level, int tree_mod_log)
+		       int slot, int level)
 {
	struct extent_buffer *lower;
	int nritems;
@@ -3008,7 +3006,7 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
	BUG_ON(slot > nritems);
	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
	if (slot != nritems) {
-		if (tree_mod_log && level)
+		if (level)
			tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
					     slot, nritems - slot);
		memmove_extent_buffer(lower,
@@ -3016,7 +3014,7 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
-	if (tree_mod_log && level) {
+	if (level) {
		ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
					      MOD_LOG_KEY_ADD);
		BUG_ON(ret < 0);
@@ -3104,7 +3102,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
	btrfs_mark_buffer_dirty(split);
 
	insert_ptr(trans, root, path, &disk_key, split->start,
-		   path->slots[level + 1] + 1, level + 1, 1);
+		   path->slots[level + 1] + 1, level + 1);
 
	if (path->slots[level] >= mid) {
		path->slots[level] -= mid;
@@ -3641,7 +3639,7 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
	btrfs_set_header_nritems(l, mid);
	btrfs_item_key(right, &disk_key, 0);
	insert_ptr(trans, root, path, &disk_key, right->start,
-		   path->slots[1] + 1, 1, 0);
+		   path->slots[1] + 1, 1);
 
	btrfs_mark_buffer_dirty(right);
	btrfs_mark_buffer_dirty(l);
@@ -3848,7 +3846,7 @@ again:
		if (mid <= slot) {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, root, path, &disk_key, right->start,
-				   path->slots[1] + 1, 1, 0);
+				   path->slots[1] + 1, 1);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
@@ -3857,7 +3855,7 @@ again:
		} else {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, root, path, &disk_key, right->start,
-				   path->slots[1], 1, 0);
+				   path->slots[1], 1);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
@@ -5121,6 +5119,18 @@ again:
 
		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
+			if (!ret && time_seq) {
+				/*
+				 * If we don't get the lock, we may be racing
+				 * with push_leaf_left, holding that lock while
+				 * itself waiting for the leaf we've currently
+				 * locked. To solve this situation, we give up
+				 * on our lock and cycle.
+				 */
+				btrfs_release_path(path);
+				cond_resched();
+				goto again;
+			}
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8b73b2d4deb7..fa5c45b39075 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2755,13 +2755,18 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq);
-static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
+static inline int btrfs_next_old_item(struct btrfs_root *root,
+				      struct btrfs_path *p, u64 time_seq)
 {
	++p->slots[0];
	if (p->slots[0] >= btrfs_header_nritems(p->nodes[0]))
-		return btrfs_next_leaf(root, p);
+		return btrfs_next_old_leaf(root, p, time_seq);
	return 0;
 }
+static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
+{
+	return btrfs_next_old_item(root, p, 0);
+}
 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
 int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e1890b1d3075..2936ca49b3b4 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2354,12 +2354,17 @@ retry_root_backup:
					  BTRFS_CSUM_TREE_OBJECTID, csum_root);
	if (ret)
		goto recovery_tree_root;
-
	csum_root->track_dirty = 1;
 
	fs_info->generation = generation;
	fs_info->last_trans_committed = generation;
 
+	ret = btrfs_recover_balance(fs_info);
+	if (ret) {
+		printk(KERN_WARNING "btrfs: failed to recover balance\n");
+		goto fail_block_groups;
+	}
+
	ret = btrfs_init_dev_stats(fs_info);
	if (ret) {
		printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
@@ -2485,20 +2490,23 @@ retry_root_backup:
		goto fail_trans_kthread;
	}
 
-	if (!(sb->s_flags & MS_RDONLY)) {
-		down_read(&fs_info->cleanup_work_sem);
-		err = btrfs_orphan_cleanup(fs_info->fs_root);
-		if (!err)
-			err = btrfs_orphan_cleanup(fs_info->tree_root);
+	if (sb->s_flags & MS_RDONLY)
+		return 0;
+
+	down_read(&fs_info->cleanup_work_sem);
+	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
+	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
		up_read(&fs_info->cleanup_work_sem);
-
-		if (!err)
-			err = btrfs_recover_balance(fs_info->tree_root);
-
-		if (err) {
-			close_ctree(tree_root);
-			return err;
-		}
+		close_ctree(tree_root);
+		return ret;
	}
+	up_read(&fs_info->cleanup_work_sem);
+
+	ret = btrfs_resume_balance_async(fs_info);
+	if (ret) {
+		printk(KERN_WARNING "btrfs: failed to resume balance\n");
+		close_ctree(tree_root);
+		return ret;
+	}
 
	return 0;
@@ -3426,6 +3434,7 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(ref);
 
+			spin_lock(&delayed_refs->lock);
			continue;
		}
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4b5a1e1bdefb..6e1d36702ff7 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2347,12 +2347,10 @@ next:
	return count;
 }
 
-
 static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
-			       unsigned long num_refs)
+			       unsigned long num_refs,
+			       struct list_head *first_seq)
 {
-	struct list_head *first_seq = delayed_refs->seq_head.next;
-
	spin_unlock(&delayed_refs->lock);
	pr_debug("waiting for more refs (num %ld, first %p)\n",
		 num_refs, first_seq);
@@ -2381,6 +2379,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
+	struct list_head *first_seq = NULL;
	int ret;
	u64 delayed_start;
	int run_all = count == (unsigned long)-1;
@@ -2436,8 +2435,10 @@ again:
			 */
			consider_waiting = 1;
			num_refs = delayed_refs->num_entries;
+			first_seq = root->fs_info->tree_mod_seq_list.next;
		} else {
-			wait_for_more_refs(delayed_refs, num_refs);
+			wait_for_more_refs(delayed_refs,
+					   num_refs, first_seq);
			/*
			 * after waiting, things have changed. we
			 * dropped the lock and someone else might have
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index aaa12c1eb348..01c21b6c6d43 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3324,6 +3324,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
			     writepage_t writepage, void *data,
			     void (*flush_fn)(void *))
 {
+	struct inode *inode = mapping->host;
	int ret = 0;
	int done = 0;
	int nr_to_write_done = 0;
@@ -3334,6 +3335,18 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
	int scanned = 0;
	int tag;
 
+	/*
+	 * We have to hold onto the inode so that ordered extents can do their
+	 * work when the IO finishes. The alternative to this is failing to add
+	 * an ordered extent if the igrab() fails there and that is a huge pain
+	 * to deal with, so instead just hold onto the inode throughout the
+	 * writepages operation. If it fails here we are freeing up the inode
+	 * anyway and we'd rather not waste our time writing out stuff that is
+	 * going to be truncated anyway.
+	 */
+	if (!igrab(inode))
+		return 0;
+
	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
@@ -3428,6 +3441,7 @@ retry:
		index = 0;
		goto retry;
	}
+	btrfs_add_delayed_iput(inode);
	return ret;
 }
 
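The pairing above — igrab() before the page loop, btrfs_add_delayed_iput() after it — pins the inode for the whole writepages pass and hands the final reference drop to btrfs's delayed-iput machinery instead of risking inode eviction inside the flusher thread. A condensed, hedged sketch of the pattern; do_writeback() is a stand-in name for the real page loop, not a kernel function.

/* Illustrative only: the inode-lifetime pattern used by
 * extent_write_cache_pages() after this change. */
static int writepages_pinned(struct address_space *mapping,
			     struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int ret;

	/* Inode already dying: its pages are about to be truncated,
	 * so there is nothing worth writing back. */
	if (!igrab(inode))
		return 0;

	ret = do_writeback(mapping, wbc);	/* hypothetical page loop */

	/* A plain iput() here could be the last reference and run
	 * eviction in writeback context; queue it for later instead. */
	btrfs_add_delayed_iput(inode);
	return ret;
}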
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 70dc8ca73e25..9aa01ec2138d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1334,7 +1334,6 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    loff_t *ppos, size_t count, size_t ocount)
 {
	struct file *file = iocb->ki_filp;
-	struct inode *inode = fdentry(file)->d_inode;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
@@ -1344,18 +1343,6 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);
 
-	/*
-	 * the generic O_DIRECT will update in-memory i_size after the
-	 * DIOs are done.  But our endio handlers that update the on
-	 * disk i_size never update past the in memory i_size.  So we
-	 * need one more update here to catch any additions to the
-	 * file
-	 */
-	if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
-		btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
-		mark_inode_dirty(inode);
-	}
-
	if (written < 0 || written == count)
		return written;
 
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 81296c57405a..6c4e2baa9290 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1543,29 +1543,26 @@ again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
 
	/*
-	 * XXX - this can go away after a few releases.
-	 *
-	 * since the only user of btrfs_remove_free_space is the tree logging
-	 * stuff, and the only way to test that is under crash conditions, we
-	 * want to have this debug stuff here just in case somethings not
-	 * working.  Search the bitmap for the space we are trying to use to
-	 * make sure its actually there.  If its not there then we need to stop
-	 * because something has gone wrong.
+	 * We need to search for bits in this bitmap.  We could only cover some
+	 * of the extent in this bitmap thanks to how we add space, so we need
+	 * to search for as much as it as we can and clear that amount, and then
+	 * go searching for the next bit.
	 */
	search_start = *offset;
-	search_bytes = *bytes;
+	search_bytes = ctl->unit;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
	BUG_ON(ret < 0 || search_start != *offset);
 
-	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
-		bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
-		*bytes -= end - *offset + 1;
-		*offset = end + 1;
-	} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
-		bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
-		*bytes = 0;
-	}
+	/* We may have found more bits than what we need */
+	search_bytes = min(search_bytes, *bytes);
+
+	/* Cannot clear past the end of the bitmap */
+	search_bytes = min(search_bytes, end - search_start + 1);
+
+	bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
+	*offset += search_bytes;
+	*bytes -= search_bytes;
 
	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
@@ -1596,7 +1593,7 @@ again:
	 * everything over again.
	 */
	search_start = *offset;
-	search_bytes = *bytes;
+	search_bytes = ctl->unit;
	ret = search_bitmap(ctl, bitmap_info, &search_start,
			    &search_bytes);
	if (ret < 0 || search_start != *offset)
@@ -1879,12 +1876,14 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 {
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
-	struct btrfs_free_space *next_info = NULL;
	int ret = 0;
 
	spin_lock(&ctl->tree_lock);
 
 again:
+	if (!bytes)
+		goto out_lock;
+
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
@@ -1905,88 +1904,48 @@ again:
		}
	}
 
-	if (info->bytes < bytes && rb_next(&info->offset_index)) {
-		u64 end;
-		next_info = rb_entry(rb_next(&info->offset_index),
-				     struct btrfs_free_space,
-				     offset_index);
-
-		if (next_info->bitmap)
-			end = next_info->offset +
-			      BITS_PER_BITMAP * ctl->unit - 1;
-		else
-			end = next_info->offset + next_info->bytes;
-
-		if (next_info->bytes < bytes ||
-		    next_info->offset > offset || offset > end) {
-			printk(KERN_CRIT "Found free space at %llu, size %llu,"
-			      " trying to use %llu\n",
-			      (unsigned long long)info->offset,
-			      (unsigned long long)info->bytes,
-			      (unsigned long long)bytes);
-			WARN_ON(1);
-			ret = -EINVAL;
-			goto out_lock;
-		}
-
-		info = next_info;
-	}
-
-	if (info->bytes == bytes) {
-		unlink_free_space(ctl, info);
-		if (info->bitmap) {
-			kfree(info->bitmap);
-			ctl->total_bitmaps--;
-		}
-		kmem_cache_free(btrfs_free_space_cachep, info);
-		ret = 0;
-		goto out_lock;
-	}
-
-	if (!info->bitmap && info->offset == offset) {
-		unlink_free_space(ctl, info);
-		info->offset += bytes;
-		info->bytes -= bytes;
-		ret = link_free_space(ctl, info);
-		WARN_ON(ret);
-		goto out_lock;
-	}
-
-	if (!info->bitmap && info->offset <= offset &&
-	    info->offset + info->bytes >= offset + bytes) {
-		u64 old_start = info->offset;
-		/*
-		 * we're freeing space in the middle of the info,
-		 * this can happen during tree log replay
-		 *
-		 * first unlink the old info and then
-		 * insert it again after the hole we're creating
-		 */
-		unlink_free_space(ctl, info);
-		if (offset + bytes < info->offset + info->bytes) {
-			u64 old_end = info->offset + info->bytes;
-
-			info->offset = offset + bytes;
-			info->bytes = old_end - info->offset;
-			ret = link_free_space(ctl, info);
-			WARN_ON(ret);
-			if (ret)
-				goto out_lock;
-		} else {
-			/* the hole we're creating ends at the end
-			 * of the info struct, just free the info
-			 */
-			kmem_cache_free(btrfs_free_space_cachep, info);
-		}
-		spin_unlock(&ctl->tree_lock);
-
-		/* step two, insert a new info struct to cover
-		 * anything before the hole
-		 */
-		ret = btrfs_add_free_space(block_group, old_start,
-					   offset - old_start);
-		WARN_ON(ret); /* -ENOMEM */
-		goto out;
+	if (!info->bitmap) {
+		unlink_free_space(ctl, info);
+		if (offset == info->offset) {
+			u64 to_free = min(bytes, info->bytes);
+
+			info->bytes -= to_free;
+			info->offset += to_free;
+			if (info->bytes) {
+				ret = link_free_space(ctl, info);
+				WARN_ON(ret);
+			} else {
+				kmem_cache_free(btrfs_free_space_cachep, info);
+			}
+
+			offset += to_free;
+			bytes -= to_free;
+			goto again;
+		} else {
+			u64 old_end = info->bytes + info->offset;
+
+			info->bytes = offset - info->offset;
+			ret = link_free_space(ctl, info);
+			WARN_ON(ret);
+			if (ret)
+				goto out_lock;
+
+			/* Not enough bytes in this entry to satisfy us */
+			if (old_end < offset + bytes) {
+				bytes -= old_end - offset;
+				offset = old_end;
+				goto again;
+			} else if (old_end == offset + bytes) {
+				/* all done */
+				goto out_lock;
+			}
+			spin_unlock(&ctl->tree_lock);
+
+			ret = btrfs_add_free_space(block_group, offset + bytes,
+						   old_end - (offset + bytes));
+			WARN_ON(ret);
+			goto out;
+		}
	}
 
	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a4f02501da40..a7d1921ac76b 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -987,7 +987,7 @@ static noinline void async_cow_start(struct btrfs_work *work)
				    async_cow->start, async_cow->end, async_cow,
				    &num_added);
	if (num_added == 0) {
-		iput(async_cow->inode);
+		btrfs_add_delayed_iput(async_cow->inode);
		async_cow->inode = NULL;
	}
 }
@@ -1023,7 +1023,7 @@ static noinline void async_cow_free(struct btrfs_work *work)
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	if (async_cow->inode)
-		iput(async_cow->inode);
+		btrfs_add_delayed_iput(async_cow->inode);
	kfree(async_cow);
 }
 
@@ -3754,7 +3754,7 @@ void btrfs_evict_inode(struct inode *inode)
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
 
	if (root->fs_info->log_root_recovering) {
-		BUG_ON(!test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+		BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
				 &BTRFS_I(inode)->runtime_flags));
		goto no_delete;
	}
@@ -5876,8 +5876,17 @@ map:
	bh_result->b_size = len;
	bh_result->b_bdev = em->bdev;
	set_buffer_mapped(bh_result);
-	if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
-		set_buffer_new(bh_result);
+	if (create) {
+		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+			set_buffer_new(bh_result);
+
+		/*
+		 * Need to update the i_size under the extent lock so buffered
+		 * readers will get the updated i_size when we unlock.
+		 */
+		if (start + len > i_size_read(inode))
+			i_size_write(inode, start + len);
+	}
 
	free_extent_map(em);
 
@@ -6360,12 +6369,48 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
		 */
		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     lockend - lockstart + 1);
-		if (!ordered)
+
+		/*
+		 * We need to make sure there are no buffered pages in this
+		 * range either, we could have raced between the invalidate in
+		 * generic_file_direct_write and locking the extent.  The
+		 * invalidate needs to happen so that reads after a write do not
+		 * get stale data.
+		 */
+		if (!ordered && (!writing ||
+		    !test_range_bit(&BTRFS_I(inode)->io_tree,
+				    lockstart, lockend, EXTENT_UPTODATE, 0,
+				    cached_state)))
			break;
+
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				     &cached_state, GFP_NOFS);
-		btrfs_start_ordered_extent(inode, ordered, 1);
-		btrfs_put_ordered_extent(ordered);
+
+		if (ordered) {
+			btrfs_start_ordered_extent(inode, ordered, 1);
+			btrfs_put_ordered_extent(ordered);
+		} else {
+			/* Screw you mmap */
+			ret = filemap_write_and_wait_range(file->f_mapping,
+							   lockstart,
+							   lockend);
+			if (ret)
+				goto out;
+
+			/*
+			 * If we found a page that couldn't be invalidated just
+			 * fall back to buffered.
+			 */
+			ret = invalidate_inode_pages2_range(file->f_mapping,
+					lockstart >> PAGE_CACHE_SHIFT,
+					lockend >> PAGE_CACHE_SHIFT);
+			if (ret) {
+				if (ret == -EBUSY)
+					ret = 0;
+				goto out;
+			}
+		}
+
		cond_resched();
	}
 
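The loop above locks the extent range and only proceeds once the range is quiescent: no ordered extent still in flight and, for writes, no stale uptodate pages left between generic_file_direct_write()'s invalidate and the moment the lock was taken. Reduced to its shape, it looks like the sketch below; every helper name here is illustrative shorthand for the calls in the hunk, not kernel API.

/* Hedged sketch of the DIO range lock/validate/retry loop. */
for (;;) {
	lock_range(inode, lockstart, lockend);

	if (!ordered_extent_pending(inode, lockstart, lockend) &&
	    (!writing || !stale_pages_in_range(inode, lockstart, lockend)))
		break;			/* range is clean: keep the lock */

	unlock_range(inode, lockstart, lockend);
	wait_or_flush_conflict(inode, lockstart, lockend);
	cond_resched();			/* then try the lock again */
}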
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index 497c530724cf..e440aa653c30 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -339,7 +339,7 @@ struct btrfs_ioctl_get_dev_stats {
 #define BTRFS_IOC_WAIT_SYNC  _IOW(BTRFS_IOCTL_MAGIC, 22, __u64)
 #define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \
				   struct btrfs_ioctl_vol_args_v2)
-#define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64)
+#define BTRFS_IOC_SUBVOL_GETFLAGS _IOR(BTRFS_IOCTL_MAGIC, 25, __u64)
 #define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64)
 #define BTRFS_IOC_SCRUB _IOWR(BTRFS_IOCTL_MAGIC, 27, \
			      struct btrfs_ioctl_scrub_args)
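The _IOW to _IOR switch above is not cosmetic: the direction bits are encoded into the ioctl number itself, and GETFLAGS copies a __u64 out to the caller, which is exactly what _IOR declares. A small user-space sketch under that assumption follows (the 0x94 magic matches BTRFS_IOCTL_MAGIC in this header; error handling is minimal).

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

#define BTRFS_IOCTL_MAGIC 0x94
#define BTRFS_IOC_SUBVOL_GETFLAGS _IOR(BTRFS_IOCTL_MAGIC, 25, __u64)

int main(int argc, char **argv)
{
	__u64 flags = 0;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);	/* path to a btrfs subvolume */
	if (fd < 0 || ioctl(fd, BTRFS_IOC_SUBVOL_GETFLAGS, &flags) < 0) {
		perror("BTRFS_IOC_SUBVOL_GETFLAGS");
		return 1;
	}
	printf("subvol flags: 0x%llx\n", (unsigned long long)flags);
	close(fd);
	return 0;
}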
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 0eb9a4da069e..e23991574fdf 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1187,6 +1187,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
		if (ret)
			goto restore;
 
+		ret = btrfs_resume_balance_async(fs_info);
+		if (ret)
+			goto restore;
+
		sb->s_flags &= ~MS_RDONLY;
	}
 
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 2017d0ff511c..8abeae4224f9 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -690,6 +690,8 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
	kfree(name);
 
	iput(inode);
+
+	btrfs_run_delayed_items(trans, root);
	return ret;
 }
 
@@ -895,6 +897,7 @@ again:
				ret = btrfs_unlink_inode(trans, root, dir,
							 inode, victim_name,
							 victim_name_len);
+				btrfs_run_delayed_items(trans, root);
			}
			kfree(victim_name);
			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
@@ -1475,6 +1478,9 @@ again:
	ret = btrfs_unlink_inode(trans, root, dir, inode,
				 name, name_len);
	BUG_ON(ret);
+
+	btrfs_run_delayed_items(trans, root);
+
	kfree(name);
	iput(inode);
 
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 8a3d2594b807..ecaad40e7ef4 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2845,31 +2845,48 @@ out:
 
 static int balance_kthread(void *data)
 {
-	struct btrfs_balance_control *bctl =
-			(struct btrfs_balance_control *)data;
-	struct btrfs_fs_info *fs_info = bctl->fs_info;
+	struct btrfs_fs_info *fs_info = data;
	int ret = 0;
 
	mutex_lock(&fs_info->volume_mutex);
	mutex_lock(&fs_info->balance_mutex);
 
-	set_balance_control(bctl);
-
-	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
-		printk(KERN_INFO "btrfs: force skipping balance\n");
-	} else {
+	if (fs_info->balance_ctl) {
		printk(KERN_INFO "btrfs: continuing balance\n");
-		ret = btrfs_balance(bctl, NULL);
+		ret = btrfs_balance(fs_info->balance_ctl, NULL);
	}
 
	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->volume_mutex);
+
	return ret;
 }
 
-int btrfs_recover_balance(struct btrfs_root *tree_root)
+int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
 {
	struct task_struct *tsk;
+
+	spin_lock(&fs_info->balance_lock);
+	if (!fs_info->balance_ctl) {
+		spin_unlock(&fs_info->balance_lock);
+		return 0;
+	}
+	spin_unlock(&fs_info->balance_lock);
+
+	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
+		printk(KERN_INFO "btrfs: force skipping balance\n");
+		return 0;
+	}
+
+	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
+	if (IS_ERR(tsk))
+		return PTR_ERR(tsk);
+
+	return 0;
+}
+
+int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
+{
	struct btrfs_balance_control *bctl;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
@@ -2882,29 +2899,30 @@ int btrfs_recover_balance(struct btrfs_root *tree_root)
	if (!path)
		return -ENOMEM;
 
-	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
-	if (!bctl) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;
 
-	ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
+	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
-		goto out_bctl;
+		goto out;
	if (ret > 0) { /* ret = -ENOENT; */
		ret = 0;
-		goto out_bctl;
+		goto out;
	}
 
+	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
+	if (!bctl) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
 
-	bctl->fs_info = tree_root->fs_info;
-	bctl->flags = btrfs_balance_flags(leaf, item) | BTRFS_BALANCE_RESUME;
+	bctl->fs_info = fs_info;
+	bctl->flags = btrfs_balance_flags(leaf, item);
+	bctl->flags |= BTRFS_BALANCE_RESUME;
 
	btrfs_balance_data(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
@@ -2913,14 +2931,13 @@ int btrfs_recover_balance(struct btrfs_root *tree_root)
	btrfs_balance_sys(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
 
-	tsk = kthread_run(balance_kthread, bctl, "btrfs-balance");
-	if (IS_ERR(tsk))
-		ret = PTR_ERR(tsk);
-	else
-		goto out;
+	mutex_lock(&fs_info->volume_mutex);
+	mutex_lock(&fs_info->balance_mutex);
 
-out_bctl:
-	kfree(bctl);
+	set_balance_control(bctl);
+
+	mutex_unlock(&fs_info->balance_mutex);
+	mutex_unlock(&fs_info->volume_mutex);
 out:
	btrfs_free_path(path);
	return ret;
@@ -4061,16 +4078,18 @@ static void btrfs_end_bio(struct bio *bio, int err)
 
			BUG_ON(stripe_index >= bbio->num_stripes);
			dev = bbio->stripes[stripe_index].dev;
-			if (bio->bi_rw & WRITE)
-				btrfs_dev_stat_inc(dev,
-					BTRFS_DEV_STAT_WRITE_ERRS);
-			else
-				btrfs_dev_stat_inc(dev,
-					BTRFS_DEV_STAT_READ_ERRS);
-			if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
-				btrfs_dev_stat_inc(dev,
-					BTRFS_DEV_STAT_FLUSH_ERRS);
-			btrfs_dev_stat_print_on_error(dev);
+			if (dev->bdev) {
+				if (bio->bi_rw & WRITE)
+					btrfs_dev_stat_inc(dev,
+						BTRFS_DEV_STAT_WRITE_ERRS);
+				else
+					btrfs_dev_stat_inc(dev,
+						BTRFS_DEV_STAT_READ_ERRS);
+				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
+					btrfs_dev_stat_inc(dev,
+						BTRFS_DEV_STAT_FLUSH_ERRS);
+				btrfs_dev_stat_print_on_error(dev);
+			}
		}
	}
 
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 74366f27a76b..95f6637614db 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -281,7 +281,8 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
 int btrfs_init_new_device(struct btrfs_root *root, char *path);
 int btrfs_balance(struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs);
-int btrfs_recover_balance(struct btrfs_root *tree_root);
+int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
+int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
 int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
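Together with the disk-io.c and super.c hunks earlier in this series, the new pair of prototypes splits balance handling into two phases: btrfs_recover_balance() runs synchronously during mount and only rebuilds fs_info->balance_ctl from the on-disk balance item, while btrfs_resume_balance_async() spawns the kthread once the filesystem is known to be writable. A condensed sketch of the implied call order; mount_balance_flow() is an illustrative wrapper, not kernel code.

static int mount_balance_flow(struct btrfs_fs_info *fs_info,
			      struct super_block *sb)
{
	int ret;

	/* early in open_ctree(): just reload the balance item */
	ret = btrfs_recover_balance(fs_info);
	if (ret)
		return ret;

	/* ... orphan cleanup and the rest of mount run here ... */

	if (sb->s_flags & MS_RDONLY)
		return 0;	/* nothing to resume on a read-only mount */

	/* late in open_ctree(), or from btrfs_remount() when going rw */
	return btrfs_resume_balance_async(fs_info);
}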
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 173b1d22e59b..8b67304e4b80 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -54,7 +54,12 @@
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
 
-
+static inline struct ceph_snap_context *page_snap_context(struct page *page)
+{
+	if (PagePrivate(page))
+		return (void *)page->private;
+	return NULL;
+}
 
 /*
  * Dirty a page.  Optimistically adjust accounting, on the assumption
@@ -142,10 +147,9 @@ static void ceph_invalidatepage(struct page *page, unsigned long offset)
 {
	struct inode *inode;
	struct ceph_inode_info *ci;
-	struct ceph_snap_context *snapc = (void *)page->private;
+	struct ceph_snap_context *snapc = page_snap_context(page);
 
	BUG_ON(!PageLocked(page));
-	BUG_ON(!page->private);
	BUG_ON(!PagePrivate(page));
	BUG_ON(!page->mapping);
 
@@ -182,7 +186,6 @@ static int ceph_releasepage(struct page *page, gfp_t g)
	struct inode *inode = page->mapping ? page->mapping->host : NULL;
	dout("%p releasepage %p idx %lu\n", inode, page, page->index);
	WARN_ON(PageDirty(page));
-	WARN_ON(page->private);
	WARN_ON(PagePrivate(page));
	return 0;
 }
@@ -443,7 +446,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
	osdc = &fsc->client->osdc;
 
	/* verify this is a writeable snap context */
-	snapc = (void *)page->private;
+	snapc = page_snap_context(page);
	if (snapc == NULL) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		goto out;
@@ -451,7 +454,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
	oldest = get_oldest_context(inode, &snap_size);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
-		     inode, page, (void *)page->private);
+		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON((current->flags & PF_MEMALLOC) == 0);
		ceph_put_snap_context(oldest);
@@ -591,7 +594,7 @@ static void writepages_finish(struct ceph_osd_request *req,
				clear_bdi_congested(&fsc->backing_dev_info,
						    BLK_RW_ASYNC);
 
-			ceph_put_snap_context((void *)page->private);
+			ceph_put_snap_context(page_snap_context(page));
			page->private = 0;
			ClearPagePrivate(page);
			dout("unlocking %d %p\n", i, page);
@@ -795,7 +798,7 @@ get_more_pages:
			}
 
			/* only if matching snap context */
-			pgsnapc = (void *)page->private;
+			pgsnapc = page_snap_context(page);
			if (pgsnapc->seq > snapc->seq) {
				dout("page snapc %p %lld > oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
@@ -984,7 +987,7 @@ retry_locked:
		BUG_ON(!ci->i_snap_realm);
		down_read(&mdsc->snap_rwsem);
		BUG_ON(!ci->i_snap_realm->cached_context);
-		snapc = (void *)page->private;
+		snapc = page_snap_context(page);
		if (snapc && snapc != ci->i_head_snapc) {
			/*
			 * this page is already dirty in another (older) snap
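page_snap_context() is safe because ceph only stores a snap context in page->private while PagePrivate is set, and the two are updated together. A hedged sketch of the attach/detach sides of that invariant, loosely following the sites in this file; the helper names are illustrative, not from the patch.

static void attach_page_snapc(struct page *page,
			      struct ceph_snap_context *snapc)
{
	/* page->private carries a snapc reference only while the
	 * PagePrivate flag is set; set the pointer first. */
	page->private = (unsigned long)snapc;
	SetPagePrivate(page);
}

static void detach_page_snapc(struct page *page)
{
	struct ceph_snap_context *snapc = page_snap_context(page);

	if (snapc)
		ceph_put_snap_context(snapc);
	page->private = 0;
	ClearPagePrivate(page);
}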
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 78db68a5cf44..0ae86ddf2213 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1653,24 +1653,26 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
			 * If yes, we have encountered a double deliminator
			 * reset the NULL character to the deliminator
			 */
-			if (tmp_end < end && tmp_end[1] == delim)
+			if (tmp_end < end && tmp_end[1] == delim) {
				tmp_end[0] = delim;
 
-			/* Keep iterating until we get to a single deliminator
-			 * OR the end
-			 */
-			while ((tmp_end = strchr(tmp_end, delim)) != NULL &&
-			       (tmp_end[1] == delim)) {
-				tmp_end = (char *) &tmp_end[2];
-			}
+				/* Keep iterating until we get to a single
+				 * deliminator OR the end
+				 */
+				while ((tmp_end = strchr(tmp_end, delim))
+					!= NULL && (tmp_end[1] == delim)) {
+					tmp_end = (char *) &tmp_end[2];
+				}
 
-			/* Reset var options to point to next element */
-			if (tmp_end) {
-				tmp_end[0] = '\0';
-				options = (char *) &tmp_end[1];
-			} else
-				/* Reached the end of the mount option string */
-				options = end;
+				/* Reset var options to point to next element */
+				if (tmp_end) {
+					tmp_end[0] = '\0';
+					options = (char *) &tmp_end[1];
+				} else
+					/* Reached the end of the mount option
+					 * string */
+					options = end;
+			}
 
			/* Now build new password string */
			temp_len = strlen(value);
@@ -3493,18 +3495,15 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
	 * MS-CIFS indicates that servers are only limited by the client's
	 * bufsize for reads, testing against win98se shows that it throws
	 * INVALID_PARAMETER errors if you try to request too large a read.
+	 * OS/2 just sends back short reads.
	 *
-	 * If the server advertises a MaxBufferSize of less than one page,
-	 * assume that it also can't satisfy reads larger than that either.
-	 *
-	 * FIXME: Is there a better heuristic for this?
+	 * If the server doesn't advertise CAP_LARGE_READ_X, then assume that
+	 * it can't handle a read request larger than its MaxBufferSize either.
	 */
	if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
		defsize = CIFS_DEFAULT_IOSIZE;
	else if (server->capabilities & CAP_LARGE_READ_X)
		defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
-	else if (server->maxBuf >= PAGE_CACHE_SIZE)
-		defsize = CIFSMaxBufSize;
	else
		defsize = server->maxBuf - sizeof(READ_RSP);
 
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
index 69f994a7d524..0dbe58a8b172 100644
--- a/fs/ecryptfs/kthread.c
+++ b/fs/ecryptfs/kthread.c
@@ -149,7 +149,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
	(*lower_file) = dentry_open(lower_dentry, lower_mnt, flags, cred);
	if (!IS_ERR(*lower_file))
		goto out;
-	if (flags & O_RDONLY) {
+	if ((flags & O_ACCMODE) == O_RDONLY) {
		rc = PTR_ERR((*lower_file));
		goto out;
	}
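The one-liner above fixes a test that could never be true: O_RDONLY is defined as 0, so flags & O_RDONLY is always 0. The access mode occupies a two-bit field inside the flags word, not independent bits, and has to be extracted with O_ACCMODE before comparing. A tiny stand-alone program demonstrating the point:

#include <fcntl.h>
#include <assert.h>

int main(void)
{
	int flags = O_RDONLY | O_NONBLOCK;	/* O_RDONLY == 0 */

	assert((flags & O_RDONLY) == 0);	 /* old test: never fires */
	assert((flags & O_ACCMODE) == O_RDONLY); /* fixed test: correct */
	return 0;
}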
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
index 3a06f4043df4..c0038f6566d4 100644
--- a/fs/ecryptfs/miscdev.c
+++ b/fs/ecryptfs/miscdev.c
@@ -49,7 +49,10 @@ ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
	mutex_lock(&ecryptfs_daemon_hash_mux);
	/* TODO: Just use file->private_data? */
	rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
-	BUG_ON(rc || !daemon);
+	if (rc || !daemon) {
+		mutex_unlock(&ecryptfs_daemon_hash_mux);
+		return -EINVAL;
+	}
	mutex_lock(&daemon->mux);
	mutex_unlock(&ecryptfs_daemon_hash_mux);
	if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
@@ -122,6 +125,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
		goto out_unlock_daemon;
	}
	daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN;
+	file->private_data = daemon;
	atomic_inc(&ecryptfs_num_miscdev_opens);
 out_unlock_daemon:
	mutex_unlock(&daemon->mux);
@@ -152,9 +156,9 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)
 
	mutex_lock(&ecryptfs_daemon_hash_mux);
	rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
-	BUG_ON(rc || !daemon);
+	if (rc || !daemon)
+		daemon = file->private_data;
	mutex_lock(&daemon->mux);
-	BUG_ON(daemon->pid != task_pid(current));
	BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN));
	daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN;
	atomic_dec(&ecryptfs_num_miscdev_opens);
@@ -191,31 +195,32 @@ int ecryptfs_send_miscdev(char *data, size_t data_size,
			  struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
			  u16 msg_flags, struct ecryptfs_daemon *daemon)
 {
-	int rc = 0;
+	struct ecryptfs_message *msg;
 
-	mutex_lock(&msg_ctx->mux);
-	msg_ctx->msg = kmalloc((sizeof(*msg_ctx->msg) + data_size),
-			       GFP_KERNEL);
-	if (!msg_ctx->msg) {
-		rc = -ENOMEM;
+	msg = kmalloc((sizeof(*msg) + data_size), GFP_KERNEL);
+	if (!msg) {
		printk(KERN_ERR "%s: Out of memory whilst attempting "
		       "to kmalloc(%zd, GFP_KERNEL)\n", __func__,
-		       (sizeof(*msg_ctx->msg) + data_size));
-		goto out_unlock;
+		       (sizeof(*msg) + data_size));
+		return -ENOMEM;
	}
+
+	mutex_lock(&msg_ctx->mux);
+	msg_ctx->msg = msg;
	msg_ctx->msg->index = msg_ctx->index;
	msg_ctx->msg->data_len = data_size;
	msg_ctx->type = msg_type;
	memcpy(msg_ctx->msg->data, data, data_size);
	msg_ctx->msg_size = (sizeof(*msg_ctx->msg) + data_size);
-	mutex_lock(&daemon->mux);
	list_add_tail(&msg_ctx->daemon_out_list, &daemon->msg_ctx_out_queue);
+	mutex_unlock(&msg_ctx->mux);
+
+	mutex_lock(&daemon->mux);
	daemon->num_queued_msg_ctx++;
	wake_up_interruptible(&daemon->wait);
	mutex_unlock(&daemon->mux);
-out_unlock:
-	mutex_unlock(&msg_ctx->mux);
-	return rc;
+
+	return 0;
 }
 
 /*
@@ -269,8 +274,16 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
269 mutex_lock(&ecryptfs_daemon_hash_mux); 274 mutex_lock(&ecryptfs_daemon_hash_mux);
270 /* TODO: Just use file->private_data? */ 275 /* TODO: Just use file->private_data? */
271 rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); 276 rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
272 BUG_ON(rc || !daemon); 277 if (rc || !daemon) {
278 mutex_unlock(&ecryptfs_daemon_hash_mux);
279 return -EINVAL;
280 }
273 mutex_lock(&daemon->mux); 281 mutex_lock(&daemon->mux);
282 if (task_pid(current) != daemon->pid) {
283 mutex_unlock(&daemon->mux);
284 mutex_unlock(&ecryptfs_daemon_hash_mux);
285 return -EPERM;
286 }
274 if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) { 287 if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
275 rc = 0; 288 rc = 0;
276 mutex_unlock(&ecryptfs_daemon_hash_mux); 289 mutex_unlock(&ecryptfs_daemon_hash_mux);
@@ -307,9 +320,6 @@ check_list:
307 * message from the queue; try again */ 320 * message from the queue; try again */
308 goto check_list; 321 goto check_list;
309 } 322 }
310 BUG_ON(euid != daemon->euid);
311 BUG_ON(current_user_ns() != daemon->user_ns);
312 BUG_ON(task_pid(current) != daemon->pid);
313 msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue, 323 msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue,
314 struct ecryptfs_msg_ctx, daemon_out_list); 324 struct ecryptfs_msg_ctx, daemon_out_list);
315 BUG_ON(!msg_ctx); 325 BUG_ON(!msg_ctx);
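
The ecryptfs_send_miscdev() rework above does two things: the message allocation is hoisted out of the locked region (so the error path never returns with a mutex held), and msg_ctx->mux is dropped before daemon->mux is taken, so the two mutexes are never held at once. A minimal userspace sketch of the same ordering, using pthreads and hypothetical ctx/peer types rather than the eCryptfs structures:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct ctx  { pthread_mutex_t mux; void *msg; size_t msg_size; };
struct peer { pthread_mutex_t mux; int num_queued; };

/* Allocate first, with no lock held; then take each mutex in turn,
 * never both at once. */
static int send_msg(struct ctx *c, struct peer *p,
		    const void *data, size_t len)
{
	void *msg = malloc(len);

	if (!msg)
		return -1;	/* error path holds no locks */
	memcpy(msg, data, len);

	pthread_mutex_lock(&c->mux);
	c->msg = msg;
	c->msg_size = len;
	pthread_mutex_unlock(&c->mux);

	pthread_mutex_lock(&p->mux);
	p->num_queued++;	/* the wakeup would go here */
	pthread_mutex_unlock(&p->mux);
	return 0;
}
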
diff --git a/fs/exec.c b/fs/exec.c
index a79786a8d2c8..da27b91ff1e8 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -819,10 +819,10 @@ static int exec_mmap(struct mm_struct *mm)
819 /* Notify parent that we're no longer interested in the old VM */ 819 /* Notify parent that we're no longer interested in the old VM */
820 tsk = current; 820 tsk = current;
821 old_mm = current->mm; 821 old_mm = current->mm;
822 sync_mm_rss(old_mm);
823 mm_release(tsk, old_mm); 822 mm_release(tsk, old_mm);
824 823
825 if (old_mm) { 824 if (old_mm) {
825 sync_mm_rss(old_mm);
826 /* 826 /*
827 * Make sure that if there is a core dump in progress 827 * Make sure that if there is a core dump in progress
828 * for the old mm, we get out and die instead of going 828 * for the old mm, we get out and die instead of going
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index c640ba57074b..09addc8615fa 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -31,6 +31,7 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
31 struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); 31 struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
32 struct hfsplus_vh *vh = sbi->s_vhdr; 32 struct hfsplus_vh *vh = sbi->s_vhdr;
33 struct hfsplus_vh *bvh = sbi->s_backup_vhdr; 33 struct hfsplus_vh *bvh = sbi->s_backup_vhdr;
34 u32 cnid = (unsigned long)dentry->d_fsdata;
34 35
35 if (!capable(CAP_SYS_ADMIN)) 36 if (!capable(CAP_SYS_ADMIN))
36 return -EPERM; 37 return -EPERM;
@@ -41,8 +42,12 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
41 vh->finder_info[0] = bvh->finder_info[0] = 42 vh->finder_info[0] = bvh->finder_info[0] =
42 cpu_to_be32(parent_ino(dentry)); 43 cpu_to_be32(parent_ino(dentry));
43 44
44 /* Bootloader */ 45 /*
45 vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(inode->i_ino); 46 * Bootloader. Just using the inode here breaks in the case of
47 * hard links - the firmware wants the ID of the hard link file,
48 * but the inode points at the indirect inode
49 */
50 vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(cnid);
46 51
47 /* Per spec, the OS X system folder - same as finder_info[0] here */ 52 /* Per spec, the OS X system folder - same as finder_info[0] here */
48 vh->finder_info[5] = bvh->finder_info[5] = 53 vh->finder_info[5] = bvh->finder_info[5] =
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 7daf4b852d1c..90effcccca9a 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -56,7 +56,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
56 DECLARE_COMPLETION_ONSTACK(wait); 56 DECLARE_COMPLETION_ONSTACK(wait);
57 struct bio *bio; 57 struct bio *bio;
58 int ret = 0; 58 int ret = 0;
59 unsigned int io_size; 59 u64 io_size;
60 loff_t start; 60 loff_t start;
61 int offset; 61 int offset;
62 62
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 17ba6b995659..f005b5bebdc7 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -207,7 +207,6 @@ error_0:
207static void nfs4_shutdown_session(struct nfs_client *clp) 207static void nfs4_shutdown_session(struct nfs_client *clp)
208{ 208{
209 if (nfs4_has_session(clp)) { 209 if (nfs4_has_session(clp)) {
210 nfs4_deviceid_purge_client(clp);
211 nfs4_destroy_session(clp->cl_session); 210 nfs4_destroy_session(clp->cl_session);
212 nfs4_destroy_clientid(clp); 211 nfs4_destroy_clientid(clp);
213 } 212 }
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 3168f6e3d4d4..9a4cbfc85d81 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -490,6 +490,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
490 dreq->error = -EIO; 490 dreq->error = -EIO;
491 spin_unlock(cinfo.lock); 491 spin_unlock(cinfo.lock);
492 } 492 }
493 nfs_release_request(req);
493 } 494 }
494 nfs_pageio_complete(&desc); 495 nfs_pageio_complete(&desc);
495 496
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index b5b86a05059c..864c51e4b400 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -57,6 +57,11 @@ unsigned int nfs_idmap_cache_timeout = 600;
57static const struct cred *id_resolver_cache; 57static const struct cred *id_resolver_cache;
58static struct key_type key_type_id_resolver_legacy; 58static struct key_type key_type_id_resolver_legacy;
59 59
60struct idmap {
61 struct rpc_pipe *idmap_pipe;
62 struct key_construction *idmap_key_cons;
63 struct mutex idmap_mutex;
64};
60 65
61/** 66/**
62 * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields 67 * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
@@ -310,9 +315,11 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
310 name, namelen, type, data, 315 name, namelen, type, data,
311 data_size, NULL); 316 data_size, NULL);
312 if (ret < 0) { 317 if (ret < 0) {
318 mutex_lock(&idmap->idmap_mutex);
313 ret = nfs_idmap_request_key(&key_type_id_resolver_legacy, 319 ret = nfs_idmap_request_key(&key_type_id_resolver_legacy,
314 name, namelen, type, data, 320 name, namelen, type, data,
315 data_size, idmap); 321 data_size, idmap);
322 mutex_unlock(&idmap->idmap_mutex);
316 } 323 }
317 return ret; 324 return ret;
318} 325}
@@ -354,11 +361,6 @@ static int nfs_idmap_lookup_id(const char *name, size_t namelen, const char *typ
354/* idmap classic begins here */ 361/* idmap classic begins here */
355module_param(nfs_idmap_cache_timeout, int, 0644); 362module_param(nfs_idmap_cache_timeout, int, 0644);
356 363
357struct idmap {
358 struct rpc_pipe *idmap_pipe;
359 struct key_construction *idmap_key_cons;
360};
361
362enum { 364enum {
363 Opt_find_uid, Opt_find_gid, Opt_find_user, Opt_find_group, Opt_find_err 365 Opt_find_uid, Opt_find_gid, Opt_find_user, Opt_find_group, Opt_find_err
364}; 366};
@@ -469,6 +471,7 @@ nfs_idmap_new(struct nfs_client *clp)
469 return error; 471 return error;
470 } 472 }
471 idmap->idmap_pipe = pipe; 473 idmap->idmap_pipe = pipe;
474 mutex_init(&idmap->idmap_mutex);
472 475
473 clp->cl_idmap = idmap; 476 clp->cl_idmap = idmap;
474 return 0; 477 return 0;
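
The new idmap_mutex serializes the legacy upcall fallback while leaving the keyring fast path unlocked. A sketch of that shape with pthreads; cache_lookup() and legacy_upcall() are hypothetical stand-ins, not NFS functions:

#include <pthread.h>

int cache_lookup(const char *name, int *out);	/* hypothetical fast path */
int legacy_upcall(const char *name, int *out);	/* hypothetical slow path */

static pthread_mutex_t upcall_mutex = PTHREAD_MUTEX_INITIALIZER;

/* The fast path is lock-free; only the fallback is serialized, so at
 * most one legacy upcall is in flight at a time. */
int lookup_id(const char *name, int *out)
{
	int ret = cache_lookup(name, out);

	if (ret < 0) {
		pthread_mutex_lock(&upcall_mutex);
		ret = legacy_upcall(name, out);
		pthread_mutex_unlock(&upcall_mutex);
	}
	return ret;
}
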
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index e605d695dbcb..f7296983eba6 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1530,7 +1530,6 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
1530 nfsi->delegation_state = 0; 1530 nfsi->delegation_state = 0;
1531 init_rwsem(&nfsi->rwsem); 1531 init_rwsem(&nfsi->rwsem);
1532 nfsi->layout = NULL; 1532 nfsi->layout = NULL;
1533 atomic_set(&nfsi->commit_info.rpcs_out, 0);
1534#endif 1533#endif
1535} 1534}
1536 1535
@@ -1545,6 +1544,7 @@ static void init_once(void *foo)
1545 INIT_LIST_HEAD(&nfsi->commit_info.list); 1544 INIT_LIST_HEAD(&nfsi->commit_info.list);
1546 nfsi->npages = 0; 1545 nfsi->npages = 0;
1547 nfsi->commit_info.ncommit = 0; 1546 nfsi->commit_info.ncommit = 0;
1547 atomic_set(&nfsi->commit_info.rpcs_out, 0);
1548 atomic_set(&nfsi->silly_count, 1); 1548 atomic_set(&nfsi->silly_count, 1);
1549 INIT_HLIST_HEAD(&nfsi->silly_list); 1549 INIT_HLIST_HEAD(&nfsi->silly_list);
1550 init_waitqueue_head(&nfsi->waitqueue); 1550 init_waitqueue_head(&nfsi->waitqueue);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index b8323aa7b543..bbc49caa7a82 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -70,6 +70,10 @@ find_pnfs_driver(u32 id)
70 70
71 spin_lock(&pnfs_spinlock); 71 spin_lock(&pnfs_spinlock);
72 local = find_pnfs_driver_locked(id); 72 local = find_pnfs_driver_locked(id);
73 if (local != NULL && !try_module_get(local->owner)) {
74 dprintk("%s: Could not grab reference on module\n", __func__);
75 local = NULL;
76 }
73 spin_unlock(&pnfs_spinlock); 77 spin_unlock(&pnfs_spinlock);
74 return local; 78 return local;
75} 79}
@@ -80,6 +84,9 @@ unset_pnfs_layoutdriver(struct nfs_server *nfss)
80 if (nfss->pnfs_curr_ld) { 84 if (nfss->pnfs_curr_ld) {
81 if (nfss->pnfs_curr_ld->clear_layoutdriver) 85 if (nfss->pnfs_curr_ld->clear_layoutdriver)
82 nfss->pnfs_curr_ld->clear_layoutdriver(nfss); 86 nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
87 /* Decrement the MDS count. Purge the deviceid cache if zero */
88 if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
89 nfs4_deviceid_purge_client(nfss->nfs_client);
83 module_put(nfss->pnfs_curr_ld->owner); 90 module_put(nfss->pnfs_curr_ld->owner);
84 } 91 }
85 nfss->pnfs_curr_ld = NULL; 92 nfss->pnfs_curr_ld = NULL;
@@ -115,10 +122,6 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
115 goto out_no_driver; 122 goto out_no_driver;
116 } 123 }
117 } 124 }
118 if (!try_module_get(ld_type->owner)) {
119 dprintk("%s: Could not grab reference on module\n", __func__);
120 goto out_no_driver;
121 }
122 server->pnfs_curr_ld = ld_type; 125 server->pnfs_curr_ld = ld_type;
123 if (ld_type->set_layoutdriver 126 if (ld_type->set_layoutdriver
124 && ld_type->set_layoutdriver(server, mntfh)) { 127 && ld_type->set_layoutdriver(server, mntfh)) {
@@ -127,6 +130,8 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
127 module_put(ld_type->owner); 130 module_put(ld_type->owner);
128 goto out_no_driver; 131 goto out_no_driver;
129 } 132 }
133 /* Bump the MDS count */
134 atomic_inc(&server->nfs_client->cl_mds_count);
130 135
131 dprintk("%s: pNFS module for %u set\n", __func__, id); 136 dprintk("%s: pNFS module for %u set\n", __func__, id);
132 return; 137 return;
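
Moving try_module_get() inside pnfs_spinlock closes the window where a layout driver module could be unloaded between the lookup and the reference grab. The same find-and-get pattern in a self-contained userspace sketch (registry, struct driver, and find_and_get() are illustrative names, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct driver {
	int id;
	atomic_int refs;
	struct driver *next;
};

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static struct driver *registry;

/* Bump the reference while the lookup lock is still held, so the
 * entry cannot be unregistered between "find" and "get". */
static struct driver *find_and_get(int id)
{
	struct driver *d;

	pthread_mutex_lock(&registry_lock);
	for (d = registry; d; d = d->next)
		if (d->id == id)
			break;
	if (d)
		atomic_fetch_add(&d->refs, 1);
	pthread_mutex_unlock(&registry_lock);
	return d;
}
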
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 08a07a218d26..57ceaf33d177 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -191,6 +191,8 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
191 while (!list_empty(head)) { 191 while (!list_empty(head)) {
192 ii = list_first_entry(head, struct nilfs_inode_info, i_dirty); 192 ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
193 list_del_init(&ii->i_dirty); 193 list_del_init(&ii->i_dirty);
194 truncate_inode_pages(&ii->vfs_inode.i_data, 0);
195 nilfs_btnode_cache_clear(&ii->i_btnode_cache);
194 iput(&ii->vfs_inode); 196 iput(&ii->vfs_inode);
195 } 197 }
196} 198}
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 0e72ad6f22aa..88e11fb346b6 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2309,6 +2309,8 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2309 if (!test_bit(NILFS_I_UPDATED, &ii->i_state)) 2309 if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
2310 continue; 2310 continue;
2311 list_del_init(&ii->i_dirty); 2311 list_del_init(&ii->i_dirty);
2312 truncate_inode_pages(&ii->vfs_inode.i_data, 0);
2313 nilfs_btnode_cache_clear(&ii->i_btnode_cache);
2312 iput(&ii->vfs_inode); 2314 iput(&ii->vfs_inode);
2313 } 2315 }
2314} 2316}
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 81a4cd22f80b..4f7795fb5fc0 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -456,7 +456,7 @@ static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
456 stats->ls_gets++; 456 stats->ls_gets++;
457 stats->ls_total += ktime_to_ns(kt); 457 stats->ls_total += ktime_to_ns(kt);
458 /* overflow */ 458 /* overflow */
459 if (unlikely(stats->ls_gets) == 0) { 459 if (unlikely(stats->ls_gets == 0)) {
460 stats->ls_gets++; 460 stats->ls_gets++;
461 stats->ls_total = ktime_to_ns(kt); 461 stats->ls_total = ktime_to_ns(kt);
462 } 462 }
@@ -3932,6 +3932,8 @@ unqueue:
3932static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb, 3932static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
3933 struct ocfs2_lock_res *lockres) 3933 struct ocfs2_lock_res *lockres)
3934{ 3934{
3935 unsigned long flags;
3936
3935 assert_spin_locked(&lockres->l_lock); 3937 assert_spin_locked(&lockres->l_lock);
3936 3938
3937 if (lockres->l_flags & OCFS2_LOCK_FREEING) { 3939 if (lockres->l_flags & OCFS2_LOCK_FREEING) {
@@ -3945,21 +3947,22 @@ static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
3945 3947
3946 lockres_or_flags(lockres, OCFS2_LOCK_QUEUED); 3948 lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
3947 3949
3948 spin_lock(&osb->dc_task_lock); 3950 spin_lock_irqsave(&osb->dc_task_lock, flags);
3949 if (list_empty(&lockres->l_blocked_list)) { 3951 if (list_empty(&lockres->l_blocked_list)) {
3950 list_add_tail(&lockres->l_blocked_list, 3952 list_add_tail(&lockres->l_blocked_list,
3951 &osb->blocked_lock_list); 3953 &osb->blocked_lock_list);
3952 osb->blocked_lock_count++; 3954 osb->blocked_lock_count++;
3953 } 3955 }
3954 spin_unlock(&osb->dc_task_lock); 3956 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
3955} 3957}
3956 3958
3957static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb) 3959static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
3958{ 3960{
3959 unsigned long processed; 3961 unsigned long processed;
3962 unsigned long flags;
3960 struct ocfs2_lock_res *lockres; 3963 struct ocfs2_lock_res *lockres;
3961 3964
3962 spin_lock(&osb->dc_task_lock); 3965 spin_lock_irqsave(&osb->dc_task_lock, flags);
3963 /* grab this early so we know to try again if a state change and 3966 /* grab this early so we know to try again if a state change and
3964 * wake happens part-way through our work */ 3967 * wake happens part-way through our work */
3965 osb->dc_work_sequence = osb->dc_wake_sequence; 3968 osb->dc_work_sequence = osb->dc_wake_sequence;
@@ -3972,38 +3975,40 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
3972 struct ocfs2_lock_res, l_blocked_list); 3975 struct ocfs2_lock_res, l_blocked_list);
3973 list_del_init(&lockres->l_blocked_list); 3976 list_del_init(&lockres->l_blocked_list);
3974 osb->blocked_lock_count--; 3977 osb->blocked_lock_count--;
3975 spin_unlock(&osb->dc_task_lock); 3978 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
3976 3979
3977 BUG_ON(!processed); 3980 BUG_ON(!processed);
3978 processed--; 3981 processed--;
3979 3982
3980 ocfs2_process_blocked_lock(osb, lockres); 3983 ocfs2_process_blocked_lock(osb, lockres);
3981 3984
3982 spin_lock(&osb->dc_task_lock); 3985 spin_lock_irqsave(&osb->dc_task_lock, flags);
3983 } 3986 }
3984 spin_unlock(&osb->dc_task_lock); 3987 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
3985} 3988}
3986 3989
3987static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb) 3990static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
3988{ 3991{
3989 int empty = 0; 3992 int empty = 0;
3993 unsigned long flags;
3990 3994
3991 spin_lock(&osb->dc_task_lock); 3995 spin_lock_irqsave(&osb->dc_task_lock, flags);
3992 if (list_empty(&osb->blocked_lock_list)) 3996 if (list_empty(&osb->blocked_lock_list))
3993 empty = 1; 3997 empty = 1;
3994 3998
3995 spin_unlock(&osb->dc_task_lock); 3999 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
3996 return empty; 4000 return empty;
3997} 4001}
3998 4002
3999static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb) 4003static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
4000{ 4004{
4001 int should_wake = 0; 4005 int should_wake = 0;
4006 unsigned long flags;
4002 4007
4003 spin_lock(&osb->dc_task_lock); 4008 spin_lock_irqsave(&osb->dc_task_lock, flags);
4004 if (osb->dc_work_sequence != osb->dc_wake_sequence) 4009 if (osb->dc_work_sequence != osb->dc_wake_sequence)
4005 should_wake = 1; 4010 should_wake = 1;
4006 spin_unlock(&osb->dc_task_lock); 4011 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4007 4012
4008 return should_wake; 4013 return should_wake;
4009} 4014}
@@ -4033,10 +4038,12 @@ static int ocfs2_downconvert_thread(void *arg)
4033 4038
4034void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb) 4039void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
4035{ 4040{
4036 spin_lock(&osb->dc_task_lock); 4041 unsigned long flags;
4042
4043 spin_lock_irqsave(&osb->dc_task_lock, flags);
4037 /* make sure the voting thread gets a swipe at whatever changes 4044 /* make sure the voting thread gets a swipe at whatever changes
4038 * the caller may have made to the voting state */ 4045 * the caller may have made to the voting state */
4039 osb->dc_wake_sequence++; 4046 osb->dc_wake_sequence++;
4040 spin_unlock(&osb->dc_task_lock); 4047 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4041 wake_up(&osb->dc_event); 4048 wake_up(&osb->dc_event);
4042} 4049}
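
Converting dc_task_lock to spin_lock_irqsave() is required once the lock may be taken from a context with interrupts disabled: every acquirer must save and restore the IRQ state rather than use the plain lock calls. A kernel-style sketch of the pattern (the demo_* names are not from the patch):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_task_lock);
static unsigned long demo_wake_sequence;

/* Safe against acquirers running with interrupts off: disable IRQs
 * around the critical section and restore the saved state after. */
static void demo_wake(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_task_lock, flags);
	demo_wake_sequence++;
	spin_unlock_irqrestore(&demo_task_lock, flags);
}
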
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 2f5b92ef0e53..70b5863a2d64 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -923,8 +923,6 @@ out_unlock:
923 923
924 ocfs2_inode_unlock(inode, 0); 924 ocfs2_inode_unlock(inode, 0);
925out: 925out:
926 if (ret && ret != -ENXIO)
927 ret = -ENXIO;
928 return ret; 926 return ret;
929} 927}
930 928
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 061591a3ab08..98513c8ed589 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2422,8 +2422,10 @@ out_dio:
2422 unaligned_dio = 0; 2422 unaligned_dio = 0;
2423 } 2423 }
2424 2424
2425 if (unaligned_dio) 2425 if (unaligned_dio) {
2426 ocfs2_iocb_clear_unaligned_aio(iocb);
2426 atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio); 2427 atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
2428 }
2427 2429
2428out: 2430out:
2429 if (rw_level != -1) 2431 if (rw_level != -1)
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 92fcd575775a..0a86e302655f 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -399,8 +399,6 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
399 msecs_to_jiffies(oinfo->dqi_syncms)); 399 msecs_to_jiffies(oinfo->dqi_syncms));
400 400
401out_err: 401out_err:
402 if (status)
403 mlog_errno(status);
404 return status; 402 return status;
405out_unlock: 403out_unlock:
406 ocfs2_unlock_global_qf(oinfo, 0); 404 ocfs2_unlock_global_qf(oinfo, 0);
diff --git a/fs/open.c b/fs/open.c
index d6c79a0dffc7..1540632d8387 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -397,10 +397,10 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
397{ 397{
398 struct file *file; 398 struct file *file;
399 struct inode *inode; 399 struct inode *inode;
400 int error; 400 int error, fput_needed;
401 401
402 error = -EBADF; 402 error = -EBADF;
403 file = fget(fd); 403 file = fget_raw_light(fd, &fput_needed);
404 if (!file) 404 if (!file)
405 goto out; 405 goto out;
406 406
@@ -414,7 +414,7 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
414 if (!error) 414 if (!error)
415 set_fs_pwd(current->fs, &file->f_path); 415 set_fs_pwd(current->fs, &file->f_path);
416out_putf: 416out_putf:
417 fput(file); 417 fput_light(file, fput_needed);
418out: 418out:
419 return error; 419 return error;
420} 420}
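
fget_light()/fput_light() skip the atomic reference bump when the fd table is not shared, and the _raw variants additionally accept O_PATH descriptors, which fchdir must allow. A sketch of the pairing (demo_peek_fd() is hypothetical):

#include <linux/file.h>
#include <linux/errno.h>

/* Light-weight fd lookup: fput_needed records whether a real
 * reference was taken, and fput_light() drops it only in that case. */
static int demo_peek_fd(unsigned int fd)
{
	int fput_needed;
	struct file *file = fget_light(fd, &fput_needed);

	if (!file)
		return -EBADF;
	/* ... inspect file->f_path here ... */
	fput_light(file, fput_needed);
	return 0;
}
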
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index aeb19e68e086..11a2aa2a56c4 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -258,7 +258,7 @@ fail:
258 return rc; 258 return rc;
259} 259}
260 260
261int pstore_fill_super(struct super_block *sb, void *data, int silent) 261static int pstore_fill_super(struct super_block *sb, void *data, int silent)
262{ 262{
263 struct inode *inode; 263 struct inode *inode;
264 264
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 82c585f715e3..03ce7a9b81cc 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -94,20 +94,15 @@ static const char *get_reason_str(enum kmsg_dump_reason reason)
94 * as we can from the end of the buffer. 94 * as we can from the end of the buffer.
95 */ 95 */
96static void pstore_dump(struct kmsg_dumper *dumper, 96static void pstore_dump(struct kmsg_dumper *dumper,
97 enum kmsg_dump_reason reason, 97 enum kmsg_dump_reason reason)
98 const char *s1, unsigned long l1,
99 const char *s2, unsigned long l2)
100{ 98{
101 unsigned long s1_start, s2_start; 99 unsigned long total = 0;
102 unsigned long l1_cpy, l2_cpy;
103 unsigned long size, total = 0;
104 char *dst;
105 const char *why; 100 const char *why;
106 u64 id; 101 u64 id;
107 int hsize, ret;
108 unsigned int part = 1; 102 unsigned int part = 1;
109 unsigned long flags = 0; 103 unsigned long flags = 0;
110 int is_locked = 0; 104 int is_locked = 0;
105 int ret;
111 106
112 why = get_reason_str(reason); 107 why = get_reason_str(reason);
113 108
@@ -119,30 +114,25 @@ static void pstore_dump(struct kmsg_dumper *dumper,
119 spin_lock_irqsave(&psinfo->buf_lock, flags); 114 spin_lock_irqsave(&psinfo->buf_lock, flags);
120 oopscount++; 115 oopscount++;
121 while (total < kmsg_bytes) { 116 while (total < kmsg_bytes) {
117 char *dst;
118 unsigned long size;
119 int hsize;
120 size_t len;
121
122 dst = psinfo->buf; 122 dst = psinfo->buf;
123 hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part); 123 hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part);
124 size = psinfo->bufsize - hsize; 124 size = psinfo->bufsize - hsize;
125 dst += hsize; 125 dst += hsize;
126 126
127 l2_cpy = min(l2, size); 127 if (!kmsg_dump_get_buffer(dumper, true, dst, size, &len))
128 l1_cpy = min(l1, size - l2_cpy);
129
130 if (l1_cpy + l2_cpy == 0)
131 break; 128 break;
132 129
133 s2_start = l2 - l2_cpy;
134 s1_start = l1 - l1_cpy;
135
136 memcpy(dst, s1 + s1_start, l1_cpy);
137 memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
138
139 ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part, 130 ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part,
140 hsize + l1_cpy + l2_cpy, psinfo); 131 hsize + len, psinfo);
141 if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted()) 132 if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
142 pstore_new_entry = 1; 133 pstore_new_entry = 1;
143 l1 -= l1_cpy; 134
144 l2 -= l2_cpy; 135 total += hsize + len;
145 total += l1_cpy + l2_cpy;
146 part++; 136 part++;
147 } 137 }
148 if (in_nmi()) { 138 if (in_nmi()) {
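
pstore_dump() now pulls pre-assembled chunks from kmsg_dump_get_buffer() instead of hand-copying from the old s1/s2 ring-buffer halves. A minimal sketch of a dumper callback built on the same call; the demo_* names are hypothetical:

#include <linux/kmsg_dump.h>

static char demo_buf[1024];

/* Each call to kmsg_dump_get_buffer() fills the buffer with as much
 * of the log as fits; looping drains successive chunks until the
 * core reports there is nothing left. */
static void demo_do_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	size_t len;

	while (kmsg_dump_get_buffer(dumper, true, demo_buf,
				    sizeof(demo_buf), &len)) {
		/* persist demo_buf[0..len) to backing storage here */
	}
}
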
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 9123cce28c1e..453030f9c5bc 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -106,6 +106,8 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
106 time->tv_sec = 0; 106 time->tv_sec = 0;
107 time->tv_nsec = 0; 107 time->tv_nsec = 0;
108 108
109 /* Update old/shadowed buffer. */
110 persistent_ram_save_old(prz);
109 size = persistent_ram_old_size(prz); 111 size = persistent_ram_old_size(prz);
110 *buf = kmalloc(size, GFP_KERNEL); 112 *buf = kmalloc(size, GFP_KERNEL);
111 if (*buf == NULL) 113 if (*buf == NULL)
@@ -184,6 +186,7 @@ static int ramoops_pstore_erase(enum pstore_type_id type, u64 id,
184 return -EINVAL; 186 return -EINVAL;
185 187
186 persistent_ram_free_old(cxt->przs[id]); 188 persistent_ram_free_old(cxt->przs[id]);
189 persistent_ram_zap(cxt->przs[id]);
187 190
188 return 0; 191 return 0;
189} 192}
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 31f8d184f3a0..c5fbdbbf81ac 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -250,23 +250,24 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
250 persistent_ram_update_ecc(prz, start, count); 250 persistent_ram_update_ecc(prz, start, count);
251} 251}
252 252
253static void __init 253void persistent_ram_save_old(struct persistent_ram_zone *prz)
254persistent_ram_save_old(struct persistent_ram_zone *prz)
255{ 254{
256 struct persistent_ram_buffer *buffer = prz->buffer; 255 struct persistent_ram_buffer *buffer = prz->buffer;
257 size_t size = buffer_size(prz); 256 size_t size = buffer_size(prz);
258 size_t start = buffer_start(prz); 257 size_t start = buffer_start(prz);
259 char *dest;
260 258
261 persistent_ram_ecc_old(prz); 259 if (!size)
260 return;
262 261
263 dest = kmalloc(size, GFP_KERNEL); 262 if (!prz->old_log) {
264 if (dest == NULL) { 263 persistent_ram_ecc_old(prz);
264 prz->old_log = kmalloc(size, GFP_KERNEL);
265 }
266 if (!prz->old_log) {
265 pr_err("persistent_ram: failed to allocate buffer\n"); 267 pr_err("persistent_ram: failed to allocate buffer\n");
266 return; 268 return;
267 } 269 }
268 270
269 prz->old_log = dest;
270 prz->old_log_size = size; 271 prz->old_log_size = size;
271 memcpy(prz->old_log, &buffer->data[start], size - start); 272 memcpy(prz->old_log, &buffer->data[start], size - start);
272 memcpy(prz->old_log + size - start, &buffer->data[0], start); 273 memcpy(prz->old_log + size - start, &buffer->data[0], start);
@@ -319,6 +320,13 @@ void persistent_ram_free_old(struct persistent_ram_zone *prz)
319 prz->old_log_size = 0; 320 prz->old_log_size = 0;
320} 321}
321 322
323void persistent_ram_zap(struct persistent_ram_zone *prz)
324{
325 atomic_set(&prz->buffer->start, 0);
326 atomic_set(&prz->buffer->size, 0);
327 persistent_ram_update_header_ecc(prz);
328}
329
322static void *persistent_ram_vmap(phys_addr_t start, size_t size) 330static void *persistent_ram_vmap(phys_addr_t start, size_t size)
323{ 331{
324 struct page **pages; 332 struct page **pages;
@@ -405,6 +413,7 @@ static int __init persistent_ram_post_init(struct persistent_ram_zone *prz, bool
405 " size %zu, start %zu\n", 413 " size %zu, start %zu\n",
406 buffer_size(prz), buffer_start(prz)); 414 buffer_size(prz), buffer_start(prz));
407 persistent_ram_save_old(prz); 415 persistent_ram_save_old(prz);
416 return 0;
408 } 417 }
409 } else { 418 } else {
410 pr_info("persistent_ram: no valid data in buffer" 419 pr_info("persistent_ram: no valid data in buffer"
@@ -412,8 +421,7 @@ static int __init persistent_ram_post_init(struct persistent_ram_zone *prz, bool
412 } 421 }
413 422
414 prz->buffer->sig = PERSISTENT_RAM_SIG; 423 prz->buffer->sig = PERSISTENT_RAM_SIG;
415 atomic_set(&prz->buffer->start, 0); 424 persistent_ram_zap(prz);
416 atomic_set(&prz->buffer->size, 0);
417 425
418 return 0; 426 return 0;
419} 427}
@@ -448,7 +456,6 @@ struct persistent_ram_zone * __init persistent_ram_new(phys_addr_t start,
448 goto err; 456 goto err;
449 457
450 persistent_ram_post_init(prz, ecc); 458 persistent_ram_post_init(prz, ecc);
451 persistent_ram_update_header_ecc(prz);
452 459
453 return prz; 460 return prz;
454err: 461err:
diff --git a/fs/splice.c b/fs/splice.c
index c9f1318a3b82..7bf08fa22ec9 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -273,13 +273,16 @@ void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
273 * Check if we need to grow the arrays holding pages and partial page 273 * Check if we need to grow the arrays holding pages and partial page
274 * descriptions. 274 * descriptions.
275 */ 275 */
276int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) 276int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
277{ 277{
278 if (pipe->buffers <= PIPE_DEF_BUFFERS) 278 unsigned int buffers = ACCESS_ONCE(pipe->buffers);
279
280 spd->nr_pages_max = buffers;
281 if (buffers <= PIPE_DEF_BUFFERS)
279 return 0; 282 return 0;
280 283
281 spd->pages = kmalloc(pipe->buffers * sizeof(struct page *), GFP_KERNEL); 284 spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
282 spd->partial = kmalloc(pipe->buffers * sizeof(struct partial_page), GFP_KERNEL); 285 spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);
283 286
284 if (spd->pages && spd->partial) 287 if (spd->pages && spd->partial)
285 return 0; 288 return 0;
@@ -289,10 +292,9 @@ int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
289 return -ENOMEM; 292 return -ENOMEM;
290} 293}
291 294
292void splice_shrink_spd(struct pipe_inode_info *pipe, 295void splice_shrink_spd(struct splice_pipe_desc *spd)
293 struct splice_pipe_desc *spd)
294{ 296{
295 if (pipe->buffers <= PIPE_DEF_BUFFERS) 297 if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
296 return; 298 return;
297 299
298 kfree(spd->pages); 300 kfree(spd->pages);
@@ -315,6 +317,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
315 struct splice_pipe_desc spd = { 317 struct splice_pipe_desc spd = {
316 .pages = pages, 318 .pages = pages,
317 .partial = partial, 319 .partial = partial,
320 .nr_pages_max = PIPE_DEF_BUFFERS,
318 .flags = flags, 321 .flags = flags,
319 .ops = &page_cache_pipe_buf_ops, 322 .ops = &page_cache_pipe_buf_ops,
320 .spd_release = spd_release_page, 323 .spd_release = spd_release_page,
@@ -326,7 +329,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
326 index = *ppos >> PAGE_CACHE_SHIFT; 329 index = *ppos >> PAGE_CACHE_SHIFT;
327 loff = *ppos & ~PAGE_CACHE_MASK; 330 loff = *ppos & ~PAGE_CACHE_MASK;
328 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 331 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
329 nr_pages = min(req_pages, pipe->buffers); 332 nr_pages = min(req_pages, spd.nr_pages_max);
330 333
331 /* 334 /*
332 * Lookup the (hopefully) full range of pages we need. 335 * Lookup the (hopefully) full range of pages we need.
@@ -497,7 +500,7 @@ fill_it:
497 if (spd.nr_pages) 500 if (spd.nr_pages)
498 error = splice_to_pipe(pipe, &spd); 501 error = splice_to_pipe(pipe, &spd);
499 502
500 splice_shrink_spd(pipe, &spd); 503 splice_shrink_spd(&spd);
501 return error; 504 return error;
502} 505}
503 506
@@ -598,6 +601,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
598 struct splice_pipe_desc spd = { 601 struct splice_pipe_desc spd = {
599 .pages = pages, 602 .pages = pages,
600 .partial = partial, 603 .partial = partial,
604 .nr_pages_max = PIPE_DEF_BUFFERS,
601 .flags = flags, 605 .flags = flags,
602 .ops = &default_pipe_buf_ops, 606 .ops = &default_pipe_buf_ops,
603 .spd_release = spd_release_page, 607 .spd_release = spd_release_page,
@@ -608,8 +612,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
608 612
609 res = -ENOMEM; 613 res = -ENOMEM;
610 vec = __vec; 614 vec = __vec;
611 if (pipe->buffers > PIPE_DEF_BUFFERS) { 615 if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
612 vec = kmalloc(pipe->buffers * sizeof(struct iovec), GFP_KERNEL); 616 vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
613 if (!vec) 617 if (!vec)
614 goto shrink_ret; 618 goto shrink_ret;
615 } 619 }
@@ -617,7 +621,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
617 offset = *ppos & ~PAGE_CACHE_MASK; 621 offset = *ppos & ~PAGE_CACHE_MASK;
618 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 622 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
619 623
620 for (i = 0; i < nr_pages && i < pipe->buffers && len; i++) { 624 for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
621 struct page *page; 625 struct page *page;
622 626
623 page = alloc_page(GFP_USER); 627 page = alloc_page(GFP_USER);
@@ -665,7 +669,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
665shrink_ret: 669shrink_ret:
666 if (vec != __vec) 670 if (vec != __vec)
667 kfree(vec); 671 kfree(vec);
668 splice_shrink_spd(pipe, &spd); 672 splice_shrink_spd(&spd);
669 return res; 673 return res;
670 674
671err: 675err:
@@ -1614,6 +1618,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
1614 struct splice_pipe_desc spd = { 1618 struct splice_pipe_desc spd = {
1615 .pages = pages, 1619 .pages = pages,
1616 .partial = partial, 1620 .partial = partial,
1621 .nr_pages_max = PIPE_DEF_BUFFERS,
1617 .flags = flags, 1622 .flags = flags,
1618 .ops = &user_page_pipe_buf_ops, 1623 .ops = &user_page_pipe_buf_ops,
1619 .spd_release = spd_release_page, 1624 .spd_release = spd_release_page,
@@ -1629,13 +1634,13 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
1629 1634
1630 spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages, 1635 spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
1631 spd.partial, false, 1636 spd.partial, false,
1632 pipe->buffers); 1637 spd.nr_pages_max);
1633 if (spd.nr_pages <= 0) 1638 if (spd.nr_pages <= 0)
1634 ret = spd.nr_pages; 1639 ret = spd.nr_pages;
1635 else 1640 else
1636 ret = splice_to_pipe(pipe, &spd); 1641 ret = splice_to_pipe(pipe, &spd);
1637 1642
1638 splice_shrink_spd(pipe, &spd); 1643 splice_shrink_spd(&spd);
1639 return ret; 1644 return ret;
1640} 1645}
1641 1646
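
spd.nr_pages_max snapshots pipe->buffers once, so the sizes used for allocation, iteration, and the final free all agree even if another task resizes the pipe with F_SETPIPE_SZ in between. A self-contained userspace sketch of the snapshot idea (all names here are illustrative):

#include <stdatomic.h>
#include <stdlib.h>

#define DEF_SLOTS 16u			/* stand-in for PIPE_DEF_BUFFERS */

static atomic_uint shared_limit;	/* stand-in for pipe->buffers */

struct desc {
	unsigned int max;		/* stand-in for spd->nr_pages_max */
	void **slots;
};

/* Read the shared limit exactly once and remember it in the
 * descriptor; every later decision uses the stored snapshot. */
static int grow(struct desc *d)
{
	unsigned int n = atomic_load(&shared_limit);

	d->max = n;
	d->slots = NULL;
	if (n <= DEF_SLOTS)		/* small case uses static arrays */
		return 0;
	d->slots = calloc(n, sizeof(*d->slots));
	return d->slots ? 0 : -1;
}

static void shrink(struct desc *d)
{
	if (d->max <= DEF_SLOTS)	/* same snapshot decides the free */
		return;
	free(d->slots);
}
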
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 84a7e6f3c046..92df3b081539 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2918,7 +2918,7 @@ int dbg_debugfs_init_fs(struct ubifs_info *c)
2918 struct dentry *dent; 2918 struct dentry *dent;
2919 struct ubifs_debug_info *d = c->dbg; 2919 struct ubifs_debug_info *d = c->dbg;
2920 2920
2921 if (!IS_ENABLED(DEBUG_FS)) 2921 if (!IS_ENABLED(CONFIG_DEBUG_FS))
2922 return 0; 2922 return 0;
2923 2923
2924 n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME, 2924 n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME,
@@ -3013,7 +3013,7 @@ out:
3013 */ 3013 */
3014void dbg_debugfs_exit_fs(struct ubifs_info *c) 3014void dbg_debugfs_exit_fs(struct ubifs_info *c)
3015{ 3015{
3016 if (IS_ENABLED(DEBUG_FS)) 3016 if (IS_ENABLED(CONFIG_DEBUG_FS))
3017 debugfs_remove_recursive(c->dbg->dfs_dir); 3017 debugfs_remove_recursive(c->dbg->dfs_dir);
3018} 3018}
3019 3019
@@ -3099,7 +3099,7 @@ int dbg_debugfs_init(void)
3099 const char *fname; 3099 const char *fname;
3100 struct dentry *dent; 3100 struct dentry *dent;
3101 3101
3102 if (!IS_ENABLED(DEBUG_FS)) 3102 if (!IS_ENABLED(CONFIG_DEBUG_FS))
3103 return 0; 3103 return 0;
3104 3104
3105 fname = "ubifs"; 3105 fname = "ubifs";
@@ -3166,7 +3166,7 @@ out:
3166 */ 3166 */
3167void dbg_debugfs_exit(void) 3167void dbg_debugfs_exit(void)
3168{ 3168{
3169 if (IS_ENABLED(DEBUG_FS)) 3169 if (IS_ENABLED(CONFIG_DEBUG_FS))
3170 debugfs_remove_recursive(dfs_rootdir); 3170 debugfs_remove_recursive(dfs_rootdir);
3171} 3171}
3172 3172
diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c
index 2559d174e004..28ec13af28d9 100644
--- a/fs/ubifs/find.c
+++ b/fs/ubifs/find.c
@@ -939,8 +939,8 @@ static int find_dirtiest_idx_leb(struct ubifs_info *c)
939 } 939 }
940 dbg_find("LEB %d, dirty %d and free %d flags %#x", lp->lnum, lp->dirty, 940 dbg_find("LEB %d, dirty %d and free %d flags %#x", lp->lnum, lp->dirty,
941 lp->free, lp->flags); 941 lp->free, lp->flags);
942 ubifs_assert(lp->flags | LPROPS_TAKEN); 942 ubifs_assert(lp->flags & LPROPS_TAKEN);
943 ubifs_assert(lp->flags | LPROPS_INDEX); 943 ubifs_assert(lp->flags & LPROPS_INDEX);
944 return lnum; 944 return lnum;
945} 945}
946 946
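
The ubifs_assert() fix is worth dwelling on: flags | LPROPS_TAKEN is nonzero for any nonzero mask, so the old assertions could never fire; only the bitwise AND actually tests the bit. A tiny runnable illustration (the flag value is made up for the demo):

#include <assert.h>

#define LPROPS_TAKEN 0x01	/* made-up value, for illustration only */

int main(void)
{
	int flags = 0;		/* the bit is NOT set */

	assert(flags | LPROPS_TAKEN);	 /* tautology: always passes */
	assert(!(flags & LPROPS_TAKEN)); /* AND reports the real state */
	return 0;
}
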
diff --git a/fs/udf/super.c b/fs/udf/super.c
index ac8a348dcb69..8d86a8706c0e 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -56,6 +56,7 @@
56#include <linux/seq_file.h> 56#include <linux/seq_file.h>
57#include <linux/bitmap.h> 57#include <linux/bitmap.h>
58#include <linux/crc-itu-t.h> 58#include <linux/crc-itu-t.h>
59#include <linux/log2.h>
59#include <asm/byteorder.h> 60#include <asm/byteorder.h>
60 61
61#include "udf_sb.h" 62#include "udf_sb.h"
@@ -1215,16 +1216,65 @@ out_bh:
1215 return ret; 1216 return ret;
1216} 1217}
1217 1218
1219static int udf_load_sparable_map(struct super_block *sb,
1220 struct udf_part_map *map,
1221 struct sparablePartitionMap *spm)
1222{
1223 uint32_t loc;
1224 uint16_t ident;
1225 struct sparingTable *st;
1226 struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
1227 int i;
1228 struct buffer_head *bh;
1229
1230 map->s_partition_type = UDF_SPARABLE_MAP15;
1231 sdata->s_packet_len = le16_to_cpu(spm->packetLength);
1232 if (!is_power_of_2(sdata->s_packet_len)) {
1233 udf_err(sb, "error loading logical volume descriptor: "
1234 "Invalid packet length %u\n",
1235 (unsigned)sdata->s_packet_len);
1236 return -EIO;
1237 }
1238 if (spm->numSparingTables > 4) {
1239 udf_err(sb, "error loading logical volume descriptor: "
1240 "Too many sparing tables (%d)\n",
1241 (int)spm->numSparingTables);
1242 return -EIO;
1243 }
1244
1245 for (i = 0; i < spm->numSparingTables; i++) {
1246 loc = le32_to_cpu(spm->locSparingTable[i]);
1247 bh = udf_read_tagged(sb, loc, loc, &ident);
1248 if (!bh)
1249 continue;
1250
1251 st = (struct sparingTable *)bh->b_data;
1252 if (ident != 0 ||
1253 strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
1254 strlen(UDF_ID_SPARING)) ||
1255 sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
1256 sb->s_blocksize) {
1257 brelse(bh);
1258 continue;
1259 }
1260
1261 sdata->s_spar_map[i] = bh;
1262 }
1263 map->s_partition_func = udf_get_pblock_spar15;
1264 return 0;
1265}
1266
1218static int udf_load_logicalvol(struct super_block *sb, sector_t block, 1267static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1219 struct kernel_lb_addr *fileset) 1268 struct kernel_lb_addr *fileset)
1220{ 1269{
1221 struct logicalVolDesc *lvd; 1270 struct logicalVolDesc *lvd;
1222 int i, j, offset; 1271 int i, offset;
1223 uint8_t type; 1272 uint8_t type;
1224 struct udf_sb_info *sbi = UDF_SB(sb); 1273 struct udf_sb_info *sbi = UDF_SB(sb);
1225 struct genericPartitionMap *gpm; 1274 struct genericPartitionMap *gpm;
1226 uint16_t ident; 1275 uint16_t ident;
1227 struct buffer_head *bh; 1276 struct buffer_head *bh;
1277 unsigned int table_len;
1228 int ret = 0; 1278 int ret = 0;
1229 1279
1230 bh = udf_read_tagged(sb, block, block, &ident); 1280 bh = udf_read_tagged(sb, block, block, &ident);
@@ -1232,15 +1282,20 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1232 return 1; 1282 return 1;
1233 BUG_ON(ident != TAG_IDENT_LVD); 1283 BUG_ON(ident != TAG_IDENT_LVD);
1234 lvd = (struct logicalVolDesc *)bh->b_data; 1284 lvd = (struct logicalVolDesc *)bh->b_data;
1235 1285 table_len = le32_to_cpu(lvd->mapTableLength);
1236 i = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps)); 1286 if (sizeof(*lvd) + table_len > sb->s_blocksize) {
1237 if (i != 0) { 1287 udf_err(sb, "error loading logical volume descriptor: "
1238 ret = i; 1288 "Partition table too long (%u > %lu)\n", table_len,
1289 sb->s_blocksize - sizeof(*lvd));
1239 goto out_bh; 1290 goto out_bh;
1240 } 1291 }
1241 1292
1293 ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
1294 if (ret)
1295 goto out_bh;
1296
1242 for (i = 0, offset = 0; 1297 for (i = 0, offset = 0;
1243 i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength); 1298 i < sbi->s_partitions && offset < table_len;
1244 i++, offset += gpm->partitionMapLength) { 1299 i++, offset += gpm->partitionMapLength) {
1245 struct udf_part_map *map = &sbi->s_partmaps[i]; 1300 struct udf_part_map *map = &sbi->s_partmaps[i];
1246 gpm = (struct genericPartitionMap *) 1301 gpm = (struct genericPartitionMap *)
@@ -1275,38 +1330,9 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1275 } else if (!strncmp(upm2->partIdent.ident, 1330 } else if (!strncmp(upm2->partIdent.ident,
1276 UDF_ID_SPARABLE, 1331 UDF_ID_SPARABLE,
1277 strlen(UDF_ID_SPARABLE))) { 1332 strlen(UDF_ID_SPARABLE))) {
1278 uint32_t loc; 1333 if (udf_load_sparable_map(sb, map,
1279 struct sparingTable *st; 1334 (struct sparablePartitionMap *)gpm) < 0)
1280 struct sparablePartitionMap *spm = 1335 goto out_bh;
1281 (struct sparablePartitionMap *)gpm;
1282
1283 map->s_partition_type = UDF_SPARABLE_MAP15;
1284 map->s_type_specific.s_sparing.s_packet_len =
1285 le16_to_cpu(spm->packetLength);
1286 for (j = 0; j < spm->numSparingTables; j++) {
1287 struct buffer_head *bh2;
1288
1289 loc = le32_to_cpu(
1290 spm->locSparingTable[j]);
1291 bh2 = udf_read_tagged(sb, loc, loc,
1292 &ident);
1293 map->s_type_specific.s_sparing.
1294 s_spar_map[j] = bh2;
1295
1296 if (bh2 == NULL)
1297 continue;
1298
1299 st = (struct sparingTable *)bh2->b_data;
1300 if (ident != 0 || strncmp(
1301 st->sparingIdent.ident,
1302 UDF_ID_SPARING,
1303 strlen(UDF_ID_SPARING))) {
1304 brelse(bh2);
1305 map->s_type_specific.s_sparing.
1306 s_spar_map[j] = NULL;
1307 }
1308 }
1309 map->s_partition_func = udf_get_pblock_spar15;
1310 } else if (!strncmp(upm2->partIdent.ident, 1336 } else if (!strncmp(upm2->partIdent.ident,
1311 UDF_ID_METADATA, 1337 UDF_ID_METADATA,
1312 strlen(UDF_ID_METADATA))) { 1338 strlen(UDF_ID_METADATA))) {
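
The factored-out udf_load_sparable_map() and the new mapTableLength check follow the same rule: validate every length field read from disk against the containing block before using it, since on-disk data is untrusted. A self-contained sketch of such a check (struct desc and validate() are illustrative, not UDF types):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical on-disk descriptor carrying a self-described length. */
struct desc {
	uint32_t table_len;
	unsigned char data[];
};

/* Reject a descriptor whose declared table would run past the end of
 * the block it lives in, mirroring the check added above. */
static int validate(const unsigned char *block, size_t block_size)
{
	uint32_t len;

	if (block_size < sizeof(struct desc))
		return -1;
	memcpy(&len, block, sizeof(len));	/* avoid misaligned read */
	if (sizeof(struct desc) + (size_t)len > block_size)
		return -1;	/* declared length exceeds the block */
	return 0;
}
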
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 229641fb8e67..9d1aeb7e2734 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -1080,6 +1080,7 @@ restart:
1080 goto restart; 1080 goto restart;
1081 } 1081 }
1082 1082
1083 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1083 trace_xfs_alloc_size_neither(args); 1084 trace_xfs_alloc_size_neither(args);
1084 args->agbno = NULLAGBLOCK; 1085 args->agbno = NULLAGBLOCK;
1085 return 0; 1086 return 0;
@@ -2441,7 +2442,7 @@ xfs_alloc_vextent(
2441 DECLARE_COMPLETION_ONSTACK(done); 2442 DECLARE_COMPLETION_ONSTACK(done);
2442 2443
2443 args->done = &done; 2444 args->done = &done;
2444 INIT_WORK(&args->work, xfs_alloc_vextent_worker); 2445 INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker);
2445 queue_work(xfs_alloc_wq, &args->work); 2446 queue_work(xfs_alloc_wq, &args->work);
2446 wait_for_completion(&done); 2447 wait_for_completion(&done);
2447 return args->result; 2448 return args->result;
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index ae31c313a79e..8dad722c0041 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -981,10 +981,15 @@ xfs_vm_writepage(
981 imap_valid = 0; 981 imap_valid = 0;
982 } 982 }
983 } else { 983 } else {
984 if (PageUptodate(page)) { 984 if (PageUptodate(page))
985 ASSERT(buffer_mapped(bh)); 985 ASSERT(buffer_mapped(bh));
986 imap_valid = 0; 986 /*
987 } 987 * This buffer is not uptodate and will not be
988 * written to disk. Ensure that we will put any
989 * subsequent writeable buffers into a new
990 * ioend.
991 */
992 imap_valid = 0;
988 continue; 993 continue;
989 } 994 }
990 995
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 172d3cc8f8cb..a4beb421018a 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -201,14 +201,7 @@ xfs_buf_alloc(
201 bp->b_length = numblks; 201 bp->b_length = numblks;
202 bp->b_io_length = numblks; 202 bp->b_io_length = numblks;
203 bp->b_flags = flags; 203 bp->b_flags = flags;
204 204 bp->b_bn = blkno;
205 /*
206 * We do not set the block number here in the buffer because we have not
207 * finished initialising the buffer. We insert the buffer into the cache
208 * in this state, so this ensures that we are unable to do IO on a
209 * buffer that hasn't been fully initialised.
210 */
211 bp->b_bn = XFS_BUF_DADDR_NULL;
212 atomic_set(&bp->b_pin_count, 0); 205 atomic_set(&bp->b_pin_count, 0);
213 init_waitqueue_head(&bp->b_waiters); 206 init_waitqueue_head(&bp->b_waiters);
214 207
@@ -567,11 +560,6 @@ xfs_buf_get(
567 if (bp != new_bp) 560 if (bp != new_bp)
568 xfs_buf_free(new_bp); 561 xfs_buf_free(new_bp);
569 562
570 /*
571 * Now we have a workable buffer, fill in the block number so
572 * that we can do IO on it.
573 */
574 bp->b_bn = blkno;
575 bp->b_io_length = bp->b_length; 563 bp->b_io_length = bp->b_length;
576 564
577found: 565found:
@@ -772,7 +760,7 @@ xfs_buf_get_uncached(
772 int error, i; 760 int error, i;
773 xfs_buf_t *bp; 761 xfs_buf_t *bp;
774 762
775 bp = xfs_buf_alloc(target, 0, numblks, 0); 763 bp = xfs_buf_alloc(target, XFS_BUF_DADDR_NULL, numblks, 0);
776 if (unlikely(bp == NULL)) 764 if (unlikely(bp == NULL))
777 goto fail; 765 goto fail;
778 766
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 6cdbf90c6f7b..d041d47d9d86 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -505,6 +505,14 @@ xfs_inode_item_push(
505 } 505 }
506 506
507 /* 507 /*
508 * Stale inode items should force out the iclog.
509 */
510 if (ip->i_flags & XFS_ISTALE) {
511 rval = XFS_ITEM_PINNED;
512 goto out_unlock;
513 }
514
515 /*
508 * Someone else is already flushing the inode. Nothing we can do 516 * Someone else is already flushing the inode. Nothing we can do
509 * here but wait for the flush to finish and remove the item from 517 * here but wait for the flush to finish and remove the item from
510 * the AIL. 518 * the AIL.
@@ -514,15 +522,6 @@ xfs_inode_item_push(
514 goto out_unlock; 522 goto out_unlock;
515 } 523 }
516 524
517 /*
518 * Stale inode items should force out the iclog.
519 */
520 if (ip->i_flags & XFS_ISTALE) {
521 xfs_ifunlock(ip);
522 xfs_iunlock(ip, XFS_ILOCK_SHARED);
523 return XFS_ITEM_PINNED;
524 }
525
526 ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount)); 525 ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
527 ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount)); 526 ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
528 527
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index f30d9807dc48..d90d4a388609 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -38,13 +38,21 @@
38kmem_zone_t *xfs_log_ticket_zone; 38kmem_zone_t *xfs_log_ticket_zone;
39 39
40/* Local miscellaneous function prototypes */ 40/* Local miscellaneous function prototypes */
41STATIC int xlog_commit_record(struct log *log, struct xlog_ticket *ticket, 41STATIC int
42 xlog_in_core_t **, xfs_lsn_t *); 42xlog_commit_record(
43 struct xlog *log,
44 struct xlog_ticket *ticket,
45 struct xlog_in_core **iclog,
46 xfs_lsn_t *commitlsnp);
47
43STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp, 48STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp,
44 xfs_buftarg_t *log_target, 49 xfs_buftarg_t *log_target,
45 xfs_daddr_t blk_offset, 50 xfs_daddr_t blk_offset,
46 int num_bblks); 51 int num_bblks);
47STATIC int xlog_space_left(struct log *log, atomic64_t *head); 52STATIC int
53xlog_space_left(
54 struct xlog *log,
55 atomic64_t *head);
48STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog); 56STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
49STATIC void xlog_dealloc_log(xlog_t *log); 57STATIC void xlog_dealloc_log(xlog_t *log);
50 58
@@ -64,8 +72,10 @@ STATIC void xlog_state_switch_iclogs(xlog_t *log,
64 int eventual_size); 72 int eventual_size);
65STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog); 73STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog);
66 74
67STATIC void xlog_grant_push_ail(struct log *log, 75STATIC void
68 int need_bytes); 76xlog_grant_push_ail(
77 struct xlog *log,
78 int need_bytes);
69STATIC void xlog_regrant_reserve_log_space(xlog_t *log, 79STATIC void xlog_regrant_reserve_log_space(xlog_t *log,
70 xlog_ticket_t *ticket); 80 xlog_ticket_t *ticket);
71STATIC void xlog_ungrant_log_space(xlog_t *log, 81STATIC void xlog_ungrant_log_space(xlog_t *log,
@@ -73,7 +83,9 @@ STATIC void xlog_ungrant_log_space(xlog_t *log,
73 83
74#if defined(DEBUG) 84#if defined(DEBUG)
75STATIC void xlog_verify_dest_ptr(xlog_t *log, char *ptr); 85STATIC void xlog_verify_dest_ptr(xlog_t *log, char *ptr);
76STATIC void xlog_verify_grant_tail(struct log *log); 86STATIC void
87xlog_verify_grant_tail(
88 struct xlog *log);
77STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog, 89STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
78 int count, boolean_t syncing); 90 int count, boolean_t syncing);
79STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog, 91STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
@@ -89,9 +101,9 @@ STATIC int xlog_iclogs_empty(xlog_t *log);
89 101
90static void 102static void
91xlog_grant_sub_space( 103xlog_grant_sub_space(
92 struct log *log, 104 struct xlog *log,
93 atomic64_t *head, 105 atomic64_t *head,
94 int bytes) 106 int bytes)
95{ 107{
96 int64_t head_val = atomic64_read(head); 108 int64_t head_val = atomic64_read(head);
97 int64_t new, old; 109 int64_t new, old;
@@ -115,9 +127,9 @@ xlog_grant_sub_space(
115 127
116static void 128static void
117xlog_grant_add_space( 129xlog_grant_add_space(
118 struct log *log, 130 struct xlog *log,
119 atomic64_t *head, 131 atomic64_t *head,
120 int bytes) 132 int bytes)
121{ 133{
122 int64_t head_val = atomic64_read(head); 134 int64_t head_val = atomic64_read(head);
123 int64_t new, old; 135 int64_t new, old;
@@ -165,7 +177,7 @@ xlog_grant_head_wake_all(
165 177
166static inline int 178static inline int
167xlog_ticket_reservation( 179xlog_ticket_reservation(
168 struct log *log, 180 struct xlog *log,
169 struct xlog_grant_head *head, 181 struct xlog_grant_head *head,
170 struct xlog_ticket *tic) 182 struct xlog_ticket *tic)
171{ 183{
@@ -182,7 +194,7 @@ xlog_ticket_reservation(
182 194
183STATIC bool 195STATIC bool
184xlog_grant_head_wake( 196xlog_grant_head_wake(
185 struct log *log, 197 struct xlog *log,
186 struct xlog_grant_head *head, 198 struct xlog_grant_head *head,
187 int *free_bytes) 199 int *free_bytes)
188{ 200{
@@ -204,7 +216,7 @@ xlog_grant_head_wake(
204 216
205STATIC int 217STATIC int
206xlog_grant_head_wait( 218xlog_grant_head_wait(
207 struct log *log, 219 struct xlog *log,
208 struct xlog_grant_head *head, 220 struct xlog_grant_head *head,
209 struct xlog_ticket *tic, 221 struct xlog_ticket *tic,
210 int need_bytes) 222 int need_bytes)
@@ -256,7 +268,7 @@ shutdown:
256 */ 268 */
257STATIC int 269STATIC int
258xlog_grant_head_check( 270xlog_grant_head_check(
259 struct log *log, 271 struct xlog *log,
260 struct xlog_grant_head *head, 272 struct xlog_grant_head *head,
261 struct xlog_ticket *tic, 273 struct xlog_ticket *tic,
262 int *need_bytes) 274 int *need_bytes)
@@ -323,7 +335,7 @@ xfs_log_regrant(
323 struct xfs_mount *mp, 335 struct xfs_mount *mp,
324 struct xlog_ticket *tic) 336 struct xlog_ticket *tic)
325{ 337{
326 struct log *log = mp->m_log; 338 struct xlog *log = mp->m_log;
327 int need_bytes; 339 int need_bytes;
328 int error = 0; 340 int error = 0;
329 341
@@ -389,7 +401,7 @@ xfs_log_reserve(
389 bool permanent, 401 bool permanent,
390 uint t_type) 402 uint t_type)
391{ 403{
392 struct log *log = mp->m_log; 404 struct xlog *log = mp->m_log;
393 struct xlog_ticket *tic; 405 struct xlog_ticket *tic;
394 int need_bytes; 406 int need_bytes;
395 int error = 0; 407 int error = 0;
@@ -465,7 +477,7 @@ xfs_log_done(
465 struct xlog_in_core **iclog, 477 struct xlog_in_core **iclog,
466 uint flags) 478 uint flags)
467{ 479{
468 struct log *log = mp->m_log; 480 struct xlog *log = mp->m_log;
469 xfs_lsn_t lsn = 0; 481 xfs_lsn_t lsn = 0;
470 482
471 if (XLOG_FORCED_SHUTDOWN(log) || 483 if (XLOG_FORCED_SHUTDOWN(log) ||
@@ -810,6 +822,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
810void 822void
811xfs_log_unmount(xfs_mount_t *mp) 823xfs_log_unmount(xfs_mount_t *mp)
812{ 824{
825 cancel_delayed_work_sync(&mp->m_sync_work);
813 xfs_trans_ail_destroy(mp); 826 xfs_trans_ail_destroy(mp);
814 xlog_dealloc_log(mp->m_log); 827 xlog_dealloc_log(mp->m_log);
815} 828}
@@ -838,7 +851,7 @@ void
838xfs_log_space_wake( 851xfs_log_space_wake(
839 struct xfs_mount *mp) 852 struct xfs_mount *mp)
840{ 853{
841 struct log *log = mp->m_log; 854 struct xlog *log = mp->m_log;
842 int free_bytes; 855 int free_bytes;
843 856
844 if (XLOG_FORCED_SHUTDOWN(log)) 857 if (XLOG_FORCED_SHUTDOWN(log))
@@ -916,7 +929,7 @@ xfs_lsn_t
916xlog_assign_tail_lsn_locked( 929xlog_assign_tail_lsn_locked(
917 struct xfs_mount *mp) 930 struct xfs_mount *mp)
918{ 931{
919 struct log *log = mp->m_log; 932 struct xlog *log = mp->m_log;
920 struct xfs_log_item *lip; 933 struct xfs_log_item *lip;
921 xfs_lsn_t tail_lsn; 934 xfs_lsn_t tail_lsn;
922 935
@@ -965,7 +978,7 @@ xlog_assign_tail_lsn(
965 */ 978 */
966STATIC int 979STATIC int
967xlog_space_left( 980xlog_space_left(
968 struct log *log, 981 struct xlog *log,
969 atomic64_t *head) 982 atomic64_t *head)
970{ 983{
971 int free_bytes; 984 int free_bytes;
@@ -1277,7 +1290,7 @@ out:
1277 */ 1290 */
1278STATIC int 1291STATIC int
1279xlog_commit_record( 1292xlog_commit_record(
1280 struct log *log, 1293 struct xlog *log,
1281 struct xlog_ticket *ticket, 1294 struct xlog_ticket *ticket,
1282 struct xlog_in_core **iclog, 1295 struct xlog_in_core **iclog,
1283 xfs_lsn_t *commitlsnp) 1296 xfs_lsn_t *commitlsnp)
@@ -1311,7 +1324,7 @@ xlog_commit_record(
1311 */ 1324 */
1312STATIC void 1325STATIC void
1313xlog_grant_push_ail( 1326xlog_grant_push_ail(
1314 struct log *log, 1327 struct xlog *log,
1315 int need_bytes) 1328 int need_bytes)
1316{ 1329{
1317 xfs_lsn_t threshold_lsn = 0; 1330 xfs_lsn_t threshold_lsn = 0;
@@ -1790,7 +1803,7 @@ xlog_write_start_rec(
1790 1803
1791static xlog_op_header_t * 1804static xlog_op_header_t *
1792xlog_write_setup_ophdr( 1805xlog_write_setup_ophdr(
1793 struct log *log, 1806 struct xlog *log,
1794 struct xlog_op_header *ophdr, 1807 struct xlog_op_header *ophdr,
1795 struct xlog_ticket *ticket, 1808 struct xlog_ticket *ticket,
1796 uint flags) 1809 uint flags)
@@ -1873,7 +1886,7 @@ xlog_write_setup_copy(
1873 1886
1874static int 1887static int
1875xlog_write_copy_finish( 1888xlog_write_copy_finish(
1876 struct log *log, 1889 struct xlog *log,
1877 struct xlog_in_core *iclog, 1890 struct xlog_in_core *iclog,
1878 uint flags, 1891 uint flags,
1879 int *record_cnt, 1892 int *record_cnt,
@@ -1958,7 +1971,7 @@ xlog_write_copy_finish(
1958 */ 1971 */
1959int 1972int
1960xlog_write( 1973xlog_write(
1961 struct log *log, 1974 struct xlog *log,
1962 struct xfs_log_vec *log_vector, 1975 struct xfs_log_vec *log_vector,
1963 struct xlog_ticket *ticket, 1976 struct xlog_ticket *ticket,
1964 xfs_lsn_t *start_lsn, 1977 xfs_lsn_t *start_lsn,
@@ -2821,7 +2834,7 @@ _xfs_log_force(
2821 uint flags, 2834 uint flags,
2822 int *log_flushed) 2835 int *log_flushed)
2823{ 2836{
2824 struct log *log = mp->m_log; 2837 struct xlog *log = mp->m_log;
2825 struct xlog_in_core *iclog; 2838 struct xlog_in_core *iclog;
2826 xfs_lsn_t lsn; 2839 xfs_lsn_t lsn;
2827 2840
@@ -2969,7 +2982,7 @@ _xfs_log_force_lsn(
2969 uint flags, 2982 uint flags,
2970 int *log_flushed) 2983 int *log_flushed)
2971{ 2984{
2972 struct log *log = mp->m_log; 2985 struct xlog *log = mp->m_log;
2973 struct xlog_in_core *iclog; 2986 struct xlog_in_core *iclog;
2974 int already_slept = 0; 2987 int already_slept = 0;
2975 2988
@@ -3147,7 +3160,7 @@ xfs_log_ticket_get(
3147 */ 3160 */
3148xlog_ticket_t * 3161xlog_ticket_t *
3149xlog_ticket_alloc( 3162xlog_ticket_alloc(
3150 struct log *log, 3163 struct xlog *log,
3151 int unit_bytes, 3164 int unit_bytes,
3152 int cnt, 3165 int cnt,
3153 char client, 3166 char client,
@@ -3278,7 +3291,7 @@ xlog_ticket_alloc(
3278 */ 3291 */
3279void 3292void
3280xlog_verify_dest_ptr( 3293xlog_verify_dest_ptr(
3281 struct log *log, 3294 struct xlog *log,
3282 char *ptr) 3295 char *ptr)
3283{ 3296{
3284 int i; 3297 int i;
@@ -3307,7 +3320,7 @@ xlog_verify_dest_ptr(
3307 */ 3320 */
3308STATIC void 3321STATIC void
3309xlog_verify_grant_tail( 3322xlog_verify_grant_tail(
3310 struct log *log) 3323 struct xlog *log)
3311{ 3324{
3312 int tail_cycle, tail_blocks; 3325 int tail_cycle, tail_blocks;
3313 int cycle, space; 3326 int cycle, space;
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 7d6197c58493..ddc4529d07d3 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -44,7 +44,7 @@
44 */ 44 */
45static struct xlog_ticket * 45static struct xlog_ticket *
46xlog_cil_ticket_alloc( 46xlog_cil_ticket_alloc(
47 struct log *log) 47 struct xlog *log)
48{ 48{
49 struct xlog_ticket *tic; 49 struct xlog_ticket *tic;
50 50
@@ -72,7 +72,7 @@ xlog_cil_ticket_alloc(
72 */ 72 */
73void 73void
74xlog_cil_init_post_recovery( 74xlog_cil_init_post_recovery(
75 struct log *log) 75 struct xlog *log)
76{ 76{
77 log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log); 77 log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
78 log->l_cilp->xc_ctx->sequence = 1; 78 log->l_cilp->xc_ctx->sequence = 1;
@@ -182,7 +182,7 @@ xlog_cil_prepare_log_vecs(
182 */ 182 */
183STATIC void 183STATIC void
184xfs_cil_prepare_item( 184xfs_cil_prepare_item(
185 struct log *log, 185 struct xlog *log,
186 struct xfs_log_vec *lv, 186 struct xfs_log_vec *lv,
187 int *len, 187 int *len,
188 int *diff_iovecs) 188 int *diff_iovecs)
@@ -231,7 +231,7 @@ xfs_cil_prepare_item(
231 */ 231 */
232static void 232static void
233xlog_cil_insert_items( 233xlog_cil_insert_items(
234 struct log *log, 234 struct xlog *log,
235 struct xfs_log_vec *log_vector, 235 struct xfs_log_vec *log_vector,
236 struct xlog_ticket *ticket) 236 struct xlog_ticket *ticket)
237{ 237{
@@ -373,7 +373,7 @@ xlog_cil_committed(
373 */ 373 */
374STATIC int 374STATIC int
375xlog_cil_push( 375xlog_cil_push(
376 struct log *log) 376 struct xlog *log)
377{ 377{
378 struct xfs_cil *cil = log->l_cilp; 378 struct xfs_cil *cil = log->l_cilp;
379 struct xfs_log_vec *lv; 379 struct xfs_log_vec *lv;
@@ -601,7 +601,7 @@ xlog_cil_push_work(
601 */ 601 */
602static void 602static void
603xlog_cil_push_background( 603xlog_cil_push_background(
604 struct log *log) 604 struct xlog *log)
605{ 605{
606 struct xfs_cil *cil = log->l_cilp; 606 struct xfs_cil *cil = log->l_cilp;
607 607
@@ -629,7 +629,7 @@ xlog_cil_push_background(
629 629
630static void 630static void
631xlog_cil_push_foreground( 631xlog_cil_push_foreground(
632 struct log *log, 632 struct xlog *log,
633 xfs_lsn_t push_seq) 633 xfs_lsn_t push_seq)
634{ 634{
635 struct xfs_cil *cil = log->l_cilp; 635 struct xfs_cil *cil = log->l_cilp;
@@ -683,7 +683,7 @@ xfs_log_commit_cil(
683 xfs_lsn_t *commit_lsn, 683 xfs_lsn_t *commit_lsn,
684 int flags) 684 int flags)
685{ 685{
686 struct log *log = mp->m_log; 686 struct xlog *log = mp->m_log;
687 int log_flags = 0; 687 int log_flags = 0;
688 struct xfs_log_vec *log_vector; 688 struct xfs_log_vec *log_vector;
689 689
@@ -754,7 +754,7 @@ xfs_log_commit_cil(
754 */ 754 */
755xfs_lsn_t 755xfs_lsn_t
756xlog_cil_force_lsn( 756xlog_cil_force_lsn(
757 struct log *log, 757 struct xlog *log,
758 xfs_lsn_t sequence) 758 xfs_lsn_t sequence)
759{ 759{
760 struct xfs_cil *cil = log->l_cilp; 760 struct xfs_cil *cil = log->l_cilp;
@@ -833,7 +833,7 @@ xfs_log_item_in_current_chkpt(
833 */ 833 */
834int 834int
835xlog_cil_init( 835xlog_cil_init(
836 struct log *log) 836 struct xlog *log)
837{ 837{
838 struct xfs_cil *cil; 838 struct xfs_cil *cil;
839 struct xfs_cil_ctx *ctx; 839 struct xfs_cil_ctx *ctx;
@@ -869,7 +869,7 @@ xlog_cil_init(
869 869
870void 870void
871xlog_cil_destroy( 871xlog_cil_destroy(
872 struct log *log) 872 struct xlog *log)
873{ 873{
874 if (log->l_cilp->xc_ctx) { 874 if (log->l_cilp->xc_ctx) {
875 if (log->l_cilp->xc_ctx->ticket) 875 if (log->l_cilp->xc_ctx->ticket)
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 5bc33261f5be..72eba2201b14 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -19,7 +19,7 @@
19#define __XFS_LOG_PRIV_H__ 19#define __XFS_LOG_PRIV_H__
20 20
21struct xfs_buf; 21struct xfs_buf;
22struct log; 22struct xlog;
23struct xlog_ticket; 23struct xlog_ticket;
24struct xfs_mount; 24struct xfs_mount;
25 25
@@ -352,7 +352,7 @@ typedef struct xlog_in_core {
352 struct xlog_in_core *ic_next; 352 struct xlog_in_core *ic_next;
353 struct xlog_in_core *ic_prev; 353 struct xlog_in_core *ic_prev;
354 struct xfs_buf *ic_bp; 354 struct xfs_buf *ic_bp;
355 struct log *ic_log; 355 struct xlog *ic_log;
356 int ic_size; 356 int ic_size;
357 int ic_offset; 357 int ic_offset;
358 int ic_bwritecnt; 358 int ic_bwritecnt;
@@ -409,7 +409,7 @@ struct xfs_cil_ctx {
409 * operations almost as efficient as the old logging methods. 409 * operations almost as efficient as the old logging methods.
410 */ 410 */
411struct xfs_cil { 411struct xfs_cil {
412 struct log *xc_log; 412 struct xlog *xc_log;
413 struct list_head xc_cil; 413 struct list_head xc_cil;
414 spinlock_t xc_cil_lock; 414 spinlock_t xc_cil_lock;
415 struct xfs_cil_ctx *xc_ctx; 415 struct xfs_cil_ctx *xc_ctx;
@@ -487,7 +487,7 @@ struct xlog_grant_head {
487 * overflow 31 bits worth of byte offset, so using a byte number will mean 487 * overflow 31 bits worth of byte offset, so using a byte number will mean
488 * that round off problems won't occur when releasing partial reservations. 488 * that round off problems won't occur when releasing partial reservations.
489 */ 489 */
490typedef struct log { 490typedef struct xlog {
491 /* The following fields don't need locking */ 491 /* The following fields don't need locking */
492 struct xfs_mount *l_mp; /* mount point */ 492 struct xfs_mount *l_mp; /* mount point */
493 struct xfs_ail *l_ailp; /* AIL log is working with */ 493 struct xfs_ail *l_ailp; /* AIL log is working with */
@@ -553,9 +553,14 @@ extern int xlog_recover_finish(xlog_t *log);
553extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); 553extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
554 554
555extern kmem_zone_t *xfs_log_ticket_zone; 555extern kmem_zone_t *xfs_log_ticket_zone;
556struct xlog_ticket *xlog_ticket_alloc(struct log *log, int unit_bytes, 556struct xlog_ticket *
557 int count, char client, bool permanent, 557xlog_ticket_alloc(
558 xfs_km_flags_t alloc_flags); 558 struct xlog *log,
559 int unit_bytes,
560 int count,
561 char client,
562 bool permanent,
563 xfs_km_flags_t alloc_flags);
559 564
560 565
561static inline void 566static inline void
@@ -567,9 +572,14 @@ xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
567} 572}
568 573
569void xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket); 574void xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
570int xlog_write(struct log *log, struct xfs_log_vec *log_vector, 575int
571 struct xlog_ticket *tic, xfs_lsn_t *start_lsn, 576xlog_write(
572 xlog_in_core_t **commit_iclog, uint flags); 577 struct xlog *log,
578 struct xfs_log_vec *log_vector,
579 struct xlog_ticket *tic,
580 xfs_lsn_t *start_lsn,
581 struct xlog_in_core **commit_iclog,
582 uint flags);
573 583
574/* 584/*
575 * When we crack an atomic LSN, we sample it first so that the value will not 585 * When we crack an atomic LSN, we sample it first so that the value will not
@@ -629,17 +639,23 @@ xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
629/* 639/*
630 * Committed Item List interfaces 640 * Committed Item List interfaces
631 */ 641 */
632int xlog_cil_init(struct log *log); 642int
633void xlog_cil_init_post_recovery(struct log *log); 643xlog_cil_init(struct xlog *log);
634void xlog_cil_destroy(struct log *log); 644void
645xlog_cil_init_post_recovery(struct xlog *log);
646void
647xlog_cil_destroy(struct xlog *log);
635 648
636/* 649/*
637 * CIL force routines 650 * CIL force routines
638 */ 651 */
639xfs_lsn_t xlog_cil_force_lsn(struct log *log, xfs_lsn_t sequence); 652xfs_lsn_t
653xlog_cil_force_lsn(
654 struct xlog *log,
655 xfs_lsn_t sequence);
640 656
641static inline void 657static inline void
642xlog_cil_force(struct log *log) 658xlog_cil_force(struct xlog *log)
643{ 659{
644 xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence); 660 xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
645} 661}
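
The grant heads and LSNs these prototypes operate on pack a cycle number and a byte (or block) count into a single atomic64_t, which is why the header's comment above worries about overflowing 31 bits of byte offset: byte granularity avoids round-off when partial reservations are released, and keeping both halves in one word lets them be sampled atomically, per the "sample it first" remark. A minimal sketch of the crack/assign pattern follows; xlog_assign_grant_head() appears in the hunk header above, but the other helper bodies here are paraphrased from memory of this era's xfs_log_priv.h and should be treated as illustrative rather than quoted from the patch.

	#include <linux/atomic.h>

	/* Pack cycle (high 32 bits) and space (low 32 bits) into one word. */
	static inline int64_t
	xlog_assign_grant_head_val(int cycle, int space)
	{
		return ((int64_t)cycle << 32) | space;
	}

	static inline void
	xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
	{
		atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
	}

	/* Sample the atomic exactly once, then split the sampled value, so
	 * cycle and space are guaranteed to come from the same instant. */
	static inline void
	xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
	{
		int64_t val = atomic64_read(head);

		*cycle = val >> 32;
		*space = val & 0xffffffff;
	}

The atomic LSN helpers mentioned in the truncated comment above follow the same sample-then-crack discipline, only splitting into cycle and block instead of cycle and byte count.
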
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index ca386909131a..a7be98abd6a9 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1471,8 +1471,8 @@ xlog_recover_add_item(
1471 1471
1472STATIC int 1472STATIC int
1473xlog_recover_add_to_cont_trans( 1473xlog_recover_add_to_cont_trans(
1474 struct log *log, 1474 struct xlog *log,
1475 xlog_recover_t *trans, 1475 struct xlog_recover *trans,
1476 xfs_caddr_t dp, 1476 xfs_caddr_t dp,
1477 int len) 1477 int len)
1478{ 1478{
@@ -1517,8 +1517,8 @@ xlog_recover_add_to_cont_trans(
1517 */ 1517 */
1518STATIC int 1518STATIC int
1519xlog_recover_add_to_trans( 1519xlog_recover_add_to_trans(
1520 struct log *log, 1520 struct xlog *log,
1521 xlog_recover_t *trans, 1521 struct xlog_recover *trans,
1522 xfs_caddr_t dp, 1522 xfs_caddr_t dp,
1523 int len) 1523 int len)
1524{ 1524{
@@ -1588,8 +1588,8 @@ xlog_recover_add_to_trans(
1588 */ 1588 */
1589STATIC int 1589STATIC int
1590xlog_recover_reorder_trans( 1590xlog_recover_reorder_trans(
1591 struct log *log, 1591 struct xlog *log,
1592 xlog_recover_t *trans, 1592 struct xlog_recover *trans,
1593 int pass) 1593 int pass)
1594{ 1594{
1595 xlog_recover_item_t *item, *n; 1595 xlog_recover_item_t *item, *n;
@@ -1642,8 +1642,8 @@ xlog_recover_reorder_trans(
1642 */ 1642 */
1643STATIC int 1643STATIC int
1644xlog_recover_buffer_pass1( 1644xlog_recover_buffer_pass1(
1645 struct log *log, 1645 struct xlog *log,
1646 xlog_recover_item_t *item) 1646 struct xlog_recover_item *item)
1647{ 1647{
1648 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; 1648 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1649 struct list_head *bucket; 1649 struct list_head *bucket;
@@ -1696,7 +1696,7 @@ xlog_recover_buffer_pass1(
1696 */ 1696 */
1697STATIC int 1697STATIC int
1698xlog_check_buffer_cancelled( 1698xlog_check_buffer_cancelled(
1699 struct log *log, 1699 struct xlog *log,
1700 xfs_daddr_t blkno, 1700 xfs_daddr_t blkno,
1701 uint len, 1701 uint len,
1702 ushort flags) 1702 ushort flags)
@@ -2689,9 +2689,9 @@ xlog_recover_free_trans(
2689 2689
2690STATIC int 2690STATIC int
2691xlog_recover_commit_pass1( 2691xlog_recover_commit_pass1(
2692 struct log *log, 2692 struct xlog *log,
2693 struct xlog_recover *trans, 2693 struct xlog_recover *trans,
2694 xlog_recover_item_t *item) 2694 struct xlog_recover_item *item)
2695{ 2695{
2696 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1); 2696 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
2697 2697
@@ -2716,10 +2716,10 @@ xlog_recover_commit_pass1(
2716 2716
2717STATIC int 2717STATIC int
2718xlog_recover_commit_pass2( 2718xlog_recover_commit_pass2(
2719 struct log *log, 2719 struct xlog *log,
2720 struct xlog_recover *trans, 2720 struct xlog_recover *trans,
2721 struct list_head *buffer_list, 2721 struct list_head *buffer_list,
2722 xlog_recover_item_t *item) 2722 struct xlog_recover_item *item)
2723{ 2723{
2724 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2); 2724 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
2725 2725
@@ -2753,7 +2753,7 @@ xlog_recover_commit_pass2(
2753 */ 2753 */
2754STATIC int 2754STATIC int
2755xlog_recover_commit_trans( 2755xlog_recover_commit_trans(
2756 struct log *log, 2756 struct xlog *log,
2757 struct xlog_recover *trans, 2757 struct xlog_recover *trans,
2758 int pass) 2758 int pass)
2759{ 2759{
@@ -2793,8 +2793,8 @@ out:
2793 2793
2794STATIC int 2794STATIC int
2795xlog_recover_unmount_trans( 2795xlog_recover_unmount_trans(
2796 struct log *log, 2796 struct xlog *log,
2797 xlog_recover_t *trans) 2797 struct xlog_recover *trans)
2798{ 2798{
2799 /* Do nothing now */ 2799 /* Do nothing now */
2800 xfs_warn(log->l_mp, "%s: Unmount LR", __func__); 2800 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 8b89c5ac72d9..90c1fc9eaea4 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -53,7 +53,7 @@ typedef struct xfs_trans_reservations {
53 53
54#include "xfs_sync.h" 54#include "xfs_sync.h"
55 55
56struct log; 56struct xlog;
57struct xfs_mount_args; 57struct xfs_mount_args;
58struct xfs_inode; 58struct xfs_inode;
59struct xfs_bmbt_irec; 59struct xfs_bmbt_irec;
@@ -133,7 +133,7 @@ typedef struct xfs_mount {
133 uint m_readio_blocks; /* min read size blocks */ 133 uint m_readio_blocks; /* min read size blocks */
134 uint m_writeio_log; /* min write size log bytes */ 134 uint m_writeio_log; /* min write size log bytes */
135 uint m_writeio_blocks; /* min write size blocks */ 135 uint m_writeio_blocks; /* min write size blocks */
136 struct log *m_log; /* log specific stuff */ 136 struct xlog *m_log; /* log specific stuff */
137 int m_logbufs; /* number of log buffers */ 137 int m_logbufs; /* number of log buffers */
138 int m_logbsize; /* size of each log buffer */ 138 int m_logbsize; /* size of each log buffer */
139 uint m_rsumlevels; /* rt summary levels */ 139 uint m_rsumlevels; /* rt summary levels */
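
The xfs_mount.h hunks show why the struct log → struct xlog rename can sweep through so many files mechanically: headers that merely hold a pointer to the log need nothing more than an opaque forward declaration, so the rename touches type names without requiring any new includes or layout knowledge. A hypothetical minimal example of the idiom (example_mount is invented purely for illustration):

	struct xlog;			/* opaque: full definition stays in
					 * xfs_log_priv.h */

	struct example_mount {
		struct xlog	*m_log;	/* pointer-only use, so the compiler
					 * never needs the struct's size */
	};

Only translation units that dereference the pointer, such as xfs_log.c and xfs_log_cil.c above, need the real definition.
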
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index c9d3409c5ca3..1e9ee064dbb2 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -386,23 +386,23 @@ xfs_sync_worker(
386 * We shouldn't write/force the log if we are in the mount/unmount 386 * We shouldn't write/force the log if we are in the mount/unmount
387 * process or on a read only filesystem. The workqueue still needs to be 387 * process or on a read only filesystem. The workqueue still needs to be
388 * active in both cases, however, because it is used for inode reclaim 388 * active in both cases, however, because it is used for inode reclaim
389 * during these times. Use the s_umount semaphore to provide exclusion 389 * during these times. Use the MS_ACTIVE flag to avoid doing anything
390 * with unmount. 390 * during mount. Doing work during unmount is avoided by calling
391 * cancel_delayed_work_sync on this work queue before tearing down
392 * the ail and the log in xfs_log_unmount.
391 */ 393 */
392 if (down_read_trylock(&mp->m_super->s_umount)) { 394 if (!(mp->m_super->s_flags & MS_ACTIVE) &&
393 if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { 395 !(mp->m_flags & XFS_MOUNT_RDONLY)) {
394 /* dgc: errors ignored here */ 396 /* dgc: errors ignored here */
395 if (mp->m_super->s_frozen == SB_UNFROZEN && 397 if (mp->m_super->s_frozen == SB_UNFROZEN &&
396 xfs_log_need_covered(mp)) 398 xfs_log_need_covered(mp))
397 error = xfs_fs_log_dummy(mp); 399 error = xfs_fs_log_dummy(mp);
398 else 400 else
399 xfs_log_force(mp, 0); 401 xfs_log_force(mp, 0);
400 402
401 /* start pushing all the metadata that is currently 403 /* start pushing all the metadata that is currently
402 * dirty */ 404 * dirty */
403 xfs_ail_push_all(mp->m_ail); 405 xfs_ail_push_all(mp->m_ail);
404 }
405 up_read(&mp->m_super->s_umount);
406 } 406 }
407 407
408 /* queue us up again */ 408 /* queue us up again */
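
The rewritten comment in xfs_sync_worker() leans on two lifecycle guarantees instead of the s_umount semaphore: MS_ACTIVE is only set on the superblock once mount has completed, and unmount stops this work queue with cancel_delayed_work_sync() before the AIL and the log are torn down. A sketch of the teardown side of that contract, assuming the delayed work lives in mp->m_sync_work as it does in this era's struct xfs_mount; the function name is hypothetical and condenses what the comment attributes to xfs_log_unmount:

	#include <linux/workqueue.h>

	/* Hypothetical condensation of the unmount-side ordering: stop the
	 * worker first, then tear down the structures it touches. */
	static void example_log_unmount(struct xfs_mount *mp)
	{
		/* Returns only after any in-flight xfs_sync_worker() call has
		 * finished, so the worker never sees a half-torn-down log. */
		cancel_delayed_work_sync(&mp->m_sync_work);

		/* ... AIL and log teardown can proceed safely here ... */
	}

Because the cancellation is synchronous, the worker itself needs only the cheap s_flags test above to cover the mount side, and no lock is shared with the unmount path.
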
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 7cf9d3529e51..caf5dabfd553 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -32,7 +32,7 @@ struct xfs_da_node_entry;
32struct xfs_dquot; 32struct xfs_dquot;
33struct xfs_log_item; 33struct xfs_log_item;
34struct xlog_ticket; 34struct xlog_ticket;
35struct log; 35struct xlog;
36struct xlog_recover; 36struct xlog_recover;
37struct xlog_recover_item; 37struct xlog_recover_item;
38struct xfs_buf_log_format; 38struct xfs_buf_log_format;
@@ -762,7 +762,7 @@ DEFINE_DQUOT_EVENT(xfs_dqflush_force);
762DEFINE_DQUOT_EVENT(xfs_dqflush_done); 762DEFINE_DQUOT_EVENT(xfs_dqflush_done);
763 763
764DECLARE_EVENT_CLASS(xfs_loggrant_class, 764DECLARE_EVENT_CLASS(xfs_loggrant_class,
765 TP_PROTO(struct log *log, struct xlog_ticket *tic), 765 TP_PROTO(struct xlog *log, struct xlog_ticket *tic),
766 TP_ARGS(log, tic), 766 TP_ARGS(log, tic),
767 TP_STRUCT__entry( 767 TP_STRUCT__entry(
768 __field(dev_t, dev) 768 __field(dev_t, dev)
@@ -830,7 +830,7 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
830 830
831#define DEFINE_LOGGRANT_EVENT(name) \ 831#define DEFINE_LOGGRANT_EVENT(name) \
832DEFINE_EVENT(xfs_loggrant_class, name, \ 832DEFINE_EVENT(xfs_loggrant_class, name, \
833 TP_PROTO(struct log *log, struct xlog_ticket *tic), \ 833 TP_PROTO(struct xlog *log, struct xlog_ticket *tic), \
834 TP_ARGS(log, tic)) 834 TP_ARGS(log, tic))
835DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm); 835DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
836DEFINE_LOGGRANT_EVENT(xfs_log_done_perm); 836DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
@@ -1664,7 +1664,7 @@ DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
1664DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after); 1664DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);
1665 1665
1666DECLARE_EVENT_CLASS(xfs_log_recover_item_class, 1666DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
1667 TP_PROTO(struct log *log, struct xlog_recover *trans, 1667 TP_PROTO(struct xlog *log, struct xlog_recover *trans,
1668 struct xlog_recover_item *item, int pass), 1668 struct xlog_recover_item *item, int pass),
1669 TP_ARGS(log, trans, item, pass), 1669 TP_ARGS(log, trans, item, pass),
1670 TP_STRUCT__entry( 1670 TP_STRUCT__entry(
@@ -1698,7 +1698,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
1698 1698
1699#define DEFINE_LOG_RECOVER_ITEM(name) \ 1699#define DEFINE_LOG_RECOVER_ITEM(name) \
1700DEFINE_EVENT(xfs_log_recover_item_class, name, \ 1700DEFINE_EVENT(xfs_log_recover_item_class, name, \
1701 TP_PROTO(struct log *log, struct xlog_recover *trans, \ 1701 TP_PROTO(struct xlog *log, struct xlog_recover *trans, \
1702 struct xlog_recover_item *item, int pass), \ 1702 struct xlog_recover_item *item, int pass), \
1703 TP_ARGS(log, trans, item, pass)) 1703 TP_ARGS(log, trans, item, pass))
1704 1704
@@ -1709,7 +1709,7 @@ DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail);
1709DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover); 1709DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover);
1710 1710
1711DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class, 1711DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
1712 TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), 1712 TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f),
1713 TP_ARGS(log, buf_f), 1713 TP_ARGS(log, buf_f),
1714 TP_STRUCT__entry( 1714 TP_STRUCT__entry(
1715 __field(dev_t, dev) 1715 __field(dev_t, dev)
@@ -1739,7 +1739,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
1739 1739
1740#define DEFINE_LOG_RECOVER_BUF_ITEM(name) \ 1740#define DEFINE_LOG_RECOVER_BUF_ITEM(name) \
1741DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \ 1741DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \
1742 TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), \ 1742 TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f), \
1743 TP_ARGS(log, buf_f)) 1743 TP_ARGS(log, buf_f))
1744 1744
1745DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel); 1745DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel);
@@ -1752,7 +1752,7 @@ DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf);
1752DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf); 1752DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf);
1753 1753
1754DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class, 1754DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
1755 TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), 1755 TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f),
1756 TP_ARGS(log, in_f), 1756 TP_ARGS(log, in_f),
1757 TP_STRUCT__entry( 1757 TP_STRUCT__entry(
1758 __field(dev_t, dev) 1758 __field(dev_t, dev)
@@ -1790,7 +1790,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
1790) 1790)
1791#define DEFINE_LOG_RECOVER_INO_ITEM(name) \ 1791#define DEFINE_LOG_RECOVER_INO_ITEM(name) \
1792DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \ 1792DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \
1793 TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), \ 1793 TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f), \
1794 TP_ARGS(log, in_f)) 1794 TP_ARGS(log, in_f))
1795 1795
1796DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover); 1796DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover);