Diffstat (limited to 'fs')
57 files changed, 861 insertions, 437 deletions
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 3f75895c919b..8f7d1237b7a0 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -179,7 +179,8 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id, | |||
179 | 179 | ||
180 | static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, | 180 | static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, |
181 | struct ulist *parents, int level, | 181 | struct ulist *parents, int level, |
182 | struct btrfs_key *key, u64 wanted_disk_byte, | 182 | struct btrfs_key *key, u64 time_seq, |
183 | u64 wanted_disk_byte, | ||
183 | const u64 *extent_item_pos) | 184 | const u64 *extent_item_pos) |
184 | { | 185 | { |
185 | int ret; | 186 | int ret; |
@@ -212,7 +213,7 @@ add_parent: | |||
212 | */ | 213 | */ |
213 | while (1) { | 214 | while (1) { |
214 | eie = NULL; | 215 | eie = NULL; |
215 | ret = btrfs_next_leaf(root, path); | 216 | ret = btrfs_next_old_leaf(root, path, time_seq); |
216 | if (ret < 0) | 217 | if (ret < 0) |
217 | return ret; | 218 | return ret; |
218 | if (ret) | 219 | if (ret) |
@@ -294,18 +295,10 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info, | |||
294 | goto out; | 295 | goto out; |
295 | } | 296 | } |
296 | 297 | ||
297 | if (level == 0) { | 298 | if (level == 0) |
298 | if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) { | ||
299 | ret = btrfs_next_leaf(root, path); | ||
300 | if (ret) | ||
301 | goto out; | ||
302 | eb = path->nodes[0]; | ||
303 | } | ||
304 | |||
305 | btrfs_item_key_to_cpu(eb, &key, path->slots[0]); | 299 | btrfs_item_key_to_cpu(eb, &key, path->slots[0]); |
306 | } | ||
307 | 300 | ||
308 | ret = add_all_parents(root, path, parents, level, &key, | 301 | ret = add_all_parents(root, path, parents, level, &key, time_seq, |
309 | ref->wanted_disk_byte, extent_item_pos); | 302 | ref->wanted_disk_byte, extent_item_pos); |
310 | out: | 303 | out: |
311 | btrfs_free_path(path); | 304 | btrfs_free_path(path); |
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index e616f8872e69..12394a90d60f 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -37,6 +37,7 @@ | |||
37 | #define BTRFS_INODE_IN_DEFRAG 3 | 37 | #define BTRFS_INODE_IN_DEFRAG 3 |
38 | #define BTRFS_INODE_DELALLOC_META_RESERVED 4 | 38 | #define BTRFS_INODE_DELALLOC_META_RESERVED 4 |
39 | #define BTRFS_INODE_HAS_ORPHAN_ITEM 5 | 39 | #define BTRFS_INODE_HAS_ORPHAN_ITEM 5 |
40 | #define BTRFS_INODE_HAS_ASYNC_EXTENT 6 | ||
40 | 41 | ||
41 | /* in memory btrfs inode */ | 42 | /* in memory btrfs inode */ |
42 | struct btrfs_inode { | 43 | struct btrfs_inode { |
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 9cebb1fd6a3c..da6e9364a5e3 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -93,6 +93,7 @@ | |||
93 | #include "print-tree.h" | 93 | #include "print-tree.h" |
94 | #include "locking.h" | 94 | #include "locking.h" |
95 | #include "check-integrity.h" | 95 | #include "check-integrity.h" |
96 | #include "rcu-string.h" | ||
96 | 97 | ||
97 | #define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000 | 98 | #define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000 |
98 | #define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000 | 99 | #define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000 |
@@ -843,13 +844,14 @@ static int btrfsic_process_superblock_dev_mirror( | |||
843 | superblock_tmp->never_written = 0; | 844 | superblock_tmp->never_written = 0; |
844 | superblock_tmp->mirror_num = 1 + superblock_mirror_num; | 845 | superblock_tmp->mirror_num = 1 + superblock_mirror_num; |
845 | if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) | 846 | if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) |
846 | printk(KERN_INFO "New initial S-block (bdev %p, %s)" | 847 | printk_in_rcu(KERN_INFO "New initial S-block (bdev %p, %s)" |
847 | " @%llu (%s/%llu/%d)\n", | 848 | " @%llu (%s/%llu/%d)\n", |
848 | superblock_bdev, device->name, | 849 | superblock_bdev, |
849 | (unsigned long long)dev_bytenr, | 850 | rcu_str_deref(device->name), |
850 | dev_state->name, | 851 | (unsigned long long)dev_bytenr, |
851 | (unsigned long long)dev_bytenr, | 852 | dev_state->name, |
852 | superblock_mirror_num); | 853 | (unsigned long long)dev_bytenr, |
854 | superblock_mirror_num); | ||
853 | list_add(&superblock_tmp->all_blocks_node, | 855 | list_add(&superblock_tmp->all_blocks_node, |
854 | &state->all_blocks_list); | 856 | &state->all_blocks_list); |
855 | btrfsic_block_hashtable_add(superblock_tmp, | 857 | btrfsic_block_hashtable_add(superblock_tmp, |
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index d7a96cfdc50a..15cbc2bf4ff0 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -467,6 +467,15 @@ static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info, | |||
467 | return 0; | 467 | return 0; |
468 | } | 468 | } |
469 | 469 | ||
470 | /* | ||
471 | * This allocates memory and gets a tree modification sequence number when | ||
472 | * needed. | ||
473 | * | ||
474 | * Returns 0 when no sequence number is needed, < 0 on error. | ||
475 | * Returns 1 when a sequence number was added. In this case, | ||
476 | * fs_info->tree_mod_seq_lock was acquired and must be released by the caller | ||
477 | * after inserting into the rb tree. | ||
478 | */ | ||
470 | static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags, | 479 | static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags, |
471 | struct tree_mod_elem **tm_ret) | 480 | struct tree_mod_elem **tm_ret) |
472 | { | 481 | { |
@@ -491,11 +500,11 @@ static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags, | |||
491 | */ | 500 | */ |
492 | kfree(tm); | 501 | kfree(tm); |
493 | seq = 0; | 502 | seq = 0; |
503 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
494 | } else { | 504 | } else { |
495 | __get_tree_mod_seq(fs_info, &tm->elem); | 505 | __get_tree_mod_seq(fs_info, &tm->elem); |
496 | seq = tm->elem.seq; | 506 | seq = tm->elem.seq; |
497 | } | 507 | } |
498 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
499 | 508 | ||
500 | return seq; | 509 | return seq; |
501 | } | 510 | } |
@@ -521,7 +530,9 @@ tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info, | |||
521 | tm->slot = slot; | 530 | tm->slot = slot; |
522 | tm->generation = btrfs_node_ptr_generation(eb, slot); | 531 | tm->generation = btrfs_node_ptr_generation(eb, slot); |
523 | 532 | ||
524 | return __tree_mod_log_insert(fs_info, tm); | 533 | ret = __tree_mod_log_insert(fs_info, tm); |
534 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
535 | return ret; | ||
525 | } | 536 | } |
526 | 537 | ||
527 | static noinline int | 538 | static noinline int |
@@ -559,7 +570,9 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info, | |||
559 | tm->move.nr_items = nr_items; | 570 | tm->move.nr_items = nr_items; |
560 | tm->op = MOD_LOG_MOVE_KEYS; | 571 | tm->op = MOD_LOG_MOVE_KEYS; |
561 | 572 | ||
562 | return __tree_mod_log_insert(fs_info, tm); | 573 | ret = __tree_mod_log_insert(fs_info, tm); |
574 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
575 | return ret; | ||
563 | } | 576 | } |
564 | 577 | ||
565 | static noinline int | 578 | static noinline int |
@@ -580,7 +593,9 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info, | |||
580 | tm->generation = btrfs_header_generation(old_root); | 593 | tm->generation = btrfs_header_generation(old_root); |
581 | tm->op = MOD_LOG_ROOT_REPLACE; | 594 | tm->op = MOD_LOG_ROOT_REPLACE; |
582 | 595 | ||
583 | return __tree_mod_log_insert(fs_info, tm); | 596 | ret = __tree_mod_log_insert(fs_info, tm); |
597 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
598 | return ret; | ||
584 | } | 599 | } |
585 | 600 | ||
586 | static struct tree_mod_elem * | 601 | static struct tree_mod_elem * |
@@ -1023,6 +1038,10 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info, | |||
1023 | looped = 1; | 1038 | looped = 1; |
1024 | } | 1039 | } |
1025 | 1040 | ||
1041 | /* if there's no old root to return, return what we found instead */ | ||
1042 | if (!found) | ||
1043 | found = tm; | ||
1044 | |||
1026 | return found; | 1045 | return found; |
1027 | } | 1046 | } |
1028 | 1047 | ||
@@ -1143,22 +1162,36 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, | |||
1143 | return eb_rewin; | 1162 | return eb_rewin; |
1144 | } | 1163 | } |
1145 | 1164 | ||
1165 | /* | ||
1166 | * get_old_root() rewinds the state of @root's root node to the given @time_seq | ||
1167 | * value. If there are no changes, the current root->root_node is returned. If | ||
1168 | * anything changed in between, there's a fresh buffer allocated on which the | ||
1169 | * rewind operations are done. In any case, the returned buffer is read locked. | ||
1170 | * Returns NULL on error (with no locks held). | ||
1171 | */ | ||
1146 | static inline struct extent_buffer * | 1172 | static inline struct extent_buffer * |
1147 | get_old_root(struct btrfs_root *root, u64 time_seq) | 1173 | get_old_root(struct btrfs_root *root, u64 time_seq) |
1148 | { | 1174 | { |
1149 | struct tree_mod_elem *tm; | 1175 | struct tree_mod_elem *tm; |
1150 | struct extent_buffer *eb; | 1176 | struct extent_buffer *eb; |
1151 | struct tree_mod_root *old_root; | 1177 | struct tree_mod_root *old_root = NULL; |
1152 | u64 old_generation; | 1178 | u64 old_generation = 0; |
1179 | u64 logical; | ||
1153 | 1180 | ||
1181 | eb = btrfs_read_lock_root_node(root); | ||
1154 | tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq); | 1182 | tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq); |
1155 | if (!tm) | 1183 | if (!tm) |
1156 | return root->node; | 1184 | return root->node; |
1157 | 1185 | ||
1158 | old_root = &tm->old_root; | 1186 | if (tm->op == MOD_LOG_ROOT_REPLACE) { |
1159 | old_generation = tm->generation; | 1187 | old_root = &tm->old_root; |
1188 | old_generation = tm->generation; | ||
1189 | logical = old_root->logical; | ||
1190 | } else { | ||
1191 | logical = root->node->start; | ||
1192 | } | ||
1160 | 1193 | ||
1161 | tm = tree_mod_log_search(root->fs_info, old_root->logical, time_seq); | 1194 | tm = tree_mod_log_search(root->fs_info, logical, time_seq); |
1162 | /* | 1195 | /* |
1163 | * there was an item in the log when __tree_mod_log_oldest_root | 1196 | * there was an item in the log when __tree_mod_log_oldest_root |
1164 | * returned. this one must not go away, because the time_seq passed to | 1197 | * returned. this one must not go away, because the time_seq passed to |
@@ -1166,22 +1199,25 @@ get_old_root(struct btrfs_root *root, u64 time_seq) | |||
1166 | */ | 1199 | */ |
1167 | BUG_ON(!tm); | 1200 | BUG_ON(!tm); |
1168 | 1201 | ||
1169 | if (old_root->logical == root->node->start) { | 1202 | if (old_root) |
1170 | /* there are logged operations for the current root */ | ||
1171 | eb = btrfs_clone_extent_buffer(root->node); | ||
1172 | } else { | ||
1173 | /* there's a root replace operation for the current root */ | ||
1174 | eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT, | 1203 | eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT, |
1175 | root->nodesize); | 1204 | root->nodesize); |
1205 | else | ||
1206 | eb = btrfs_clone_extent_buffer(root->node); | ||
1207 | btrfs_tree_read_unlock(root->node); | ||
1208 | free_extent_buffer(root->node); | ||
1209 | if (!eb) | ||
1210 | return NULL; | ||
1211 | btrfs_tree_read_lock(eb); | ||
1212 | if (old_root) { | ||
1176 | btrfs_set_header_bytenr(eb, eb->start); | 1213 | btrfs_set_header_bytenr(eb, eb->start); |
1177 | btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); | 1214 | btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); |
1178 | btrfs_set_header_owner(eb, root->root_key.objectid); | 1215 | btrfs_set_header_owner(eb, root->root_key.objectid); |
1216 | btrfs_set_header_level(eb, old_root->level); | ||
1217 | btrfs_set_header_generation(eb, old_generation); | ||
1179 | } | 1218 | } |
1180 | if (!eb) | ||
1181 | return NULL; | ||
1182 | btrfs_set_header_level(eb, old_root->level); | ||
1183 | btrfs_set_header_generation(eb, old_generation); | ||
1184 | __tree_mod_log_rewind(eb, time_seq, tm); | 1219 | __tree_mod_log_rewind(eb, time_seq, tm); |
1220 | extent_buffer_get(eb); | ||
1185 | 1221 | ||
1186 | return eb; | 1222 | return eb; |
1187 | } | 1223 | } |
@@ -1650,8 +1686,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, | |||
1650 | BTRFS_NODEPTRS_PER_BLOCK(root) / 4) | 1686 | BTRFS_NODEPTRS_PER_BLOCK(root) / 4) |
1651 | return 0; | 1687 | return 0; |
1652 | 1688 | ||
1653 | btrfs_header_nritems(mid); | ||
1654 | |||
1655 | left = read_node_slot(root, parent, pslot - 1); | 1689 | left = read_node_slot(root, parent, pslot - 1); |
1656 | if (left) { | 1690 | if (left) { |
1657 | btrfs_tree_lock(left); | 1691 | btrfs_tree_lock(left); |
@@ -1681,7 +1715,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, | |||
1681 | wret = push_node_left(trans, root, left, mid, 1); | 1715 | wret = push_node_left(trans, root, left, mid, 1); |
1682 | if (wret < 0) | 1716 | if (wret < 0) |
1683 | ret = wret; | 1717 | ret = wret; |
1684 | btrfs_header_nritems(mid); | ||
1685 | } | 1718 | } |
1686 | 1719 | ||
1687 | /* | 1720 | /* |
@@ -2615,9 +2648,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key, | |||
2615 | 2648 | ||
2616 | again: | 2649 | again: |
2617 | b = get_old_root(root, time_seq); | 2650 | b = get_old_root(root, time_seq); |
2618 | extent_buffer_get(b); | ||
2619 | level = btrfs_header_level(b); | 2651 | level = btrfs_header_level(b); |
2620 | btrfs_tree_read_lock(b); | ||
2621 | p->locks[level] = BTRFS_READ_LOCK; | 2652 | p->locks[level] = BTRFS_READ_LOCK; |
2622 | 2653 | ||
2623 | while (b) { | 2654 | while (b) { |
@@ -5001,6 +5032,12 @@ next: | |||
5001 | */ | 5032 | */ |
5002 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) | 5033 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) |
5003 | { | 5034 | { |
5035 | return btrfs_next_old_leaf(root, path, 0); | ||
5036 | } | ||
5037 | |||
5038 | int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, | ||
5039 | u64 time_seq) | ||
5040 | { | ||
5004 | int slot; | 5041 | int slot; |
5005 | int level; | 5042 | int level; |
5006 | struct extent_buffer *c; | 5043 | struct extent_buffer *c; |
@@ -5025,7 +5062,10 @@ again: | |||
5025 | path->keep_locks = 1; | 5062 | path->keep_locks = 1; |
5026 | path->leave_spinning = 1; | 5063 | path->leave_spinning = 1; |
5027 | 5064 | ||
5028 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 5065 | if (time_seq) |
5066 | ret = btrfs_search_old_slot(root, &key, path, time_seq); | ||
5067 | else | ||
5068 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
5029 | path->keep_locks = 0; | 5069 | path->keep_locks = 0; |
5030 | 5070 | ||
5031 | if (ret < 0) | 5071 | if (ret < 0) |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0236d03c6732..8b73b2d4deb7 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2753,6 +2753,8 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, | |||
2753 | } | 2753 | } |
2754 | 2754 | ||
2755 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); | 2755 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); |
2756 | int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, | ||
2757 | u64 time_seq); | ||
2756 | static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) | 2758 | static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) |
2757 | { | 2759 | { |
2758 | ++p->slots[0]; | 2760 | ++p->slots[0]; |
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index c18d0442ae6d..2399f4086915 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1879,3 +1879,21 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root) | |||
1879 | } | 1879 | } |
1880 | } | 1880 | } |
1881 | } | 1881 | } |
1882 | |||
1883 | void btrfs_destroy_delayed_inodes(struct btrfs_root *root) | ||
1884 | { | ||
1885 | struct btrfs_delayed_root *delayed_root; | ||
1886 | struct btrfs_delayed_node *curr_node, *prev_node; | ||
1887 | |||
1888 | delayed_root = btrfs_get_delayed_root(root); | ||
1889 | |||
1890 | curr_node = btrfs_first_delayed_node(delayed_root); | ||
1891 | while (curr_node) { | ||
1892 | __btrfs_kill_delayed_node(curr_node); | ||
1893 | |||
1894 | prev_node = curr_node; | ||
1895 | curr_node = btrfs_next_delayed_node(curr_node); | ||
1896 | btrfs_release_delayed_node(prev_node); | ||
1897 | } | ||
1898 | } | ||
1899 | |||
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index 7083d08b2a21..f5aa4023d3e1 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -124,6 +124,9 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev); | |||
124 | /* Used for drop dead root */ | 124 | /* Used for drop dead root */ |
125 | void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); | 125 | void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); |
126 | 126 | ||
127 | /* Used for clean the transaction */ | ||
128 | void btrfs_destroy_delayed_inodes(struct btrfs_root *root); | ||
129 | |||
127 | /* Used for readdir() */ | 130 | /* Used for readdir() */ |
128 | void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, | 131 | void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, |
129 | struct list_head *del_list); | 132 | struct list_head *del_list); |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 7ae51decf6d3..e1890b1d3075 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -44,6 +44,7 @@ | |||
44 | #include "free-space-cache.h" | 44 | #include "free-space-cache.h" |
45 | #include "inode-map.h" | 45 | #include "inode-map.h" |
46 | #include "check-integrity.h" | 46 | #include "check-integrity.h" |
47 | #include "rcu-string.h" | ||
47 | 48 | ||
48 | static struct extent_io_ops btree_extent_io_ops; | 49 | static struct extent_io_ops btree_extent_io_ops; |
49 | static void end_workqueue_fn(struct btrfs_work *work); | 50 | static void end_workqueue_fn(struct btrfs_work *work); |
@@ -2118,7 +2119,7 @@ int open_ctree(struct super_block *sb, | |||
2118 | 2119 | ||
2119 | features = btrfs_super_incompat_flags(disk_super); | 2120 | features = btrfs_super_incompat_flags(disk_super); |
2120 | features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; | 2121 | features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; |
2121 | if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO) | 2122 | if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO) |
2122 | features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; | 2123 | features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; |
2123 | 2124 | ||
2124 | /* | 2125 | /* |
@@ -2575,8 +2576,9 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) | |||
2575 | struct btrfs_device *device = (struct btrfs_device *) | 2576 | struct btrfs_device *device = (struct btrfs_device *) |
2576 | bh->b_private; | 2577 | bh->b_private; |
2577 | 2578 | ||
2578 | printk_ratelimited(KERN_WARNING "lost page write due to " | 2579 | printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to " |
2579 | "I/O error on %s\n", device->name); | 2580 | "I/O error on %s\n", |
2581 | rcu_str_deref(device->name)); | ||
2580 | /* note, we dont' set_buffer_write_io_error because we have | 2582 | /* note, we dont' set_buffer_write_io_error because we have |
2581 | * our own ways of dealing with the IO errors | 2583 | * our own ways of dealing with the IO errors |
2582 | */ | 2584 | */ |
@@ -2749,8 +2751,8 @@ static int write_dev_flush(struct btrfs_device *device, int wait) | |||
2749 | wait_for_completion(&device->flush_wait); | 2751 | wait_for_completion(&device->flush_wait); |
2750 | 2752 | ||
2751 | if (bio_flagged(bio, BIO_EOPNOTSUPP)) { | 2753 | if (bio_flagged(bio, BIO_EOPNOTSUPP)) { |
2752 | printk("btrfs: disabling barriers on dev %s\n", | 2754 | printk_in_rcu("btrfs: disabling barriers on dev %s\n", |
2753 | device->name); | 2755 | rcu_str_deref(device->name)); |
2754 | device->nobarriers = 1; | 2756 | device->nobarriers = 1; |
2755 | } | 2757 | } |
2756 | if (!bio_flagged(bio, BIO_UPTODATE)) { | 2758 | if (!bio_flagged(bio, BIO_UPTODATE)) { |
@@ -3400,7 +3402,6 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, | |||
3400 | 3402 | ||
3401 | delayed_refs = &trans->delayed_refs; | 3403 | delayed_refs = &trans->delayed_refs; |
3402 | 3404 | ||
3403 | again: | ||
3404 | spin_lock(&delayed_refs->lock); | 3405 | spin_lock(&delayed_refs->lock); |
3405 | if (delayed_refs->num_entries == 0) { | 3406 | if (delayed_refs->num_entries == 0) { |
3406 | spin_unlock(&delayed_refs->lock); | 3407 | spin_unlock(&delayed_refs->lock); |
@@ -3408,31 +3409,36 @@ again: | |||
3408 | return ret; | 3409 | return ret; |
3409 | } | 3410 | } |
3410 | 3411 | ||
3411 | node = rb_first(&delayed_refs->root); | 3412 | while ((node = rb_first(&delayed_refs->root)) != NULL) { |
3412 | while (node) { | ||
3413 | ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); | 3413 | ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); |
3414 | node = rb_next(node); | ||
3415 | |||
3416 | ref->in_tree = 0; | ||
3417 | rb_erase(&ref->rb_node, &delayed_refs->root); | ||
3418 | delayed_refs->num_entries--; | ||
3419 | 3414 | ||
3420 | atomic_set(&ref->refs, 1); | 3415 | atomic_set(&ref->refs, 1); |
3421 | if (btrfs_delayed_ref_is_head(ref)) { | 3416 | if (btrfs_delayed_ref_is_head(ref)) { |
3422 | struct btrfs_delayed_ref_head *head; | 3417 | struct btrfs_delayed_ref_head *head; |
3423 | 3418 | ||
3424 | head = btrfs_delayed_node_to_head(ref); | 3419 | head = btrfs_delayed_node_to_head(ref); |
3425 | spin_unlock(&delayed_refs->lock); | 3420 | if (!mutex_trylock(&head->mutex)) { |
3426 | mutex_lock(&head->mutex); | 3421 | atomic_inc(&ref->refs); |
3422 | spin_unlock(&delayed_refs->lock); | ||
3423 | |||
3424 | /* Need to wait for the delayed ref to run */ | ||
3425 | mutex_lock(&head->mutex); | ||
3426 | mutex_unlock(&head->mutex); | ||
3427 | btrfs_put_delayed_ref(ref); | ||
3428 | |||
3429 | continue; | ||
3430 | } | ||
3431 | |||
3427 | kfree(head->extent_op); | 3432 | kfree(head->extent_op); |
3428 | delayed_refs->num_heads--; | 3433 | delayed_refs->num_heads--; |
3429 | if (list_empty(&head->cluster)) | 3434 | if (list_empty(&head->cluster)) |
3430 | delayed_refs->num_heads_ready--; | 3435 | delayed_refs->num_heads_ready--; |
3431 | list_del_init(&head->cluster); | 3436 | list_del_init(&head->cluster); |
3432 | mutex_unlock(&head->mutex); | ||
3433 | btrfs_put_delayed_ref(ref); | ||
3434 | goto again; | ||
3435 | } | 3437 | } |
3438 | ref->in_tree = 0; | ||
3439 | rb_erase(&ref->rb_node, &delayed_refs->root); | ||
3440 | delayed_refs->num_entries--; | ||
3441 | |||
3436 | spin_unlock(&delayed_refs->lock); | 3442 | spin_unlock(&delayed_refs->lock); |
3437 | btrfs_put_delayed_ref(ref); | 3443 | btrfs_put_delayed_ref(ref); |
3438 | 3444 | ||
@@ -3520,11 +3526,9 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root, | |||
3520 | &(&BTRFS_I(page->mapping->host)->io_tree)->buffer, | 3526 | &(&BTRFS_I(page->mapping->host)->io_tree)->buffer, |
3521 | offset >> PAGE_CACHE_SHIFT); | 3527 | offset >> PAGE_CACHE_SHIFT); |
3522 | spin_unlock(&dirty_pages->buffer_lock); | 3528 | spin_unlock(&dirty_pages->buffer_lock); |
3523 | if (eb) { | 3529 | if (eb) |
3524 | ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY, | 3530 | ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY, |
3525 | &eb->bflags); | 3531 | &eb->bflags); |
3526 | atomic_set(&eb->refs, 1); | ||
3527 | } | ||
3528 | if (PageWriteback(page)) | 3532 | if (PageWriteback(page)) |
3529 | end_page_writeback(page); | 3533 | end_page_writeback(page); |
3530 | 3534 | ||
@@ -3538,8 +3542,8 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root, | |||
3538 | spin_unlock_irq(&page->mapping->tree_lock); | 3542 | spin_unlock_irq(&page->mapping->tree_lock); |
3539 | } | 3543 | } |
3540 | 3544 | ||
3541 | page->mapping->a_ops->invalidatepage(page, 0); | ||
3542 | unlock_page(page); | 3545 | unlock_page(page); |
3546 | page_cache_release(page); | ||
3543 | } | 3547 | } |
3544 | } | 3548 | } |
3545 | 3549 | ||
@@ -3553,8 +3557,10 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root, | |||
3553 | u64 start; | 3557 | u64 start; |
3554 | u64 end; | 3558 | u64 end; |
3555 | int ret; | 3559 | int ret; |
3560 | bool loop = true; | ||
3556 | 3561 | ||
3557 | unpin = pinned_extents; | 3562 | unpin = pinned_extents; |
3563 | again: | ||
3558 | while (1) { | 3564 | while (1) { |
3559 | ret = find_first_extent_bit(unpin, 0, &start, &end, | 3565 | ret = find_first_extent_bit(unpin, 0, &start, &end, |
3560 | EXTENT_DIRTY); | 3566 | EXTENT_DIRTY); |
@@ -3572,6 +3578,15 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root, | |||
3572 | cond_resched(); | 3578 | cond_resched(); |
3573 | } | 3579 | } |
3574 | 3580 | ||
3581 | if (loop) { | ||
3582 | if (unpin == &root->fs_info->freed_extents[0]) | ||
3583 | unpin = &root->fs_info->freed_extents[1]; | ||
3584 | else | ||
3585 | unpin = &root->fs_info->freed_extents[0]; | ||
3586 | loop = false; | ||
3587 | goto again; | ||
3588 | } | ||
3589 | |||
3575 | return 0; | 3590 | return 0; |
3576 | } | 3591 | } |
3577 | 3592 | ||
@@ -3585,21 +3600,23 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, | |||
3585 | /* FIXME: cleanup wait for commit */ | 3600 | /* FIXME: cleanup wait for commit */ |
3586 | cur_trans->in_commit = 1; | 3601 | cur_trans->in_commit = 1; |
3587 | cur_trans->blocked = 1; | 3602 | cur_trans->blocked = 1; |
3588 | if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) | 3603 | wake_up(&root->fs_info->transaction_blocked_wait); |
3589 | wake_up(&root->fs_info->transaction_blocked_wait); | ||
3590 | 3604 | ||
3591 | cur_trans->blocked = 0; | 3605 | cur_trans->blocked = 0; |
3592 | if (waitqueue_active(&root->fs_info->transaction_wait)) | 3606 | wake_up(&root->fs_info->transaction_wait); |
3593 | wake_up(&root->fs_info->transaction_wait); | ||
3594 | 3607 | ||
3595 | cur_trans->commit_done = 1; | 3608 | cur_trans->commit_done = 1; |
3596 | if (waitqueue_active(&cur_trans->commit_wait)) | 3609 | wake_up(&cur_trans->commit_wait); |
3597 | wake_up(&cur_trans->commit_wait); | 3610 | |
3611 | btrfs_destroy_delayed_inodes(root); | ||
3612 | btrfs_assert_delayed_root_empty(root); | ||
3598 | 3613 | ||
3599 | btrfs_destroy_pending_snapshots(cur_trans); | 3614 | btrfs_destroy_pending_snapshots(cur_trans); |
3600 | 3615 | ||
3601 | btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages, | 3616 | btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages, |
3602 | EXTENT_DIRTY); | 3617 | EXTENT_DIRTY); |
3618 | btrfs_destroy_pinned_extent(root, | ||
3619 | root->fs_info->pinned_extents); | ||
3603 | 3620 | ||
3604 | /* | 3621 | /* |
3605 | memset(cur_trans, 0, sizeof(*cur_trans)); | 3622 | memset(cur_trans, 0, sizeof(*cur_trans)); |
@@ -3648,6 +3665,9 @@ int btrfs_cleanup_transaction(struct btrfs_root *root) | |||
3648 | if (waitqueue_active(&t->commit_wait)) | 3665 | if (waitqueue_active(&t->commit_wait)) |
3649 | wake_up(&t->commit_wait); | 3666 | wake_up(&t->commit_wait); |
3650 | 3667 | ||
3668 | btrfs_destroy_delayed_inodes(root); | ||
3669 | btrfs_assert_delayed_root_empty(root); | ||
3670 | |||
3651 | btrfs_destroy_pending_snapshots(t); | 3671 | btrfs_destroy_pending_snapshots(t); |
3652 | 3672 | ||
3653 | btrfs_destroy_delalloc_inodes(root); | 3673 | btrfs_destroy_delalloc_inodes(root); |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 2c8f7b204617..aaa12c1eb348 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -20,6 +20,7 @@ | |||
20 | #include "volumes.h" | 20 | #include "volumes.h" |
21 | #include "check-integrity.h" | 21 | #include "check-integrity.h" |
22 | #include "locking.h" | 22 | #include "locking.h" |
23 | #include "rcu-string.h" | ||
23 | 24 | ||
24 | static struct kmem_cache *extent_state_cache; | 25 | static struct kmem_cache *extent_state_cache; |
25 | static struct kmem_cache *extent_buffer_cache; | 26 | static struct kmem_cache *extent_buffer_cache; |
@@ -1917,9 +1918,9 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start, | |||
1917 | return -EIO; | 1918 | return -EIO; |
1918 | } | 1919 | } |
1919 | 1920 | ||
1920 | printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s " | 1921 | printk_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu " |
1921 | "sector %llu)\n", page->mapping->host->i_ino, start, | 1922 | "(dev %s sector %llu)\n", page->mapping->host->i_ino, |
1922 | dev->name, sector); | 1923 | start, rcu_str_deref(dev->name), sector); |
1923 | 1924 | ||
1924 | bio_put(bio); | 1925 | bio_put(bio); |
1925 | return 0; | 1926 | return 0; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index f6ab6f5e635a..a4f02501da40 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -830,7 +830,7 @@ static noinline int cow_file_range(struct inode *inode, | |||
830 | if (IS_ERR(trans)) { | 830 | if (IS_ERR(trans)) { |
831 | extent_clear_unlock_delalloc(inode, | 831 | extent_clear_unlock_delalloc(inode, |
832 | &BTRFS_I(inode)->io_tree, | 832 | &BTRFS_I(inode)->io_tree, |
833 | start, end, NULL, | 833 | start, end, locked_page, |
834 | EXTENT_CLEAR_UNLOCK_PAGE | | 834 | EXTENT_CLEAR_UNLOCK_PAGE | |
835 | EXTENT_CLEAR_UNLOCK | | 835 | EXTENT_CLEAR_UNLOCK | |
836 | EXTENT_CLEAR_DELALLOC | | 836 | EXTENT_CLEAR_DELALLOC | |
@@ -963,7 +963,7 @@ out: | |||
963 | out_unlock: | 963 | out_unlock: |
964 | extent_clear_unlock_delalloc(inode, | 964 | extent_clear_unlock_delalloc(inode, |
965 | &BTRFS_I(inode)->io_tree, | 965 | &BTRFS_I(inode)->io_tree, |
966 | start, end, NULL, | 966 | start, end, locked_page, |
967 | EXTENT_CLEAR_UNLOCK_PAGE | | 967 | EXTENT_CLEAR_UNLOCK_PAGE | |
968 | EXTENT_CLEAR_UNLOCK | | 968 | EXTENT_CLEAR_UNLOCK | |
969 | EXTENT_CLEAR_DELALLOC | | 969 | EXTENT_CLEAR_DELALLOC | |
@@ -986,8 +986,10 @@ static noinline void async_cow_start(struct btrfs_work *work) | |||
986 | compress_file_range(async_cow->inode, async_cow->locked_page, | 986 | compress_file_range(async_cow->inode, async_cow->locked_page, |
987 | async_cow->start, async_cow->end, async_cow, | 987 | async_cow->start, async_cow->end, async_cow, |
988 | &num_added); | 988 | &num_added); |
989 | if (num_added == 0) | 989 | if (num_added == 0) { |
990 | iput(async_cow->inode); | ||
990 | async_cow->inode = NULL; | 991 | async_cow->inode = NULL; |
992 | } | ||
991 | } | 993 | } |
992 | 994 | ||
993 | /* | 995 | /* |
@@ -1020,6 +1022,8 @@ static noinline void async_cow_free(struct btrfs_work *work) | |||
1020 | { | 1022 | { |
1021 | struct async_cow *async_cow; | 1023 | struct async_cow *async_cow; |
1022 | async_cow = container_of(work, struct async_cow, work); | 1024 | async_cow = container_of(work, struct async_cow, work); |
1025 | if (async_cow->inode) | ||
1026 | iput(async_cow->inode); | ||
1023 | kfree(async_cow); | 1027 | kfree(async_cow); |
1024 | } | 1028 | } |
1025 | 1029 | ||
@@ -1038,7 +1042,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, | |||
1038 | while (start < end) { | 1042 | while (start < end) { |
1039 | async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); | 1043 | async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); |
1040 | BUG_ON(!async_cow); /* -ENOMEM */ | 1044 | BUG_ON(!async_cow); /* -ENOMEM */ |
1041 | async_cow->inode = inode; | 1045 | async_cow->inode = igrab(inode); |
1042 | async_cow->root = root; | 1046 | async_cow->root = root; |
1043 | async_cow->locked_page = locked_page; | 1047 | async_cow->locked_page = locked_page; |
1044 | async_cow->start = start; | 1048 | async_cow->start = start; |
@@ -1136,8 +1140,18 @@ static noinline int run_delalloc_nocow(struct inode *inode, | |||
1136 | u64 ino = btrfs_ino(inode); | 1140 | u64 ino = btrfs_ino(inode); |
1137 | 1141 | ||
1138 | path = btrfs_alloc_path(); | 1142 | path = btrfs_alloc_path(); |
1139 | if (!path) | 1143 | if (!path) { |
1144 | extent_clear_unlock_delalloc(inode, | ||
1145 | &BTRFS_I(inode)->io_tree, | ||
1146 | start, end, locked_page, | ||
1147 | EXTENT_CLEAR_UNLOCK_PAGE | | ||
1148 | EXTENT_CLEAR_UNLOCK | | ||
1149 | EXTENT_CLEAR_DELALLOC | | ||
1150 | EXTENT_CLEAR_DIRTY | | ||
1151 | EXTENT_SET_WRITEBACK | | ||
1152 | EXTENT_END_WRITEBACK); | ||
1140 | return -ENOMEM; | 1153 | return -ENOMEM; |
1154 | } | ||
1141 | 1155 | ||
1142 | nolock = btrfs_is_free_space_inode(root, inode); | 1156 | nolock = btrfs_is_free_space_inode(root, inode); |
1143 | 1157 | ||
@@ -1147,6 +1161,15 @@ static noinline int run_delalloc_nocow(struct inode *inode, | |||
1147 | trans = btrfs_join_transaction(root); | 1161 | trans = btrfs_join_transaction(root); |
1148 | 1162 | ||
1149 | if (IS_ERR(trans)) { | 1163 | if (IS_ERR(trans)) { |
1164 | extent_clear_unlock_delalloc(inode, | ||
1165 | &BTRFS_I(inode)->io_tree, | ||
1166 | start, end, locked_page, | ||
1167 | EXTENT_CLEAR_UNLOCK_PAGE | | ||
1168 | EXTENT_CLEAR_UNLOCK | | ||
1169 | EXTENT_CLEAR_DELALLOC | | ||
1170 | EXTENT_CLEAR_DIRTY | | ||
1171 | EXTENT_SET_WRITEBACK | | ||
1172 | EXTENT_END_WRITEBACK); | ||
1150 | btrfs_free_path(path); | 1173 | btrfs_free_path(path); |
1151 | return PTR_ERR(trans); | 1174 | return PTR_ERR(trans); |
1152 | } | 1175 | } |
@@ -1327,8 +1350,11 @@ out_check: | |||
1327 | } | 1350 | } |
1328 | btrfs_release_path(path); | 1351 | btrfs_release_path(path); |
1329 | 1352 | ||
1330 | if (cur_offset <= end && cow_start == (u64)-1) | 1353 | if (cur_offset <= end && cow_start == (u64)-1) { |
1331 | cow_start = cur_offset; | 1354 | cow_start = cur_offset; |
1355 | cur_offset = end; | ||
1356 | } | ||
1357 | |||
1332 | if (cow_start != (u64)-1) { | 1358 | if (cow_start != (u64)-1) { |
1333 | ret = cow_file_range(inode, locked_page, cow_start, end, | 1359 | ret = cow_file_range(inode, locked_page, cow_start, end, |
1334 | page_started, nr_written, 1); | 1360 | page_started, nr_written, 1); |
@@ -1347,6 +1373,17 @@ error: | |||
1347 | if (!ret) | 1373 | if (!ret) |
1348 | ret = err; | 1374 | ret = err; |
1349 | 1375 | ||
1376 | if (ret && cur_offset < end) | ||
1377 | extent_clear_unlock_delalloc(inode, | ||
1378 | &BTRFS_I(inode)->io_tree, | ||
1379 | cur_offset, end, locked_page, | ||
1380 | EXTENT_CLEAR_UNLOCK_PAGE | | ||
1381 | EXTENT_CLEAR_UNLOCK | | ||
1382 | EXTENT_CLEAR_DELALLOC | | ||
1383 | EXTENT_CLEAR_DIRTY | | ||
1384 | EXTENT_SET_WRITEBACK | | ||
1385 | EXTENT_END_WRITEBACK); | ||
1386 | |||
1350 | btrfs_free_path(path); | 1387 | btrfs_free_path(path); |
1351 | return ret; | 1388 | return ret; |
1352 | } | 1389 | } |
@@ -1361,20 +1398,23 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, | |||
1361 | int ret; | 1398 | int ret; |
1362 | struct btrfs_root *root = BTRFS_I(inode)->root; | 1399 | struct btrfs_root *root = BTRFS_I(inode)->root; |
1363 | 1400 | ||
1364 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) | 1401 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) { |
1365 | ret = run_delalloc_nocow(inode, locked_page, start, end, | 1402 | ret = run_delalloc_nocow(inode, locked_page, start, end, |
1366 | page_started, 1, nr_written); | 1403 | page_started, 1, nr_written); |
1367 | else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) | 1404 | } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) { |
1368 | ret = run_delalloc_nocow(inode, locked_page, start, end, | 1405 | ret = run_delalloc_nocow(inode, locked_page, start, end, |
1369 | page_started, 0, nr_written); | 1406 | page_started, 0, nr_written); |
1370 | else if (!btrfs_test_opt(root, COMPRESS) && | 1407 | } else if (!btrfs_test_opt(root, COMPRESS) && |
1371 | !(BTRFS_I(inode)->force_compress) && | 1408 | !(BTRFS_I(inode)->force_compress) && |
1372 | !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) | 1409 | !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) { |
1373 | ret = cow_file_range(inode, locked_page, start, end, | 1410 | ret = cow_file_range(inode, locked_page, start, end, |
1374 | page_started, nr_written, 1); | 1411 | page_started, nr_written, 1); |
1375 | else | 1412 | } else { |
1413 | set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, | ||
1414 | &BTRFS_I(inode)->runtime_flags); | ||
1376 | ret = cow_file_range_async(inode, locked_page, start, end, | 1415 | ret = cow_file_range_async(inode, locked_page, start, end, |
1377 | page_started, nr_written); | 1416 | page_started, nr_written); |
1417 | } | ||
1378 | return ret; | 1418 | return ret; |
1379 | } | 1419 | } |
1380 | 1420 | ||
@@ -7054,10 +7094,13 @@ static void fixup_inode_flags(struct inode *dir, struct inode *inode) | |||
7054 | else | 7094 | else |
7055 | b_inode->flags &= ~BTRFS_INODE_NODATACOW; | 7095 | b_inode->flags &= ~BTRFS_INODE_NODATACOW; |
7056 | 7096 | ||
7057 | if (b_dir->flags & BTRFS_INODE_COMPRESS) | 7097 | if (b_dir->flags & BTRFS_INODE_COMPRESS) { |
7058 | b_inode->flags |= BTRFS_INODE_COMPRESS; | 7098 | b_inode->flags |= BTRFS_INODE_COMPRESS; |
7059 | else | 7099 | b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS; |
7060 | b_inode->flags &= ~BTRFS_INODE_COMPRESS; | 7100 | } else { |
7101 | b_inode->flags &= ~(BTRFS_INODE_COMPRESS | | ||
7102 | BTRFS_INODE_NOCOMPRESS); | ||
7103 | } | ||
7061 | } | 7104 | } |
7062 | 7105 | ||
7063 | static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | 7106 | static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 24b776c08d99..0e92e5763005 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -52,6 +52,7 @@ | |||
52 | #include "locking.h" | 52 | #include "locking.h" |
53 | #include "inode-map.h" | 53 | #include "inode-map.h" |
54 | #include "backref.h" | 54 | #include "backref.h" |
55 | #include "rcu-string.h" | ||
55 | 56 | ||
56 | /* Mask out flags that are inappropriate for the given type of inode. */ | 57 | /* Mask out flags that are inappropriate for the given type of inode. */ |
57 | static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) | 58 | static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) |
@@ -785,39 +786,57 @@ none: | |||
785 | return -ENOENT; | 786 | return -ENOENT; |
786 | } | 787 | } |
787 | 788 | ||
788 | /* | 789 | static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start) |
789 | * Validaty check of prev em and next em: | ||
790 | * 1) no prev/next em | ||
791 | * 2) prev/next em is an hole/inline extent | ||
792 | */ | ||
793 | static int check_adjacent_extents(struct inode *inode, struct extent_map *em) | ||
794 | { | 790 | { |
795 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | 791 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
796 | struct extent_map *prev = NULL, *next = NULL; | 792 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
797 | int ret = 0; | 793 | struct extent_map *em; |
794 | u64 len = PAGE_CACHE_SIZE; | ||
798 | 795 | ||
796 | /* | ||
797 | * hopefully we have this extent in the tree already, try without | ||
798 | * the full extent lock | ||
799 | */ | ||
799 | read_lock(&em_tree->lock); | 800 | read_lock(&em_tree->lock); |
800 | prev = lookup_extent_mapping(em_tree, em->start - 1, (u64)-1); | 801 | em = lookup_extent_mapping(em_tree, start, len); |
801 | next = lookup_extent_mapping(em_tree, em->start + em->len, (u64)-1); | ||
802 | read_unlock(&em_tree->lock); | 802 | read_unlock(&em_tree->lock); |
803 | 803 | ||
804 | if ((!prev || prev->block_start >= EXTENT_MAP_LAST_BYTE) && | 804 | if (!em) { |
805 | (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)) | 805 | /* get the big lock and read metadata off disk */ |
806 | ret = 1; | 806 | lock_extent(io_tree, start, start + len - 1); |
807 | free_extent_map(prev); | 807 | em = btrfs_get_extent(inode, NULL, 0, start, len, 0); |
808 | free_extent_map(next); | 808 | unlock_extent(io_tree, start, start + len - 1); |
809 | |||
810 | if (IS_ERR(em)) | ||
811 | return NULL; | ||
812 | } | ||
813 | |||
814 | return em; | ||
815 | } | ||
816 | |||
817 | static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em) | ||
818 | { | ||
819 | struct extent_map *next; | ||
820 | bool ret = true; | ||
809 | 821 | ||
822 | /* this is the last extent */ | ||
823 | if (em->start + em->len >= i_size_read(inode)) | ||
824 | return false; | ||
825 | |||
826 | next = defrag_lookup_extent(inode, em->start + em->len); | ||
827 | if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE) | ||
828 | ret = false; | ||
829 | |||
830 | free_extent_map(next); | ||
810 | return ret; | 831 | return ret; |
811 | } | 832 | } |
812 | 833 | ||
813 | static int should_defrag_range(struct inode *inode, u64 start, u64 len, | 834 | static int should_defrag_range(struct inode *inode, u64 start, int thresh, |
814 | int thresh, u64 *last_len, u64 *skip, | 835 | u64 *last_len, u64 *skip, u64 *defrag_end) |
815 | u64 *defrag_end) | ||
816 | { | 836 | { |
817 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | 837 | struct extent_map *em; |
818 | struct extent_map *em = NULL; | ||
819 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | ||
820 | int ret = 1; | 838 | int ret = 1; |
839 | bool next_mergeable = true; | ||
821 | 840 | ||
822 | /* | 841 | /* |
823 | * make sure that once we start defragging an extent, we keep on | 842 | * make sure that once we start defragging an extent, we keep on |
@@ -828,23 +847,9 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, | |||
828 | 847 | ||
829 | *skip = 0; | 848 | *skip = 0; |
830 | 849 | ||
831 | /* | 850 | em = defrag_lookup_extent(inode, start); |
832 | * hopefully we have this extent in the tree already, try without | 851 | if (!em) |
833 | * the full extent lock | 852 | return 0; |
834 | */ | ||
835 | read_lock(&em_tree->lock); | ||
836 | em = lookup_extent_mapping(em_tree, start, len); | ||
837 | read_unlock(&em_tree->lock); | ||
838 | |||
839 | if (!em) { | ||
840 | /* get the big lock and read metadata off disk */ | ||
841 | lock_extent(io_tree, start, start + len - 1); | ||
842 | em = btrfs_get_extent(inode, NULL, 0, start, len, 0); | ||
843 | unlock_extent(io_tree, start, start + len - 1); | ||
844 | |||
845 | if (IS_ERR(em)) | ||
846 | return 0; | ||
847 | } | ||
848 | 853 | ||
849 | /* this will cover holes, and inline extents */ | 854 | /* this will cover holes, and inline extents */ |
850 | if (em->block_start >= EXTENT_MAP_LAST_BYTE) { | 855 | if (em->block_start >= EXTENT_MAP_LAST_BYTE) { |
@@ -852,18 +857,15 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, | |||
852 | goto out; | 857 | goto out; |
853 | } | 858 | } |
854 | 859 | ||
855 | /* If we have nothing to merge with us, just skip. */ | 860 | next_mergeable = defrag_check_next_extent(inode, em); |
856 | if (check_adjacent_extents(inode, em)) { | ||
857 | ret = 0; | ||
858 | goto out; | ||
859 | } | ||
860 | 861 | ||
861 | /* | 862 | /* |
862 | * we hit a real extent, if it is big don't bother defragging it again | 863 | * we hit a real extent, if it is big or the next extent is not a |
864 | * real extent, don't bother defragging it | ||
863 | */ | 865 | */ |
864 | if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh) | 866 | if ((*last_len == 0 || *last_len >= thresh) && |
867 | (em->len >= thresh || !next_mergeable)) | ||
865 | ret = 0; | 868 | ret = 0; |
866 | |||
867 | out: | 869 | out: |
868 | /* | 870 | /* |
869 | * last_len ends up being a counter of how many bytes we've defragged. | 871 | * last_len ends up being a counter of how many bytes we've defragged. |
@@ -1142,8 +1144,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, | |||
1142 | break; | 1144 | break; |
1143 | 1145 | ||
1144 | if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, | 1146 | if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, |
1145 | PAGE_CACHE_SIZE, extent_thresh, | 1147 | extent_thresh, &last_len, &skip, |
1146 | &last_len, &skip, &defrag_end)) { | 1148 | &defrag_end)) { |
1147 | unsigned long next; | 1149 | unsigned long next; |
1148 | /* | 1150 | /* |
1149 | * the should_defrag function tells us how much to skip | 1151 | * the should_defrag function tells us how much to skip |
@@ -1304,6 +1306,14 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, | |||
1304 | ret = -EINVAL; | 1306 | ret = -EINVAL; |
1305 | goto out_free; | 1307 | goto out_free; |
1306 | } | 1308 | } |
1309 | if (device->fs_devices && device->fs_devices->seeding) { | ||
1310 | printk(KERN_INFO "btrfs: resizer unable to apply on " | ||
1311 | "seeding device %llu\n", | ||
1312 | (unsigned long long)devid); | ||
1313 | ret = -EINVAL; | ||
1314 | goto out_free; | ||
1315 | } | ||
1316 | |||
1307 | if (!strcmp(sizestr, "max")) | 1317 | if (!strcmp(sizestr, "max")) |
1308 | new_size = device->bdev->bd_inode->i_size; | 1318 | new_size = device->bdev->bd_inode->i_size; |
1309 | else { | 1319 | else { |
@@ -1345,8 +1355,9 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, | |||
1345 | do_div(new_size, root->sectorsize); | 1355 | do_div(new_size, root->sectorsize); |
1346 | new_size *= root->sectorsize; | 1356 | new_size *= root->sectorsize; |
1347 | 1357 | ||
1348 | printk(KERN_INFO "btrfs: new size for %s is %llu\n", | 1358 | printk_in_rcu(KERN_INFO "btrfs: new size for %s is %llu\n", |
1349 | device->name, (unsigned long long)new_size); | 1359 | rcu_str_deref(device->name), |
1360 | (unsigned long long)new_size); | ||
1350 | 1361 | ||
1351 | if (new_size > old_size) { | 1362 | if (new_size > old_size) { |
1352 | trans = btrfs_start_transaction(root, 0); | 1363 | trans = btrfs_start_transaction(root, 0); |
@@ -2264,7 +2275,12 @@ static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg) | |||
2264 | di_args->total_bytes = dev->total_bytes; | 2275 | di_args->total_bytes = dev->total_bytes; |
2265 | memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); | 2276 | memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); |
2266 | if (dev->name) { | 2277 | if (dev->name) { |
2267 | strncpy(di_args->path, dev->name, sizeof(di_args->path)); | 2278 | struct rcu_string *name; |
2279 | |||
2280 | rcu_read_lock(); | ||
2281 | name = rcu_dereference(dev->name); | ||
2282 | strncpy(di_args->path, name->str, sizeof(di_args->path)); | ||
2283 | rcu_read_unlock(); | ||
2268 | di_args->path[sizeof(di_args->path) - 1] = 0; | 2284 | di_args->path[sizeof(di_args->path) - 1] = 0; |
2269 | } else { | 2285 | } else { |
2270 | di_args->path[0] = '\0'; | 2286 | di_args->path[0] = '\0'; |
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 9e138cdc36c5..643335a4fe3c 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -627,7 +627,27 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) | |||
627 | /* start IO across the range first to instantiate any delalloc | 627 | /* start IO across the range first to instantiate any delalloc |
628 | * extents | 628 | * extents |
629 | */ | 629 | */ |
630 | filemap_write_and_wait_range(inode->i_mapping, start, orig_end); | 630 | filemap_fdatawrite_range(inode->i_mapping, start, orig_end); |
631 | |||
632 | /* | ||
633 | * So with compression we will find and lock a dirty page and clear the | ||
634 | * first one as dirty, setup an async extent, and immediately return | ||
635 | * with the entire range locked but with nobody actually marked with | ||
636 | * writeback. So we can't just filemap_write_and_wait_range() and | ||
637 | * expect it to work since it will just kick off a thread to do the | ||
638 | * actual work. So we need to call filemap_fdatawrite_range _again_ | ||
639 | * since it will wait on the page lock, which won't be unlocked until | ||
640 | * after the pages have been marked as writeback and so we're good to go | ||
641 | * from there. We have to do this otherwise we'll miss the ordered | ||
642 | * extents and that results in badness. Please Josef, do not think you | ||
643 | * know better and pull this out at some point in the future, it is | ||
644 | * right and you are wrong. | ||
645 | */ | ||
646 | if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, | ||
647 | &BTRFS_I(inode)->runtime_flags)) | ||
648 | filemap_fdatawrite_range(inode->i_mapping, start, orig_end); | ||
649 | |||
650 | filemap_fdatawait_range(inode->i_mapping, start, orig_end); | ||
631 | 651 | ||
632 | end = orig_end; | 652 | end = orig_end; |
633 | found = 0; | 653 | found = 0; |
diff --git a/fs/btrfs/rcu-string.h b/fs/btrfs/rcu-string.h
new file mode 100644
index 000000000000..9e111e4576d4
--- /dev/null
+++ b/fs/btrfs/rcu-string.h
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Red Hat. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public | ||
6 | * License v2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
11 | * General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public | ||
14 | * License along with this program; if not, write to the | ||
15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
16 | * Boston, MA 021110-1307, USA. | ||
17 | */ | ||
18 | |||
19 | struct rcu_string { | ||
20 | struct rcu_head rcu; | ||
21 | char str[0]; | ||
22 | }; | ||
23 | |||
24 | static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask) | ||
25 | { | ||
26 | size_t len = strlen(src) + 1; | ||
27 | struct rcu_string *ret = kzalloc(sizeof(struct rcu_string) + | ||
28 | (len * sizeof(char)), mask); | ||
29 | if (!ret) | ||
30 | return ret; | ||
31 | strncpy(ret->str, src, len); | ||
32 | return ret; | ||
33 | } | ||
34 | |||
35 | static inline void rcu_string_free(struct rcu_string *str) | ||
36 | { | ||
37 | if (str) | ||
38 | kfree_rcu(str, rcu); | ||
39 | } | ||
40 | |||
41 | #define printk_in_rcu(fmt, ...) do { \ | ||
42 | rcu_read_lock(); \ | ||
43 | printk(fmt, __VA_ARGS__); \ | ||
44 | rcu_read_unlock(); \ | ||
45 | } while (0) | ||
46 | |||
47 | #define printk_ratelimited_in_rcu(fmt, ...) do { \ | ||
48 | rcu_read_lock(); \ | ||
49 | printk_ratelimited(fmt, __VA_ARGS__); \ | ||
50 | rcu_read_unlock(); \ | ||
51 | } while (0) | ||
52 | |||
53 | #define rcu_str_deref(rcu_str) ({ \ | ||
54 | struct rcu_string *__str = rcu_dereference(rcu_str); \ | ||
55 | __str->str; \ | ||
56 | }) | ||
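A short aside on the new header: these helpers are what the rest of this series relies on to read device->name without holding device_list_mutex. Below is a minimal usage sketch, not part of the patch; the demo struct, its __rcu annotation and the function names are assumptions made for the example, and only rcu_string_strdup(), rcu_string_free(), printk_in_rcu() and rcu_str_deref() come from rcu-string.h above.

/*
 * Illustrative sketch only (not part of the patch). Assumes the usual kernel
 * headers (<linux/rcupdate.h>, <linux/slab.h>) plus rcu-string.h above; the
 * struct and function names here are invented for the example.
 */
struct demo_device {
	struct rcu_string __rcu *name;
};

/* Writer side: publish a new string, free the old one after a grace period. */
static int demo_set_name(struct demo_device *dev, const char *new_name)
{
	struct rcu_string *fresh, *old;

	fresh = rcu_string_strdup(new_name, GFP_KERNEL);
	if (!fresh)
		return -ENOMEM;

	/* writers are assumed to be serialized by the caller (e.g. a mutex) */
	old = rcu_dereference_protected(dev->name, 1);
	rcu_assign_pointer(dev->name, fresh);
	rcu_string_free(old);	/* kfree_rcu() defers the free past readers */
	return 0;
}

/* Reader side: printk_in_rcu() takes rcu_read_lock() around the dereference. */
static void demo_print_name(struct demo_device *dev)
{
	printk_in_rcu(KERN_INFO "demo device: %s\n", rcu_str_deref(dev->name));
}

RCU only makes the read side lock-free; writers still serialize among themselves, which is why the hunks below keep taking the existing mutexes before swapping or printing a name.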
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index a38cfa4f251e..b223620cd5a6 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -26,6 +26,7 @@ | |||
26 | #include "backref.h" | 26 | #include "backref.h" |
27 | #include "extent_io.h" | 27 | #include "extent_io.h" |
28 | #include "check-integrity.h" | 28 | #include "check-integrity.h" |
29 | #include "rcu-string.h" | ||
29 | 30 | ||
30 | /* | 31 | /* |
31 | * This is only the first step towards a full-features scrub. It reads all | 32 | * This is only the first step towards a full-features scrub. It reads all |
@@ -320,10 +321,10 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx) | |||
320 | * hold all of the paths here | 321 | * hold all of the paths here |
321 | */ | 322 | */ |
322 | for (i = 0; i < ipath->fspath->elem_cnt; ++i) | 323 | for (i = 0; i < ipath->fspath->elem_cnt; ++i) |
323 | printk(KERN_WARNING "btrfs: %s at logical %llu on dev " | 324 | printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev " |
324 | "%s, sector %llu, root %llu, inode %llu, offset %llu, " | 325 | "%s, sector %llu, root %llu, inode %llu, offset %llu, " |
325 | "length %llu, links %u (path: %s)\n", swarn->errstr, | 326 | "length %llu, links %u (path: %s)\n", swarn->errstr, |
326 | swarn->logical, swarn->dev->name, | 327 | swarn->logical, rcu_str_deref(swarn->dev->name), |
327 | (unsigned long long)swarn->sector, root, inum, offset, | 328 | (unsigned long long)swarn->sector, root, inum, offset, |
328 | min(isize - offset, (u64)PAGE_SIZE), nlink, | 329 | min(isize - offset, (u64)PAGE_SIZE), nlink, |
329 | (char *)(unsigned long)ipath->fspath->val[i]); | 330 | (char *)(unsigned long)ipath->fspath->val[i]); |
@@ -332,10 +333,10 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx) | |||
332 | return 0; | 333 | return 0; |
333 | 334 | ||
334 | err: | 335 | err: |
335 | printk(KERN_WARNING "btrfs: %s at logical %llu on dev " | 336 | printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev " |
336 | "%s, sector %llu, root %llu, inode %llu, offset %llu: path " | 337 | "%s, sector %llu, root %llu, inode %llu, offset %llu: path " |
337 | "resolving failed with ret=%d\n", swarn->errstr, | 338 | "resolving failed with ret=%d\n", swarn->errstr, |
338 | swarn->logical, swarn->dev->name, | 339 | swarn->logical, rcu_str_deref(swarn->dev->name), |
339 | (unsigned long long)swarn->sector, root, inum, offset, ret); | 340 | (unsigned long long)swarn->sector, root, inum, offset, ret); |
340 | 341 | ||
341 | free_ipath(ipath); | 342 | free_ipath(ipath); |
@@ -390,10 +391,11 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) | |||
390 | do { | 391 | do { |
391 | ret = tree_backref_for_extent(&ptr, eb, ei, item_size, | 392 | ret = tree_backref_for_extent(&ptr, eb, ei, item_size, |
392 | &ref_root, &ref_level); | 393 | &ref_root, &ref_level); |
393 | printk(KERN_WARNING | 394 | printk_in_rcu(KERN_WARNING |
394 | "btrfs: %s at logical %llu on dev %s, " | 395 | "btrfs: %s at logical %llu on dev %s, " |
395 | "sector %llu: metadata %s (level %d) in tree " | 396 | "sector %llu: metadata %s (level %d) in tree " |
396 | "%llu\n", errstr, swarn.logical, dev->name, | 397 | "%llu\n", errstr, swarn.logical, |
398 | rcu_str_deref(dev->name), | ||
397 | (unsigned long long)swarn.sector, | 399 | (unsigned long long)swarn.sector, |
398 | ref_level ? "node" : "leaf", | 400 | ref_level ? "node" : "leaf", |
399 | ret < 0 ? -1 : ref_level, | 401 | ret < 0 ? -1 : ref_level, |
@@ -580,9 +582,11 @@ out: | |||
580 | spin_lock(&sdev->stat_lock); | 582 | spin_lock(&sdev->stat_lock); |
581 | ++sdev->stat.uncorrectable_errors; | 583 | ++sdev->stat.uncorrectable_errors; |
582 | spin_unlock(&sdev->stat_lock); | 584 | spin_unlock(&sdev->stat_lock); |
583 | printk_ratelimited(KERN_ERR | 585 | |
586 | printk_ratelimited_in_rcu(KERN_ERR | ||
584 | "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n", | 587 | "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n", |
585 | (unsigned long long)fixup->logical, sdev->dev->name); | 588 | (unsigned long long)fixup->logical, |
589 | rcu_str_deref(sdev->dev->name)); | ||
586 | } | 590 | } |
587 | 591 | ||
588 | btrfs_free_path(path); | 592 | btrfs_free_path(path); |
@@ -936,18 +940,20 @@ corrected_error: | |||
936 | spin_lock(&sdev->stat_lock); | 940 | spin_lock(&sdev->stat_lock); |
937 | sdev->stat.corrected_errors++; | 941 | sdev->stat.corrected_errors++; |
938 | spin_unlock(&sdev->stat_lock); | 942 | spin_unlock(&sdev->stat_lock); |
939 | printk_ratelimited(KERN_ERR | 943 | printk_ratelimited_in_rcu(KERN_ERR |
940 | "btrfs: fixed up error at logical %llu on dev %s\n", | 944 | "btrfs: fixed up error at logical %llu on dev %s\n", |
941 | (unsigned long long)logical, sdev->dev->name); | 945 | (unsigned long long)logical, |
946 | rcu_str_deref(sdev->dev->name)); | ||
942 | } | 947 | } |
943 | } else { | 948 | } else { |
944 | did_not_correct_error: | 949 | did_not_correct_error: |
945 | spin_lock(&sdev->stat_lock); | 950 | spin_lock(&sdev->stat_lock); |
946 | sdev->stat.uncorrectable_errors++; | 951 | sdev->stat.uncorrectable_errors++; |
947 | spin_unlock(&sdev->stat_lock); | 952 | spin_unlock(&sdev->stat_lock); |
948 | printk_ratelimited(KERN_ERR | 953 | printk_ratelimited_in_rcu(KERN_ERR |
949 | "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n", | 954 | "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n", |
950 | (unsigned long long)logical, sdev->dev->name); | 955 | (unsigned long long)logical, |
956 | rcu_str_deref(sdev->dev->name)); | ||
951 | } | 957 | } |
952 | 958 | ||
953 | out: | 959 | out: |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 96eb9fef7bd2..0eb9a4da069e 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -54,6 +54,7 @@ | |||
54 | #include "version.h" | 54 | #include "version.h" |
55 | #include "export.h" | 55 | #include "export.h" |
56 | #include "compression.h" | 56 | #include "compression.h" |
57 | #include "rcu-string.h" | ||
57 | 58 | ||
58 | #define CREATE_TRACE_POINTS | 59 | #define CREATE_TRACE_POINTS |
59 | #include <trace/events/btrfs.h> | 60 | #include <trace/events/btrfs.h> |
@@ -1482,12 +1483,44 @@ static void btrfs_fs_dirty_inode(struct inode *inode, int flags) | |||
1482 | "error %d\n", btrfs_ino(inode), ret); | 1483 | "error %d\n", btrfs_ino(inode), ret); |
1483 | } | 1484 | } |
1484 | 1485 | ||
1486 | static int btrfs_show_devname(struct seq_file *m, struct dentry *root) | ||
1487 | { | ||
1488 | struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb); | ||
1489 | struct btrfs_fs_devices *cur_devices; | ||
1490 | struct btrfs_device *dev, *first_dev = NULL; | ||
1491 | struct list_head *head; | ||
1492 | struct rcu_string *name; | ||
1493 | |||
1494 | mutex_lock(&fs_info->fs_devices->device_list_mutex); | ||
1495 | cur_devices = fs_info->fs_devices; | ||
1496 | while (cur_devices) { | ||
1497 | head = &cur_devices->devices; | ||
1498 | list_for_each_entry(dev, head, dev_list) { | ||
1499 | if (!first_dev || dev->devid < first_dev->devid) | ||
1500 | first_dev = dev; | ||
1501 | } | ||
1502 | cur_devices = cur_devices->seed; | ||
1503 | } | ||
1504 | |||
1505 | if (first_dev) { | ||
1506 | rcu_read_lock(); | ||
1507 | name = rcu_dereference(first_dev->name); | ||
1508 | seq_escape(m, name->str, " \t\n\\"); | ||
1509 | rcu_read_unlock(); | ||
1510 | } else { | ||
1511 | WARN_ON(1); | ||
1512 | } | ||
1513 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | ||
1514 | return 0; | ||
1515 | } | ||
1516 | |||
1485 | static const struct super_operations btrfs_super_ops = { | 1517 | static const struct super_operations btrfs_super_ops = { |
1486 | .drop_inode = btrfs_drop_inode, | 1518 | .drop_inode = btrfs_drop_inode, |
1487 | .evict_inode = btrfs_evict_inode, | 1519 | .evict_inode = btrfs_evict_inode, |
1488 | .put_super = btrfs_put_super, | 1520 | .put_super = btrfs_put_super, |
1489 | .sync_fs = btrfs_sync_fs, | 1521 | .sync_fs = btrfs_sync_fs, |
1490 | .show_options = btrfs_show_options, | 1522 | .show_options = btrfs_show_options, |
1523 | .show_devname = btrfs_show_devname, | ||
1491 | .write_inode = btrfs_write_inode, | 1524 | .write_inode = btrfs_write_inode, |
1492 | .dirty_inode = btrfs_fs_dirty_inode, | 1525 | .dirty_inode = btrfs_fs_dirty_inode, |
1493 | .alloc_inode = btrfs_alloc_inode, | 1526 | .alloc_inode = btrfs_alloc_inode, |
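For reference, the ->show_devname hook supplies the device column of /proc/mounts. Btrfs needs the seed-chain walk above because its device list can span several fs_devices, but a filesystem with a single backing device could satisfy the same contract with something as small as the sketch below (illustrative only; it just reuses the superblock's s_id string):

	static int single_dev_show_devname(struct seq_file *m, struct dentry *root)
	{
		struct super_block *sb = root->d_sb;

		/* escape whitespace and backslashes so the mount table stays parseable */
		seq_escape(m, sb->s_id, " \t\n\\");
		return 0;
	}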
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 1791c6e3d834..b72b068183ec 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -100,6 +100,10 @@ loop: | |||
100 | kmem_cache_free(btrfs_transaction_cachep, cur_trans); | 100 | kmem_cache_free(btrfs_transaction_cachep, cur_trans); |
101 | cur_trans = fs_info->running_transaction; | 101 | cur_trans = fs_info->running_transaction; |
102 | goto loop; | 102 | goto loop; |
103 | } else if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { | ||
104 | spin_unlock(&root->fs_info->trans_lock); | ||
105 | kmem_cache_free(btrfs_transaction_cachep, cur_trans); | ||
106 | return -EROFS; | ||
103 | } | 107 | } |
104 | 108 | ||
105 | atomic_set(&cur_trans->num_writers, 1); | 109 | atomic_set(&cur_trans->num_writers, 1); |
@@ -1213,14 +1217,20 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, | |||
1213 | 1217 | ||
1214 | 1218 | ||
1215 | static void cleanup_transaction(struct btrfs_trans_handle *trans, | 1219 | static void cleanup_transaction(struct btrfs_trans_handle *trans, |
1216 | struct btrfs_root *root) | 1220 | struct btrfs_root *root, int err) |
1217 | { | 1221 | { |
1218 | struct btrfs_transaction *cur_trans = trans->transaction; | 1222 | struct btrfs_transaction *cur_trans = trans->transaction; |
1219 | 1223 | ||
1220 | WARN_ON(trans->use_count > 1); | 1224 | WARN_ON(trans->use_count > 1); |
1221 | 1225 | ||
1226 | btrfs_abort_transaction(trans, root, err); | ||
1227 | |||
1222 | spin_lock(&root->fs_info->trans_lock); | 1228 | spin_lock(&root->fs_info->trans_lock); |
1223 | list_del_init(&cur_trans->list); | 1229 | list_del_init(&cur_trans->list); |
1230 | if (cur_trans == root->fs_info->running_transaction) { | ||
1231 | root->fs_info->running_transaction = NULL; | ||
1232 | root->fs_info->trans_no_join = 0; | ||
1233 | } | ||
1224 | spin_unlock(&root->fs_info->trans_lock); | 1234 | spin_unlock(&root->fs_info->trans_lock); |
1225 | 1235 | ||
1226 | btrfs_cleanup_one_transaction(trans->transaction, root); | 1236 | btrfs_cleanup_one_transaction(trans->transaction, root); |
@@ -1526,7 +1536,7 @@ cleanup_transaction: | |||
1526 | // WARN_ON(1); | 1536 | // WARN_ON(1); |
1527 | if (current->journal_info == trans) | 1537 | if (current->journal_info == trans) |
1528 | current->journal_info = NULL; | 1538 | current->journal_info = NULL; |
1529 | cleanup_transaction(trans, root); | 1539 | cleanup_transaction(trans, root, ret); |
1530 | 1540 | ||
1531 | return ret; | 1541 | return ret; |
1532 | } | 1542 | } |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 7782020996fe..8a3d2594b807 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include "volumes.h" | 35 | #include "volumes.h" |
36 | #include "async-thread.h" | 36 | #include "async-thread.h" |
37 | #include "check-integrity.h" | 37 | #include "check-integrity.h" |
38 | #include "rcu-string.h" | ||
38 | 39 | ||
39 | static int init_first_rw_device(struct btrfs_trans_handle *trans, | 40 | static int init_first_rw_device(struct btrfs_trans_handle *trans, |
40 | struct btrfs_root *root, | 41 | struct btrfs_root *root, |
@@ -64,7 +65,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices) | |||
64 | device = list_entry(fs_devices->devices.next, | 65 | device = list_entry(fs_devices->devices.next, |
65 | struct btrfs_device, dev_list); | 66 | struct btrfs_device, dev_list); |
66 | list_del(&device->dev_list); | 67 | list_del(&device->dev_list); |
67 | kfree(device->name); | 68 | rcu_string_free(device->name); |
68 | kfree(device); | 69 | kfree(device); |
69 | } | 70 | } |
70 | kfree(fs_devices); | 71 | kfree(fs_devices); |
@@ -334,8 +335,8 @@ static noinline int device_list_add(const char *path, | |||
334 | { | 335 | { |
335 | struct btrfs_device *device; | 336 | struct btrfs_device *device; |
336 | struct btrfs_fs_devices *fs_devices; | 337 | struct btrfs_fs_devices *fs_devices; |
338 | struct rcu_string *name; | ||
337 | u64 found_transid = btrfs_super_generation(disk_super); | 339 | u64 found_transid = btrfs_super_generation(disk_super); |
338 | char *name; | ||
339 | 340 | ||
340 | fs_devices = find_fsid(disk_super->fsid); | 341 | fs_devices = find_fsid(disk_super->fsid); |
341 | if (!fs_devices) { | 342 | if (!fs_devices) { |
@@ -369,11 +370,13 @@ static noinline int device_list_add(const char *path, | |||
369 | memcpy(device->uuid, disk_super->dev_item.uuid, | 370 | memcpy(device->uuid, disk_super->dev_item.uuid, |
370 | BTRFS_UUID_SIZE); | 371 | BTRFS_UUID_SIZE); |
371 | spin_lock_init(&device->io_lock); | 372 | spin_lock_init(&device->io_lock); |
372 | device->name = kstrdup(path, GFP_NOFS); | 373 | |
373 | if (!device->name) { | 374 | name = rcu_string_strdup(path, GFP_NOFS); |
375 | if (!name) { | ||
374 | kfree(device); | 376 | kfree(device); |
375 | return -ENOMEM; | 377 | return -ENOMEM; |
376 | } | 378 | } |
379 | rcu_assign_pointer(device->name, name); | ||
377 | INIT_LIST_HEAD(&device->dev_alloc_list); | 380 | INIT_LIST_HEAD(&device->dev_alloc_list); |
378 | 381 | ||
379 | /* init readahead state */ | 382 | /* init readahead state */ |
@@ -390,12 +393,12 @@ static noinline int device_list_add(const char *path, | |||
390 | 393 | ||
391 | device->fs_devices = fs_devices; | 394 | device->fs_devices = fs_devices; |
392 | fs_devices->num_devices++; | 395 | fs_devices->num_devices++; |
393 | } else if (!device->name || strcmp(device->name, path)) { | 396 | } else if (!device->name || strcmp(device->name->str, path)) { |
394 | name = kstrdup(path, GFP_NOFS); | 397 | name = rcu_string_strdup(path, GFP_NOFS); |
395 | if (!name) | 398 | if (!name) |
396 | return -ENOMEM; | 399 | return -ENOMEM; |
397 | kfree(device->name); | 400 | rcu_string_free(device->name); |
398 | device->name = name; | 401 | rcu_assign_pointer(device->name, name); |
399 | if (device->missing) { | 402 | if (device->missing) { |
400 | fs_devices->missing_devices--; | 403 | fs_devices->missing_devices--; |
401 | device->missing = 0; | 404 | device->missing = 0; |
@@ -430,15 +433,22 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) | |||
430 | 433 | ||
431 | /* We have held the volume lock, it is safe to get the devices. */ | 434 | /* We have held the volume lock, it is safe to get the devices. */ |
432 | list_for_each_entry(orig_dev, &orig->devices, dev_list) { | 435 | list_for_each_entry(orig_dev, &orig->devices, dev_list) { |
436 | struct rcu_string *name; | ||
437 | |||
433 | device = kzalloc(sizeof(*device), GFP_NOFS); | 438 | device = kzalloc(sizeof(*device), GFP_NOFS); |
434 | if (!device) | 439 | if (!device) |
435 | goto error; | 440 | goto error; |
436 | 441 | ||
437 | device->name = kstrdup(orig_dev->name, GFP_NOFS); | 442 | /* |
438 | if (!device->name) { | 443 | * This is ok to do without rcu read locked because we hold the |
444 | * uuid mutex so nothing we touch in here is going to disappear. | ||
445 | */ | ||
446 | name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS); | ||
447 | if (!name) { | ||
439 | kfree(device); | 448 | kfree(device); |
440 | goto error; | 449 | goto error; |
441 | } | 450 | } |
451 | rcu_assign_pointer(device->name, name); | ||
442 | 452 | ||
443 | device->devid = orig_dev->devid; | 453 | device->devid = orig_dev->devid; |
444 | device->work.func = pending_bios_fn; | 454 | device->work.func = pending_bios_fn; |
@@ -491,7 +501,7 @@ again: | |||
491 | } | 501 | } |
492 | list_del_init(&device->dev_list); | 502 | list_del_init(&device->dev_list); |
493 | fs_devices->num_devices--; | 503 | fs_devices->num_devices--; |
494 | kfree(device->name); | 504 | rcu_string_free(device->name); |
495 | kfree(device); | 505 | kfree(device); |
496 | } | 506 | } |
497 | 507 | ||
@@ -516,7 +526,7 @@ static void __free_device(struct work_struct *work) | |||
516 | if (device->bdev) | 526 | if (device->bdev) |
517 | blkdev_put(device->bdev, device->mode); | 527 | blkdev_put(device->bdev, device->mode); |
518 | 528 | ||
519 | kfree(device->name); | 529 | rcu_string_free(device->name); |
520 | kfree(device); | 530 | kfree(device); |
521 | } | 531 | } |
522 | 532 | ||
@@ -540,6 +550,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) | |||
540 | mutex_lock(&fs_devices->device_list_mutex); | 550 | mutex_lock(&fs_devices->device_list_mutex); |
541 | list_for_each_entry(device, &fs_devices->devices, dev_list) { | 551 | list_for_each_entry(device, &fs_devices->devices, dev_list) { |
542 | struct btrfs_device *new_device; | 552 | struct btrfs_device *new_device; |
553 | struct rcu_string *name; | ||
543 | 554 | ||
544 | if (device->bdev) | 555 | if (device->bdev) |
545 | fs_devices->open_devices--; | 556 | fs_devices->open_devices--; |
@@ -555,8 +566,11 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) | |||
555 | new_device = kmalloc(sizeof(*new_device), GFP_NOFS); | 566 | new_device = kmalloc(sizeof(*new_device), GFP_NOFS); |
556 | BUG_ON(!new_device); /* -ENOMEM */ | 567 | BUG_ON(!new_device); /* -ENOMEM */ |
557 | memcpy(new_device, device, sizeof(*new_device)); | 568 | memcpy(new_device, device, sizeof(*new_device)); |
558 | new_device->name = kstrdup(device->name, GFP_NOFS); | 569 | |
559 | BUG_ON(device->name && !new_device->name); /* -ENOMEM */ | 570 | /* Safe because we are under uuid_mutex */ |
571 | name = rcu_string_strdup(device->name->str, GFP_NOFS); | ||
572 | BUG_ON(device->name && !name); /* -ENOMEM */ | ||
573 | rcu_assign_pointer(new_device->name, name); | ||
560 | new_device->bdev = NULL; | 574 | new_device->bdev = NULL; |
561 | new_device->writeable = 0; | 575 | new_device->writeable = 0; |
562 | new_device->in_fs_metadata = 0; | 576 | new_device->in_fs_metadata = 0; |
@@ -621,9 +635,9 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, | |||
621 | if (!device->name) | 635 | if (!device->name) |
622 | continue; | 636 | continue; |
623 | 637 | ||
624 | bdev = blkdev_get_by_path(device->name, flags, holder); | 638 | bdev = blkdev_get_by_path(device->name->str, flags, holder); |
625 | if (IS_ERR(bdev)) { | 639 | if (IS_ERR(bdev)) { |
626 | printk(KERN_INFO "open %s failed\n", device->name); | 640 | printk(KERN_INFO "open %s failed\n", device->name->str); |
627 | goto error; | 641 | goto error; |
628 | } | 642 | } |
629 | filemap_write_and_wait(bdev->bd_inode->i_mapping); | 643 | filemap_write_and_wait(bdev->bd_inode->i_mapping); |
@@ -1632,6 +1646,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1632 | struct block_device *bdev; | 1646 | struct block_device *bdev; |
1633 | struct list_head *devices; | 1647 | struct list_head *devices; |
1634 | struct super_block *sb = root->fs_info->sb; | 1648 | struct super_block *sb = root->fs_info->sb; |
1649 | struct rcu_string *name; | ||
1635 | u64 total_bytes; | 1650 | u64 total_bytes; |
1636 | int seeding_dev = 0; | 1651 | int seeding_dev = 0; |
1637 | int ret = 0; | 1652 | int ret = 0; |
@@ -1671,23 +1686,24 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1671 | goto error; | 1686 | goto error; |
1672 | } | 1687 | } |
1673 | 1688 | ||
1674 | device->name = kstrdup(device_path, GFP_NOFS); | 1689 | name = rcu_string_strdup(device_path, GFP_NOFS); |
1675 | if (!device->name) { | 1690 | if (!name) { |
1676 | kfree(device); | 1691 | kfree(device); |
1677 | ret = -ENOMEM; | 1692 | ret = -ENOMEM; |
1678 | goto error; | 1693 | goto error; |
1679 | } | 1694 | } |
1695 | rcu_assign_pointer(device->name, name); | ||
1680 | 1696 | ||
1681 | ret = find_next_devid(root, &device->devid); | 1697 | ret = find_next_devid(root, &device->devid); |
1682 | if (ret) { | 1698 | if (ret) { |
1683 | kfree(device->name); | 1699 | rcu_string_free(device->name); |
1684 | kfree(device); | 1700 | kfree(device); |
1685 | goto error; | 1701 | goto error; |
1686 | } | 1702 | } |
1687 | 1703 | ||
1688 | trans = btrfs_start_transaction(root, 0); | 1704 | trans = btrfs_start_transaction(root, 0); |
1689 | if (IS_ERR(trans)) { | 1705 | if (IS_ERR(trans)) { |
1690 | kfree(device->name); | 1706 | rcu_string_free(device->name); |
1691 | kfree(device); | 1707 | kfree(device); |
1692 | ret = PTR_ERR(trans); | 1708 | ret = PTR_ERR(trans); |
1693 | goto error; | 1709 | goto error; |
@@ -1796,7 +1812,7 @@ error_trans: | |||
1796 | unlock_chunks(root); | 1812 | unlock_chunks(root); |
1797 | btrfs_abort_transaction(trans, root, ret); | 1813 | btrfs_abort_transaction(trans, root, ret); |
1798 | btrfs_end_transaction(trans, root); | 1814 | btrfs_end_transaction(trans, root); |
1799 | kfree(device->name); | 1815 | rcu_string_free(device->name); |
1800 | kfree(device); | 1816 | kfree(device); |
1801 | error: | 1817 | error: |
1802 | blkdev_put(bdev, FMODE_EXCL); | 1818 | blkdev_put(bdev, FMODE_EXCL); |
@@ -4204,10 +4220,17 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | |||
4204 | bio->bi_sector = bbio->stripes[dev_nr].physical >> 9; | 4220 | bio->bi_sector = bbio->stripes[dev_nr].physical >> 9; |
4205 | dev = bbio->stripes[dev_nr].dev; | 4221 | dev = bbio->stripes[dev_nr].dev; |
4206 | if (dev && dev->bdev && (rw != WRITE || dev->writeable)) { | 4222 | if (dev && dev->bdev && (rw != WRITE || dev->writeable)) { |
4223 | #ifdef DEBUG | ||
4224 | struct rcu_string *name; | ||
4225 | |||
4226 | rcu_read_lock(); | ||
4227 | name = rcu_dereference(dev->name); | ||
4207 | pr_debug("btrfs_map_bio: rw %d, secor=%llu, dev=%lu " | 4228 | pr_debug("btrfs_map_bio: rw %d, secor=%llu, dev=%lu " |
4208 | "(%s id %llu), size=%u\n", rw, | 4229 | "(%s id %llu), size=%u\n", rw, |
4209 | (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev, | 4230 | (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev, |
4210 | dev->name, dev->devid, bio->bi_size); | 4231 | name->str, dev->devid, bio->bi_size); |
4232 | rcu_read_unlock(); | ||
4233 | #endif | ||
4211 | bio->bi_bdev = dev->bdev; | 4234 | bio->bi_bdev = dev->bdev; |
4212 | if (async_submit) | 4235 | if (async_submit) |
4213 | schedule_bio(root, dev, rw, bio); | 4236 | schedule_bio(root, dev, rw, bio); |
@@ -4694,8 +4717,9 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) | |||
4694 | key.offset = device->devid; | 4717 | key.offset = device->devid; |
4695 | ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); | 4718 | ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); |
4696 | if (ret) { | 4719 | if (ret) { |
4697 | printk(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n", | 4720 | printk_in_rcu(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n", |
4698 | device->name, (unsigned long long)device->devid); | 4721 | rcu_str_deref(device->name), |
4722 | (unsigned long long)device->devid); | ||
4699 | __btrfs_reset_dev_stats(device); | 4723 | __btrfs_reset_dev_stats(device); |
4700 | device->dev_stats_valid = 1; | 4724 | device->dev_stats_valid = 1; |
4701 | btrfs_release_path(path); | 4725 | btrfs_release_path(path); |
@@ -4747,8 +4771,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, | |||
4747 | BUG_ON(!path); | 4771 | BUG_ON(!path); |
4748 | ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); | 4772 | ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); |
4749 | if (ret < 0) { | 4773 | if (ret < 0) { |
4750 | printk(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n", | 4774 | printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n", |
4751 | ret, device->name); | 4775 | ret, rcu_str_deref(device->name)); |
4752 | goto out; | 4776 | goto out; |
4753 | } | 4777 | } |
4754 | 4778 | ||
@@ -4757,8 +4781,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, | |||
4757 | /* need to delete old one and insert a new one */ | 4781 | /* need to delete old one and insert a new one */ |
4758 | ret = btrfs_del_item(trans, dev_root, path); | 4782 | ret = btrfs_del_item(trans, dev_root, path); |
4759 | if (ret != 0) { | 4783 | if (ret != 0) { |
4760 | printk(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n", | 4784 | printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n", |
4761 | device->name, ret); | 4785 | rcu_str_deref(device->name), ret); |
4762 | goto out; | 4786 | goto out; |
4763 | } | 4787 | } |
4764 | ret = 1; | 4788 | ret = 1; |
@@ -4770,8 +4794,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, | |||
4770 | ret = btrfs_insert_empty_item(trans, dev_root, path, | 4794 | ret = btrfs_insert_empty_item(trans, dev_root, path, |
4771 | &key, sizeof(*ptr)); | 4795 | &key, sizeof(*ptr)); |
4772 | if (ret < 0) { | 4796 | if (ret < 0) { |
4773 | printk(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n", | 4797 | printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n", |
4774 | device->name, ret); | 4798 | rcu_str_deref(device->name), ret); |
4775 | goto out; | 4799 | goto out; |
4776 | } | 4800 | } |
4777 | } | 4801 | } |
@@ -4823,9 +4847,9 @@ void btrfs_dev_stat_print_on_error(struct btrfs_device *dev) | |||
4823 | { | 4847 | { |
4824 | if (!dev->dev_stats_valid) | 4848 | if (!dev->dev_stats_valid) |
4825 | return; | 4849 | return; |
4826 | printk_ratelimited(KERN_ERR | 4850 | printk_ratelimited_in_rcu(KERN_ERR |
4827 | "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", | 4851 | "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", |
4828 | dev->name, | 4852 | rcu_str_deref(dev->name), |
4829 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), | 4853 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), |
4830 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), | 4854 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), |
4831 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), | 4855 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), |
@@ -4837,8 +4861,8 @@ void btrfs_dev_stat_print_on_error(struct btrfs_device *dev) | |||
4837 | 4861 | ||
4838 | static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) | 4862 | static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) |
4839 | { | 4863 | { |
4840 | printk(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", | 4864 | printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", |
4841 | dev->name, | 4865 | rcu_str_deref(dev->name), |
4842 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), | 4866 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), |
4843 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), | 4867 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), |
4844 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), | 4868 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), |
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 3406a88ca83e..74366f27a76b 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h | |||
@@ -58,7 +58,7 @@ struct btrfs_device { | |||
58 | /* the mode sent to blkdev_get */ | 58 | /* the mode sent to blkdev_get */ |
59 | fmode_t mode; | 59 | fmode_t mode; |
60 | 60 | ||
61 | char *name; | 61 | struct rcu_string *name; |
62 | 62 | ||
63 | /* the internal btrfs device id */ | 63 | /* the internal btrfs device id */ |
64 | u64 devid; | 64 | u64 devid; |
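With volumes.h switching device->name to a struct rcu_string *, the rest of the series follows one pattern throughout: writers swap the pointer while holding the existing device mutexes, readers only need rcu_read_lock(). A condensed sketch of both sides, assuming the helper names used above:

	/* updater: caller already holds uuid_mutex or device_list_mutex */
	name = rcu_string_strdup(path, GFP_NOFS);
	if (!name)
		return -ENOMEM;
	rcu_string_free(device->name);		/* old string freed after a grace period */
	rcu_assign_pointer(device->name, name);

	/* reader: no mutex required */
	rcu_read_lock();
	pr_info("btrfs device: %s\n", rcu_str_deref(device->name));
	rcu_read_unlock();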
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 20350a93ed99..6df0cbe1cbc9 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -174,6 +174,7 @@ struct smb_version_operations { | |||
174 | void (*add_credits)(struct TCP_Server_Info *, const unsigned int); | 174 | void (*add_credits)(struct TCP_Server_Info *, const unsigned int); |
175 | void (*set_credits)(struct TCP_Server_Info *, const int); | 175 | void (*set_credits)(struct TCP_Server_Info *, const int); |
176 | int * (*get_credits_field)(struct TCP_Server_Info *); | 176 | int * (*get_credits_field)(struct TCP_Server_Info *); |
177 | __u64 (*get_next_mid)(struct TCP_Server_Info *); | ||
177 | /* data offset from read response message */ | 178 | /* data offset from read response message */ |
178 | unsigned int (*read_data_offset)(char *); | 179 | unsigned int (*read_data_offset)(char *); |
179 | /* data length from read response message */ | 180 | /* data length from read response message */ |
@@ -399,6 +400,12 @@ set_credits(struct TCP_Server_Info *server, const int val) | |||
399 | server->ops->set_credits(server, val); | 400 | server->ops->set_credits(server, val); |
400 | } | 401 | } |
401 | 402 | ||
403 | static inline __u64 | ||
404 | get_next_mid(struct TCP_Server_Info *server) | ||
405 | { | ||
406 | return server->ops->get_next_mid(server); | ||
407 | } | ||
408 | |||
402 | /* | 409 | /* |
403 | * Macros to allow the TCP_Server_Info->net field and related code to drop out | 410 | * Macros to allow the TCP_Server_Info->net field and related code to drop out |
404 | * when CONFIG_NET_NS isn't set. | 411 | * when CONFIG_NET_NS isn't set. |
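Moving mid allocation behind ->get_next_mid lets each dialect pick its own policy: the SMB1 implementation added in smb1ops.c below keeps the 16-bit collision scan, while a dialect with a full 64-bit mid space could, hypothetically, get away with a bare counter along these lines (a sketch for illustration, not code from this series):

	static __u64 example_get_next_mid(struct TCP_Server_Info *server)
	{
		__u64 mid;

		spin_lock(&GlobalMid_Lock);
		mid = server->CurrentMid++;	/* 64-bit space: wraparound collisions are not a practical concern */
		spin_unlock(&GlobalMid_Lock);
		return mid;
	}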
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 5ec21ecf7980..0a6cbfe2761e 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -114,7 +114,6 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct, | |||
114 | void **request_buf); | 114 | void **request_buf); |
115 | extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses, | 115 | extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses, |
116 | const struct nls_table *nls_cp); | 116 | const struct nls_table *nls_cp); |
117 | extern __u64 GetNextMid(struct TCP_Server_Info *server); | ||
118 | extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601); | 117 | extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601); |
119 | extern u64 cifs_UnixTimeToNT(struct timespec); | 118 | extern u64 cifs_UnixTimeToNT(struct timespec); |
120 | extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, | 119 | extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index b5ad716b2642..5b400730c213 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -268,7 +268,7 @@ small_smb_init_no_tc(const int smb_command, const int wct, | |||
268 | return rc; | 268 | return rc; |
269 | 269 | ||
270 | buffer = (struct smb_hdr *)*request_buf; | 270 | buffer = (struct smb_hdr *)*request_buf; |
271 | buffer->Mid = GetNextMid(ses->server); | 271 | buffer->Mid = get_next_mid(ses->server); |
272 | if (ses->capabilities & CAP_UNICODE) | 272 | if (ses->capabilities & CAP_UNICODE) |
273 | buffer->Flags2 |= SMBFLG2_UNICODE; | 273 | buffer->Flags2 |= SMBFLG2_UNICODE; |
274 | if (ses->capabilities & CAP_STATUS32) | 274 | if (ses->capabilities & CAP_STATUS32) |
@@ -402,7 +402,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses) | |||
402 | 402 | ||
403 | cFYI(1, "secFlags 0x%x", secFlags); | 403 | cFYI(1, "secFlags 0x%x", secFlags); |
404 | 404 | ||
405 | pSMB->hdr.Mid = GetNextMid(server); | 405 | pSMB->hdr.Mid = get_next_mid(server); |
406 | pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS); | 406 | pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS); |
407 | 407 | ||
408 | if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) | 408 | if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) |
@@ -782,7 +782,7 @@ CIFSSMBLogoff(const int xid, struct cifs_ses *ses) | |||
782 | return rc; | 782 | return rc; |
783 | } | 783 | } |
784 | 784 | ||
785 | pSMB->hdr.Mid = GetNextMid(ses->server); | 785 | pSMB->hdr.Mid = get_next_mid(ses->server); |
786 | 786 | ||
787 | if (ses->server->sec_mode & | 787 | if (ses->server->sec_mode & |
788 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) | 788 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) |
@@ -4762,7 +4762,7 @@ getDFSRetry: | |||
4762 | 4762 | ||
4763 | /* server pointer checked in called function, | 4763 | /* server pointer checked in called function, |
4764 | but should never be null here anyway */ | 4764 | but should never be null here anyway */ |
4765 | pSMB->hdr.Mid = GetNextMid(ses->server); | 4765 | pSMB->hdr.Mid = get_next_mid(ses->server); |
4766 | pSMB->hdr.Tid = ses->ipc_tid; | 4766 | pSMB->hdr.Tid = ses->ipc_tid; |
4767 | pSMB->hdr.Uid = ses->Suid; | 4767 | pSMB->hdr.Uid = ses->Suid; |
4768 | if (ses->capabilities & CAP_STATUS32) | 4768 | if (ses->capabilities & CAP_STATUS32) |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index ccafdedd0dbc..78db68a5cf44 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -1058,13 +1058,15 @@ cifs_demultiplex_thread(void *p) | |||
1058 | if (mid_entry != NULL) { | 1058 | if (mid_entry != NULL) { |
1059 | if (!mid_entry->multiRsp || mid_entry->multiEnd) | 1059 | if (!mid_entry->multiRsp || mid_entry->multiEnd) |
1060 | mid_entry->callback(mid_entry); | 1060 | mid_entry->callback(mid_entry); |
1061 | } else if (!server->ops->is_oplock_break(buf, server)) { | 1061 | } else if (!server->ops->is_oplock_break || |
1062 | !server->ops->is_oplock_break(buf, server)) { | ||
1062 | cERROR(1, "No task to wake, unknown frame received! " | 1063 | cERROR(1, "No task to wake, unknown frame received! " |
1063 | "NumMids %d", atomic_read(&midCount)); | 1064 | "NumMids %d", atomic_read(&midCount)); |
1064 | cifs_dump_mem("Received Data is: ", buf, | 1065 | cifs_dump_mem("Received Data is: ", buf, |
1065 | HEADER_SIZE(server)); | 1066 | HEADER_SIZE(server)); |
1066 | #ifdef CONFIG_CIFS_DEBUG2 | 1067 | #ifdef CONFIG_CIFS_DEBUG2 |
1067 | server->ops->dump_detail(buf); | 1068 | if (server->ops->dump_detail) |
1069 | server->ops->dump_detail(buf); | ||
1068 | cifs_dump_mids(server); | 1070 | cifs_dump_mids(server); |
1069 | #endif /* CIFS_DEBUG2 */ | 1071 | #endif /* CIFS_DEBUG2 */ |
1070 | 1072 | ||
@@ -3938,7 +3940,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses, | |||
3938 | header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX, | 3940 | header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX, |
3939 | NULL /*no tid */ , 4 /*wct */ ); | 3941 | NULL /*no tid */ , 4 /*wct */ ); |
3940 | 3942 | ||
3941 | smb_buffer->Mid = GetNextMid(ses->server); | 3943 | smb_buffer->Mid = get_next_mid(ses->server); |
3942 | smb_buffer->Uid = ses->Suid; | 3944 | smb_buffer->Uid = ses->Suid; |
3943 | pSMB = (TCONX_REQ *) smb_buffer; | 3945 | pSMB = (TCONX_REQ *) smb_buffer; |
3944 | pSMBr = (TCONX_RSP *) smb_buffer_response; | 3946 | pSMBr = (TCONX_RSP *) smb_buffer_response; |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 253170dfa716..513adbc211d7 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -876,7 +876,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile) | |||
876 | struct cifsLockInfo *li, *tmp; | 876 | struct cifsLockInfo *li, *tmp; |
877 | struct cifs_tcon *tcon; | 877 | struct cifs_tcon *tcon; |
878 | struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); | 878 | struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); |
879 | unsigned int num, max_num; | 879 | unsigned int num, max_num, max_buf; |
880 | LOCKING_ANDX_RANGE *buf, *cur; | 880 | LOCKING_ANDX_RANGE *buf, *cur; |
881 | int types[] = {LOCKING_ANDX_LARGE_FILES, | 881 | int types[] = {LOCKING_ANDX_LARGE_FILES, |
882 | LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; | 882 | LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; |
@@ -892,8 +892,19 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile) | |||
892 | return rc; | 892 | return rc; |
893 | } | 893 | } |
894 | 894 | ||
895 | max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) / | 895 | /* |
896 | sizeof(LOCKING_ANDX_RANGE); | 896 | * Accessing maxBuf is racy with cifs_reconnect - need to store value |
897 | * and check it for zero before using. | ||
898 | */ | ||
899 | max_buf = tcon->ses->server->maxBuf; | ||
900 | if (!max_buf) { | ||
901 | mutex_unlock(&cinode->lock_mutex); | ||
902 | FreeXid(xid); | ||
903 | return -EINVAL; | ||
904 | } | ||
905 | |||
906 | max_num = (max_buf - sizeof(struct smb_hdr)) / | ||
907 | sizeof(LOCKING_ANDX_RANGE); | ||
897 | buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); | 908 | buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); |
898 | if (!buf) { | 909 | if (!buf) { |
899 | mutex_unlock(&cinode->lock_mutex); | 910 | mutex_unlock(&cinode->lock_mutex); |
@@ -1218,7 +1229,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid) | |||
1218 | int types[] = {LOCKING_ANDX_LARGE_FILES, | 1229 | int types[] = {LOCKING_ANDX_LARGE_FILES, |
1219 | LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; | 1230 | LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; |
1220 | unsigned int i; | 1231 | unsigned int i; |
1221 | unsigned int max_num, num; | 1232 | unsigned int max_num, num, max_buf; |
1222 | LOCKING_ANDX_RANGE *buf, *cur; | 1233 | LOCKING_ANDX_RANGE *buf, *cur; |
1223 | struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); | 1234 | struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); |
1224 | struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); | 1235 | struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); |
@@ -1228,8 +1239,16 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid) | |||
1228 | 1239 | ||
1229 | INIT_LIST_HEAD(&tmp_llist); | 1240 | INIT_LIST_HEAD(&tmp_llist); |
1230 | 1241 | ||
1231 | max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) / | 1242 | /* |
1232 | sizeof(LOCKING_ANDX_RANGE); | 1243 | * Accessing maxBuf is racy with cifs_reconnect - need to store value |
1244 | * and check it for zero before using. | ||
1245 | */ | ||
1246 | max_buf = tcon->ses->server->maxBuf; | ||
1247 | if (!max_buf) | ||
1248 | return -EINVAL; | ||
1249 | |||
1250 | max_num = (max_buf - sizeof(struct smb_hdr)) / | ||
1251 | sizeof(LOCKING_ANDX_RANGE); | ||
1233 | buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); | 1252 | buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); |
1234 | if (!buf) | 1253 | if (!buf) |
1235 | return -ENOMEM; | 1254 | return -ENOMEM; |
@@ -1247,46 +1266,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid) | |||
1247 | continue; | 1266 | continue; |
1248 | if (types[i] != li->type) | 1267 | if (types[i] != li->type) |
1249 | continue; | 1268 | continue; |
1250 | if (!cinode->can_cache_brlcks) { | 1269 | if (cinode->can_cache_brlcks) { |
1251 | cur->Pid = cpu_to_le16(li->pid); | ||
1252 | cur->LengthLow = cpu_to_le32((u32)li->length); | ||
1253 | cur->LengthHigh = | ||
1254 | cpu_to_le32((u32)(li->length>>32)); | ||
1255 | cur->OffsetLow = cpu_to_le32((u32)li->offset); | ||
1256 | cur->OffsetHigh = | ||
1257 | cpu_to_le32((u32)(li->offset>>32)); | ||
1258 | /* | ||
1259 | * We need to save a lock here to let us add | ||
1260 | * it again to the file's list if the unlock | ||
1261 | * range request fails on the server. | ||
1262 | */ | ||
1263 | list_move(&li->llist, &tmp_llist); | ||
1264 | if (++num == max_num) { | ||
1265 | stored_rc = cifs_lockv(xid, tcon, | ||
1266 | cfile->netfid, | ||
1267 | li->type, num, | ||
1268 | 0, buf); | ||
1269 | if (stored_rc) { | ||
1270 | /* | ||
1271 | * We failed on the unlock range | ||
1272 | * request - add all locks from | ||
1273 | * the tmp list to the head of | ||
1274 | * the file's list. | ||
1275 | */ | ||
1276 | cifs_move_llist(&tmp_llist, | ||
1277 | &cfile->llist); | ||
1278 | rc = stored_rc; | ||
1279 | } else | ||
1280 | /* | ||
1281 | * The unlock range request | ||
1282 | * succeed - free the tmp list. | ||
1283 | */ | ||
1284 | cifs_free_llist(&tmp_llist); | ||
1285 | cur = buf; | ||
1286 | num = 0; | ||
1287 | } else | ||
1288 | cur++; | ||
1289 | } else { | ||
1290 | /* | 1270 | /* |
1291 | * We can cache brlock requests - simply remove | 1271 | * We can cache brlock requests - simply remove |
1292 | * a lock from the file's list. | 1272 | * a lock from the file's list. |
@@ -1294,7 +1274,41 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid) | |||
1294 | list_del(&li->llist); | 1274 | list_del(&li->llist); |
1295 | cifs_del_lock_waiters(li); | 1275 | cifs_del_lock_waiters(li); |
1296 | kfree(li); | 1276 | kfree(li); |
1277 | continue; | ||
1297 | } | 1278 | } |
1279 | cur->Pid = cpu_to_le16(li->pid); | ||
1280 | cur->LengthLow = cpu_to_le32((u32)li->length); | ||
1281 | cur->LengthHigh = cpu_to_le32((u32)(li->length>>32)); | ||
1282 | cur->OffsetLow = cpu_to_le32((u32)li->offset); | ||
1283 | cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32)); | ||
1284 | /* | ||
1285 | * We need to save a lock here to let us add it again to | ||
1286 | * the file's list if the unlock range request fails on | ||
1287 | * the server. | ||
1288 | */ | ||
1289 | list_move(&li->llist, &tmp_llist); | ||
1290 | if (++num == max_num) { | ||
1291 | stored_rc = cifs_lockv(xid, tcon, cfile->netfid, | ||
1292 | li->type, num, 0, buf); | ||
1293 | if (stored_rc) { | ||
1294 | /* | ||
1295 | * We failed on the unlock range | ||
1296 | * request - add all locks from the tmp | ||
1297 | * list to the head of the file's list. | ||
1298 | */ | ||
1299 | cifs_move_llist(&tmp_llist, | ||
1300 | &cfile->llist); | ||
1301 | rc = stored_rc; | ||
1302 | } else | ||
1303 | /* | ||
1304 | * The unlock range request succeed - | ||
1305 | * free the tmp list. | ||
1306 | */ | ||
1307 | cifs_free_llist(&tmp_llist); | ||
1308 | cur = buf; | ||
1309 | num = 0; | ||
1310 | } else | ||
1311 | cur++; | ||
1298 | } | 1312 | } |
1299 | if (num) { | 1313 | if (num) { |
1300 | stored_rc = cifs_lockv(xid, tcon, cfile->netfid, | 1314 | stored_rc = cifs_lockv(xid, tcon, cfile->netfid, |
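Both file.c hunks apply the same idiom: snapshot server->maxBuf once, bail out if a concurrent cifs_reconnect() has zeroed it, and size the lock array from the local copy rather than re-reading the racy field. Pulled out of context, the pattern is simply:

	unsigned int max_buf, max_num;

	max_buf = tcon->ses->server->maxBuf;	/* single read of the racy field */
	if (!max_buf)
		return -EINVAL;			/* reconnect in progress, nothing negotiated yet */
	max_num = (max_buf - sizeof(struct smb_hdr)) / sizeof(LOCKING_ANDX_RANGE);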
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index e2552d2b2e42..557506ae1e2a 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c | |||
@@ -212,93 +212,6 @@ cifs_small_buf_release(void *buf_to_free) | |||
212 | return; | 212 | return; |
213 | } | 213 | } |
214 | 214 | ||
215 | /* | ||
216 | * Find a free multiplex id (SMB mid). Otherwise there could be | ||
217 | * mid collisions which might cause problems, demultiplexing the | ||
218 | * wrong response to this request. Multiplex ids could collide if | ||
219 | * one of a series requests takes much longer than the others, or | ||
220 | * if a very large number of long lived requests (byte range | ||
221 | * locks or FindNotify requests) are pending. No more than | ||
222 | * 64K-1 requests can be outstanding at one time. If no | ||
223 | * mids are available, return zero. A future optimization | ||
224 | * could make the combination of mids and uid the key we use | ||
225 | * to demultiplex on (rather than mid alone). | ||
226 | * In addition to the above check, the cifs demultiplex | ||
227 | * code already used the command code as a secondary | ||
228 | * check of the frame and if signing is negotiated the | ||
229 | * response would be discarded if the mid were the same | ||
230 | * but the signature was wrong. Since the mid is not put in the | ||
231 | * pending queue until later (when it is about to be dispatched) | ||
232 | * we do have to limit the number of outstanding requests | ||
233 | * to somewhat less than 64K-1 although it is hard to imagine | ||
234 | * so many threads being in the vfs at one time. | ||
235 | */ | ||
236 | __u64 GetNextMid(struct TCP_Server_Info *server) | ||
237 | { | ||
238 | __u64 mid = 0; | ||
239 | __u16 last_mid, cur_mid; | ||
240 | bool collision; | ||
241 | |||
242 | spin_lock(&GlobalMid_Lock); | ||
243 | |||
244 | /* mid is 16 bit only for CIFS/SMB */ | ||
245 | cur_mid = (__u16)((server->CurrentMid) & 0xffff); | ||
246 | /* we do not want to loop forever */ | ||
247 | last_mid = cur_mid; | ||
248 | cur_mid++; | ||
249 | |||
250 | /* | ||
251 | * This nested loop looks more expensive than it is. | ||
252 | * In practice the list of pending requests is short, | ||
253 | * fewer than 50, and the mids are likely to be unique | ||
254 | * on the first pass through the loop unless some request | ||
255 | * takes longer than the 64 thousand requests before it | ||
256 | * (and it would also have to have been a request that | ||
257 | * did not time out). | ||
258 | */ | ||
259 | while (cur_mid != last_mid) { | ||
260 | struct mid_q_entry *mid_entry; | ||
261 | unsigned int num_mids; | ||
262 | |||
263 | collision = false; | ||
264 | if (cur_mid == 0) | ||
265 | cur_mid++; | ||
266 | |||
267 | num_mids = 0; | ||
268 | list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { | ||
269 | ++num_mids; | ||
270 | if (mid_entry->mid == cur_mid && | ||
271 | mid_entry->mid_state == MID_REQUEST_SUBMITTED) { | ||
272 | /* This mid is in use, try a different one */ | ||
273 | collision = true; | ||
274 | break; | ||
275 | } | ||
276 | } | ||
277 | |||
278 | /* | ||
279 | * if we have more than 32k mids in the list, then something | ||
280 | * is very wrong. Possibly a local user is trying to DoS the | ||
281 | * box by issuing long-running calls and SIGKILL'ing them. If | ||
282 | * we get to 2^16 mids then we're in big trouble as this | ||
283 | * function could loop forever. | ||
284 | * | ||
285 | * Go ahead and assign out the mid in this situation, but force | ||
286 | * an eventual reconnect to clean out the pending_mid_q. | ||
287 | */ | ||
288 | if (num_mids > 32768) | ||
289 | server->tcpStatus = CifsNeedReconnect; | ||
290 | |||
291 | if (!collision) { | ||
292 | mid = (__u64)cur_mid; | ||
293 | server->CurrentMid = mid; | ||
294 | break; | ||
295 | } | ||
296 | cur_mid++; | ||
297 | } | ||
298 | spin_unlock(&GlobalMid_Lock); | ||
299 | return mid; | ||
300 | } | ||
301 | |||
302 | /* NB: MID can not be set if treeCon not passed in, in that | 215 | /* NB: MID can not be set if treeCon not passed in, in that |
303 | case it is responsbility of caller to set the mid */ | 216 | case it is responsbility of caller to set the mid */ |
304 | void | 217 | void |
@@ -334,7 +247,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , | |||
334 | 247 | ||
335 | /* Uid is not converted */ | 248 | /* Uid is not converted */ |
336 | buffer->Uid = treeCon->ses->Suid; | 249 | buffer->Uid = treeCon->ses->Suid; |
337 | buffer->Mid = GetNextMid(treeCon->ses->server); | 250 | buffer->Mid = get_next_mid(treeCon->ses->server); |
338 | } | 251 | } |
339 | if (treeCon->Flags & SMB_SHARE_IS_IN_DFS) | 252 | if (treeCon->Flags & SMB_SHARE_IS_IN_DFS) |
340 | buffer->Flags2 |= SMBFLG2_DFS; | 253 | buffer->Flags2 |= SMBFLG2_DFS; |
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index d9d615fbed3f..6dec38f5522d 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c | |||
@@ -125,6 +125,94 @@ cifs_get_credits_field(struct TCP_Server_Info *server) | |||
125 | return &server->credits; | 125 | return &server->credits; |
126 | } | 126 | } |
127 | 127 | ||
128 | /* | ||
129 | * Find a free multiplex id (SMB mid). Otherwise there could be | ||
130 | * mid collisions which might cause problems, demultiplexing the | ||
131 | * wrong response to this request. Multiplex ids could collide if | ||
132 | * one of a series requests takes much longer than the others, or | ||
133 | * if a very large number of long lived requests (byte range | ||
134 | * locks or FindNotify requests) are pending. No more than | ||
135 | * 64K-1 requests can be outstanding at one time. If no | ||
136 | * mids are available, return zero. A future optimization | ||
137 | * could make the combination of mids and uid the key we use | ||
138 | * to demultiplex on (rather than mid alone). | ||
139 | * In addition to the above check, the cifs demultiplex | ||
140 | * code already used the command code as a secondary | ||
141 | * check of the frame and if signing is negotiated the | ||
142 | * response would be discarded if the mid were the same | ||
143 | * but the signature was wrong. Since the mid is not put in the | ||
144 | * pending queue until later (when it is about to be dispatched) | ||
145 | * we do have to limit the number of outstanding requests | ||
146 | * to somewhat less than 64K-1 although it is hard to imagine | ||
147 | * so many threads being in the vfs at one time. | ||
148 | */ | ||
149 | static __u64 | ||
150 | cifs_get_next_mid(struct TCP_Server_Info *server) | ||
151 | { | ||
152 | __u64 mid = 0; | ||
153 | __u16 last_mid, cur_mid; | ||
154 | bool collision; | ||
155 | |||
156 | spin_lock(&GlobalMid_Lock); | ||
157 | |||
158 | /* mid is 16 bit only for CIFS/SMB */ | ||
159 | cur_mid = (__u16)((server->CurrentMid) & 0xffff); | ||
160 | /* we do not want to loop forever */ | ||
161 | last_mid = cur_mid; | ||
162 | cur_mid++; | ||
163 | |||
164 | /* | ||
165 | * This nested loop looks more expensive than it is. | ||
166 | * In practice the list of pending requests is short, | ||
167 | * fewer than 50, and the mids are likely to be unique | ||
168 | * on the first pass through the loop unless some request | ||
169 | * takes longer than the 64 thousand requests before it | ||
170 | * (and it would also have to have been a request that | ||
171 | * did not time out). | ||
172 | */ | ||
173 | while (cur_mid != last_mid) { | ||
174 | struct mid_q_entry *mid_entry; | ||
175 | unsigned int num_mids; | ||
176 | |||
177 | collision = false; | ||
178 | if (cur_mid == 0) | ||
179 | cur_mid++; | ||
180 | |||
181 | num_mids = 0; | ||
182 | list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { | ||
183 | ++num_mids; | ||
184 | if (mid_entry->mid == cur_mid && | ||
185 | mid_entry->mid_state == MID_REQUEST_SUBMITTED) { | ||
186 | /* This mid is in use, try a different one */ | ||
187 | collision = true; | ||
188 | break; | ||
189 | } | ||
190 | } | ||
191 | |||
192 | /* | ||
193 | * if we have more than 32k mids in the list, then something | ||
194 | * is very wrong. Possibly a local user is trying to DoS the | ||
195 | * box by issuing long-running calls and SIGKILL'ing them. If | ||
196 | * we get to 2^16 mids then we're in big trouble as this | ||
197 | * function could loop forever. | ||
198 | * | ||
199 | * Go ahead and assign out the mid in this situation, but force | ||
200 | * an eventual reconnect to clean out the pending_mid_q. | ||
201 | */ | ||
202 | if (num_mids > 32768) | ||
203 | server->tcpStatus = CifsNeedReconnect; | ||
204 | |||
205 | if (!collision) { | ||
206 | mid = (__u64)cur_mid; | ||
207 | server->CurrentMid = mid; | ||
208 | break; | ||
209 | } | ||
210 | cur_mid++; | ||
211 | } | ||
212 | spin_unlock(&GlobalMid_Lock); | ||
213 | return mid; | ||
214 | } | ||
215 | |||
128 | struct smb_version_operations smb1_operations = { | 216 | struct smb_version_operations smb1_operations = { |
129 | .send_cancel = send_nt_cancel, | 217 | .send_cancel = send_nt_cancel, |
130 | .compare_fids = cifs_compare_fids, | 218 | .compare_fids = cifs_compare_fids, |
@@ -133,6 +221,7 @@ struct smb_version_operations smb1_operations = { | |||
133 | .add_credits = cifs_add_credits, | 221 | .add_credits = cifs_add_credits, |
134 | .set_credits = cifs_set_credits, | 222 | .set_credits = cifs_set_credits, |
135 | .get_credits_field = cifs_get_credits_field, | 223 | .get_credits_field = cifs_get_credits_field, |
224 | .get_next_mid = cifs_get_next_mid, | ||
136 | .read_data_offset = cifs_read_data_offset, | 225 | .read_data_offset = cifs_read_data_offset, |
137 | .read_data_length = cifs_read_data_length, | 226 | .read_data_length = cifs_read_data_length, |
138 | .map_error = map_smb_to_linux_error, | 227 | .map_error = map_smb_to_linux_error, |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 1b36ffe6a47b..3097ee58fd7d 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -779,7 +779,7 @@ send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon, | |||
779 | 779 | ||
780 | pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES; | 780 | pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES; |
781 | pSMB->Timeout = 0; | 781 | pSMB->Timeout = 0; |
782 | pSMB->hdr.Mid = GetNextMid(ses->server); | 782 | pSMB->hdr.Mid = get_next_mid(ses->server); |
783 | 783 | ||
784 | return SendReceive(xid, ses, in_buf, out_buf, | 784 | return SendReceive(xid, ses, in_buf, out_buf, |
785 | &bytes_returned, 0); | 785 | &bytes_returned, 0); |
diff --git a/fs/dcache.c b/fs/dcache.c index 85c9e2bff8e6..40469044088d 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -683,6 +683,8 @@ EXPORT_SYMBOL(dget_parent); | |||
683 | /** | 683 | /** |
684 | * d_find_alias - grab a hashed alias of inode | 684 | * d_find_alias - grab a hashed alias of inode |
685 | * @inode: inode in question | 685 | * @inode: inode in question |
686 | * @want_discon: flag, used by d_splice_alias, to request | ||
687 | * that only a DISCONNECTED alias be returned. | ||
686 | * | 688 | * |
687 | * If inode has a hashed alias, or is a directory and has any alias, | 689 | * If inode has a hashed alias, or is a directory and has any alias, |
688 | * acquire the reference to alias and return it. Otherwise return NULL. | 690 | * acquire the reference to alias and return it. Otherwise return NULL. |
@@ -691,9 +693,10 @@ EXPORT_SYMBOL(dget_parent); | |||
691 | * of a filesystem. | 693 | * of a filesystem. |
692 | * | 694 | * |
693 | * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer | 695 | * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer |
694 | * any other hashed alias over that. | 696 | * any other hashed alias over that one unless @want_discon is set, |
697 | * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias. | ||
695 | */ | 698 | */ |
696 | static struct dentry *__d_find_alias(struct inode *inode) | 699 | static struct dentry *__d_find_alias(struct inode *inode, int want_discon) |
697 | { | 700 | { |
698 | struct dentry *alias, *discon_alias; | 701 | struct dentry *alias, *discon_alias; |
699 | 702 | ||
@@ -705,7 +708,7 @@ again: | |||
705 | if (IS_ROOT(alias) && | 708 | if (IS_ROOT(alias) && |
706 | (alias->d_flags & DCACHE_DISCONNECTED)) { | 709 | (alias->d_flags & DCACHE_DISCONNECTED)) { |
707 | discon_alias = alias; | 710 | discon_alias = alias; |
708 | } else { | 711 | } else if (!want_discon) { |
709 | __dget_dlock(alias); | 712 | __dget_dlock(alias); |
710 | spin_unlock(&alias->d_lock); | 713 | spin_unlock(&alias->d_lock); |
711 | return alias; | 714 | return alias; |
@@ -736,7 +739,7 @@ struct dentry *d_find_alias(struct inode *inode) | |||
736 | 739 | ||
737 | if (!list_empty(&inode->i_dentry)) { | 740 | if (!list_empty(&inode->i_dentry)) { |
738 | spin_lock(&inode->i_lock); | 741 | spin_lock(&inode->i_lock); |
739 | de = __d_find_alias(inode); | 742 | de = __d_find_alias(inode, 0); |
740 | spin_unlock(&inode->i_lock); | 743 | spin_unlock(&inode->i_lock); |
741 | } | 744 | } |
742 | return de; | 745 | return de; |
@@ -1647,8 +1650,9 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) | |||
1647 | 1650 | ||
1648 | if (inode && S_ISDIR(inode->i_mode)) { | 1651 | if (inode && S_ISDIR(inode->i_mode)) { |
1649 | spin_lock(&inode->i_lock); | 1652 | spin_lock(&inode->i_lock); |
1650 | new = __d_find_any_alias(inode); | 1653 | new = __d_find_alias(inode, 1); |
1651 | if (new) { | 1654 | if (new) { |
1655 | BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); | ||
1652 | spin_unlock(&inode->i_lock); | 1656 | spin_unlock(&inode->i_lock); |
1653 | security_d_instantiate(new, inode); | 1657 | security_d_instantiate(new, inode); |
1654 | d_move(new, dentry); | 1658 | d_move(new, dentry); |
@@ -2478,7 +2482,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) | |||
2478 | struct dentry *alias; | 2482 | struct dentry *alias; |
2479 | 2483 | ||
2480 | /* Does an aliased dentry already exist? */ | 2484 | /* Does an aliased dentry already exist? */ |
2481 | alias = __d_find_alias(inode); | 2485 | alias = __d_find_alias(inode, 0); |
2482 | if (alias) { | 2486 | if (alias) { |
2483 | actual = alias; | 2487 | actual = alias; |
2484 | write_seqlock(&rename_lock); | 2488 | write_seqlock(&rename_lock); |
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -819,10 +819,10 @@ static int exec_mmap(struct mm_struct *mm) | |||

819 | /* Notify parent that we're no longer interested in the old VM */ | 819 | /* Notify parent that we're no longer interested in the old VM */ |
820 | tsk = current; | 820 | tsk = current; |
821 | old_mm = current->mm; | 821 | old_mm = current->mm; |
822 | sync_mm_rss(old_mm); | ||
823 | mm_release(tsk, old_mm); | 822 | mm_release(tsk, old_mm); |
824 | 823 | ||
825 | if (old_mm) { | 824 | if (old_mm) { |
825 | sync_mm_rss(old_mm); | ||
826 | /* | 826 | /* |
827 | * Make sure that if there is a core dump in progress | 827 | * Make sure that if there is a core dump in progress |
828 | * for the old mm, we get out and die instead of going | 828 | * for the old mm, we get out and die instead of going |
diff --git a/fs/exofs/sys.c b/fs/exofs/sys.c index e32bc919e4e3..5a7b691e748b 100644 --- a/fs/exofs/sys.c +++ b/fs/exofs/sys.c | |||
@@ -109,7 +109,7 @@ static struct kobj_type odev_ktype = { | |||
109 | static struct kobj_type uuid_ktype = { | 109 | static struct kobj_type uuid_ktype = { |
110 | }; | 110 | }; |
111 | 111 | ||
112 | void exofs_sysfs_dbg_print() | 112 | void exofs_sysfs_dbg_print(void) |
113 | { | 113 | { |
114 | #ifdef CONFIG_EXOFS_DEBUG | 114 | #ifdef CONFIG_EXOFS_DEBUG |
115 | struct kobject *k_name, *k_tmp; | 115 | struct kobject *k_name, *k_tmp; |
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 99b6324290db..cee7812cc3cf 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c | |||
@@ -90,8 +90,8 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb, | |||
90 | * unusual file system layouts. | 90 | * unusual file system layouts. |
91 | */ | 91 | */ |
92 | if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) { | 92 | if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) { |
93 | block_cluster = EXT4_B2C(sbi, (start - | 93 | block_cluster = EXT4_B2C(sbi, |
94 | ext4_block_bitmap(sb, gdp))); | 94 | ext4_block_bitmap(sb, gdp) - start); |
95 | if (block_cluster < num_clusters) | 95 | if (block_cluster < num_clusters) |
96 | block_cluster = -1; | 96 | block_cluster = -1; |
97 | else if (block_cluster == num_clusters) { | 97 | else if (block_cluster == num_clusters) { |
@@ -102,7 +102,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb, | |||
102 | 102 | ||
103 | if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) { | 103 | if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) { |
104 | inode_cluster = EXT4_B2C(sbi, | 104 | inode_cluster = EXT4_B2C(sbi, |
105 | start - ext4_inode_bitmap(sb, gdp)); | 105 | ext4_inode_bitmap(sb, gdp) - start); |
106 | if (inode_cluster < num_clusters) | 106 | if (inode_cluster < num_clusters) |
107 | inode_cluster = -1; | 107 | inode_cluster = -1; |
108 | else if (inode_cluster == num_clusters) { | 108 | else if (inode_cluster == num_clusters) { |
@@ -114,7 +114,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb, | |||
114 | itbl_blk = ext4_inode_table(sb, gdp); | 114 | itbl_blk = ext4_inode_table(sb, gdp); |
115 | for (i = 0; i < sbi->s_itb_per_group; i++) { | 115 | for (i = 0; i < sbi->s_itb_per_group; i++) { |
116 | if (ext4_block_in_group(sb, itbl_blk + i, block_group)) { | 116 | if (ext4_block_in_group(sb, itbl_blk + i, block_group)) { |
117 | c = EXT4_B2C(sbi, start - itbl_blk + i); | 117 | c = EXT4_B2C(sbi, itbl_blk + i - start); |
118 | if ((c < num_clusters) || (c == inode_cluster) || | 118 | if ((c < num_clusters) || (c == inode_cluster) || |
119 | (c == block_cluster) || (c == itbl_cluster)) | 119 | (c == block_cluster) || (c == itbl_cluster)) |
120 | continue; | 120 | continue; |
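The three balloc.c fixes correct the same reversed subtraction: EXT4_B2C() converts a block offset into a cluster offset, and the offset that matters here is how far the bitmap or inode-table block sits past the group's first block start, so the argument must be block - start rather than start - block. Assuming the usual definition of the macro, the corrected computation reads:

	/* approximately: #define EXT4_B2C(sbi, blk)  ((blk) >> (sbi)->s_cluster_bits) */

	/* cluster index of the block bitmap within its block group */
	block_cluster = EXT4_B2C(sbi, ext4_block_bitmap(sb, gdp) - start);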
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 8ad112ae0ade..e34deac3f366 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c | |||
@@ -123,7 +123,6 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
123 | else | 123 | else |
124 | ext4_clear_inode_flag(inode, i); | 124 | ext4_clear_inode_flag(inode, i); |
125 | } | 125 | } |
126 | ei->i_flags = flags; | ||
127 | 126 | ||
128 | ext4_set_inode_flags(inode); | 127 | ext4_set_inode_flags(inode); |
129 | inode->i_ctime = ext4_current_time(inode); | 128 | inode->i_ctime = ext4_current_time(inode); |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 8d2fb8c88cf3..41a3ccff18d8 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -664,6 +664,7 @@ static long writeback_sb_inodes(struct super_block *sb, | |||
664 | /* Wait for I_SYNC. This function drops i_lock... */ | 664 | /* Wait for I_SYNC. This function drops i_lock... */ |
665 | inode_sleep_on_writeback(inode); | 665 | inode_sleep_on_writeback(inode); |
666 | /* Inode may be gone, start again */ | 666 | /* Inode may be gone, start again */ |
667 | spin_lock(&wb->list_lock); | ||
667 | continue; | 668 | continue; |
668 | } | 669 | } |
669 | inode->i_state |= I_SYNC; | 670 | inode->i_state |= I_SYNC; |
diff --git a/fs/fuse/control.c b/fs/fuse/control.c index 42593c587d48..03ff5b1eba93 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c | |||
@@ -75,19 +75,13 @@ static ssize_t fuse_conn_limit_write(struct file *file, const char __user *buf, | |||
75 | unsigned global_limit) | 75 | unsigned global_limit) |
76 | { | 76 | { |
77 | unsigned long t; | 77 | unsigned long t; |
78 | char tmp[32]; | ||
79 | unsigned limit = (1 << 16) - 1; | 78 | unsigned limit = (1 << 16) - 1; |
80 | int err; | 79 | int err; |
81 | 80 | ||
82 | if (*ppos || count >= sizeof(tmp) - 1) | 81 | if (*ppos) |
83 | return -EINVAL; | ||
84 | |||
85 | if (copy_from_user(tmp, buf, count)) | ||
86 | return -EINVAL; | 82 | return -EINVAL; |
87 | 83 | ||
88 | tmp[count] = '\0'; | 84 | err = kstrtoul_from_user(buf, count, 0, &t); |
89 | |||
90 | err = strict_strtoul(tmp, 0, &t); | ||
91 | if (err) | 85 | if (err) |
92 | return err; | 86 | return err; |
93 | 87 | ||
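The control.c cleanup replaces the open-coded bounce buffer and strict_strtoul() with kstrtoul_from_user(), which copies from user space and parses in one step. A minimal use of it looks like:

	unsigned long val;
	int err;

	/* buf and count come straight from the ->write() callback arguments */
	err = kstrtoul_from_user(buf, count, 0, &val);	/* base 0: decimal, octal, or hex */
	if (err)
		return err;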
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index df5ac048dc74..334e0b18a014 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -775,6 +775,8 @@ static int fuse_link(struct dentry *entry, struct inode *newdir, | |||
775 | static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr, | 775 | static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr, |
776 | struct kstat *stat) | 776 | struct kstat *stat) |
777 | { | 777 | { |
778 | unsigned int blkbits; | ||
779 | |||
778 | stat->dev = inode->i_sb->s_dev; | 780 | stat->dev = inode->i_sb->s_dev; |
779 | stat->ino = attr->ino; | 781 | stat->ino = attr->ino; |
780 | stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); | 782 | stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); |
@@ -790,7 +792,13 @@ static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr, | |||
790 | stat->ctime.tv_nsec = attr->ctimensec; | 792 | stat->ctime.tv_nsec = attr->ctimensec; |
791 | stat->size = attr->size; | 793 | stat->size = attr->size; |
792 | stat->blocks = attr->blocks; | 794 | stat->blocks = attr->blocks; |
793 | stat->blksize = (1 << inode->i_blkbits); | 795 | |
796 | if (attr->blksize != 0) | ||
797 | blkbits = ilog2(attr->blksize); | ||
798 | else | ||
799 | blkbits = inode->i_sb->s_blocksize_bits; | ||
800 | |||
801 | stat->blksize = 1 << blkbits; | ||
794 | } | 802 | } |
795 | 803 | ||
796 | static int fuse_do_getattr(struct inode *inode, struct kstat *stat, | 804 | static int fuse_do_getattr(struct inode *inode, struct kstat *stat, |
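Note on the fuse_fillattr() hunk above: st_blksize is no longer derived from inode->i_blkbits but from the block size reported by the userspace filesystem, falling back to the superblock block size when the report is 0. A small sketch of just that selection; pick_blkbits() is a hypothetical helper, not a fuse function:

#include <linux/log2.h>

/* prefer the filesystem-reported block size; fall back to the
 * superblock's block-size shift when the report is zero */
static inline unsigned int pick_blkbits(unsigned int attr_blksize,
					unsigned int sb_blocksize_bits)
{
	return attr_blksize ? ilog2(attr_blksize) : sb_blocksize_bits;
}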
@@ -863,6 +871,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat, | |||
863 | if (stat) { | 871 | if (stat) { |
864 | generic_fillattr(inode, stat); | 872 | generic_fillattr(inode, stat); |
865 | stat->mode = fi->orig_i_mode; | 873 | stat->mode = fi->orig_i_mode; |
874 | stat->ino = fi->orig_ino; | ||
866 | } | 875 | } |
867 | } | 876 | } |
868 | 877 | ||
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 9562109d3a87..b321a688cde7 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -2173,6 +2173,44 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, | |||
2173 | return ret; | 2173 | return ret; |
2174 | } | 2174 | } |
2175 | 2175 | ||
2176 | long fuse_file_fallocate(struct file *file, int mode, loff_t offset, | ||
2177 | loff_t length) | ||
2178 | { | ||
2179 | struct fuse_file *ff = file->private_data; | ||
2180 | struct fuse_conn *fc = ff->fc; | ||
2181 | struct fuse_req *req; | ||
2182 | struct fuse_fallocate_in inarg = { | ||
2183 | .fh = ff->fh, | ||
2184 | .offset = offset, | ||
2185 | .length = length, | ||
2186 | .mode = mode | ||
2187 | }; | ||
2188 | int err; | ||
2189 | |||
2190 | if (fc->no_fallocate) | ||
2191 | return -EOPNOTSUPP; | ||
2192 | |||
2193 | req = fuse_get_req(fc); | ||
2194 | if (IS_ERR(req)) | ||
2195 | return PTR_ERR(req); | ||
2196 | |||
2197 | req->in.h.opcode = FUSE_FALLOCATE; | ||
2198 | req->in.h.nodeid = ff->nodeid; | ||
2199 | req->in.numargs = 1; | ||
2200 | req->in.args[0].size = sizeof(inarg); | ||
2201 | req->in.args[0].value = &inarg; | ||
2202 | fuse_request_send(fc, req); | ||
2203 | err = req->out.h.error; | ||
2204 | if (err == -ENOSYS) { | ||
2205 | fc->no_fallocate = 1; | ||
2206 | err = -EOPNOTSUPP; | ||
2207 | } | ||
2208 | fuse_put_request(fc, req); | ||
2209 | |||
2210 | return err; | ||
2211 | } | ||
2212 | EXPORT_SYMBOL_GPL(fuse_file_fallocate); | ||
2213 | |||
2176 | static const struct file_operations fuse_file_operations = { | 2214 | static const struct file_operations fuse_file_operations = { |
2177 | .llseek = fuse_file_llseek, | 2215 | .llseek = fuse_file_llseek, |
2178 | .read = do_sync_read, | 2216 | .read = do_sync_read, |
@@ -2190,6 +2228,7 @@ static const struct file_operations fuse_file_operations = { | |||
2190 | .unlocked_ioctl = fuse_file_ioctl, | 2228 | .unlocked_ioctl = fuse_file_ioctl, |
2191 | .compat_ioctl = fuse_file_compat_ioctl, | 2229 | .compat_ioctl = fuse_file_compat_ioctl, |
2192 | .poll = fuse_file_poll, | 2230 | .poll = fuse_file_poll, |
2231 | .fallocate = fuse_file_fallocate, | ||
2193 | }; | 2232 | }; |
2194 | 2233 | ||
2195 | static const struct file_operations fuse_direct_io_file_operations = { | 2234 | static const struct file_operations fuse_direct_io_file_operations = { |
@@ -2206,6 +2245,7 @@ static const struct file_operations fuse_direct_io_file_operations = { | |||
2206 | .unlocked_ioctl = fuse_file_ioctl, | 2245 | .unlocked_ioctl = fuse_file_ioctl, |
2207 | .compat_ioctl = fuse_file_compat_ioctl, | 2246 | .compat_ioctl = fuse_file_compat_ioctl, |
2208 | .poll = fuse_file_poll, | 2247 | .poll = fuse_file_poll, |
2248 | .fallocate = fuse_file_fallocate, | ||
2209 | /* no splice_read */ | 2249 | /* no splice_read */ |
2210 | }; | 2250 | }; |
2211 | 2251 | ||
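Note on the fuse/file.c changes above: with fuse_file_fallocate() wired into both file_operations tables, fallocate() on a FUSE mount is forwarded to the userspace filesystem, and if that filesystem has never implemented FUSE_FALLOCATE the connection caches the fact (no_fallocate) and callers see EOPNOTSUPP. A plain user-space sketch of how a caller observes this; the file name and sizes are arbitrary:

#define _GNU_SOURCE
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "testfile";
	int fd = open(path, O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* try to preallocate 1 MiB starting at offset 0 */
	if (fallocate(fd, 0, 0, 1 << 20) < 0) {
		if (errno == EOPNOTSUPP)
			fprintf(stderr, "fallocate not supported here\n");
		else
			perror("fallocate");
	}
	close(fd);
	return 0;
}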
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 572cefc78012..771fb6322c07 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h | |||
@@ -82,6 +82,9 @@ struct fuse_inode { | |||
82 | preserve the original mode */ | 82 | preserve the original mode */ |
83 | umode_t orig_i_mode; | 83 | umode_t orig_i_mode; |
84 | 84 | ||
85 | /** 64 bit inode number */ | ||
86 | u64 orig_ino; | ||
87 | |||
85 | /** Version of last attribute change */ | 88 | /** Version of last attribute change */ |
86 | u64 attr_version; | 89 | u64 attr_version; |
87 | 90 | ||
@@ -478,6 +481,9 @@ struct fuse_conn { | |||
478 | /** Are BSD file locking primitives not implemented by fs? */ | 481 | /** Are BSD file locking primitives not implemented by fs? */ |
479 | unsigned no_flock:1; | 482 | unsigned no_flock:1; |
480 | 483 | ||
484 | /** Is fallocate not implemented by fs? */ | ||
485 | unsigned no_fallocate:1; | ||
486 | |||
481 | /** The number of requests waiting for completion */ | 487 | /** The number of requests waiting for completion */ |
482 | atomic_t num_waiting; | 488 | atomic_t num_waiting; |
483 | 489 | ||
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 42678a33b7bb..1cd61652018c 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -91,6 +91,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb) | |||
91 | fi->nlookup = 0; | 91 | fi->nlookup = 0; |
92 | fi->attr_version = 0; | 92 | fi->attr_version = 0; |
93 | fi->writectr = 0; | 93 | fi->writectr = 0; |
94 | fi->orig_ino = 0; | ||
94 | INIT_LIST_HEAD(&fi->write_files); | 95 | INIT_LIST_HEAD(&fi->write_files); |
95 | INIT_LIST_HEAD(&fi->queued_writes); | 96 | INIT_LIST_HEAD(&fi->queued_writes); |
96 | INIT_LIST_HEAD(&fi->writepages); | 97 | INIT_LIST_HEAD(&fi->writepages); |
@@ -139,6 +140,18 @@ static int fuse_remount_fs(struct super_block *sb, int *flags, char *data) | |||
139 | return 0; | 140 | return 0; |
140 | } | 141 | } |
141 | 142 | ||
143 | /* | ||
144 | * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down | ||
145 | * so that it will fit. | ||
146 | */ | ||
147 | static ino_t fuse_squash_ino(u64 ino64) | ||
148 | { | ||
149 | ino_t ino = (ino_t) ino64; | ||
150 | if (sizeof(ino_t) < sizeof(u64)) | ||
151 | ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8; | ||
152 | return ino; | ||
153 | } | ||
154 | |||
142 | void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, | 155 | void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, |
143 | u64 attr_valid) | 156 | u64 attr_valid) |
144 | { | 157 | { |
@@ -148,7 +161,7 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, | |||
148 | fi->attr_version = ++fc->attr_version; | 161 | fi->attr_version = ++fc->attr_version; |
149 | fi->i_time = attr_valid; | 162 | fi->i_time = attr_valid; |
150 | 163 | ||
151 | inode->i_ino = attr->ino; | 164 | inode->i_ino = fuse_squash_ino(attr->ino); |
152 | inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); | 165 | inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); |
153 | set_nlink(inode, attr->nlink); | 166 | set_nlink(inode, attr->nlink); |
154 | inode->i_uid = attr->uid; | 167 | inode->i_uid = attr->uid; |
@@ -174,6 +187,8 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, | |||
174 | fi->orig_i_mode = inode->i_mode; | 187 | fi->orig_i_mode = inode->i_mode; |
175 | if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS)) | 188 | if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS)) |
176 | inode->i_mode &= ~S_ISVTX; | 189 | inode->i_mode &= ~S_ISVTX; |
190 | |||
191 | fi->orig_ino = attr->ino; | ||
177 | } | 192 | } |
178 | 193 | ||
179 | void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, | 194 | void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, |
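Note on fuse_squash_ino() above: when ino_t is only 32 bits, the upper half of the server's 64-bit inode number is XOR-folded into the lower half rather than silently truncated, while fi->orig_ino keeps the full value for getattr. A standalone user-space re-implementation of the same fold, purely to show the arithmetic (not kernel code):

#include <stdio.h>
#include <stdint.h>

static uint32_t squash_ino(uint64_t ino64)
{
	uint32_t ino = (uint32_t)ino64;

	/* fold the high 32 bits into the low 32 bits */
	if (sizeof(uint32_t) < sizeof(uint64_t))
		ino ^= ino64 >> (sizeof(uint64_t) - sizeof(uint32_t)) * 8;
	return ino;
}

int main(void)
{
	uint64_t ino64 = 0x123456789abcdef0ULL;

	printf("64-bit ino %#llx squashes to %#x\n",
	       (unsigned long long)ino64, squash_ino(ino64));
	return 0;
}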
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c index c640ba57074b..09addc8615fa 100644 --- a/fs/hfsplus/ioctl.c +++ b/fs/hfsplus/ioctl.c | |||
@@ -31,6 +31,7 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags) | |||
31 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); | 31 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); |
32 | struct hfsplus_vh *vh = sbi->s_vhdr; | 32 | struct hfsplus_vh *vh = sbi->s_vhdr; |
33 | struct hfsplus_vh *bvh = sbi->s_backup_vhdr; | 33 | struct hfsplus_vh *bvh = sbi->s_backup_vhdr; |
34 | u32 cnid = (unsigned long)dentry->d_fsdata; | ||
34 | 35 | ||
35 | if (!capable(CAP_SYS_ADMIN)) | 36 | if (!capable(CAP_SYS_ADMIN)) |
36 | return -EPERM; | 37 | return -EPERM; |
@@ -41,8 +42,12 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags) | |||
41 | vh->finder_info[0] = bvh->finder_info[0] = | 42 | vh->finder_info[0] = bvh->finder_info[0] = |
42 | cpu_to_be32(parent_ino(dentry)); | 43 | cpu_to_be32(parent_ino(dentry)); |
43 | 44 | ||
44 | /* Bootloader */ | 45 | /* |
45 | vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(inode->i_ino); | 46 | * Bootloader. Just using the inode here breaks in the case of |
47 | * hard links - the firmware wants the ID of the hard link file, | ||
48 | * but the inode points at the indirect inode | ||
49 | */ | ||
50 | vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(cnid); | ||
46 | 51 | ||
47 | /* Per spec, the OS X system folder - same as finder_info[0] here */ | 52 | /* Per spec, the OS X system folder - same as finder_info[0] here */ |
48 | vh->finder_info[5] = bvh->finder_info[5] = | 53 | vh->finder_info[5] = bvh->finder_info[5] = |
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 7daf4b852d1c..90effcccca9a 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c | |||
@@ -56,7 +56,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector, | |||
56 | DECLARE_COMPLETION_ONSTACK(wait); | 56 | DECLARE_COMPLETION_ONSTACK(wait); |
57 | struct bio *bio; | 57 | struct bio *bio; |
58 | int ret = 0; | 58 | int ret = 0; |
59 | unsigned int io_size; | 59 | u64 io_size; |
60 | loff_t start; | 60 | loff_t start; |
61 | int offset; | 61 | int offset; |
62 | 62 | ||
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 970659daa323..23ff18fe080a 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/kthread.h> | 17 | #include <linux/kthread.h> |
18 | #include <linux/sunrpc/svcauth_gss.h> | 18 | #include <linux/sunrpc/svcauth_gss.h> |
19 | #include <linux/sunrpc/bc_xprt.h> | 19 | #include <linux/sunrpc/bc_xprt.h> |
20 | #include <linux/nsproxy.h> | ||
21 | 20 | ||
22 | #include <net/inet_sock.h> | 21 | #include <net/inet_sock.h> |
23 | 22 | ||
@@ -107,7 +106,7 @@ nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) | |||
107 | { | 106 | { |
108 | int ret; | 107 | int ret; |
109 | 108 | ||
110 | ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET, | 109 | ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET, |
111 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); | 110 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); |
112 | if (ret <= 0) | 111 | if (ret <= 0) |
113 | goto out_err; | 112 | goto out_err; |
@@ -115,7 +114,7 @@ nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) | |||
115 | dprintk("NFS: Callback listener port = %u (af %u)\n", | 114 | dprintk("NFS: Callback listener port = %u (af %u)\n", |
116 | nfs_callback_tcpport, PF_INET); | 115 | nfs_callback_tcpport, PF_INET); |
117 | 116 | ||
118 | ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET6, | 117 | ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET6, |
119 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); | 118 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); |
120 | if (ret > 0) { | 119 | if (ret > 0) { |
121 | nfs_callback_tcpport6 = ret; | 120 | nfs_callback_tcpport6 = ret; |
@@ -184,7 +183,7 @@ nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) | |||
184 | * fore channel connection. | 183 | * fore channel connection. |
185 | * Returns the input port (0) and sets the svc_serv bc_xprt on success | 184 | * Returns the input port (0) and sets the svc_serv bc_xprt on success |
186 | */ | 185 | */ |
187 | ret = svc_create_xprt(serv, "tcp-bc", xprt->xprt_net, PF_INET, 0, | 186 | ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0, |
188 | SVC_SOCK_ANONYMOUS); | 187 | SVC_SOCK_ANONYMOUS); |
189 | if (ret < 0) { | 188 | if (ret < 0) { |
190 | rqstp = ERR_PTR(ret); | 189 | rqstp = ERR_PTR(ret); |
@@ -254,7 +253,7 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt) | |||
254 | char svc_name[12]; | 253 | char svc_name[12]; |
255 | int ret = 0; | 254 | int ret = 0; |
256 | int minorversion_setup; | 255 | int minorversion_setup; |
257 | struct net *net = current->nsproxy->net_ns; | 256 | struct net *net = &init_net; |
258 | 257 | ||
259 | mutex_lock(&nfs_callback_mutex); | 258 | mutex_lock(&nfs_callback_mutex); |
260 | if (cb_info->users++ || cb_info->task != NULL) { | 259 | if (cb_info->users++ || cb_info->task != NULL) { |
@@ -330,7 +329,7 @@ void nfs_callback_down(int minorversion) | |||
330 | cb_info->users--; | 329 | cb_info->users--; |
331 | if (cb_info->users == 0 && cb_info->task != NULL) { | 330 | if (cb_info->users == 0 && cb_info->task != NULL) { |
332 | kthread_stop(cb_info->task); | 331 | kthread_stop(cb_info->task); |
333 | svc_shutdown_net(cb_info->serv, current->nsproxy->net_ns); | 332 | svc_shutdown_net(cb_info->serv, &init_net); |
334 | svc_exit_thread(cb_info->rqst); | 333 | svc_exit_thread(cb_info->rqst); |
335 | cb_info->serv = NULL; | 334 | cb_info->serv = NULL; |
336 | cb_info->rqst = NULL; | 335 | cb_info->rqst = NULL; |
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 95bfc243992c..e64b01d2a338 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c | |||
@@ -455,9 +455,9 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp, | |||
455 | args->csa_nrclists = ntohl(*p++); | 455 | args->csa_nrclists = ntohl(*p++); |
456 | args->csa_rclists = NULL; | 456 | args->csa_rclists = NULL; |
457 | if (args->csa_nrclists) { | 457 | if (args->csa_nrclists) { |
458 | args->csa_rclists = kmalloc(args->csa_nrclists * | 458 | args->csa_rclists = kmalloc_array(args->csa_nrclists, |
459 | sizeof(*args->csa_rclists), | 459 | sizeof(*args->csa_rclists), |
460 | GFP_KERNEL); | 460 | GFP_KERNEL); |
461 | if (unlikely(args->csa_rclists == NULL)) | 461 | if (unlikely(args->csa_rclists == NULL)) |
462 | goto out; | 462 | goto out; |
463 | 463 | ||
@@ -696,7 +696,7 @@ static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp, | |||
696 | const struct cb_sequenceres *res) | 696 | const struct cb_sequenceres *res) |
697 | { | 697 | { |
698 | __be32 *p; | 698 | __be32 *p; |
699 | unsigned status = res->csr_status; | 699 | __be32 status = res->csr_status; |
700 | 700 | ||
701 | if (unlikely(status != 0)) | 701 | if (unlikely(status != 0)) |
702 | goto out; | 702 | goto out; |
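Note on the decode_cb_sequence_args() hunk in this file: the csa_rclists allocation moves from an open-coded kmalloc(n * size) to kmalloc_array(), which fails cleanly instead of wrapping when a large element count arrives from the wire. A hedged kernel-style sketch of the pattern; struct example_item and alloc_items() are hypothetical:

#include <linux/types.h>
#include <linux/slab.h>

struct example_item {
	u64 a;
	u64 b;
};

/* returns NULL if n * sizeof(struct example_item) would overflow,
 * instead of silently allocating a too-short buffer */
static struct example_item *alloc_items(size_t n)
{
	return kmalloc_array(n, sizeof(struct example_item), GFP_KERNEL);
}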
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 7d108753af81..17ba6b995659 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
@@ -544,8 +544,6 @@ nfs_found_client(const struct nfs_client_initdata *cl_init, | |||
544 | 544 | ||
545 | smp_rmb(); | 545 | smp_rmb(); |
546 | 546 | ||
547 | BUG_ON(clp->cl_cons_state != NFS_CS_READY); | ||
548 | |||
549 | dprintk("<-- %s found nfs_client %p for %s\n", | 547 | dprintk("<-- %s found nfs_client %p for %s\n", |
550 | __func__, clp, cl_init->hostname ?: ""); | 548 | __func__, clp, cl_init->hostname ?: ""); |
551 | return clp; | 549 | return clp; |
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index ad2775d3e219..3168f6e3d4d4 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c | |||
@@ -523,9 +523,9 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data) | |||
523 | nfs_list_remove_request(req); | 523 | nfs_list_remove_request(req); |
524 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) { | 524 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) { |
525 | /* Note the rewrite will go through mds */ | 525 | /* Note the rewrite will go through mds */ |
526 | kref_get(&req->wb_kref); | ||
527 | nfs_mark_request_commit(req, NULL, &cinfo); | 526 | nfs_mark_request_commit(req, NULL, &cinfo); |
528 | } | 527 | } else |
528 | nfs_release_request(req); | ||
529 | nfs_unlock_and_release_request(req); | 529 | nfs_unlock_and_release_request(req); |
530 | } | 530 | } |
531 | 531 | ||
@@ -716,12 +716,12 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) | |||
716 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) | 716 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) |
717 | bit = NFS_IOHDR_NEED_RESCHED; | 717 | bit = NFS_IOHDR_NEED_RESCHED; |
718 | else if (dreq->flags == 0) { | 718 | else if (dreq->flags == 0) { |
719 | memcpy(&dreq->verf, &req->wb_verf, | 719 | memcpy(&dreq->verf, hdr->verf, |
720 | sizeof(dreq->verf)); | 720 | sizeof(dreq->verf)); |
721 | bit = NFS_IOHDR_NEED_COMMIT; | 721 | bit = NFS_IOHDR_NEED_COMMIT; |
722 | dreq->flags = NFS_ODIRECT_DO_COMMIT; | 722 | dreq->flags = NFS_ODIRECT_DO_COMMIT; |
723 | } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) { | 723 | } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) { |
724 | if (memcmp(&dreq->verf, &req->wb_verf, sizeof(dreq->verf))) { | 724 | if (memcmp(&dreq->verf, hdr->verf, sizeof(dreq->verf))) { |
725 | dreq->flags = NFS_ODIRECT_RESCHED_WRITES; | 725 | dreq->flags = NFS_ODIRECT_RESCHED_WRITES; |
726 | bit = NFS_IOHDR_NEED_RESCHED; | 726 | bit = NFS_IOHDR_NEED_RESCHED; |
727 | } else | 727 | } else |
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index c6827f93ab57..cc5900ac61b5 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h | |||
@@ -295,7 +295,7 @@ is_ds_client(struct nfs_client *clp) | |||
295 | 295 | ||
296 | extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[]; | 296 | extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[]; |
297 | 297 | ||
298 | extern const u32 nfs4_fattr_bitmap[2]; | 298 | extern const u32 nfs4_fattr_bitmap[3]; |
299 | extern const u32 nfs4_statfs_bitmap[2]; | 299 | extern const u32 nfs4_statfs_bitmap[2]; |
300 | extern const u32 nfs4_pathconf_bitmap[2]; | 300 | extern const u32 nfs4_pathconf_bitmap[2]; |
301 | extern const u32 nfs4_fsinfo_bitmap[3]; | 301 | extern const u32 nfs4_fsinfo_bitmap[3]; |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index d48dbefa0e71..15fc7e4664ed 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -105,6 +105,8 @@ static int nfs4_map_errors(int err) | |||
105 | return -EINVAL; | 105 | return -EINVAL; |
106 | case -NFS4ERR_SHARE_DENIED: | 106 | case -NFS4ERR_SHARE_DENIED: |
107 | return -EACCES; | 107 | return -EACCES; |
108 | case -NFS4ERR_MINOR_VERS_MISMATCH: | ||
109 | return -EPROTONOSUPPORT; | ||
108 | default: | 110 | default: |
109 | dprintk("%s could not handle NFSv4 error %d\n", | 111 | dprintk("%s could not handle NFSv4 error %d\n", |
110 | __func__, -err); | 112 | __func__, -err); |
@@ -116,7 +118,7 @@ static int nfs4_map_errors(int err) | |||
116 | /* | 118 | /* |
117 | * This is our standard bitmap for GETATTR requests. | 119 | * This is our standard bitmap for GETATTR requests. |
118 | */ | 120 | */ |
119 | const u32 nfs4_fattr_bitmap[2] = { | 121 | const u32 nfs4_fattr_bitmap[3] = { |
120 | FATTR4_WORD0_TYPE | 122 | FATTR4_WORD0_TYPE |
121 | | FATTR4_WORD0_CHANGE | 123 | | FATTR4_WORD0_CHANGE |
122 | | FATTR4_WORD0_SIZE | 124 | | FATTR4_WORD0_SIZE |
@@ -133,6 +135,24 @@ const u32 nfs4_fattr_bitmap[2] = { | |||
133 | | FATTR4_WORD1_TIME_MODIFY | 135 | | FATTR4_WORD1_TIME_MODIFY |
134 | }; | 136 | }; |
135 | 137 | ||
138 | static const u32 nfs4_pnfs_open_bitmap[3] = { | ||
139 | FATTR4_WORD0_TYPE | ||
140 | | FATTR4_WORD0_CHANGE | ||
141 | | FATTR4_WORD0_SIZE | ||
142 | | FATTR4_WORD0_FSID | ||
143 | | FATTR4_WORD0_FILEID, | ||
144 | FATTR4_WORD1_MODE | ||
145 | | FATTR4_WORD1_NUMLINKS | ||
146 | | FATTR4_WORD1_OWNER | ||
147 | | FATTR4_WORD1_OWNER_GROUP | ||
148 | | FATTR4_WORD1_RAWDEV | ||
149 | | FATTR4_WORD1_SPACE_USED | ||
150 | | FATTR4_WORD1_TIME_ACCESS | ||
151 | | FATTR4_WORD1_TIME_METADATA | ||
152 | | FATTR4_WORD1_TIME_MODIFY, | ||
153 | FATTR4_WORD2_MDSTHRESHOLD | ||
154 | }; | ||
155 | |||
136 | const u32 nfs4_statfs_bitmap[2] = { | 156 | const u32 nfs4_statfs_bitmap[2] = { |
137 | FATTR4_WORD0_FILES_AVAIL | 157 | FATTR4_WORD0_FILES_AVAIL |
138 | | FATTR4_WORD0_FILES_FREE | 158 | | FATTR4_WORD0_FILES_FREE |
@@ -844,6 +864,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, | |||
844 | p->o_arg.name = &dentry->d_name; | 864 | p->o_arg.name = &dentry->d_name; |
845 | p->o_arg.server = server; | 865 | p->o_arg.server = server; |
846 | p->o_arg.bitmask = server->attr_bitmask; | 866 | p->o_arg.bitmask = server->attr_bitmask; |
867 | p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; | ||
847 | p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; | 868 | p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; |
848 | if (attrs != NULL && attrs->ia_valid != 0) { | 869 | if (attrs != NULL && attrs->ia_valid != 0) { |
849 | __be32 verf[2]; | 870 | __be32 verf[2]; |
@@ -1820,6 +1841,7 @@ static int _nfs4_do_open(struct inode *dir, | |||
1820 | opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); | 1841 | opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); |
1821 | if (!opendata->f_attr.mdsthreshold) | 1842 | if (!opendata->f_attr.mdsthreshold) |
1822 | goto err_opendata_put; | 1843 | goto err_opendata_put; |
1844 | opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; | ||
1823 | } | 1845 | } |
1824 | if (dentry->d_inode != NULL) | 1846 | if (dentry->d_inode != NULL) |
1825 | opendata->state = nfs4_get_open_state(dentry->d_inode, sp); | 1847 | opendata->state = nfs4_get_open_state(dentry->d_inode, sp); |
@@ -1880,6 +1902,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, | |||
1880 | struct nfs4_state *res; | 1902 | struct nfs4_state *res; |
1881 | int status; | 1903 | int status; |
1882 | 1904 | ||
1905 | fmode &= FMODE_READ|FMODE_WRITE; | ||
1883 | do { | 1906 | do { |
1884 | status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, | 1907 | status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, |
1885 | &res, ctx_th); | 1908 | &res, ctx_th); |
@@ -2526,6 +2549,14 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, | |||
2526 | 2549 | ||
2527 | nfs_fattr_init(fattr); | 2550 | nfs_fattr_init(fattr); |
2528 | 2551 | ||
2552 | /* Deal with open(O_TRUNC) */ | ||
2553 | if (sattr->ia_valid & ATTR_OPEN) | ||
2554 | sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); | ||
2555 | |||
2556 | /* Optimization: if the end result is no change, don't RPC */ | ||
2557 | if ((sattr->ia_valid & ~(ATTR_FILE)) == 0) | ||
2558 | return 0; | ||
2559 | |||
2529 | /* Search for an existing open(O_WRITE) file */ | 2560 | /* Search for an existing open(O_WRITE) file */ |
2530 | if (sattr->ia_valid & ATTR_FILE) { | 2561 | if (sattr->ia_valid & ATTR_FILE) { |
2531 | struct nfs_open_context *ctx; | 2562 | struct nfs_open_context *ctx; |
@@ -2537,10 +2568,6 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, | |||
2537 | } | 2568 | } |
2538 | } | 2569 | } |
2539 | 2570 | ||
2540 | /* Deal with open(O_TRUNC) */ | ||
2541 | if (sattr->ia_valid & ATTR_OPEN) | ||
2542 | sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); | ||
2543 | |||
2544 | status = nfs4_do_setattr(inode, cred, fattr, sattr, state); | 2571 | status = nfs4_do_setattr(inode, cred, fattr, sattr, state); |
2545 | if (status == 0) | 2572 | if (status == 0) |
2546 | nfs_setattr_update_inode(inode, sattr); | 2573 | nfs_setattr_update_inode(inode, sattr); |
@@ -5275,7 +5302,7 @@ static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, | |||
5275 | 5302 | ||
5276 | status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); | 5303 | status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); |
5277 | if (status) | 5304 | if (status) |
5278 | pr_warn("NFS: Got error %d from the server %s on " | 5305 | dprintk("NFS: Got error %d from the server %s on " |
5279 | "DESTROY_CLIENTID.", status, clp->cl_hostname); | 5306 | "DESTROY_CLIENTID.", status, clp->cl_hostname); |
5280 | return status; | 5307 | return status; |
5281 | } | 5308 | } |
@@ -5746,8 +5773,7 @@ int nfs4_proc_destroy_session(struct nfs4_session *session, | |||
5746 | status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); | 5773 | status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); |
5747 | 5774 | ||
5748 | if (status) | 5775 | if (status) |
5749 | printk(KERN_WARNING | 5776 | dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " |
5750 | "NFS: Got error %d from the server on DESTROY_SESSION. " | ||
5751 | "Session has been destroyed regardless...\n", status); | 5777 | "Session has been destroyed regardless...\n", status); |
5752 | 5778 | ||
5753 | dprintk("<-- nfs4_proc_destroy_session\n"); | 5779 | dprintk("<-- nfs4_proc_destroy_session\n"); |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index c679b9ecef63..f38300e9f171 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -244,6 +244,16 @@ static int nfs4_begin_drain_session(struct nfs_client *clp) | |||
244 | return nfs4_wait_on_slot_tbl(&ses->fc_slot_table); | 244 | return nfs4_wait_on_slot_tbl(&ses->fc_slot_table); |
245 | } | 245 | } |
246 | 246 | ||
247 | static void nfs41_finish_session_reset(struct nfs_client *clp) | ||
248 | { | ||
249 | clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); | ||
250 | clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); | ||
251 | /* create_session negotiated new slot table */ | ||
252 | clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); | ||
253 | clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); | ||
254 | nfs41_setup_state_renewal(clp); | ||
255 | } | ||
256 | |||
247 | int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) | 257 | int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) |
248 | { | 258 | { |
249 | int status; | 259 | int status; |
@@ -259,8 +269,7 @@ do_confirm: | |||
259 | status = nfs4_proc_create_session(clp, cred); | 269 | status = nfs4_proc_create_session(clp, cred); |
260 | if (status != 0) | 270 | if (status != 0) |
261 | goto out; | 271 | goto out; |
262 | clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); | 272 | nfs41_finish_session_reset(clp); |
263 | nfs41_setup_state_renewal(clp); | ||
264 | nfs_mark_client_ready(clp, NFS_CS_READY); | 273 | nfs_mark_client_ready(clp, NFS_CS_READY); |
265 | out: | 274 | out: |
266 | return status; | 275 | return status; |
@@ -1772,16 +1781,9 @@ static int nfs4_reset_session(struct nfs_client *clp) | |||
1772 | status = nfs4_handle_reclaim_lease_error(clp, status); | 1781 | status = nfs4_handle_reclaim_lease_error(clp, status); |
1773 | goto out; | 1782 | goto out; |
1774 | } | 1783 | } |
1775 | clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); | 1784 | nfs41_finish_session_reset(clp); |
1776 | /* create_session negotiated new slot table */ | ||
1777 | clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); | ||
1778 | clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); | ||
1779 | dprintk("%s: session reset was successful for server %s!\n", | 1785 | dprintk("%s: session reset was successful for server %s!\n", |
1780 | __func__, clp->cl_hostname); | 1786 | __func__, clp->cl_hostname); |
1781 | |||
1782 | /* Let the state manager reestablish state */ | ||
1783 | if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) | ||
1784 | nfs41_setup_state_renewal(clp); | ||
1785 | out: | 1787 | out: |
1786 | if (cred) | 1788 | if (cred) |
1787 | put_rpccred(cred); | 1789 | put_rpccred(cred); |
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index ee4a74db95d0..18fae29b0301 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -1198,12 +1198,13 @@ static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct c | |||
1198 | } | 1198 | } |
1199 | 1199 | ||
1200 | static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask, | 1200 | static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask, |
1201 | const u32 *open_bitmap, | ||
1201 | struct compound_hdr *hdr) | 1202 | struct compound_hdr *hdr) |
1202 | { | 1203 | { |
1203 | encode_getattr_three(xdr, | 1204 | encode_getattr_three(xdr, |
1204 | bitmask[0] & nfs4_fattr_bitmap[0], | 1205 | bitmask[0] & open_bitmap[0], |
1205 | bitmask[1] & nfs4_fattr_bitmap[1], | 1206 | bitmask[1] & open_bitmap[1], |
1206 | bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD, | 1207 | bitmask[2] & open_bitmap[2], |
1207 | hdr); | 1208 | hdr); |
1208 | } | 1209 | } |
1209 | 1210 | ||
@@ -2221,7 +2222,7 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr, | |||
2221 | encode_putfh(xdr, args->fh, &hdr); | 2222 | encode_putfh(xdr, args->fh, &hdr); |
2222 | encode_open(xdr, args, &hdr); | 2223 | encode_open(xdr, args, &hdr); |
2223 | encode_getfh(xdr, &hdr); | 2224 | encode_getfh(xdr, &hdr); |
2224 | encode_getfattr_open(xdr, args->bitmask, &hdr); | 2225 | encode_getfattr_open(xdr, args->bitmask, args->open_bitmap, &hdr); |
2225 | encode_nops(&hdr); | 2226 | encode_nops(&hdr); |
2226 | } | 2227 | } |
2227 | 2228 | ||
@@ -4359,7 +4360,10 @@ static int decode_attr_mdsthreshold(struct xdr_stream *xdr, | |||
4359 | 4360 | ||
4360 | if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U))) | 4361 | if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U))) |
4361 | return -EIO; | 4362 | return -EIO; |
4362 | if (likely(bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD)) { | 4363 | if (bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD) { |
4364 | /* Did the server return an unrequested attribute? */ | ||
4365 | if (unlikely(res == NULL)) | ||
4366 | return -EREMOTEIO; | ||
4363 | p = xdr_inline_decode(xdr, 4); | 4367 | p = xdr_inline_decode(xdr, 4); |
4364 | if (unlikely(!p)) | 4368 | if (unlikely(!p)) |
4365 | goto out_overflow; | 4369 | goto out_overflow; |
@@ -4372,6 +4376,7 @@ static int decode_attr_mdsthreshold(struct xdr_stream *xdr, | |||
4372 | __func__); | 4376 | __func__); |
4373 | 4377 | ||
4374 | status = decode_first_threshold_item4(xdr, res); | 4378 | status = decode_first_threshold_item4(xdr, res); |
4379 | bitmap[2] &= ~FATTR4_WORD2_MDSTHRESHOLD; | ||
4375 | } | 4380 | } |
4376 | return status; | 4381 | return status; |
4377 | out_overflow: | 4382 | out_overflow: |
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 29fd23c0efdc..64f90d845f6a 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h | |||
@@ -365,7 +365,7 @@ static inline bool | |||
365 | pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src, | 365 | pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src, |
366 | struct nfs_server *nfss) | 366 | struct nfs_server *nfss) |
367 | { | 367 | { |
368 | return (dst && src && src->bm != 0 && | 368 | return (dst && src && src->bm != 0 && nfss->pnfs_curr_ld && |
369 | nfss->pnfs_curr_ld->id == src->l_type); | 369 | nfss->pnfs_curr_ld->id == src->l_type); |
370 | } | 370 | } |
371 | 371 | ||
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index a706b6bcc286..617c7419a08e 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c | |||
@@ -651,7 +651,7 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data) | |||
651 | /* Emulate the eof flag, which isn't normally needed in NFSv2 | 651 | /* Emulate the eof flag, which isn't normally needed in NFSv2 |
652 | * as it is guaranteed to always return the file attributes | 652 | * as it is guaranteed to always return the file attributes |
653 | */ | 653 | */ |
654 | if (data->args.offset + data->args.count >= data->res.fattr->size) | 654 | if (data->args.offset + data->res.count >= data->res.fattr->size) |
655 | data->res.eof = 1; | 655 | data->res.eof = 1; |
656 | } | 656 | } |
657 | return 0; | 657 | return 0; |
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index ff656c022684..906f09c7d842 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -1867,6 +1867,7 @@ static int nfs23_validate_mount_data(void *options, | |||
1867 | if (data == NULL) | 1867 | if (data == NULL) |
1868 | goto out_no_data; | 1868 | goto out_no_data; |
1869 | 1869 | ||
1870 | args->version = NFS_DEFAULT_VERSION; | ||
1870 | switch (data->version) { | 1871 | switch (data->version) { |
1871 | case 1: | 1872 | case 1: |
1872 | data->namlen = 0; | 1873 | data->namlen = 0; |
@@ -2637,6 +2638,8 @@ static int nfs4_validate_mount_data(void *options, | |||
2637 | if (data == NULL) | 2638 | if (data == NULL) |
2638 | goto out_no_data; | 2639 | goto out_no_data; |
2639 | 2640 | ||
2641 | args->version = 4; | ||
2642 | |||
2640 | switch (data->version) { | 2643 | switch (data->version) { |
2641 | case 1: | 2644 | case 1: |
2642 | if (data->host_addrlen > sizeof(args->nfs_server.address)) | 2645 | if (data->host_addrlen > sizeof(args->nfs_server.address)) |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index e6fe3d69d14c..4d6861c0dc14 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -80,6 +80,7 @@ struct nfs_write_header *nfs_writehdr_alloc(void) | |||
80 | INIT_LIST_HEAD(&hdr->rpc_list); | 80 | INIT_LIST_HEAD(&hdr->rpc_list); |
81 | spin_lock_init(&hdr->lock); | 81 | spin_lock_init(&hdr->lock); |
82 | atomic_set(&hdr->refcnt, 0); | 82 | atomic_set(&hdr->refcnt, 0); |
83 | hdr->verf = &p->verf; | ||
83 | } | 84 | } |
84 | return p; | 85 | return p; |
85 | } | 86 | } |
@@ -619,6 +620,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr) | |||
619 | goto next; | 620 | goto next; |
620 | } | 621 | } |
621 | if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) { | 622 | if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) { |
623 | memcpy(&req->wb_verf, hdr->verf, sizeof(req->wb_verf)); | ||
622 | nfs_mark_request_commit(req, hdr->lseg, &cinfo); | 624 | nfs_mark_request_commit(req, hdr->lseg, &cinfo); |
623 | goto next; | 625 | goto next; |
624 | } | 626 | } |
@@ -1255,15 +1257,14 @@ static void nfs_writeback_release_common(void *calldata) | |||
1255 | struct nfs_write_data *data = calldata; | 1257 | struct nfs_write_data *data = calldata; |
1256 | struct nfs_pgio_header *hdr = data->header; | 1258 | struct nfs_pgio_header *hdr = data->header; |
1257 | int status = data->task.tk_status; | 1259 | int status = data->task.tk_status; |
1258 | struct nfs_page *req = hdr->req; | ||
1259 | 1260 | ||
1260 | if ((status >= 0) && nfs_write_need_commit(data)) { | 1261 | if ((status >= 0) && nfs_write_need_commit(data)) { |
1261 | spin_lock(&hdr->lock); | 1262 | spin_lock(&hdr->lock); |
1262 | if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) | 1263 | if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) |
1263 | ; /* Do nothing */ | 1264 | ; /* Do nothing */ |
1264 | else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) | 1265 | else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) |
1265 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); | 1266 | memcpy(hdr->verf, &data->verf, sizeof(*hdr->verf)); |
1266 | else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) | 1267 | else if (memcmp(hdr->verf, &data->verf, sizeof(*hdr->verf))) |
1267 | set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags); | 1268 | set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags); |
1268 | spin_unlock(&hdr->lock); | 1269 | spin_unlock(&hdr->lock); |
1269 | } | 1270 | } |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 8fdc9ec5c5d3..94effd5bc4a1 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -900,7 +900,7 @@ static void free_session(struct kref *kref) | |||
900 | struct nfsd4_session *ses; | 900 | struct nfsd4_session *ses; |
901 | int mem; | 901 | int mem; |
902 | 902 | ||
903 | BUG_ON(!spin_is_locked(&client_lock)); | 903 | lockdep_assert_held(&client_lock); |
904 | ses = container_of(kref, struct nfsd4_session, se_ref); | 904 | ses = container_of(kref, struct nfsd4_session, se_ref); |
905 | nfsd4_del_conns(ses); | 905 | nfsd4_del_conns(ses); |
906 | spin_lock(&nfsd_drc_lock); | 906 | spin_lock(&nfsd_drc_lock); |
@@ -1080,7 +1080,7 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name) | |||
1080 | static inline void | 1080 | static inline void |
1081 | free_client(struct nfs4_client *clp) | 1081 | free_client(struct nfs4_client *clp) |
1082 | { | 1082 | { |
1083 | BUG_ON(!spin_is_locked(&client_lock)); | 1083 | lockdep_assert_held(&client_lock); |
1084 | while (!list_empty(&clp->cl_sessions)) { | 1084 | while (!list_empty(&clp->cl_sessions)) { |
1085 | struct nfsd4_session *ses; | 1085 | struct nfsd4_session *ses; |
1086 | ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, | 1086 | ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, |
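Note on the two nfs4state.c hunks above: BUG_ON(!spin_is_locked(...)) is replaced with lockdep_assert_held(), which documents the locking requirement, is verified by lockdep when CONFIG_PROVE_LOCKING is enabled and compiles away otherwise (spin_is_locked() is also unreliable on uniprocessor builds). A hedged sketch of the pattern with hypothetical names:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

static DEFINE_SPINLOCK(example_lock);
static int example_count;

/* caller must hold example_lock */
static void example_free_locked(void)
{
	lockdep_assert_held(&example_lock);
	example_count--;
}

static void example_put(void)
{
	spin_lock(&example_lock);
	example_free_locked();
	spin_unlock(&example_lock);
}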
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index 08a07a218d26..57ceaf33d177 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c | |||
@@ -191,6 +191,8 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs) | |||
191 | while (!list_empty(head)) { | 191 | while (!list_empty(head)) { |
192 | ii = list_first_entry(head, struct nilfs_inode_info, i_dirty); | 192 | ii = list_first_entry(head, struct nilfs_inode_info, i_dirty); |
193 | list_del_init(&ii->i_dirty); | 193 | list_del_init(&ii->i_dirty); |
194 | truncate_inode_pages(&ii->vfs_inode.i_data, 0); | ||
195 | nilfs_btnode_cache_clear(&ii->i_btnode_cache); | ||
194 | iput(&ii->vfs_inode); | 196 | iput(&ii->vfs_inode); |
195 | } | 197 | } |
196 | } | 198 | } |
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 0e72ad6f22aa..88e11fb346b6 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c | |||
@@ -2309,6 +2309,8 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head) | |||
2309 | if (!test_bit(NILFS_I_UPDATED, &ii->i_state)) | 2309 | if (!test_bit(NILFS_I_UPDATED, &ii->i_state)) |
2310 | continue; | 2310 | continue; |
2311 | list_del_init(&ii->i_dirty); | 2311 | list_del_init(&ii->i_dirty); |
2312 | truncate_inode_pages(&ii->vfs_inode.i_data, 0); | ||
2313 | nilfs_btnode_cache_clear(&ii->i_btnode_cache); | ||
2312 | iput(&ii->vfs_inode); | 2314 | iput(&ii->vfs_inode); |
2313 | } | 2315 | } |
2314 | } | 2316 | } |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 616f41a7cde6..437195f204e1 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -1803,7 +1803,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
1803 | rcu_read_lock(); | 1803 | rcu_read_lock(); |
1804 | file = fcheck_files(files, fd); | 1804 | file = fcheck_files(files, fd); |
1805 | if (file) { | 1805 | if (file) { |
1806 | unsigned i_mode, f_mode = file->f_mode; | 1806 | unsigned f_mode = file->f_mode; |
1807 | 1807 | ||
1808 | rcu_read_unlock(); | 1808 | rcu_read_unlock(); |
1809 | put_files_struct(files); | 1809 | put_files_struct(files); |
@@ -1819,12 +1819,14 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
1819 | inode->i_gid = GLOBAL_ROOT_GID; | 1819 | inode->i_gid = GLOBAL_ROOT_GID; |
1820 | } | 1820 | } |
1821 | 1821 | ||
1822 | i_mode = S_IFLNK; | 1822 | if (S_ISLNK(inode->i_mode)) { |
1823 | if (f_mode & FMODE_READ) | 1823 | unsigned i_mode = S_IFLNK; |
1824 | i_mode |= S_IRUSR | S_IXUSR; | 1824 | if (f_mode & FMODE_READ) |
1825 | if (f_mode & FMODE_WRITE) | 1825 | i_mode |= S_IRUSR | S_IXUSR; |
1826 | i_mode |= S_IWUSR | S_IXUSR; | 1826 | if (f_mode & FMODE_WRITE) |
1827 | inode->i_mode = i_mode; | 1827 | i_mode |= S_IWUSR | S_IXUSR; |
1828 | inode->i_mode = i_mode; | ||
1829 | } | ||
1828 | 1830 | ||
1829 | security_task_to_inode(task, inode); | 1831 | security_task_to_inode(task, inode); |
1830 | put_task_struct(task); | 1832 | put_task_struct(task); |
@@ -1859,6 +1861,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir, | |||
1859 | ei = PROC_I(inode); | 1861 | ei = PROC_I(inode); |
1860 | ei->fd = fd; | 1862 | ei->fd = fd; |
1861 | 1863 | ||
1864 | inode->i_mode = S_IFLNK; | ||
1862 | inode->i_op = &proc_pid_link_inode_operations; | 1865 | inode->i_op = &proc_pid_link_inode_operations; |
1863 | inode->i_size = 64; | 1866 | inode->i_size = 64; |
1864 | ei->op.proc_get_link = proc_fd_link; | 1867 | ei->op.proc_get_link = proc_fd_link; |
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index 82c585f715e3..03ce7a9b81cc 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c | |||
@@ -94,20 +94,15 @@ static const char *get_reason_str(enum kmsg_dump_reason reason) | |||
94 | * as we can from the end of the buffer. | 94 | * as we can from the end of the buffer. |
95 | */ | 95 | */ |
96 | static void pstore_dump(struct kmsg_dumper *dumper, | 96 | static void pstore_dump(struct kmsg_dumper *dumper, |
97 | enum kmsg_dump_reason reason, | 97 | enum kmsg_dump_reason reason) |
98 | const char *s1, unsigned long l1, | ||
99 | const char *s2, unsigned long l2) | ||
100 | { | 98 | { |
101 | unsigned long s1_start, s2_start; | 99 | unsigned long total = 0; |
102 | unsigned long l1_cpy, l2_cpy; | ||
103 | unsigned long size, total = 0; | ||
104 | char *dst; | ||
105 | const char *why; | 100 | const char *why; |
106 | u64 id; | 101 | u64 id; |
107 | int hsize, ret; | ||
108 | unsigned int part = 1; | 102 | unsigned int part = 1; |
109 | unsigned long flags = 0; | 103 | unsigned long flags = 0; |
110 | int is_locked = 0; | 104 | int is_locked = 0; |
105 | int ret; | ||
111 | 106 | ||
112 | why = get_reason_str(reason); | 107 | why = get_reason_str(reason); |
113 | 108 | ||
@@ -119,30 +114,25 @@ static void pstore_dump(struct kmsg_dumper *dumper, | |||
119 | spin_lock_irqsave(&psinfo->buf_lock, flags); | 114 | spin_lock_irqsave(&psinfo->buf_lock, flags); |
120 | oopscount++; | 115 | oopscount++; |
121 | while (total < kmsg_bytes) { | 116 | while (total < kmsg_bytes) { |
117 | char *dst; | ||
118 | unsigned long size; | ||
119 | int hsize; | ||
120 | size_t len; | ||
121 | |||
122 | dst = psinfo->buf; | 122 | dst = psinfo->buf; |
123 | hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part); | 123 | hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part); |
124 | size = psinfo->bufsize - hsize; | 124 | size = psinfo->bufsize - hsize; |
125 | dst += hsize; | 125 | dst += hsize; |
126 | 126 | ||
127 | l2_cpy = min(l2, size); | 127 | if (!kmsg_dump_get_buffer(dumper, true, dst, size, &len)) |
128 | l1_cpy = min(l1, size - l2_cpy); | ||
129 | |||
130 | if (l1_cpy + l2_cpy == 0) | ||
131 | break; | 128 | break; |
132 | 129 | ||
133 | s2_start = l2 - l2_cpy; | ||
134 | s1_start = l1 - l1_cpy; | ||
135 | |||
136 | memcpy(dst, s1 + s1_start, l1_cpy); | ||
137 | memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy); | ||
138 | |||
139 | ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part, | 130 | ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part, |
140 | hsize + l1_cpy + l2_cpy, psinfo); | 131 | hsize + len, psinfo); |
141 | if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted()) | 132 | if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted()) |
142 | pstore_new_entry = 1; | 133 | pstore_new_entry = 1; |
143 | l1 -= l1_cpy; | 134 | |
144 | l2 -= l2_cpy; | 135 | total += hsize + len; |
145 | total += l1_cpy + l2_cpy; | ||
146 | part++; | 136 | part++; |
147 | } | 137 | } |
148 | if (in_nmi()) { | 138 | if (in_nmi()) { |
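Note on the pstore_dump() rewrite above: the old dump callback that received two raw buffer fragments (s1/l1, s2/l2) gives way to an interface where the dumper pulls formatted log text itself with kmsg_dump_get_buffer(), looping until the log is drained. A hedged minimal sketch of a dumper against that interface as it looks in this kernel; module and buffer names are hypothetical and the persistence step is left as a comment:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kmsg_dump.h>

static char example_buf[4096];

static void example_do_dump(struct kmsg_dumper *dumper,
			    enum kmsg_dump_reason reason)
{
	size_t len;

	/* each call fills example_buf with the next-older chunk of the
	 * log and returns false once nothing is left */
	while (kmsg_dump_get_buffer(dumper, true, example_buf,
				    sizeof(example_buf), &len)) {
		/* persist example_buf[0..len) to non-volatile storage */
	}
}

static struct kmsg_dumper example_dumper = {
	.dump = example_do_dump,
};

static int __init example_init(void)
{
	return kmsg_dump_register(&example_dumper);
}

static void __exit example_exit(void)
{
	kmsg_dump_unregister(&example_dumper);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");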
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 685a83756b2b..84a7e6f3c046 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c | |||
@@ -2918,6 +2918,9 @@ int dbg_debugfs_init_fs(struct ubifs_info *c) | |||
2918 | struct dentry *dent; | 2918 | struct dentry *dent; |
2919 | struct ubifs_debug_info *d = c->dbg; | 2919 | struct ubifs_debug_info *d = c->dbg; |
2920 | 2920 | ||
2921 | if (!IS_ENABLED(DEBUG_FS)) | ||
2922 | return 0; | ||
2923 | |||
2921 | n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME, | 2924 | n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME, |
2922 | c->vi.ubi_num, c->vi.vol_id); | 2925 | c->vi.ubi_num, c->vi.vol_id); |
2923 | if (n == UBIFS_DFS_DIR_LEN) { | 2926 | if (n == UBIFS_DFS_DIR_LEN) { |
@@ -3010,7 +3013,8 @@ out: | |||
3010 | */ | 3013 | */ |
3011 | void dbg_debugfs_exit_fs(struct ubifs_info *c) | 3014 | void dbg_debugfs_exit_fs(struct ubifs_info *c) |
3012 | { | 3015 | { |
3013 | debugfs_remove_recursive(c->dbg->dfs_dir); | 3016 | if (IS_ENABLED(DEBUG_FS)) |
3017 | debugfs_remove_recursive(c->dbg->dfs_dir); | ||
3014 | } | 3018 | } |
3015 | 3019 | ||
3016 | struct ubifs_global_debug_info ubifs_dbg; | 3020 | struct ubifs_global_debug_info ubifs_dbg; |
@@ -3095,6 +3099,9 @@ int dbg_debugfs_init(void) | |||
3095 | const char *fname; | 3099 | const char *fname; |
3096 | struct dentry *dent; | 3100 | struct dentry *dent; |
3097 | 3101 | ||
3102 | if (!IS_ENABLED(DEBUG_FS)) | ||
3103 | return 0; | ||
3104 | |||
3098 | fname = "ubifs"; | 3105 | fname = "ubifs"; |
3099 | dent = debugfs_create_dir(fname, NULL); | 3106 | dent = debugfs_create_dir(fname, NULL); |
3100 | if (IS_ERR_OR_NULL(dent)) | 3107 | if (IS_ERR_OR_NULL(dent)) |
@@ -3159,7 +3166,8 @@ out: | |||
3159 | */ | 3166 | */ |
3160 | void dbg_debugfs_exit(void) | 3167 | void dbg_debugfs_exit(void) |
3161 | { | 3168 | { |
3162 | debugfs_remove_recursive(dfs_rootdir); | 3169 | if (IS_ENABLED(DEBUG_FS)) |
3170 | debugfs_remove_recursive(dfs_rootdir); | ||
3163 | } | 3171 | } |
3164 | 3172 | ||
3165 | /** | 3173 | /** |
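Note on the UBIFS hunks above: the debugfs setup/teardown paths become compile-time no-ops when debugfs is disabled by testing IS_ENABLED() instead of wrapping everything in #ifdefs. Elsewhere in the tree the macro is normally given the full option name, IS_ENABLED(CONFIG_DEBUG_FS), so that it folds to a constant 0/1. A hedged sketch of that usual idiom with hypothetical names:

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/debugfs.h>

static struct dentry *example_dir;

static int example_debugfs_init(void)
{
	/* constant-folded: the body is still type-checked but dropped
	 * entirely when CONFIG_DEBUG_FS is off */
	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return 0;

	example_dir = debugfs_create_dir("example", NULL);
	return IS_ERR_OR_NULL(example_dir) ? -ENODEV : 0;
}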