author    Elena Reshetova <elena.reshetova@intel.com>    2017-03-03 03:55:16 -0500
committer David Sterba <dsterba@suse.com>                2017-04-18 08:07:23 -0400
commit    6de5f18e7b0da0cdd265eda047a0bc4f48260bcb (patch)
tree      bb6b0807f0b91eea719abb636a19bf6b3e141fea /fs/btrfs/delayed-inode.c
parent    6df8cdf5bda221f268ac23940bce589ad176993d (diff)
btrfs: convert btrfs_delayed_node.refs from atomic_t to refcount_t

The refcount_t type and its corresponding API should be used instead of
atomic_t when the variable is used as a reference counter. This avoids
accidental refcounter overflows that might lead to use-after-free
situations.
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David Sterba <dsterba@suse.com>
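
What the conversion buys: unlike a raw atomic_t, a refcount_t refuses to wrap past its maximum and refuses to increment from zero, so a leaked increment can no longer wrap the counter back to zero and trigger a premature free. A toy userspace model of those semantics follows (a sketch only; the real API lives in <linux/refcount.h>, WARNs on misuse, and uses atomic operations with the appropriate memory ordering):

/*
 * Toy userspace model of refcount_t saturation semantics; illustration
 * only, not the kernel implementation.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned int val; } myref_t;	/* stand-in for refcount_t */

static void myref_inc(myref_t *r)
{
	if (r->val == 0 || r->val == UINT_MAX) {
		/* inc-from-zero or overflow: saturate instead of wrapping */
		fprintf(stderr, "refcount saturated\n");
		r->val = UINT_MAX;
		return;
	}
	r->val++;
}

static bool myref_dec_and_test(myref_t *r)
{
	if (r->val == UINT_MAX)
		return false;	/* a saturated object is deliberately leaked */
	return --r->val == 0;
}

int main(void)
{
	myref_t r = { .val = 1 };

	myref_inc(&r);					/* 1 -> 2 */
	printf("%d\n", myref_dec_and_test(&r));		/* 0: still one ref */
	printf("%d\n", myref_dec_and_test(&r));		/* 1: last ref gone */
	return 0;
}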
Diffstat (limited to 'fs/btrfs/delayed-inode.c')
 fs/btrfs/delayed-inode.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 1aff676f0e5b..7396c36e0adb 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -52,7 +52,7 @@ static inline void btrfs_init_delayed_node(
 {
 	delayed_node->root = root;
 	delayed_node->inode_id = inode_id;
-	atomic_set(&delayed_node->refs, 0);
+	refcount_set(&delayed_node->refs, 0);
 	delayed_node->ins_root = RB_ROOT;
 	delayed_node->del_root = RB_ROOT;
 	mutex_init(&delayed_node->mutex);
@@ -81,7 +81,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(

 	node = READ_ONCE(btrfs_inode->delayed_node);
 	if (node) {
-		atomic_inc(&node->refs);
+		refcount_inc(&node->refs);
 		return node;
 	}

@@ -89,14 +89,14 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
 	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
 	if (node) {
 		if (btrfs_inode->delayed_node) {
-			atomic_inc(&node->refs);	/* can be accessed */
+			refcount_inc(&node->refs);	/* can be accessed */
 			BUG_ON(btrfs_inode->delayed_node != node);
 			spin_unlock(&root->inode_lock);
 			return node;
 		}
 		btrfs_inode->delayed_node = node;
 		/* can be accessed and cached in the inode */
-		atomic_add(2, &node->refs);
+		refcount_add(2, &node->refs);
 		spin_unlock(&root->inode_lock);
 		return node;
 	}
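
The refcount_add(2, ...) above is not a typo: as the adjacent comments note, the lookup hands out two references at once, one pinning the pointer cached in btrfs_inode->delayed_node and one owned by the caller, so a single add of 2 replaces two back-to-back increments.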
@@ -125,7 +125,7 @@ again:
 	btrfs_init_delayed_node(node, root, ino);

 	/* cached in the btrfs inode and can be accessed */
-	atomic_add(2, &node->refs);
+	refcount_set(&node->refs, 2);

 	ret = radix_tree_preload(GFP_NOFS);
 	if (ret) {
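
This hunk is the one place where the conversion is not one-to-one: the node was just initialized with a refcount of zero in btrfs_init_delayed_node(), and the refcount API treats any addition starting from zero as a likely use-after-free (in the toy model above, myref_inc() from zero saturates and warns). The patch therefore sets the initial value of 2 directly with refcount_set() instead of adding 2 to zero.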
@@ -166,7 +166,7 @@ static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
 	} else {
 		list_add_tail(&node->n_list, &root->node_list);
 		list_add_tail(&node->p_list, &root->prepare_list);
-		atomic_inc(&node->refs);	/* inserted into list */
+		refcount_inc(&node->refs);	/* inserted into list */
 		root->nodes++;
 		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 	}
@@ -180,7 +180,7 @@ static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
 	spin_lock(&root->lock);
 	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 		root->nodes--;
-		atomic_dec(&node->refs);	/* not in the list */
+		refcount_dec(&node->refs);	/* not in the list */
 		list_del_init(&node->n_list);
 		if (!list_empty(&node->p_list))
 			list_del_init(&node->p_list);
@@ -201,7 +201,7 @@ static struct btrfs_delayed_node *btrfs_first_delayed_node(

 	p = delayed_root->node_list.next;
 	node = list_entry(p, struct btrfs_delayed_node, n_list);
-	atomic_inc(&node->refs);
+	refcount_inc(&node->refs);
 out:
 	spin_unlock(&delayed_root->lock);

@@ -228,7 +228,7 @@ static struct btrfs_delayed_node *btrfs_next_delayed_node(
 	p = node->n_list.next;

 	next = list_entry(p, struct btrfs_delayed_node, n_list);
-	atomic_inc(&next->refs);
+	refcount_inc(&next->refs);
 out:
 	spin_unlock(&delayed_root->lock);

@@ -253,11 +253,11 @@ static void __btrfs_release_delayed_node(
 	btrfs_dequeue_delayed_node(delayed_root, delayed_node);
 	mutex_unlock(&delayed_node->mutex);

-	if (atomic_dec_and_test(&delayed_node->refs)) {
+	if (refcount_dec_and_test(&delayed_node->refs)) {
 		bool free = false;
 		struct btrfs_root *root = delayed_node->root;
 		spin_lock(&root->inode_lock);
-		if (atomic_read(&delayed_node->refs) == 0) {
+		if (refcount_read(&delayed_node->refs) == 0) {
 			radix_tree_delete(&root->delayed_nodes_tree,
 					  delayed_node->inode_id);
 			free = true;
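
The release path above pairs the lockless refcount_dec_and_test() with a re-read of the count under root->inode_lock before the node is unpublished from the radix tree, because btrfs_get_delayed_node() takes its reference under that same lock. A self-contained sketch of the pattern follows (illustrative names only, C11 atomics standing in for refcount_t; object_put and index_slot are not kernel symbols):

/*
 * Sketch of the "re-check under the publishing lock" release pattern.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct object {
	atomic_uint refs;		/* stand-in for refcount_t */
};

static pthread_mutex_t index_lock = PTHREAD_MUTEX_INITIALIZER;
static struct object *index_slot;	/* stand-in for the radix tree entry */

static void object_put(struct object *obj)
{
	bool free_it = false;

	/* like refcount_dec_and_test(): did we drop the last reference? */
	if (atomic_fetch_sub(&obj->refs, 1) == 1) {
		pthread_mutex_lock(&index_lock);
		/*
		 * Re-check under the lock that guards lookups: a concurrent
		 * lookup may have revived the object through index_slot
		 * between our decrement and taking the lock.
		 */
		if (atomic_load(&obj->refs) == 0) {
			index_slot = NULL;	/* like radix_tree_delete() */
			free_it = true;
		}
		pthread_mutex_unlock(&index_lock);
	}
	if (free_it)
		free(obj);
}

int main(void)
{
	struct object *obj = calloc(1, sizeof(*obj));

	atomic_store(&obj->refs, 1);	/* one reference, published below */
	index_slot = obj;
	object_put(obj);		/* last put: unpublishes and frees */
	return index_slot != NULL;
}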
@@ -286,7 +286,7 @@ static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
 	p = delayed_root->prepare_list.next;
 	list_del_init(p);
 	node = list_entry(p, struct btrfs_delayed_node, p_list);
-	atomic_inc(&node->refs);
+	refcount_inc(&node->refs);
 out:
 	spin_unlock(&delayed_root->lock);

@@ -1621,7 +1621,7 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
 	 * insert/delete delayed items in this period. So we also needn't
 	 * requeue or dequeue this delayed node.
 	 */
-	atomic_dec(&delayed_node->refs);
+	refcount_dec(&delayed_node->refs);

 	return true;
 }
@@ -1963,7 +1963,7 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
 		inode_id = delayed_nodes[n - 1]->inode_id + 1;

 	for (i = 0; i < n; i++)
-		atomic_inc(&delayed_nodes[i]->refs);
+		refcount_inc(&delayed_nodes[i]->refs);
 	spin_unlock(&root->inode_lock);

 	for (i = 0; i < n; i++) {
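
The same discipline shows up in this last hunk: btrfs_kill_all_delayed_nodes() takes a reference on every collected node while still holding root->inode_lock, so the nodes cannot be freed out from under the second loop, which processes them after the lock is dropped.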