author		Elena Reshetova <elena.reshetova@intel.com>	2017-10-20 06:26:02 -0400
committer	Jan Kara <jack@suse.cz>	2017-10-31 12:54:56 -0400
commit		ab97f87325e28b7ef7717e6cb62e8da14a7176e1 (patch)
tree		7ce8f0a747efaff1907f3774479dcbc2f4762c48
parent		6685df31255493c3f0e9e0b8bf885e4c9762fc5d (diff)
fsnotify: convert fsnotify_mark.refcnt from atomic_t to refcount_t
atomic_t variables are currently used to implement reference counters
with the following properties:

 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further increments aren't allowed
 - counter schema uses basic atomic operations (set, inc, inc_not_zero,
   dec_and_test, etc.)

Such atomic variables should be converted to a newly provided refcount_t
type and API that prevents accidental counter overflows and underflows.
This is important since overflows and underflows can lead to
use-after-free situations and be exploitable.

The variable fsnotify_mark.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: David Windsor <dwindsor@gmail.com>
Reviewed-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
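For context, the counter scheme described above looks roughly as follows when built on refcount_t. This is a minimal sketch against a hypothetical struct foo object: the refcount_set()/refcount_inc()/refcount_dec_and_test() calls are the real <linux/refcount.h> API the patch switches to, while the object, its allocator, and the foo_get()/foo_put() helper names are illustrative only.

#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical refcounted object following the scheme in the commit message. */
struct foo {
	refcount_t refcnt;
	/* ... payload ... */
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->refcnt, 1);	/* counter starts at 1 */
	return f;
}

static void foo_get(struct foo *f)
{
	refcount_inc(&f->refcnt);		/* take an extra reference */
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->refcnt))	/* free on the last reference */
		kfree(f);
}

Unlike atomic_inc(), refcount_inc() saturates at a maximum value and warns instead of wrapping around to zero, which is what closes the overflow-to-use-after-free window the commit message describes.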
-rw-r--r--	fs/notify/inotify/inotify_user.c	4
-rw-r--r--	fs/notify/mark.c	14
-rw-r--r--	include/linux/fsnotify_backend.h	2
-rw-r--r--	kernel/audit_tree.c	2
4 files changed, 11 insertions, 11 deletions
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 7cc7d3fb1862..d3c20e0bb046 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -376,7 +376,7 @@ static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group
 
 		fsnotify_get_mark(fsn_mark);
 		/* One ref for being in the idr, one ref we just took */
-		BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
+		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
 	}
 
 	return i_mark;
@@ -446,7 +446,7 @@ static void inotify_remove_from_idr(struct fsnotify_group *group,
 	 * One ref for being in the idr
 	 * one ref grabbed by inotify_idr_find
 	 */
-	if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 2)) {
+	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
 		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
 			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
 		/* we can't really recover with bad ref cnting.. */
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index f3a32ea15b49..e9191b416434 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -105,8 +105,8 @@ static DECLARE_WORK(connector_reaper_work, fsnotify_connector_destroy_workfn);
 
 void fsnotify_get_mark(struct fsnotify_mark *mark)
 {
-	WARN_ON_ONCE(!atomic_read(&mark->refcnt));
-	atomic_inc(&mark->refcnt);
+	WARN_ON_ONCE(!refcount_read(&mark->refcnt));
+	refcount_inc(&mark->refcnt);
 }
 
 static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
@@ -201,7 +201,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
 
 	/* Catch marks that were actually never attached to object */
 	if (!mark->connector) {
-		if (atomic_dec_and_test(&mark->refcnt))
+		if (refcount_dec_and_test(&mark->refcnt))
 			fsnotify_final_mark_destroy(mark);
 		return;
 	}
@@ -210,7 +210,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
 	 * We have to be careful so that traversals of obj_list under lock can
 	 * safely grab mark reference.
 	 */
-	if (!atomic_dec_and_lock(&mark->refcnt, &mark->connector->lock))
+	if (!refcount_dec_and_lock(&mark->refcnt, &mark->connector->lock))
 		return;
 
 	conn = mark->connector;
@@ -258,7 +258,7 @@ static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
 	if (!mark)
 		return true;
 
-	if (atomic_inc_not_zero(&mark->refcnt)) {
+	if (refcount_inc_not_zero(&mark->refcnt)) {
 		spin_lock(&mark->lock);
 		if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) {
 			/* mark is attached, group is still alive then */
@@ -335,7 +335,7 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark)
 
 	WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
 	WARN_ON_ONCE(!srcu_read_lock_held(&fsnotify_mark_srcu) &&
-		     atomic_read(&mark->refcnt) < 1 +
+		     refcount_read(&mark->refcnt) < 1 +
 			!!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED));
 
 	spin_lock(&mark->lock);
@@ -737,7 +737,7 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
 {
 	memset(mark, 0, sizeof(*mark));
 	spin_lock_init(&mark->lock);
-	atomic_set(&mark->refcnt, 1);
+	refcount_set(&mark->refcnt, 1);
 	fsnotify_get_group(group);
 	mark->group = group;
 }
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 744e2b9969fc..9bcb43953f4e 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -242,7 +242,7 @@ struct fsnotify_mark {
 	__u32 mask;
 	/* We hold one for presence in g_list. Also one ref for each 'thing'
 	 * in kernel that found and may be using this mark. */
-	atomic_t refcnt;
+	refcount_t refcnt;
 	/* Group this mark is for. Set on mark creation, stable until last ref
 	 * is dropped */
 	struct fsnotify_group *group;
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 011d46e5f73f..45ec960ad536 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -1007,7 +1007,7 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify
 	 * We are guaranteed to have at least one reference to the mark from
 	 * either the inode or the caller of fsnotify_destroy_mark().
 	 */
-	BUG_ON(atomic_read(&entry->refcnt) < 1);
+	BUG_ON(refcount_read(&entry->refcnt) < 1);
 }
 
 static const struct fsnotify_ops audit_tree_ops = {