author     Elena Reshetova <elena.reshetova@intel.com>    2017-10-20 06:26:02 -0400
committer  Jan Kara <jack@suse.cz>                        2017-10-31 12:54:56 -0400
commit     ab97f87325e28b7ef7717e6cb62e8da14a7176e1 (patch)
tree       7ce8f0a747efaff1907f3774479dcbc2f4762c48 /fs/notify/mark.c
parent     6685df31255493c3f0e9e0b8bf885e4c9762fc5d (diff)
fsnotify: convert fsnotify_mark.refcnt from atomic_t to refcount_t
atomic_t variables are currently used to implement reference
counters with the following properties:
- counter is initialized to 1 using atomic_set()
- a resource is freed upon counter reaching zero
- once counter reaches zero, its further
increments aren't allowed
- counter schema uses basic atomic operations
(set, inc, inc_not_zero, dec_and_test, etc.)
Such atomic variables should be converted to the newly provided
refcount_t type and API, which prevent accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.
The variable fsnotify_mark.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.
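For reference, a minimal sketch of the refcounting scheme described above,
written against the kernel's refcount_t API (refcount_set, refcount_inc,
refcount_inc_not_zero, refcount_dec_and_test). The struct foo and its helpers
are illustrative names only, not code from this patch:

#include <linux/refcount.h>
#include <linux/slab.h>

/* Illustrative object following the scheme above; not from this patch. */
struct foo {
	refcount_t refcnt;
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->refcnt, 1);	/* counter starts at 1 */
	return f;
}

static void foo_get(struct foo *f)
{
	refcount_inc(&f->refcnt);	/* saturates and WARNs instead of overflowing */
}

/* Grab a reference only if the object is not already on its way out. */
static bool foo_get_unless_zero(struct foo *f)
{
	return refcount_inc_not_zero(&f->refcnt);
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->refcnt))	/* free on the last put */
		kfree(f);
}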
Suggested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: David Windsor <dwindsor@gmail.com>
Reviewed-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Diffstat (limited to 'fs/notify/mark.c')
-rw-r--r--	fs/notify/mark.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index f3a32ea15b49..e9191b416434 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -105,8 +105,8 @@ static DECLARE_WORK(connector_reaper_work, fsnotify_connector_destroy_workfn);
 
 void fsnotify_get_mark(struct fsnotify_mark *mark)
 {
-	WARN_ON_ONCE(!atomic_read(&mark->refcnt));
-	atomic_inc(&mark->refcnt);
+	WARN_ON_ONCE(!refcount_read(&mark->refcnt));
+	refcount_inc(&mark->refcnt);
 }
 
 static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
@@ -201,7 +201,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
 
 	/* Catch marks that were actually never attached to object */
 	if (!mark->connector) {
-		if (atomic_dec_and_test(&mark->refcnt))
+		if (refcount_dec_and_test(&mark->refcnt))
 			fsnotify_final_mark_destroy(mark);
 		return;
 	}
@@ -210,7 +210,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
 	 * We have to be careful so that traversals of obj_list under lock can
 	 * safely grab mark reference.
 	 */
-	if (!atomic_dec_and_lock(&mark->refcnt, &mark->connector->lock))
+	if (!refcount_dec_and_lock(&mark->refcnt, &mark->connector->lock))
 		return;
 
 	conn = mark->connector;
@@ -258,7 +258,7 @@ static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
 	if (!mark)
 		return true;
 
-	if (atomic_inc_not_zero(&mark->refcnt)) {
+	if (refcount_inc_not_zero(&mark->refcnt)) {
 		spin_lock(&mark->lock);
 		if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) {
 			/* mark is attached, group is still alive then */
@@ -335,7 +335,7 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark)
 
 	WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
 	WARN_ON_ONCE(!srcu_read_lock_held(&fsnotify_mark_srcu) &&
-		     atomic_read(&mark->refcnt) < 1 +
+		     refcount_read(&mark->refcnt) < 1 +
 		     !!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED));
 
 	spin_lock(&mark->lock);
@@ -737,7 +737,7 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
 {
 	memset(mark, 0, sizeof(*mark));
 	spin_lock_init(&mark->lock);
-	atomic_set(&mark->refcnt, 1);
+	refcount_set(&mark->refcnt, 1);
 	fsnotify_get_group(group);
 	mark->group = group;
 }
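The fsnotify_put_mark() hunk relies on refcount_dec_and_lock(), the
refcount_t counterpart of atomic_dec_and_lock(): it only takes the given
spinlock when the count actually drops to zero. A minimal sketch of that
pattern, using a hypothetical struct bar and bar_list_lock rather than the
fsnotify structures:

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical refcounted object kept on a global list; not from this patch. */
struct bar {
	refcount_t refcnt;
	struct list_head list;
};

static DEFINE_SPINLOCK(bar_list_lock);

static void bar_put(struct bar *b)
{
	/*
	 * Returns false without touching the lock unless this was the last
	 * reference; on the final put it returns true with bar_list_lock
	 * held, so the drop to zero and the list removal appear atomic to
	 * anyone walking the list under the lock.
	 */
	if (!refcount_dec_and_lock(&b->refcnt, &bar_list_lock))
		return;

	list_del(&b->list);
	spin_unlock(&bar_list_lock);
	kfree(b);
}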