author    Eric Paris <eparis@redhat.com>  2010-07-28 10:18:38 -0400
committer Eric Paris <eparis@redhat.com>  2010-07-28 10:18:52 -0400
commit    75c1be487a690db43da2c1234fcacd84c982803c (patch)
tree      b38ce47f157d3b0eff7ac6eb4756a4b390ac35ae /fs/notify/mark.c
parent    700307a29ad61090dcf1d45f8f4a135f5e9211ae (diff)
fsnotify: srcu to protect read side of inode and vfsmount locks
Currently, reads of the inode->i_fsnotify_marks and vfsmount->mnt_fsnotify_marks lists are protected by a spinlock on both the read and the write side. This patch protects the read side of those lists with a single new srcu.

Signed-off-by: Eric Paris <eparis@redhat.com>
Diffstat (limited to 'fs/notify/mark.c')
-rw-r--r--  fs/notify/mark.c | 60
1 file changed, 57 insertions(+), 3 deletions(-)
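For context: this file only adds the SRCU machinery and the deferred-destruction path; the read-side conversion of the list walkers lives in other files touched by the full patch and is not shown in this diffstat-limited view. Below is a minimal sketch of the read-side pattern the commit message describes; the function name is hypothetical and the actual list traversal is left abstract.

/*
 * Sketch only (not part of this diff): how a reader is expected to walk a
 * mark list under SRCU instead of taking the spinlock.  The function name
 * example_walk_marks() is an assumption; the real read-side conversion is
 * done elsewhere in the patch.
 */
#include <linux/srcu.h>
#include <linux/fs.h>
#include <linux/fsnotify_backend.h>

extern struct srcu_struct fsnotify_mark_srcu;

static void example_walk_marks(struct inode *inode)
{
	int idx;

	idx = srcu_read_lock(&fsnotify_mark_srcu);
	/*
	 * ... walk inode->i_fsnotify_marks (or vfsmount->mnt_fsnotify_marks)
	 * here using RCU-safe list primitives; no spinlock is taken on the
	 * read side any more ...
	 */
	srcu_read_unlock(&fsnotify_mark_srcu, idx);
}

Because readers only delimit their critical sections this way, the write side can no longer free a mark immediately; as the diff below shows, it queues the mark on destroy_list and lets a new kthread drop the final reference only after synchronize_srcu() guarantees all such readers have finished.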
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 69c5a166930c..41f3990f900b 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -85,10 +85,12 @@
 #include <linux/fs.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/srcu.h>
 #include <linux/writeback.h> /* for inode_lock */
 
 #include <asm/atomic.h>
@@ -96,6 +98,11 @@
 #include <linux/fsnotify_backend.h>
 #include "fsnotify.h"
 
+struct srcu_struct fsnotify_mark_srcu;
+static DEFINE_SPINLOCK(destroy_lock);
+static LIST_HEAD(destroy_list);
+static DECLARE_WAIT_QUEUE_HEAD(destroy_waitq);
+
 void fsnotify_get_mark(struct fsnotify_mark *mark)
 {
 	atomic_inc(&mark->refcnt);
@@ -144,11 +151,14 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
 
 	list_del_init(&mark->g_list);
 
-	fsnotify_put_mark(mark); /* for i_list and g_list */
-
 	spin_unlock(&group->mark_lock);
 	spin_unlock(&mark->lock);
 
+	spin_lock(&destroy_lock);
+	list_add(&mark->destroy_list, &destroy_list);
+	spin_unlock(&destroy_lock);
+	wake_up(&destroy_waitq);
+
 	/*
 	 * Some groups like to know that marks are being freed. This is a
 	 * callback to the group function to let it know that this mark
@@ -263,12 +273,17 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
 err:
 	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
 	list_del_init(&mark->g_list);
+	mark->group = NULL;
 	atomic_dec(&group->num_marks);
-	fsnotify_put_mark(mark);
 
 	spin_unlock(&group->mark_lock);
 	spin_unlock(&mark->lock);
 
+	spin_lock(&destroy_lock);
+	list_add(&mark->destroy_list, &destroy_list);
+	spin_unlock(&destroy_lock);
+	wake_up(&destroy_waitq);
+
 	return ret;
 }
 
@@ -326,3 +341,42 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
 	atomic_set(&mark->refcnt, 1);
 	mark->free_mark = free_mark;
 }
+
+static int fsnotify_mark_destroy(void *ignored)
+{
+	struct fsnotify_mark *mark, *next;
+	LIST_HEAD(private_destroy_list);
+
+	for (;;) {
+		spin_lock(&destroy_lock);
+		list_for_each_entry_safe(mark, next, &destroy_list, destroy_list) {
+			list_del(&mark->destroy_list);
+			list_add(&mark->destroy_list, &private_destroy_list);
+		}
+		spin_unlock(&destroy_lock);
+
+		synchronize_srcu(&fsnotify_mark_srcu);
+
+		list_for_each_entry_safe(mark, next, &private_destroy_list, destroy_list) {
+			list_del_init(&mark->destroy_list);
+			fsnotify_put_mark(mark);
+		}
+
+		wait_event_interruptible(destroy_waitq, !list_empty(&destroy_list));
+	}
+
+	return 0;
+}
+
+static int __init fsnotify_mark_init(void)
+{
+	struct task_struct *thread;
+
+	thread = kthread_run(fsnotify_mark_destroy, NULL,
+			     "fsnotify_mark");
+	if (IS_ERR(thread))
+		panic("unable to start fsnotify mark destruction thread.");
+
+	return 0;
+}
+device_initcall(fsnotify_mark_init);
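One piece not visible in this diffstat-limited view is the initialization of fsnotify_mark_srcu itself. A minimal sketch of what that presumably looks like elsewhere in the patch; the file location and function name here are assumptions, not taken from this diff.

/*
 * Sketch only: fsnotify_mark_srcu is defined in fs/notify/mark.c above but
 * must be initialized before use, e.g. from an early initcall in another
 * fsnotify file.  The function name below is hypothetical.
 */
#include <linux/init.h>
#include <linux/srcu.h>

extern struct srcu_struct fsnotify_mark_srcu;

static __init int fsnotify_setup_srcu(void)
{
	return init_srcu_struct(&fsnotify_mark_srcu);
}
core_initcall(fsnotify_setup_srcu);

An early initcall level makes the srcu_struct usable before any marks are added, while the destruction thread started by device_initcall() above only needs to exist by the time marks can actually be queued for destruction; anything queued earlier simply waits on destroy_list until the thread runs.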