author	Eric Paris <eparis@redhat.com>	2009-12-17 21:24:24 -0500
committer	Eric Paris <eparis@redhat.com>	2010-07-28 09:58:53 -0400
commit	841bdc10f573aa010dd5818d35a5690b7d9f73ce (patch)
tree	58ef7a15e24ac07d3af7c6db7306199c9392f7dd /fs
parent	d07754412f9cdc2f4a99318d5ee81ace6715ea99 (diff)
fsnotify: rename mark_entry to just mark
Previously I used mark_entry when talking about marks on inodes. The _entry is pretty useless. Just use "mark" instead.

Signed-off-by: Eric Paris <eparis@redhat.com>
Diffstat (limited to 'fs')
-rw-r--r--	fs/notify/group.c	6
-rw-r--r--	fs/notify/inode_mark.c	148
2 files changed, 77 insertions, 77 deletions
diff --git a/fs/notify/group.c b/fs/notify/group.c
index b70e7d21dfde..9e9eb406afdd 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -74,11 +74,11 @@ void fsnotify_recalc_group_mask(struct fsnotify_group *group)
 {
 	__u32 mask = 0;
 	__u32 old_mask = group->mask;
-	struct fsnotify_mark *entry;
+	struct fsnotify_mark *mark;
 
 	spin_lock(&group->mark_lock);
-	list_for_each_entry(entry, &group->marks_list, g_list)
-		mask |= entry->mask;
+	list_for_each_entry(mark, &group->marks_list, g_list)
+		mask |= mark->mask;
 	spin_unlock(&group->mark_lock);
 
 	group->mask = mask;
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 01c42632eb2a..27c1b43ad739 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -30,21 +30,21 @@
  * There are 3 spinlocks involved with fsnotify inode marks and they MUST
  * be taken in order as follows:
  *
- * entry->lock
+ * mark->lock
  * group->mark_lock
  * inode->i_lock
  *
- * entry->lock protects 2 things, entry->group and entry->inode. You must hold
+ * mark->lock protects 2 things, mark->group and mark->inode. You must hold
  * that lock to dereference either of these things (they could be NULL even with
  * the lock)
  *
  * group->mark_lock protects the marks_list anchored inside a given group
- * and each entry is hooked via the g_list. It also sorta protects the
+ * and each mark is hooked via the g_list. It also sorta protects the
  * free_g_list, which when used is anchored by a private list on the stack of the
  * task which held the group->mark_lock.
  *
  * inode->i_lock protects the i_fsnotify_marks list anchored inside a
- * given inode and each entry is hooked via the i_list. (and sorta the
+ * given inode and each mark is hooked via the i_list. (and sorta the
  * free_i_list)
  *
  *
@@ -95,15 +95,15 @@
 #include <linux/fsnotify_backend.h>
 #include "fsnotify.h"
 
-void fsnotify_get_mark(struct fsnotify_mark *entry)
+void fsnotify_get_mark(struct fsnotify_mark *mark)
 {
-	atomic_inc(&entry->refcnt);
+	atomic_inc(&mark->refcnt);
 }
 
-void fsnotify_put_mark(struct fsnotify_mark *entry)
+void fsnotify_put_mark(struct fsnotify_mark *mark)
 {
-	if (atomic_dec_and_test(&entry->refcnt))
-		entry->free_mark(entry);
+	if (atomic_dec_and_test(&mark->refcnt))
+		mark->free_mark(mark);
 }
 
 /*
@@ -111,14 +111,14 @@ void fsnotify_put_mark(struct fsnotify_mark *entry)
  */
 static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
 {
-	struct fsnotify_mark *entry;
+	struct fsnotify_mark *mark;
 	struct hlist_node *pos;
 	__u32 new_mask = 0;
 
 	assert_spin_locked(&inode->i_lock);
 
-	hlist_for_each_entry(entry, pos, &inode->i_fsnotify_marks, i.i_list)
-		new_mask |= entry->mask;
+	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list)
+		new_mask |= mark->mask;
 	inode->i_fsnotify_mask = new_mask;
 }
 
@@ -138,40 +138,40 @@ void fsnotify_recalc_inode_mask(struct inode *inode)
 /*
  * Any time a mark is getting freed we end up here.
  * The caller had better be holding a reference to this mark so we don't actually
- * do the final put under the entry->lock
+ * do the final put under the mark->lock
  */
-void fsnotify_destroy_mark(struct fsnotify_mark *entry)
+void fsnotify_destroy_mark(struct fsnotify_mark *mark)
 {
 	struct fsnotify_group *group;
 	struct inode *inode;
 
-	spin_lock(&entry->lock);
+	spin_lock(&mark->lock);
 
-	group = entry->group;
-	inode = entry->i.inode;
+	group = mark->group;
+	inode = mark->i.inode;
 
 	BUG_ON(group && !inode);
 	BUG_ON(!group && inode);
 
 	/* if !group something else already marked this to die */
 	if (!group) {
-		spin_unlock(&entry->lock);
+		spin_unlock(&mark->lock);
 		return;
 	}
 
 	/* 1 from caller and 1 for being on i_list/g_list */
-	BUG_ON(atomic_read(&entry->refcnt) < 2);
+	BUG_ON(atomic_read(&mark->refcnt) < 2);
 
 	spin_lock(&group->mark_lock);
 	spin_lock(&inode->i_lock);
 
-	hlist_del_init(&entry->i.i_list);
-	entry->i.inode = NULL;
+	hlist_del_init(&mark->i.i_list);
+	mark->i.inode = NULL;
 
-	list_del_init(&entry->g_list);
-	entry->group = NULL;
+	list_del_init(&mark->g_list);
+	mark->group = NULL;
 
-	fsnotify_put_mark(entry); /* for i_list and g_list */
+	fsnotify_put_mark(mark); /* for i_list and g_list */
 
 	/*
 	 * this mark is now off the inode->i_fsnotify_marks list and we
@@ -182,21 +182,21 @@ void fsnotify_destroy_mark(struct fsnotify_mark *entry)
 
 	spin_unlock(&inode->i_lock);
 	spin_unlock(&group->mark_lock);
-	spin_unlock(&entry->lock);
+	spin_unlock(&mark->lock);
 
 	/*
 	 * Some groups like to know that marks are being freed. This is a
-	 * callback to the group function to let it know that this entry
+	 * callback to the group function to let it know that this mark
 	 * is being freed.
 	 */
 	if (group->ops->freeing_mark)
-		group->ops->freeing_mark(entry, group);
+		group->ops->freeing_mark(mark, group);
 
 	/*
 	 * __fsnotify_update_child_dentry_flags(inode);
 	 *
 	 * I really want to call that, but we can't, we have no idea if the inode
-	 * still exists the second we drop the entry->lock.
+	 * still exists the second we drop the mark->lock.
 	 *
 	 * The next time an event arrive to this inode from one of it's children
 	 * __fsnotify_parent will see that the inode doesn't care about it's
@@ -221,20 +221,20 @@ void fsnotify_destroy_mark(struct fsnotify_mark *entry)
  */
 void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
 {
-	struct fsnotify_mark *lentry, *entry;
+	struct fsnotify_mark *lmark, *mark;
 	LIST_HEAD(free_list);
 
 	spin_lock(&group->mark_lock);
-	list_for_each_entry_safe(entry, lentry, &group->marks_list, g_list) {
-		list_add(&entry->free_g_list, &free_list);
-		list_del_init(&entry->g_list);
-		fsnotify_get_mark(entry);
+	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
+		list_add(&mark->free_g_list, &free_list);
+		list_del_init(&mark->g_list);
+		fsnotify_get_mark(mark);
 	}
 	spin_unlock(&group->mark_lock);
 
-	list_for_each_entry_safe(entry, lentry, &free_list, free_g_list) {
-		fsnotify_destroy_mark(entry);
-		fsnotify_put_mark(entry);
+	list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) {
+		fsnotify_destroy_mark(mark);
+		fsnotify_put_mark(mark);
 	}
 }
 
@@ -243,21 +243,21 @@ void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
  */
 void fsnotify_clear_marks_by_inode(struct inode *inode)
 {
-	struct fsnotify_mark *entry, *lentry;
+	struct fsnotify_mark *mark, *lmark;
 	struct hlist_node *pos, *n;
 	LIST_HEAD(free_list);
 
 	spin_lock(&inode->i_lock);
-	hlist_for_each_entry_safe(entry, pos, n, &inode->i_fsnotify_marks, i.i_list) {
-		list_add(&entry->i.free_i_list, &free_list);
-		hlist_del_init(&entry->i.i_list);
-		fsnotify_get_mark(entry);
+	hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) {
+		list_add(&mark->i.free_i_list, &free_list);
+		hlist_del_init(&mark->i.i_list);
+		fsnotify_get_mark(mark);
 	}
 	spin_unlock(&inode->i_lock);
 
-	list_for_each_entry_safe(entry, lentry, &free_list, i.free_i_list) {
-		fsnotify_destroy_mark(entry);
-		fsnotify_put_mark(entry);
+	list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) {
+		fsnotify_destroy_mark(mark);
+		fsnotify_put_mark(mark);
 	}
 }
 
@@ -268,15 +268,15 @@ void fsnotify_clear_marks_by_inode(struct inode *inode)
 struct fsnotify_mark *fsnotify_find_mark(struct fsnotify_group *group,
 					 struct inode *inode)
 {
-	struct fsnotify_mark *entry;
+	struct fsnotify_mark *mark;
 	struct hlist_node *pos;
 
 	assert_spin_locked(&inode->i_lock);
 
-	hlist_for_each_entry(entry, pos, &inode->i_fsnotify_marks, i.i_list) {
-		if (entry->group == group) {
-			fsnotify_get_mark(entry);
-			return entry;
+	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) {
+		if (mark->group == group) {
+			fsnotify_get_mark(mark);
+			return mark;
 		}
 	}
 	return NULL;
@@ -294,35 +294,35 @@ void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *ol
 /*
  * Nothing fancy, just initialize lists and locks and counters.
  */
-void fsnotify_init_mark(struct fsnotify_mark *entry,
-			void (*free_mark)(struct fsnotify_mark *entry))
+void fsnotify_init_mark(struct fsnotify_mark *mark,
+			void (*free_mark)(struct fsnotify_mark *mark))
 {
-	spin_lock_init(&entry->lock);
-	atomic_set(&entry->refcnt, 1);
-	INIT_HLIST_NODE(&entry->i.i_list);
-	entry->group = NULL;
-	entry->mask = 0;
-	entry->i.inode = NULL;
-	entry->free_mark = free_mark;
+	spin_lock_init(&mark->lock);
+	atomic_set(&mark->refcnt, 1);
+	INIT_HLIST_NODE(&mark->i.i_list);
+	mark->group = NULL;
+	mark->mask = 0;
+	mark->i.inode = NULL;
+	mark->free_mark = free_mark;
 }
 
 /*
- * Attach an initialized mark entry to a given group and inode.
+ * Attach an initialized mark mark to a given group and inode.
  * These marks may be used for the fsnotify backend to determine which
  * event types should be delivered to which group and for which inodes.
  */
-int fsnotify_add_mark(struct fsnotify_mark *entry,
+int fsnotify_add_mark(struct fsnotify_mark *mark,
 		      struct fsnotify_group *group, struct inode *inode,
 		      int allow_dups)
 {
-	struct fsnotify_mark *lentry = NULL;
+	struct fsnotify_mark *lmark = NULL;
 	int ret = 0;
 
 	inode = igrab(inode);
 	if (unlikely(!inode))
 		return -EINVAL;
 
-	entry->flags = FSNOTIFY_MARK_FLAG_INODE;
+	mark->flags = FSNOTIFY_MARK_FLAG_INODE;
 
 	/*
 	 * if this group isn't being testing for inode type events we need
@@ -340,24 +340,24 @@ int fsnotify_add_mark(struct fsnotify_mark *entry,
 
 	/*
 	 * LOCKING ORDER!!!!
-	 * entry->lock
+	 * mark->lock
 	 * group->mark_lock
 	 * inode->i_lock
 	 */
-	spin_lock(&entry->lock);
+	spin_lock(&mark->lock);
 	spin_lock(&group->mark_lock);
 	spin_lock(&inode->i_lock);
 
 	if (!allow_dups)
-		lentry = fsnotify_find_mark(group, inode);
-	if (!lentry) {
-		entry->group = group;
-		entry->i.inode = inode;
+		lmark = fsnotify_find_mark(group, inode);
+	if (!lmark) {
+		mark->group = group;
+		mark->i.inode = inode;
 
-		hlist_add_head(&entry->i.i_list, &inode->i_fsnotify_marks);
-		list_add(&entry->g_list, &group->marks_list);
+		hlist_add_head(&mark->i.i_list, &inode->i_fsnotify_marks);
+		list_add(&mark->g_list, &group->marks_list);
 
-		fsnotify_get_mark(entry); /* for i_list and g_list */
+		fsnotify_get_mark(mark); /* for i_list and g_list */
 
 		atomic_inc(&group->num_marks);
 
@@ -366,12 +366,12 @@ int fsnotify_add_mark(struct fsnotify_mark *entry,
 
 	spin_unlock(&inode->i_lock);
 	spin_unlock(&group->mark_lock);
-	spin_unlock(&entry->lock);
+	spin_unlock(&mark->lock);
 
-	if (lentry) {
+	if (lmark) {
 		ret = -EEXIST;
 		iput(inode);
-		fsnotify_put_mark(lentry);
+		fsnotify_put_mark(lmark);
 	} else {
 		__fsnotify_update_child_dentry_flags(inode);
 	}