 include/linux/fsnotify_backend.h |   5
 init/Kconfig                     |   2
 kernel/audit_tree.c              | 234
 kernel/auditsc.c                 |   4
 4 files changed, 136 insertions(+), 109 deletions(-)
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 1679f250d59e..e25284371020 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -62,8 +62,9 @@
 
 /* listeners that hard code group numbers near the top */
 #define DNOTIFY_GROUP_NUM UINT_MAX
 #define AUDIT_WATCH_GROUP_NUM (DNOTIFY_GROUP_NUM-1)
-#define INOTIFY_GROUP_NUM (AUDIT_WATCH_GROUP_NUM-1)
+#define AUDIT_TREE_GROUP_NUM (AUDIT_WATCH_GROUP_NUM-1)
+#define INOTIFY_GROUP_NUM (AUDIT_TREE_GROUP_NUM-1)
 
 struct fsnotify_group;
 struct fsnotify_event;
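
A side note on the macro chain above (not part of the patch): after the change the hard-coded listener groups still occupy consecutive numbers counting down from UINT_MAX, with the new audit-tree group slotted between the audit-watch and inotify groups. A standalone check, assuming nothing beyond the macro definitions shown in the hunk:

    #include <limits.h>
    #include <stdio.h>

    /* Mirror of the macro chain in fsnotify_backend.h after this patch. */
    #define DNOTIFY_GROUP_NUM UINT_MAX
    #define AUDIT_WATCH_GROUP_NUM (DNOTIFY_GROUP_NUM-1)
    #define AUDIT_TREE_GROUP_NUM (AUDIT_WATCH_GROUP_NUM-1)
    #define INOTIFY_GROUP_NUM (AUDIT_TREE_GROUP_NUM-1)

    int main(void)
    {
        /* Prints four consecutive values counting down from UINT_MAX. */
        printf("%u %u %u %u\n", DNOTIFY_GROUP_NUM, AUDIT_WATCH_GROUP_NUM,
               AUDIT_TREE_GROUP_NUM, INOTIFY_GROUP_NUM);
        return 0;
    }
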
diff --git a/init/Kconfig b/init/Kconfig
index 5cff9a980c39..84e33c49a0cb 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -326,7 +326,7 @@ config AUDITSYSCALL
 config AUDIT_TREE
 	def_bool y
 	depends on AUDITSYSCALL
-	select INOTIFY
+	select FSNOTIFY
 
 menu "RCU Subsystem"
 
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 46a57b57a335..a164600dd82e 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -1,5 +1,5 @@
 #include "audit.h"
-#include <linux/inotify.h>
+#include <linux/fsnotify_backend.h>
 #include <linux/namei.h>
 #include <linux/mount.h>
 #include <linux/kthread.h>
@@ -22,7 +22,7 @@ struct audit_tree {
 
 struct audit_chunk {
 	struct list_head hash;
-	struct inotify_watch watch;
+	struct fsnotify_mark_entry mark;
 	struct list_head trees;		/* with root here */
 	int dead;
 	int count;
@@ -59,7 +59,7 @@ static LIST_HEAD(prune_list);
  * tree is refcounted; one reference for "some rules on rules_list refer to
  * it", one for each chunk with pointer to it.
  *
- * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
+ * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
  * of watch contributes 1 to .refs).
  *
  * node.index allows to get from node.list to containing chunk.
@@ -68,7 +68,7 @@ static LIST_HEAD(prune_list);
  * that makes a difference. Some.
  */
 
-static struct inotify_handle *rtree_ih;
+static struct fsnotify_group *audit_tree_group;
 
 static struct audit_tree *alloc_tree(const char *s)
 {
@@ -111,29 +111,6 @@ const char *audit_tree_path(struct audit_tree *tree)
 	return tree->pathname;
 }
 
-static struct audit_chunk *alloc_chunk(int count)
-{
-	struct audit_chunk *chunk;
-	size_t size;
-	int i;
-
-	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
-	chunk = kzalloc(size, GFP_KERNEL);
-	if (!chunk)
-		return NULL;
-
-	INIT_LIST_HEAD(&chunk->hash);
-	INIT_LIST_HEAD(&chunk->trees);
-	chunk->count = count;
-	atomic_long_set(&chunk->refs, 1);
-	for (i = 0; i < count; i++) {
-		INIT_LIST_HEAD(&chunk->owners[i].list);
-		chunk->owners[i].index = i;
-	}
-	inotify_init_watch(&chunk->watch);
-	return chunk;
-}
-
 static void free_chunk(struct audit_chunk *chunk)
 {
 	int i;
@@ -157,6 +134,35 @@ static void __put_chunk(struct rcu_head *rcu)
 	audit_put_chunk(chunk);
 }
 
+static void audit_tree_destroy_watch(struct fsnotify_mark_entry *entry)
+{
+	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
+	call_rcu(&chunk->head, __put_chunk);
+}
+
+static struct audit_chunk *alloc_chunk(int count)
+{
+	struct audit_chunk *chunk;
+	size_t size;
+	int i;
+
+	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
+	chunk = kzalloc(size, GFP_KERNEL);
+	if (!chunk)
+		return NULL;
+
+	INIT_LIST_HEAD(&chunk->hash);
+	INIT_LIST_HEAD(&chunk->trees);
+	chunk->count = count;
+	atomic_long_set(&chunk->refs, 1);
+	for (i = 0; i < count; i++) {
+		INIT_LIST_HEAD(&chunk->owners[i].list);
+		chunk->owners[i].index = i;
+	}
+	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
+	return chunk;
+}
+
 enum {HASH_SIZE = 128};
 static struct list_head chunk_hash_heads[HASH_SIZE];
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
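
The re-added alloc_chunk() sizes a single allocation so the fixed audit_chunk header and a variable-length owners[] tail live in one block, using offsetof() rather than sizeof() of the whole struct. The user-space sketch below is illustration only, not kernel code; the struct members are pared down to what the sizing needs:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        unsigned long index;
    };

    /* Pared-down stand-in for struct audit_chunk: fixed header + variable tail. */
    struct chunk {
        int count;
        struct node owners[];   /* flexible array member */
    };

    static struct chunk *alloc_chunk(int count)
    {
        /* Same sizing as the patch: header size plus count tail elements. */
        size_t size = offsetof(struct chunk, owners) + count * sizeof(struct node);
        struct chunk *c = calloc(1, size);

        if (!c)
            return NULL;
        c->count = count;
        return c;
    }

    int main(void)
    {
        struct chunk *c = alloc_chunk(4);

        if (c)
            printf("allocated chunk with %d owners in one block\n", c->count);
        free(c);
        return 0;
    }
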
@@ -167,10 +173,15 @@ static inline struct list_head *chunk_hash(const struct inode *inode)
 	return chunk_hash_heads + n % HASH_SIZE;
 }
 
-/* hash_lock is held by caller */
+/* hash_lock & entry->lock is held by caller */
 static void insert_hash(struct audit_chunk *chunk)
 {
-	struct list_head *list = chunk_hash(chunk->watch.inode);
+	struct fsnotify_mark_entry *entry = &chunk->mark;
+	struct list_head *list;
+
+	if (!entry->inode)
+		return;
+	list = chunk_hash(entry->inode);
 	list_add_rcu(&chunk->hash, list);
 }
 
@@ -181,7 +192,8 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
 	struct audit_chunk *p;
 
 	list_for_each_entry_rcu(p, list, hash) {
-		if (p->watch.inode == inode) {
+		/* mark.inode may have gone NULL, but who cares? */
+		if (p->mark.inode == inode) {
 			atomic_long_inc(&p->refs);
 			return p;
 		}
@@ -210,38 +222,19 @@ static struct audit_chunk *find_chunk(struct node *p)
 static void untag_chunk(struct node *p)
 {
 	struct audit_chunk *chunk = find_chunk(p);
+	struct fsnotify_mark_entry *entry = &chunk->mark;
 	struct audit_chunk *new;
 	struct audit_tree *owner;
 	int size = chunk->count - 1;
 	int i, j;
 
-	if (!pin_inotify_watch(&chunk->watch)) {
-		/*
-		 * Filesystem is shutting down; all watches are getting
-		 * evicted, just take it off the node list for this
-		 * tree and let the eviction logics take care of the
-		 * rest.
-		 */
-		owner = p->owner;
-		if (owner->root == chunk) {
-			list_del_init(&owner->same_root);
-			owner->root = NULL;
-		}
-		list_del_init(&p->list);
-		p->owner = NULL;
-		put_tree(owner);
-		return;
-	}
+	fsnotify_get_mark(entry);
 
 	spin_unlock(&hash_lock);
 
-	/*
-	 * pin_inotify_watch() succeeded, so the watch won't go away
-	 * from under us.
-	 */
-	mutex_lock(&chunk->watch.inode->inotify_mutex);
-	if (chunk->dead) {
-		mutex_unlock(&chunk->watch.inode->inotify_mutex);
+	spin_lock(&entry->lock);
+	if (chunk->dead || !entry->inode) {
+		spin_unlock(&entry->lock);
 		goto out;
 	}
 
@@ -256,16 +249,17 @@ static void untag_chunk(struct node *p)
 		list_del_init(&p->list);
 		list_del_rcu(&chunk->hash);
 		spin_unlock(&hash_lock);
-		inotify_evict_watch(&chunk->watch);
-		mutex_unlock(&chunk->watch.inode->inotify_mutex);
-		put_inotify_watch(&chunk->watch);
+		spin_unlock(&entry->lock);
+		fsnotify_destroy_mark_by_entry(entry);
+		fsnotify_put_mark(entry);
 		goto out;
 	}
 
 	new = alloc_chunk(size);
 	if (!new)
 		goto Fallback;
-	if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
+	fsnotify_duplicate_mark(&new->mark, entry);
+	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, 1)) {
 		free_chunk(new);
 		goto Fallback;
 	}
@@ -298,9 +292,9 @@ static void untag_chunk(struct node *p)
 	list_for_each_entry(owner, &new->trees, same_root)
 		owner->root = new;
 	spin_unlock(&hash_lock);
-	inotify_evict_watch(&chunk->watch);
-	mutex_unlock(&chunk->watch.inode->inotify_mutex);
-	put_inotify_watch(&chunk->watch);
+	spin_unlock(&entry->lock);
+	fsnotify_destroy_mark_by_entry(entry);
+	fsnotify_put_mark(entry);
 	goto out;
 
 Fallback:
@@ -314,31 +308,33 @@ Fallback:
 	p->owner = NULL;
 	put_tree(owner);
 	spin_unlock(&hash_lock);
-	mutex_unlock(&chunk->watch.inode->inotify_mutex);
+	spin_unlock(&entry->lock);
 out:
-	unpin_inotify_watch(&chunk->watch);
+	fsnotify_put_mark(entry);
 	spin_lock(&hash_lock);
 }
 
 static int create_chunk(struct inode *inode, struct audit_tree *tree)
 {
+	struct fsnotify_mark_entry *entry;
 	struct audit_chunk *chunk = alloc_chunk(1);
 	if (!chunk)
 		return -ENOMEM;
 
-	if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
+	entry = &chunk->mark;
+	if (fsnotify_add_mark(entry, audit_tree_group, inode, 0)) {
 		free_chunk(chunk);
 		return -ENOSPC;
 	}
 
-	mutex_lock(&inode->inotify_mutex);
+	spin_lock(&entry->lock);
 	spin_lock(&hash_lock);
 	if (tree->goner) {
 		spin_unlock(&hash_lock);
 		chunk->dead = 1;
-		inotify_evict_watch(&chunk->watch);
-		mutex_unlock(&inode->inotify_mutex);
-		put_inotify_watch(&chunk->watch);
+		spin_unlock(&entry->lock);
+		fsnotify_destroy_mark_by_entry(entry);
+		fsnotify_put_mark(entry);
 		return 0;
 	}
 	chunk->owners[0].index = (1U << 31);
@@ -351,30 +347,33 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
 	}
 	insert_hash(chunk);
 	spin_unlock(&hash_lock);
-	mutex_unlock(&inode->inotify_mutex);
+	spin_unlock(&entry->lock);
 	return 0;
 }
 
 /* the first tagged inode becomes root of tree */
 static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 {
-	struct inotify_watch *watch;
+	struct fsnotify_mark_entry *old_entry, *chunk_entry;
 	struct audit_tree *owner;
 	struct audit_chunk *chunk, *old;
 	struct node *p;
 	int n;
 
-	if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
+	spin_lock(&inode->i_lock);
+	old_entry = fsnotify_find_mark_entry(audit_tree_group, inode);
+	spin_unlock(&inode->i_lock);
+	if (!old_entry)
 		return create_chunk(inode, tree);
 
-	old = container_of(watch, struct audit_chunk, watch);
+	old = container_of(old_entry, struct audit_chunk, mark);
 
 	/* are we already there? */
 	spin_lock(&hash_lock);
 	for (n = 0; n < old->count; n++) {
 		if (old->owners[n].owner == tree) {
 			spin_unlock(&hash_lock);
-			put_inotify_watch(&old->watch);
+			fsnotify_put_mark(old_entry);
 			return 0;
 		}
 	}
@@ -382,25 +381,44 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 
 	chunk = alloc_chunk(old->count + 1);
 	if (!chunk) {
-		put_inotify_watch(&old->watch);
+		fsnotify_put_mark(old_entry);
 		return -ENOMEM;
 	}
 
-	mutex_lock(&inode->inotify_mutex);
-	if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
-		mutex_unlock(&inode->inotify_mutex);
-		put_inotify_watch(&old->watch);
+	chunk_entry = &chunk->mark;
+
+	spin_lock(&old_entry->lock);
+	if (!old_entry->inode) {
+		/* old_entry is being shot, lets just lie */
+		spin_unlock(&old_entry->lock);
+		fsnotify_put_mark(old_entry);
 		free_chunk(chunk);
+		return -ENOENT;
+	}
+
+	fsnotify_duplicate_mark(chunk_entry, old_entry);
+	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, 1)) {
+		spin_unlock(&old_entry->lock);
+		free_chunk(chunk);
+		fsnotify_put_mark(old_entry);
 		return -ENOSPC;
 	}
+
+	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
+	spin_lock(&chunk_entry->lock);
 	spin_lock(&hash_lock);
+
+	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
 	if (tree->goner) {
 		spin_unlock(&hash_lock);
 		chunk->dead = 1;
-		inotify_evict_watch(&chunk->watch);
-		mutex_unlock(&inode->inotify_mutex);
-		put_inotify_watch(&old->watch);
-		put_inotify_watch(&chunk->watch);
+		spin_unlock(&chunk_entry->lock);
+		spin_unlock(&old_entry->lock);
+
+		fsnotify_destroy_mark_by_entry(chunk_entry);
+
+		fsnotify_put_mark(chunk_entry);
+		fsnotify_put_mark(old_entry);
 		return 0;
 	}
 	list_replace_init(&old->trees, &chunk->trees);
@@ -426,10 +444,11 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 		list_add(&tree->same_root, &chunk->trees);
 	}
 	spin_unlock(&hash_lock);
-	inotify_evict_watch(&old->watch);
-	mutex_unlock(&inode->inotify_mutex);
-	put_inotify_watch(&old->watch); /* pair to inotify_find_watch */
-	put_inotify_watch(&old->watch); /* and kill it */
+	spin_unlock(&chunk_entry->lock);
+	spin_unlock(&old_entry->lock);
+	fsnotify_destroy_mark_by_entry(old_entry);
+	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
+	fsnotify_put_mark(old_entry); /* and kill it */
 	return 0;
 }
 
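
The rewritten tag_chunk() path above nests three locks in a fixed order (old_entry->lock, then chunk_entry->lock, then hash_lock) and drops them in the reverse order, which is what the added "we now hold ..." comments are recording. The fragment below only illustrates that discipline with user-space pthread mutexes; none of these names come from the patch:

    #include <pthread.h>
    #include <stdio.h>

    /* Stand-ins for old_entry->lock, chunk_entry->lock and hash_lock. */
    static pthread_mutex_t old_lock   = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t chunk_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t hash_lock  = PTHREAD_MUTEX_INITIALIZER;

    static void tag_like_section(void)
    {
        /* Always acquire in the same order ... */
        pthread_mutex_lock(&old_lock);
        pthread_mutex_lock(&chunk_lock);
        pthread_mutex_lock(&hash_lock);

        printf("holding all three locks\n");

        /* ... and release in the reverse order, so two paths can never deadlock. */
        pthread_mutex_unlock(&hash_lock);
        pthread_mutex_unlock(&chunk_lock);
        pthread_mutex_unlock(&old_lock);
    }

    int main(void)
    {
        tag_like_section();
        return 0;
    }
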
@@ -584,7 +603,9 @@ void audit_trim_trees(void)
 
 		spin_lock(&hash_lock);
 		list_for_each_entry(node, &tree->chunks, list) {
-			struct inode *inode = find_chunk(node)->watch.inode;
+			struct audit_chunk *chunk = find_chunk(node);
+			/* this could be NULL if the watch is dieing else where... */
+			struct inode *inode = chunk->mark.inode;
 			node->index |= 1U<<31;
 			if (iterate_mounts(compare_root, inode, root_mnt))
 				node->index &= ~(1U<<31);
@@ -846,7 +867,6 @@ void audit_kill_trees(struct list_head *list)
  * Here comes the stuff asynchronous to auditctl operations
  */
 
-/* inode->inotify_mutex is locked */
 static void evict_chunk(struct audit_chunk *chunk)
 {
 	struct audit_tree *owner;
@@ -885,35 +905,41 @@ static void evict_chunk(struct audit_chunk *chunk)
 	mutex_unlock(&audit_filter_mutex);
 }
 
-static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
-			 u32 cookie, const char *dname, struct inode *inode)
+static int audit_tree_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
 {
-	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
+	BUG();
+	return -EOPNOTSUPP;
+}
 
-	if (mask & IN_IGNORED) {
-		evict_chunk(chunk);
-		put_inotify_watch(watch);
-	}
+static void audit_tree_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
+{
+	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
+
+	evict_chunk(chunk);
+	fsnotify_put_mark(entry);
 }
 
-static void destroy_watch(struct inotify_watch *watch)
+static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask)
 {
-	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
-	call_rcu(&chunk->head, __put_chunk);
+	return 0;
 }
 
-static const struct inotify_operations rtree_inotify_ops = {
-	.handle_event = handle_event,
-	.destroy_watch = destroy_watch,
+static const struct fsnotify_ops audit_tree_ops = {
+	.handle_event = audit_tree_handle_event,
+	.should_send_event = audit_tree_send_event,
+	.free_group_priv = NULL,
+	.free_event_priv = NULL,
+	.freeing_mark = audit_tree_freeing_mark,
 };
 
 static int __init audit_tree_init(void)
 {
 	int i;
 
-	rtree_ih = inotify_init(&rtree_inotify_ops);
-	if (IS_ERR(rtree_ih))
-		audit_panic("cannot initialize inotify handle for rectree watches");
+	audit_tree_group = fsnotify_obtain_group(AUDIT_TREE_GROUP_NUM,
+						 0, &audit_tree_ops);
+	if (IS_ERR(audit_tree_group))
+		audit_panic("cannot initialize fsnotify group for rectree watches");
 
 	for (i = 0; i < HASH_SIZE; i++)
 		INIT_LIST_HEAD(&chunk_hash_heads[i]);
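
Both audit_tree_destroy_watch() and audit_tree_freeing_mark() above recover the audit_chunk from the fsnotify_mark_entry embedded in it via container_of(). A small user-space illustration of that pattern (illustration only; the hand-rolled container_of mirrors the kernel macro, and the struct names are simplified stand-ins):

    #include <stddef.h>
    #include <stdio.h>

    /* Same idea as the kernel's container_of() macro. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct mark_entry {
        int refcnt;
    };

    /* The mark is embedded in the chunk, as in struct audit_chunk. */
    struct chunk {
        int count;
        struct mark_entry mark;
    };

    /* The callback receives only the embedded member ... */
    static void freeing_mark(struct mark_entry *entry)
    {
        /* ... and recovers the containing object from it. */
        struct chunk *c = container_of(entry, struct chunk, mark);

        printf("chunk with count=%d is going away\n", c->count);
    }

    int main(void)
    {
        struct chunk c = { .count = 3, .mark = { .refcnt = 1 } };

        freeing_mark(&c.mark);
        return 0;
    }
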
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 240063c370e6..786901cd8217 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1725,7 +1725,7 @@ static inline void handle_one(const struct inode *inode)
 	struct audit_tree_refs *p;
 	struct audit_chunk *chunk;
 	int count;
-	if (likely(list_empty(&inode->inotify_watches)))
+	if (likely(hlist_empty(&inode->i_fsnotify_mark_entries)))
 		return;
 	context = current->audit_context;
 	p = context->trees;
@@ -1768,7 +1768,7 @@ retry:
 	seq = read_seqbegin(&rename_lock);
 	for(;;) {
 		struct inode *inode = d->d_inode;
-		if (inode && unlikely(!list_empty(&inode->inotify_watches))) {
+		if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_mark_entries))) {
 			struct audit_chunk *chunk;
 			chunk = audit_tree_lookup(inode);
 			if (chunk) {
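
Both auditsc.c hunks keep the likely()/unlikely() branch hints around the new hlist_empty() fast-path test on the inode's mark list. Those hints conventionally expand to GCC's __builtin_expect(); a minimal user-space sketch of that mapping (the macro definitions here are the conventional ones, not taken from this patch):

    #include <stdio.h>

    /* Conventional definitions of the kernel's branch-prediction hints. */
    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)

    int main(void)
    {
        int watched = 0;

        /* The common case: nothing is watching this inode, so bail out fast. */
        if (likely(!watched)) {
            printf("no marks on this inode, skipping audit tree lookup\n");
            return 0;
        }

        if (unlikely(watched))
            printf("inode has marks, do the expensive lookup\n");
        return 0;
    }
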