path: root/kernel/audit_tree.c
Diffstat (limited to 'kernel/audit_tree.c')
-rw-r--r--	kernel/audit_tree.c	246
1 file changed, 140 insertions(+), 106 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 46a57b57a335..37b2bea170c8 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -1,5 +1,5 @@
 #include "audit.h"
-#include <linux/inotify.h>
+#include <linux/fsnotify_backend.h>
 #include <linux/namei.h>
 #include <linux/mount.h>
 #include <linux/kthread.h>
@@ -22,7 +22,7 @@ struct audit_tree {
 
 struct audit_chunk {
 	struct list_head hash;
-	struct inotify_watch watch;
+	struct fsnotify_mark mark;
 	struct list_head trees;	/* with root here */
 	int dead;
 	int count;
@@ -59,7 +59,7 @@ static LIST_HEAD(prune_list);
  * tree is refcounted; one reference for "some rules on rules_list refer to
  * it", one for each chunk with pointer to it.
  *
- * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
+ * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
  * of watch contributes 1 to .refs).
  *
  * node.index allows to get from node.list to containing chunk.
@@ -68,7 +68,7 @@ static LIST_HEAD(prune_list);
  * that makes a difference. Some.
  */
 
-static struct inotify_handle *rtree_ih;
+static struct fsnotify_group *audit_tree_group;
 
 static struct audit_tree *alloc_tree(const char *s)
 {
@@ -111,29 +111,6 @@ const char *audit_tree_path(struct audit_tree *tree)
 	return tree->pathname;
 }
 
-static struct audit_chunk *alloc_chunk(int count)
-{
-	struct audit_chunk *chunk;
-	size_t size;
-	int i;
-
-	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
-	chunk = kzalloc(size, GFP_KERNEL);
-	if (!chunk)
-		return NULL;
-
-	INIT_LIST_HEAD(&chunk->hash);
-	INIT_LIST_HEAD(&chunk->trees);
-	chunk->count = count;
-	atomic_long_set(&chunk->refs, 1);
-	for (i = 0; i < count; i++) {
-		INIT_LIST_HEAD(&chunk->owners[i].list);
-		chunk->owners[i].index = i;
-	}
-	inotify_init_watch(&chunk->watch);
-	return chunk;
-}
-
 static void free_chunk(struct audit_chunk *chunk)
 {
 	int i;
@@ -157,6 +134,35 @@ static void __put_chunk(struct rcu_head *rcu)
 	audit_put_chunk(chunk);
 }
 
+static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
+{
+	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
+	call_rcu(&chunk->head, __put_chunk);
+}
+
+static struct audit_chunk *alloc_chunk(int count)
+{
+	struct audit_chunk *chunk;
+	size_t size;
+	int i;
+
+	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
+	chunk = kzalloc(size, GFP_KERNEL);
+	if (!chunk)
+		return NULL;
+
+	INIT_LIST_HEAD(&chunk->hash);
+	INIT_LIST_HEAD(&chunk->trees);
+	chunk->count = count;
+	atomic_long_set(&chunk->refs, 1);
+	for (i = 0; i < count; i++) {
+		INIT_LIST_HEAD(&chunk->owners[i].list);
+		chunk->owners[i].index = i;
+	}
+	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
+	return chunk;
+}
+
 enum {HASH_SIZE = 128};
 static struct list_head chunk_hash_heads[HASH_SIZE];
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
@@ -167,10 +173,15 @@ static inline struct list_head *chunk_hash(const struct inode *inode)
 	return chunk_hash_heads + n % HASH_SIZE;
 }
 
-/* hash_lock is held by caller */
+/* hash_lock & entry->lock is held by caller */
 static void insert_hash(struct audit_chunk *chunk)
 {
-	struct list_head *list = chunk_hash(chunk->watch.inode);
+	struct fsnotify_mark *entry = &chunk->mark;
+	struct list_head *list;
+
+	if (!entry->i.inode)
+		return;
+	list = chunk_hash(entry->i.inode);
 	list_add_rcu(&chunk->hash, list);
 }
 
@@ -181,7 +192,8 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
 	struct audit_chunk *p;
 
 	list_for_each_entry_rcu(p, list, hash) {
-		if (p->watch.inode == inode) {
+		/* mark.inode may have gone NULL, but who cares? */
+		if (p->mark.i.inode == inode) {
 			atomic_long_inc(&p->refs);
 			return p;
 		}
@@ -210,38 +222,24 @@ static struct audit_chunk *find_chunk(struct node *p)
 static void untag_chunk(struct node *p)
 {
 	struct audit_chunk *chunk = find_chunk(p);
-	struct audit_chunk *new;
+	struct fsnotify_mark *entry = &chunk->mark;
+	struct audit_chunk *new = NULL;
 	struct audit_tree *owner;
 	int size = chunk->count - 1;
 	int i, j;
 
-	if (!pin_inotify_watch(&chunk->watch)) {
-		/*
-		 * Filesystem is shutting down; all watches are getting
-		 * evicted, just take it off the node list for this
-		 * tree and let the eviction logics take care of the
-		 * rest.
-		 */
-		owner = p->owner;
-		if (owner->root == chunk) {
-			list_del_init(&owner->same_root);
-			owner->root = NULL;
-		}
-		list_del_init(&p->list);
-		p->owner = NULL;
-		put_tree(owner);
-		return;
-	}
+	fsnotify_get_mark(entry);
 
 	spin_unlock(&hash_lock);
 
-	/*
-	 * pin_inotify_watch() succeeded, so the watch won't go away
-	 * from under us.
-	 */
-	mutex_lock(&chunk->watch.inode->inotify_mutex);
-	if (chunk->dead) {
-		mutex_unlock(&chunk->watch.inode->inotify_mutex);
+	if (size)
+		new = alloc_chunk(size);
+
+	spin_lock(&entry->lock);
+	if (chunk->dead || !entry->i.inode) {
+		spin_unlock(&entry->lock);
+		if (new)
+			free_chunk(new);
 		goto out;
 	}
 
@@ -256,16 +254,17 @@ static void untag_chunk(struct node *p)
 		list_del_init(&p->list);
 		list_del_rcu(&chunk->hash);
 		spin_unlock(&hash_lock);
-		inotify_evict_watch(&chunk->watch);
-		mutex_unlock(&chunk->watch.inode->inotify_mutex);
-		put_inotify_watch(&chunk->watch);
+		spin_unlock(&entry->lock);
+		fsnotify_destroy_mark(entry);
+		fsnotify_put_mark(entry);
 		goto out;
 	}
 
-	new = alloc_chunk(size);
 	if (!new)
 		goto Fallback;
-	if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
+
+	fsnotify_duplicate_mark(&new->mark, entry);
+	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
 		free_chunk(new);
 		goto Fallback;
 	}
@@ -298,9 +297,9 @@ static void untag_chunk(struct node *p)
 	list_for_each_entry(owner, &new->trees, same_root)
 		owner->root = new;
 	spin_unlock(&hash_lock);
-	inotify_evict_watch(&chunk->watch);
-	mutex_unlock(&chunk->watch.inode->inotify_mutex);
-	put_inotify_watch(&chunk->watch);
+	spin_unlock(&entry->lock);
+	fsnotify_destroy_mark(entry);
+	fsnotify_put_mark(entry);
 	goto out;
 
 Fallback:
@@ -314,31 +313,33 @@ Fallback:
 	p->owner = NULL;
 	put_tree(owner);
 	spin_unlock(&hash_lock);
-	mutex_unlock(&chunk->watch.inode->inotify_mutex);
+	spin_unlock(&entry->lock);
 out:
-	unpin_inotify_watch(&chunk->watch);
+	fsnotify_put_mark(entry);
 	spin_lock(&hash_lock);
 }
 
 static int create_chunk(struct inode *inode, struct audit_tree *tree)
 {
+	struct fsnotify_mark *entry;
 	struct audit_chunk *chunk = alloc_chunk(1);
 	if (!chunk)
 		return -ENOMEM;
 
-	if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
+	entry = &chunk->mark;
+	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
 		free_chunk(chunk);
 		return -ENOSPC;
 	}
 
-	mutex_lock(&inode->inotify_mutex);
+	spin_lock(&entry->lock);
 	spin_lock(&hash_lock);
 	if (tree->goner) {
 		spin_unlock(&hash_lock);
 		chunk->dead = 1;
-		inotify_evict_watch(&chunk->watch);
-		mutex_unlock(&inode->inotify_mutex);
-		put_inotify_watch(&chunk->watch);
+		spin_unlock(&entry->lock);
+		fsnotify_destroy_mark(entry);
+		fsnotify_put_mark(entry);
 		return 0;
 	}
 	chunk->owners[0].index = (1U << 31);
@@ -351,30 +352,31 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
 	}
 	insert_hash(chunk);
 	spin_unlock(&hash_lock);
-	mutex_unlock(&inode->inotify_mutex);
+	spin_unlock(&entry->lock);
 	return 0;
 }
 
 /* the first tagged inode becomes root of tree */
 static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 {
-	struct inotify_watch *watch;
+	struct fsnotify_mark *old_entry, *chunk_entry;
 	struct audit_tree *owner;
 	struct audit_chunk *chunk, *old;
 	struct node *p;
 	int n;
 
-	if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
+	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
+	if (!old_entry)
 		return create_chunk(inode, tree);
 
-	old = container_of(watch, struct audit_chunk, watch);
+	old = container_of(old_entry, struct audit_chunk, mark);
 
 	/* are we already there? */
 	spin_lock(&hash_lock);
 	for (n = 0; n < old->count; n++) {
 		if (old->owners[n].owner == tree) {
 			spin_unlock(&hash_lock);
-			put_inotify_watch(&old->watch);
+			fsnotify_put_mark(old_entry);
 			return 0;
 		}
 	}
@@ -382,25 +384,44 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 
 	chunk = alloc_chunk(old->count + 1);
 	if (!chunk) {
-		put_inotify_watch(&old->watch);
+		fsnotify_put_mark(old_entry);
 		return -ENOMEM;
 	}
 
-	mutex_lock(&inode->inotify_mutex);
-	if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
-		mutex_unlock(&inode->inotify_mutex);
-		put_inotify_watch(&old->watch);
+	chunk_entry = &chunk->mark;
+
+	spin_lock(&old_entry->lock);
+	if (!old_entry->i.inode) {
+		/* old_entry is being shot, lets just lie */
+		spin_unlock(&old_entry->lock);
+		fsnotify_put_mark(old_entry);
 		free_chunk(chunk);
+		return -ENOENT;
+	}
+
+	fsnotify_duplicate_mark(chunk_entry, old_entry);
+	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
+		spin_unlock(&old_entry->lock);
+		free_chunk(chunk);
+		fsnotify_put_mark(old_entry);
 		return -ENOSPC;
 	}
+
+	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
+	spin_lock(&chunk_entry->lock);
 	spin_lock(&hash_lock);
+
+	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
 	if (tree->goner) {
 		spin_unlock(&hash_lock);
 		chunk->dead = 1;
-		inotify_evict_watch(&chunk->watch);
-		mutex_unlock(&inode->inotify_mutex);
-		put_inotify_watch(&old->watch);
-		put_inotify_watch(&chunk->watch);
+		spin_unlock(&chunk_entry->lock);
+		spin_unlock(&old_entry->lock);
+
+		fsnotify_destroy_mark(chunk_entry);
+
+		fsnotify_put_mark(chunk_entry);
+		fsnotify_put_mark(old_entry);
 		return 0;
 	}
 	list_replace_init(&old->trees, &chunk->trees);
@@ -426,10 +447,11 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 		list_add(&tree->same_root, &chunk->trees);
 	}
 	spin_unlock(&hash_lock);
-	inotify_evict_watch(&old->watch);
-	mutex_unlock(&inode->inotify_mutex);
-	put_inotify_watch(&old->watch); /* pair to inotify_find_watch */
-	put_inotify_watch(&old->watch); /* and kill it */
+	spin_unlock(&chunk_entry->lock);
+	spin_unlock(&old_entry->lock);
+	fsnotify_destroy_mark(old_entry);
+	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
+	fsnotify_put_mark(old_entry); /* and kill it */
 	return 0;
 }
 
@@ -584,7 +606,9 @@ void audit_trim_trees(void)
 
 		spin_lock(&hash_lock);
 		list_for_each_entry(node, &tree->chunks, list) {
-			struct inode *inode = find_chunk(node)->watch.inode;
+			struct audit_chunk *chunk = find_chunk(node);
+			/* this could be NULL if the watch is dieing else where... */
+			struct inode *inode = chunk->mark.i.inode;
 			node->index |= 1U<<31;
 			if (iterate_mounts(compare_root, inode, root_mnt))
 				node->index &= ~(1U<<31);
@@ -846,7 +870,6 @@ void audit_kill_trees(struct list_head *list)
  * Here comes the stuff asynchronous to auditctl operations
  */
 
-/* inode->inotify_mutex is locked */
 static void evict_chunk(struct audit_chunk *chunk)
 {
 	struct audit_tree *owner;
@@ -885,35 +908,46 @@ static void evict_chunk(struct audit_chunk *chunk)
 	mutex_unlock(&audit_filter_mutex);
 }
 
-static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
-			 u32 cookie, const char *dname, struct inode *inode)
+static int audit_tree_handle_event(struct fsnotify_group *group,
+				   struct fsnotify_mark *inode_mark,
+				   struct fsnotify_mark *vfsmonut_mark,
+				   struct fsnotify_event *event)
+{
+	BUG();
+	return -EOPNOTSUPP;
+}
+
+static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
 {
-	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
+	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
 
-	if (mask & IN_IGNORED) {
-		evict_chunk(chunk);
-		put_inotify_watch(watch);
-	}
+	evict_chunk(chunk);
+	fsnotify_put_mark(entry);
 }
 
-static void destroy_watch(struct inotify_watch *watch)
+static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
+				  struct fsnotify_mark *inode_mark,
+				  struct fsnotify_mark *vfsmount_mark,
+				  __u32 mask, void *data, int data_type)
 {
-	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
-	call_rcu(&chunk->head, __put_chunk);
+	return false;
 }
 
-static const struct inotify_operations rtree_inotify_ops = {
-	.handle_event = handle_event,
-	.destroy_watch = destroy_watch,
+static const struct fsnotify_ops audit_tree_ops = {
+	.handle_event = audit_tree_handle_event,
+	.should_send_event = audit_tree_send_event,
+	.free_group_priv = NULL,
+	.free_event_priv = NULL,
+	.freeing_mark = audit_tree_freeing_mark,
 };
 
 static int __init audit_tree_init(void)
 {
 	int i;
 
-	rtree_ih = inotify_init(&rtree_inotify_ops);
-	if (IS_ERR(rtree_ih))
-		audit_panic("cannot initialize inotify handle for rectree watches");
+	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
+	if (IS_ERR(audit_tree_group))
+		audit_panic("cannot initialize fsnotify group for rectree watches");
 
 	for (i = 0; i < HASH_SIZE; i++)
 		INIT_LIST_HEAD(&chunk_hash_heads[i]);
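
Editor's note: for readers skimming the diff, the heart of the change is a one-for-one swap of the inotify watch API for fsnotify marks. The following is a condensed, illustrative sketch of the new pattern, stitched together from the hunks above; it omits the locking and error paths of the real code and is not a drop-in snippet.

	/* one fsnotify group for all audit tree marks, created once at init time */
	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	/* each chunk embeds a mark; the callback frees the chunk via RCU when the mark dies */
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);

	/* attach the mark to the audited inode (replaces inotify_add_watch) */
	if (fsnotify_add_mark(&chunk->mark, audit_tree_group, inode, NULL, 0))
		free_chunk(chunk);

	/* teardown: detach from the inode, then drop the reference
	 * (replaces inotify_evict_watch + put_inotify_watch) */
	fsnotify_destroy_mark(&chunk->mark);
	fsnotify_put_mark(&chunk->mark);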