aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/audit_tree.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/audit_tree.c')
-rw-r--r--kernel/audit_tree.c16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 80f29e015570..2e0c97427b33 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -174,9 +174,9 @@ static void insert_hash(struct audit_chunk *chunk)
 	struct fsnotify_mark *entry = &chunk->mark;
 	struct list_head *list;
 
-	if (!entry->i.inode)
+	if (!entry->inode)
 		return;
-	list = chunk_hash(entry->i.inode);
+	list = chunk_hash(entry->inode);
 	list_add_rcu(&chunk->hash, list);
 }
 
@@ -188,7 +188,7 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
 
 	list_for_each_entry_rcu(p, list, hash) {
 		/* mark.inode may have gone NULL, but who cares? */
-		if (p->mark.i.inode == inode) {
+		if (p->mark.inode == inode) {
 			atomic_long_inc(&p->refs);
 			return p;
 		}
@@ -231,7 +231,7 @@ static void untag_chunk(struct node *p)
 	new = alloc_chunk(size);
 
 	spin_lock(&entry->lock);
-	if (chunk->dead || !entry->i.inode) {
+	if (chunk->dead || !entry->inode) {
 		spin_unlock(&entry->lock);
 		if (new)
 			free_chunk(new);
@@ -258,7 +258,7 @@ static void untag_chunk(struct node *p)
 		goto Fallback;
 
 	fsnotify_duplicate_mark(&new->mark, entry);
-	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
+	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, NULL, 1)) {
 		fsnotify_put_mark(&new->mark);
 		goto Fallback;
 	}
@@ -386,7 +386,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	chunk_entry = &chunk->mark;
 
 	spin_lock(&old_entry->lock);
-	if (!old_entry->i.inode) {
+	if (!old_entry->inode) {
 		/* old_entry is being shot, lets just lie */
 		spin_unlock(&old_entry->lock);
 		fsnotify_put_mark(old_entry);
@@ -395,7 +395,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	}
 
 	fsnotify_duplicate_mark(chunk_entry, old_entry);
-	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
+	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, NULL, 1)) {
 		spin_unlock(&old_entry->lock);
 		fsnotify_put_mark(chunk_entry);
 		fsnotify_put_mark(old_entry);
@@ -611,7 +611,7 @@ void audit_trim_trees(void)
 	list_for_each_entry(node, &tree->chunks, list) {
 		struct audit_chunk *chunk = find_chunk(node);
 		/* this could be NULL if the watch is dying else where... */
-		struct inode *inode = chunk->mark.i.inode;
+		struct inode *inode = chunk->mark.inode;
 		node->index |= 1U<<31;
 		if (iterate_mounts(compare_root, inode, root_mnt))
 			node->index &= ~(1U<<31);