Diffstat (limited to 'kernel')
-rw-r--r--	kernel/Makefile      |   1
-rw-r--r--	kernel/audit.c       |  87
-rw-r--r--	kernel/audit.h       |  34
-rw-r--r--	kernel/audit_tree.c  | 903
-rw-r--r--	kernel/auditfilter.c |  64
-rw-r--r--	kernel/auditsc.c     | 221
6 files changed, 1301 insertions, 9 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 79f017e09fbd..f60afe742599 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_IKCONFIG) += configs.o
 obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
 obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
 obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
+obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
 obj-$(CONFIG_KPROBES) += kprobes.o
 obj-$(CONFIG_SYSFS) += ksysfs.o
 obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
diff --git a/kernel/audit.c b/kernel/audit.c
index 6977ea57a7e2..f93c2713017d 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -468,6 +468,21 @@ int audit_send_list(void *_dest)
 	return 0;
 }
 
+#ifdef CONFIG_AUDIT_TREE
+static int prune_tree_thread(void *unused)
+{
+	mutex_lock(&audit_cmd_mutex);
+	audit_prune_trees();
+	mutex_unlock(&audit_cmd_mutex);
+	return 0;
+}
+
+void audit_schedule_prune(void)
+{
+	kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
+}
+#endif
+
 struct sk_buff *audit_make_reply(int pid, int seq, int type, int done,
 				 int multi, void *payload, int size)
 {
@@ -540,6 +555,8 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
 	case AUDIT_SIGNAL_INFO:
 	case AUDIT_TTY_GET:
 	case AUDIT_TTY_SET:
+	case AUDIT_TRIM:
+	case AUDIT_MAKE_EQUIV:
 		if (security_netlink_recv(skb, CAP_AUDIT_CONTROL))
 			err = -EPERM;
 		break;
@@ -756,6 +773,76 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 					   uid, seq, data, nlmsg_len(nlh),
 					   loginuid, sid);
 		break;
+	case AUDIT_TRIM:
+		audit_trim_trees();
+		ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+		if (!ab)
+			break;
+		audit_log_format(ab, "auid=%u", loginuid);
+		if (sid) {
+			u32 len;
+			ctx = NULL;
+			if (selinux_sid_to_string(sid, &ctx, &len))
+				audit_log_format(ab, " ssid=%u", sid);
+			else
+				audit_log_format(ab, " subj=%s", ctx);
+			kfree(ctx);
+		}
+		audit_log_format(ab, " op=trim res=1");
+		audit_log_end(ab);
+		break;
+	case AUDIT_MAKE_EQUIV: {
+		void *bufp = data;
+		u32 sizes[2];
+		size_t len = nlmsg_len(nlh);
+		char *old, *new;
+
+		err = -EINVAL;
+		if (len < 2 * sizeof(u32))
+			break;
+		memcpy(sizes, bufp, 2 * sizeof(u32));
+		bufp += 2 * sizeof(u32);
+		len -= 2 * sizeof(u32);
+		old = audit_unpack_string(&bufp, &len, sizes[0]);
+		if (IS_ERR(old)) {
+			err = PTR_ERR(old);
+			break;
+		}
+		new = audit_unpack_string(&bufp, &len, sizes[1]);
+		if (IS_ERR(new)) {
+			err = PTR_ERR(new);
+			kfree(old);
+			break;
+		}
+		/* OK, here comes... */
+		err = audit_tag_tree(old, new);
+
+		ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+		if (!ab) {
+			kfree(old);
+			kfree(new);
+			break;
+		}
+		audit_log_format(ab, "auid=%u", loginuid);
+		if (sid) {
+			u32 len;
+			ctx = NULL;
+			if (selinux_sid_to_string(sid, &ctx, &len))
+				audit_log_format(ab, " ssid=%u", sid);
+			else
+				audit_log_format(ab, " subj=%s", ctx);
+			kfree(ctx);
+		}
+		audit_log_format(ab, " op=make_equiv old=");
+		audit_log_untrustedstring(ab, old);
+		audit_log_format(ab, " new=");
+		audit_log_untrustedstring(ab, new);
+		audit_log_format(ab, " res=%d", !err);
+		audit_log_end(ab);
+		kfree(old);
+		kfree(new);
+		break;
+	}
 	case AUDIT_SIGNAL_INFO:
 		err = selinux_sid_to_string(audit_sig_sid, &ctx, &len);
 		if (err)
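
For orientation on the AUDIT_MAKE_EQUIV handler above: the payload it unpacks is two u32 lengths followed by the two unterminated path strings, which is exactly what the audit_unpack_string() calls consume. A minimal userspace sketch of building that payload, with the layout inferred from the kernel side above (netlink socket setup and message framing omitted):

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	/* Illustrative only: two u32 string lengths, then the two
	 * pathnames back to back with no NUL terminators, matching
	 * what audit_unpack_string() expects on the kernel side. */
	static void *make_equiv_payload(const char *old, const char *new,
					size_t *lenp)
	{
		uint32_t sizes[2] = { strlen(old), strlen(new) };
		size_t len = sizeof(sizes) + sizes[0] + sizes[1];
		char *buf = malloc(len);

		if (!buf)
			return NULL;
		memcpy(buf, sizes, sizeof(sizes));
		memcpy(buf + sizeof(sizes), old, sizes[0]);
		memcpy(buf + sizeof(sizes) + sizes[0], new, sizes[1]);
		*lenp = len;
		return buf;
	}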
diff --git a/kernel/audit.h b/kernel/audit.h
index 95877435c347..2554bd524fd1 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -73,6 +73,9 @@ struct audit_field {
 	struct selinux_audit_rule	*se_rule;
 };
 
+struct audit_tree;
+struct audit_chunk;
+
 struct audit_krule {
 	int			vers_ops;
 	u32			flags;
@@ -86,7 +89,8 @@ struct audit_krule {
 	struct audit_field	*arch_f; /* quick access to arch field */
 	struct audit_field	*inode_f; /* quick access to an inode field */
 	struct audit_watch	*watch;	/* associated watch */
-	struct list_head	rlist;	/* entry in audit_watch.rules list */
+	struct audit_tree	*tree;	/* associated watched tree */
+	struct list_head	rlist;	/* entry in audit_{watch,tree}.rules list */
 };
 
 struct audit_entry {
@@ -130,6 +134,34 @@ extern void audit_handle_ievent(struct inotify_watch *, u32, u32, u32,
 				const char *, struct inode *);
 extern int selinux_audit_rule_update(void);
 
+extern struct mutex audit_filter_mutex;
+extern void audit_free_rule_rcu(struct rcu_head *);
+
+#ifdef CONFIG_AUDIT_TREE
+extern struct audit_chunk *audit_tree_lookup(const struct inode *);
+extern void audit_put_chunk(struct audit_chunk *);
+extern int audit_tree_match(struct audit_chunk *, struct audit_tree *);
+extern int audit_make_tree(struct audit_krule *, char *, u32);
+extern int audit_add_tree_rule(struct audit_krule *);
+extern int audit_remove_tree_rule(struct audit_krule *);
+extern void audit_trim_trees(void);
+extern int audit_tag_tree(char *old, char *new);
+extern void audit_schedule_prune(void);
+extern void audit_prune_trees(void);
+extern const char *audit_tree_path(struct audit_tree *);
+extern void audit_put_tree(struct audit_tree *);
+#else
+#define audit_remove_tree_rule(rule) BUG()
+#define audit_add_tree_rule(rule) -EINVAL
+#define audit_make_tree(rule, str, op) -EINVAL
+#define audit_trim_trees() (void)0
+#define audit_put_tree(tree) (void)0
+#define audit_tag_tree(old, new) -EINVAL
+#define audit_tree_path(rule) ""	/* never called */
+#endif
+
+extern char *audit_unpack_string(void **, size_t *, size_t);
+
 #ifdef CONFIG_AUDITSYSCALL
 extern int __audit_signal_info(int sig, struct task_struct *t);
 static inline int audit_signal_info(int sig, struct task_struct *t)
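
The #else branch above replaces each tree helper with an expression- or statement-shaped macro, so call sites compile unchanged when CONFIG_AUDIT_TREE is off. A userspace model of that pattern, with hypothetical names (FEATURE_ON and feature_add_rule are illustrative, not from the patch):

	#include <errno.h>
	#include <stdio.h>

	/* Model of the stub pattern: with the feature compiled out the
	 * helper becomes a constant expression, so callers need no
	 * #ifdef of their own. */
	#ifdef FEATURE_ON
	static int feature_add_rule(int *rule) { return 0; /* real work */ }
	#else
	#define feature_add_rule(rule) (-EINVAL)
	#endif

	int main(void)
	{
		int rule = 0;
		int err = feature_add_rule(&rule); /* -EINVAL when compiled out */
		printf("%d\n", err);
		return 0;
	}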
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
new file mode 100644
index 000000000000..f4fcf58f20f8
--- /dev/null
+++ b/kernel/audit_tree.c
@@ -0,0 +1,903 @@
+#include "audit.h"
+#include <linux/inotify.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+
+struct audit_tree;
+struct audit_chunk;
+
+struct audit_tree {
+	atomic_t count;
+	int goner;
+	struct audit_chunk *root;
+	struct list_head chunks;
+	struct list_head rules;
+	struct list_head list;
+	struct list_head same_root;
+	struct rcu_head head;
+	char pathname[];
+};
+
+struct audit_chunk {
+	struct list_head hash;
+	struct inotify_watch watch;
+	struct list_head trees;		/* with root here */
+	int dead;
+	int count;
+	struct rcu_head head;
+	struct node {
+		struct list_head list;
+		struct audit_tree *owner;
+		unsigned index;		/* index; upper bit indicates 'will prune' */
+	} owners[];
+};
+
+static LIST_HEAD(tree_list);
+static LIST_HEAD(prune_list);
+
+/*
+ * One struct chunk is attached to each inode of interest.
+ * We replace struct chunk on tagging/untagging.
+ * Rules have pointer to struct audit_tree.
+ * Rules have struct list_head rlist forming a list of rules over
+ * the same tree.
+ * References to struct chunk are collected at audit_inode{,_child}()
+ * time and used in AUDIT_TREE rule matching.
+ * These references are dropped at the same time we are calling
+ * audit_free_names(), etc.
+ *
+ * Cyclic lists galore:
+ * tree.chunks anchors chunk.owners[].list		hash_lock
+ * tree.rules anchors rule.rlist			audit_filter_mutex
+ * chunk.trees anchors tree.same_root			hash_lock
+ * chunk.hash is a hash with middle bits of watch.inode as
+ * a hash function.					RCU, hash_lock
+ *
+ * tree is refcounted; one reference for "some rules on rules_list refer to
+ * it", one for each chunk with pointer to it.
+ *
+ * chunk is refcounted by embedded inotify_watch.
+ *
+ * node.index allows to get from node.list to containing chunk.
+ * MSB of that sucker is stolen to mark taggings that we might have to
+ * revert - several operations have very unpleasant cleanup logics and
+ * that makes a difference.  Some.
+ */
+
+static struct inotify_handle *rtree_ih;
+
+static struct audit_tree *alloc_tree(const char *s)
+{
+	struct audit_tree *tree;
+
+	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
+	if (tree) {
+		atomic_set(&tree->count, 1);
+		tree->goner = 0;
+		INIT_LIST_HEAD(&tree->chunks);
+		INIT_LIST_HEAD(&tree->rules);
+		INIT_LIST_HEAD(&tree->list);
+		INIT_LIST_HEAD(&tree->same_root);
+		tree->root = NULL;
+		strcpy(tree->pathname, s);
+	}
+	return tree;
+}
+
+static inline void get_tree(struct audit_tree *tree)
+{
+	atomic_inc(&tree->count);
+}
+
+static void __put_tree(struct rcu_head *rcu)
+{
+	struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
+	kfree(tree);
+}
+
+static inline void put_tree(struct audit_tree *tree)
+{
+	if (atomic_dec_and_test(&tree->count))
+		call_rcu(&tree->head, __put_tree);
+}
+
+/* to avoid bringing the entire thing in audit.h */
+const char *audit_tree_path(struct audit_tree *tree)
+{
+	return tree->pathname;
+}
+
+static struct audit_chunk *alloc_chunk(int count)
+{
+	struct audit_chunk *chunk;
+	size_t size;
+	int i;
+
+	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
+	chunk = kzalloc(size, GFP_KERNEL);
+	if (!chunk)
+		return NULL;
+
+	INIT_LIST_HEAD(&chunk->hash);
+	INIT_LIST_HEAD(&chunk->trees);
+	chunk->count = count;
+	for (i = 0; i < count; i++) {
+		INIT_LIST_HEAD(&chunk->owners[i].list);
+		chunk->owners[i].index = i;
+	}
+	inotify_init_watch(&chunk->watch);
+	return chunk;
+}
+
+static void __free_chunk(struct rcu_head *rcu)
+{
+	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
+	int i;
+
+	for (i = 0; i < chunk->count; i++) {
+		if (chunk->owners[i].owner)
+			put_tree(chunk->owners[i].owner);
+	}
+	kfree(chunk);
+}
+
+static inline void free_chunk(struct audit_chunk *chunk)
+{
+	call_rcu(&chunk->head, __free_chunk);
+}
+
+void audit_put_chunk(struct audit_chunk *chunk)
+{
+	put_inotify_watch(&chunk->watch);
+}
+
+enum {HASH_SIZE = 128};
+static struct list_head chunk_hash_heads[HASH_SIZE];
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
+
+static inline struct list_head *chunk_hash(const struct inode *inode)
+{
+	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
+	return chunk_hash_heads + n % HASH_SIZE;
+}
+
+/* hash_lock is held by caller */
+static void insert_hash(struct audit_chunk *chunk)
+{
+	struct list_head *list = chunk_hash(chunk->watch.inode);
+	list_add_rcu(&chunk->hash, list);
+}
+
+/* called under rcu_read_lock */
+struct audit_chunk *audit_tree_lookup(const struct inode *inode)
+{
+	struct list_head *list = chunk_hash(inode);
+	struct list_head *pos;
+
+	list_for_each_rcu(pos, list) {
+		struct audit_chunk *p = container_of(pos, struct audit_chunk, hash);
+		if (p->watch.inode == inode) {
+			get_inotify_watch(&p->watch);
+			return p;
+		}
+	}
+	return NULL;
+}
+
+int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
+{
+	int n;
+	for (n = 0; n < chunk->count; n++)
+		if (chunk->owners[n].owner == tree)
+			return 1;
+	return 0;
+}
+
+/* tagging and untagging inodes with trees */
+
+static void untag_chunk(struct audit_chunk *chunk, struct node *p)
+{
+	struct audit_chunk *new;
+	struct audit_tree *owner;
+	int size = chunk->count - 1;
+	int i, j;
+
+	mutex_lock(&chunk->watch.inode->inotify_mutex);
+	if (chunk->dead) {
+		mutex_unlock(&chunk->watch.inode->inotify_mutex);
+		return;
+	}
+
+	owner = p->owner;
+
+	if (!size) {
+		chunk->dead = 1;
+		spin_lock(&hash_lock);
+		list_del_init(&chunk->trees);
+		if (owner->root == chunk)
+			owner->root = NULL;
+		list_del_init(&p->list);
+		list_del_rcu(&chunk->hash);
+		spin_unlock(&hash_lock);
+		inotify_evict_watch(&chunk->watch);
+		mutex_unlock(&chunk->watch.inode->inotify_mutex);
+		put_inotify_watch(&chunk->watch);
+		return;
+	}
+
+	new = alloc_chunk(size);
+	if (!new)
+		goto Fallback;
+	if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
+		free_chunk(new);
+		goto Fallback;
+	}
+
+	chunk->dead = 1;
+	spin_lock(&hash_lock);
+	list_replace_init(&chunk->trees, &new->trees);
+	if (owner->root == chunk) {
+		list_del_init(&owner->same_root);
+		owner->root = NULL;
+	}
+
+	for (i = j = 0; i < size; i++, j++) {
+		struct audit_tree *s;
+		if (&chunk->owners[j] == p) {
+			list_del_init(&p->list);
+			i--;
+			continue;
+		}
+		s = chunk->owners[j].owner;
+		new->owners[i].owner = s;
+		new->owners[i].index = chunk->owners[j].index - j + i;
+		if (!s) /* result of earlier fallback */
+			continue;
+		get_tree(s);
+		list_replace_init(&chunk->owners[i].list, &new->owners[j].list);
+	}
+
+	list_replace_rcu(&chunk->hash, &new->hash);
+	list_for_each_entry(owner, &new->trees, same_root)
+		owner->root = new;
+	spin_unlock(&hash_lock);
+	inotify_evict_watch(&chunk->watch);
+	mutex_unlock(&chunk->watch.inode->inotify_mutex);
+	put_inotify_watch(&chunk->watch);
+	return;
+
+Fallback:
+	// do the best we can
+	spin_lock(&hash_lock);
+	if (owner->root == chunk) {
+		list_del_init(&owner->same_root);
+		owner->root = NULL;
+	}
+	list_del_init(&p->list);
+	p->owner = NULL;
+	put_tree(owner);
+	spin_unlock(&hash_lock);
+	mutex_unlock(&chunk->watch.inode->inotify_mutex);
+}
+
+static int create_chunk(struct inode *inode, struct audit_tree *tree)
+{
+	struct audit_chunk *chunk = alloc_chunk(1);
+	if (!chunk)
+		return -ENOMEM;
+
+	if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
+		free_chunk(chunk);
+		return -ENOSPC;
+	}
+
+	mutex_lock(&inode->inotify_mutex);
+	spin_lock(&hash_lock);
+	if (tree->goner) {
+		spin_unlock(&hash_lock);
+		chunk->dead = 1;
+		inotify_evict_watch(&chunk->watch);
+		mutex_unlock(&inode->inotify_mutex);
+		put_inotify_watch(&chunk->watch);
+		return 0;
+	}
+	chunk->owners[0].index = (1U << 31);
+	chunk->owners[0].owner = tree;
+	get_tree(tree);
+	list_add(&chunk->owners[0].list, &tree->chunks);
+	if (!tree->root) {
+		tree->root = chunk;
+		list_add(&tree->same_root, &chunk->trees);
+	}
+	insert_hash(chunk);
+	spin_unlock(&hash_lock);
+	mutex_unlock(&inode->inotify_mutex);
+	return 0;
+}
+
+/* the first tagged inode becomes root of tree */
+static int tag_chunk(struct inode *inode, struct audit_tree *tree)
+{
+	struct inotify_watch *watch;
+	struct audit_tree *owner;
+	struct audit_chunk *chunk, *old;
+	struct node *p;
+	int n;
+
+	if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
+		return create_chunk(inode, tree);
+
+	old = container_of(watch, struct audit_chunk, watch);
+
+	/* are we already there? */
+	spin_lock(&hash_lock);
+	for (n = 0; n < old->count; n++) {
+		if (old->owners[n].owner == tree) {
+			spin_unlock(&hash_lock);
+			put_inotify_watch(watch);
+			return 0;
+		}
+	}
+	spin_unlock(&hash_lock);
+
+	chunk = alloc_chunk(old->count + 1);
+	if (!chunk)
+		return -ENOMEM;
+
+	mutex_lock(&inode->inotify_mutex);
+	if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
+		mutex_unlock(&inode->inotify_mutex);
+		free_chunk(chunk);
+		return -ENOSPC;
+	}
+	spin_lock(&hash_lock);
+	if (tree->goner) {
+		spin_unlock(&hash_lock);
+		chunk->dead = 1;
+		inotify_evict_watch(&chunk->watch);
+		mutex_unlock(&inode->inotify_mutex);
+		put_inotify_watch(&chunk->watch);
+		return 0;
+	}
+	list_replace_init(&old->trees, &chunk->trees);
+	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
+		struct audit_tree *s = old->owners[n].owner;
+		p->owner = s;
+		p->index = old->owners[n].index;
+		if (!s) /* result of fallback in untag */
+			continue;
+		get_tree(s);
+		list_replace_init(&old->owners[n].list, &p->list);
+	}
+	p->index = (chunk->count - 1) | (1U<<31);
+	p->owner = tree;
+	get_tree(tree);
+	list_add(&p->list, &tree->chunks);
+	list_replace_rcu(&old->hash, &chunk->hash);
+	list_for_each_entry(owner, &chunk->trees, same_root)
+		owner->root = chunk;
+	old->dead = 1;
+	if (!tree->root) {
+		tree->root = chunk;
+		list_add(&tree->same_root, &chunk->trees);
+	}
+	spin_unlock(&hash_lock);
+	inotify_evict_watch(&old->watch);
+	mutex_unlock(&inode->inotify_mutex);
+	put_inotify_watch(&old->watch);
+	return 0;
+}
+
+static struct audit_chunk *find_chunk(struct node *p)
+{
+	int index = p->index & ~(1U<<31);
+	p -= index;
+	return container_of(p, struct audit_chunk, owners[0]);
+}
+
+static void kill_rules(struct audit_tree *tree)
+{
+	struct audit_krule *rule, *next;
+	struct audit_entry *entry;
+	struct audit_buffer *ab;
+
+	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
+		entry = container_of(rule, struct audit_entry, rule);
+
+		list_del_init(&rule->rlist);
+		if (rule->tree) {
+			/* not a half-baked one */
+			ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+			audit_log_format(ab, "op=remove rule dir=");
+			audit_log_untrustedstring(ab, rule->tree->pathname);
+			if (rule->filterkey) {
+				audit_log_format(ab, " key=");
+				audit_log_untrustedstring(ab, rule->filterkey);
+			} else
+				audit_log_format(ab, " key=(null)");
+			audit_log_format(ab, " list=%d res=1", rule->listnr);
+			audit_log_end(ab);
+			rule->tree = NULL;
+			list_del_rcu(&entry->list);
+			call_rcu(&entry->rcu, audit_free_rule_rcu);
+		}
+	}
+}
+
+/*
+ * finish killing struct audit_tree
+ */
+static void prune_one(struct audit_tree *victim)
+{
+	spin_lock(&hash_lock);
+	while (!list_empty(&victim->chunks)) {
+		struct node *p;
+		struct audit_chunk *chunk;
+
+		p = list_entry(victim->chunks.next, struct node, list);
+		chunk = find_chunk(p);
+		get_inotify_watch(&chunk->watch);
+		spin_unlock(&hash_lock);
+
+		untag_chunk(chunk, p);
+
+		put_inotify_watch(&chunk->watch);
+		spin_lock(&hash_lock);
+	}
+	spin_unlock(&hash_lock);
+	put_tree(victim);
+}
+
+/* trim the uncommitted chunks from tree */
+
+static void trim_marked(struct audit_tree *tree)
+{
+	struct list_head *p, *q;
+	spin_lock(&hash_lock);
+	if (tree->goner) {
+		spin_unlock(&hash_lock);
+		return;
+	}
+	/* reorder */
+	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
+		struct node *node = list_entry(p, struct node, list);
+		q = p->next;
+		if (node->index & (1U<<31)) {
+			list_del_init(p);
+			list_add(p, &tree->chunks);
+		}
+	}
+
+	while (!list_empty(&tree->chunks)) {
+		struct node *node;
+		struct audit_chunk *chunk;
+
+		node = list_entry(tree->chunks.next, struct node, list);
+
+		/* have we run out of marked? */
+		if (!(node->index & (1U<<31)))
+			break;
+
+		chunk = find_chunk(node);
+		get_inotify_watch(&chunk->watch);
+		spin_unlock(&hash_lock);
+
+		untag_chunk(chunk, node);
+
+		put_inotify_watch(&chunk->watch);
+		spin_lock(&hash_lock);
+	}
+	if (!tree->root && !tree->goner) {
+		tree->goner = 1;
+		spin_unlock(&hash_lock);
+		mutex_lock(&audit_filter_mutex);
+		kill_rules(tree);
+		list_del_init(&tree->list);
+		mutex_unlock(&audit_filter_mutex);
+		prune_one(tree);
+	} else {
+		spin_unlock(&hash_lock);
+	}
+}
+
+/* called with audit_filter_mutex */
+int audit_remove_tree_rule(struct audit_krule *rule)
+{
+	struct audit_tree *tree;
+	tree = rule->tree;
+	if (tree) {
+		spin_lock(&hash_lock);
+		list_del_init(&rule->rlist);
+		if (list_empty(&tree->rules) && !tree->goner) {
+			tree->root = NULL;
+			list_del_init(&tree->same_root);
+			tree->goner = 1;
+			list_move(&tree->list, &prune_list);
+			rule->tree = NULL;
+			spin_unlock(&hash_lock);
+			audit_schedule_prune();
+			return 1;
+		}
+		rule->tree = NULL;
+		spin_unlock(&hash_lock);
+		return 1;
+	}
+	return 0;
+}
+
+void audit_trim_trees(void)
+{
+	struct list_head cursor;
+
+	mutex_lock(&audit_filter_mutex);
+	list_add(&cursor, &tree_list);
+	while (cursor.next != &tree_list) {
+		struct audit_tree *tree;
+		struct nameidata nd;
+		struct vfsmount *root_mnt;
+		struct node *node;
+		struct list_head list;
+		int err;
+
+		tree = container_of(cursor.next, struct audit_tree, list);
+		get_tree(tree);
+		list_del(&cursor);
+		list_add(&cursor, &tree->list);
+		mutex_unlock(&audit_filter_mutex);
+
+		err = path_lookup(tree->pathname, 0, &nd);
+		if (err)
+			goto skip_it;
+
+		root_mnt = collect_mounts(nd.mnt, nd.dentry);
+		path_release(&nd);
+		if (!root_mnt)
+			goto skip_it;
+
+		list_add_tail(&list, &root_mnt->mnt_list);
+		spin_lock(&hash_lock);
+		list_for_each_entry(node, &tree->chunks, list) {
+			struct audit_chunk *chunk = find_chunk(node);
+			struct inode *inode = chunk->watch.inode;
+			struct vfsmount *mnt;
+			node->index |= 1U<<31;
+			list_for_each_entry(mnt, &list, mnt_list) {
+				if (mnt->mnt_root->d_inode == inode) {
+					node->index &= ~(1U<<31);
+					break;
+				}
+			}
+		}
+		spin_unlock(&hash_lock);
+		trim_marked(tree);
+		put_tree(tree);
+		list_del_init(&list);
+		drop_collected_mounts(root_mnt);
+skip_it:
+		mutex_lock(&audit_filter_mutex);
+	}
+	list_del(&cursor);
+	mutex_unlock(&audit_filter_mutex);
+}
+
+static int is_under(struct vfsmount *mnt, struct dentry *dentry,
+		    struct nameidata *nd)
+{
+	if (mnt != nd->mnt) {
+		for (;;) {
+			if (mnt->mnt_parent == mnt)
+				return 0;
+			if (mnt->mnt_parent == nd->mnt)
+				break;
+			mnt = mnt->mnt_parent;
+		}
+		dentry = mnt->mnt_mountpoint;
+	}
+	return is_subdir(dentry, nd->dentry);
+}
+
+int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
+{
+
+	if (pathname[0] != '/' ||
+	    rule->listnr != AUDIT_FILTER_EXIT ||
+	    op & ~AUDIT_EQUAL ||
+	    rule->inode_f || rule->watch || rule->tree)
+		return -EINVAL;
+	rule->tree = alloc_tree(pathname);
+	if (!rule->tree)
+		return -ENOMEM;
+	return 0;
+}
+
+void audit_put_tree(struct audit_tree *tree)
+{
+	put_tree(tree);
+}
+
+/* called with audit_filter_mutex */
+int audit_add_tree_rule(struct audit_krule *rule)
+{
+	struct audit_tree *seed = rule->tree, *tree;
+	struct nameidata nd;
+	struct vfsmount *mnt, *p;
+	struct list_head list;
+	int err;
+
+	list_for_each_entry(tree, &tree_list, list) {
+		if (!strcmp(seed->pathname, tree->pathname)) {
+			put_tree(seed);
+			rule->tree = tree;
+			list_add(&rule->rlist, &tree->rules);
+			return 0;
+		}
+	}
+	tree = seed;
+	list_add(&tree->list, &tree_list);
+	list_add(&rule->rlist, &tree->rules);
+	/* do not set rule->tree yet */
+	mutex_unlock(&audit_filter_mutex);
+
+	err = path_lookup(tree->pathname, 0, &nd);
+	if (err)
+		goto Err;
+	mnt = collect_mounts(nd.mnt, nd.dentry);
+	path_release(&nd);
+	if (!mnt) {
+		err = -ENOMEM;
+		goto Err;
+	}
+	list_add_tail(&list, &mnt->mnt_list);
+
+	get_tree(tree);
+	list_for_each_entry(p, &list, mnt_list) {
+		err = tag_chunk(p->mnt_root->d_inode, tree);
+		if (err)
+			break;
+	}
+
+	list_del(&list);
+	drop_collected_mounts(mnt);
+
+	if (!err) {
+		struct node *node;
+		spin_lock(&hash_lock);
+		list_for_each_entry(node, &tree->chunks, list)
+			node->index &= ~(1U<<31);
+		spin_unlock(&hash_lock);
+	} else {
+		trim_marked(tree);
+		goto Err;
+	}
+
+	mutex_lock(&audit_filter_mutex);
+	if (list_empty(&rule->rlist)) {
+		put_tree(tree);
+		return -ENOENT;
+	}
+	rule->tree = tree;
+	put_tree(tree);
+
+	return 0;
+Err:
+	mutex_lock(&audit_filter_mutex);
+	list_del_init(&tree->list);
+	list_del_init(&tree->rules);
+	put_tree(tree);
+	return err;
+}
+
+int audit_tag_tree(char *old, char *new)
+{
+	struct list_head cursor, barrier;
+	int failed = 0;
+	struct nameidata nd;
+	struct vfsmount *tagged;
+	struct list_head list;
+	struct vfsmount *mnt;
+	struct dentry *dentry;
+	int err;
+
+	err = path_lookup(new, 0, &nd);
+	if (err)
+		return err;
+	tagged = collect_mounts(nd.mnt, nd.dentry);
+	path_release(&nd);
+	if (!tagged)
+		return -ENOMEM;
+
+	err = path_lookup(old, 0, &nd);
+	if (err) {
+		drop_collected_mounts(tagged);
+		return err;
+	}
+	mnt = mntget(nd.mnt);
+	dentry = dget(nd.dentry);
+	path_release(&nd);
+
+	if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
+		follow_up(&mnt, &dentry);
+
+	list_add_tail(&list, &tagged->mnt_list);
+
+	mutex_lock(&audit_filter_mutex);
+	list_add(&barrier, &tree_list);
+	list_add(&cursor, &barrier);
+
+	while (cursor.next != &tree_list) {
+		struct audit_tree *tree;
+		struct vfsmount *p;
+
+		tree = container_of(cursor.next, struct audit_tree, list);
+		get_tree(tree);
+		list_del(&cursor);
+		list_add(&cursor, &tree->list);
+		mutex_unlock(&audit_filter_mutex);
+
+		err = path_lookup(tree->pathname, 0, &nd);
+		if (err) {
+			put_tree(tree);
+			mutex_lock(&audit_filter_mutex);
+			continue;
+		}
+
+		spin_lock(&vfsmount_lock);
+		if (!is_under(mnt, dentry, &nd)) {
+			spin_unlock(&vfsmount_lock);
+			path_release(&nd);
+			put_tree(tree);
+			mutex_lock(&audit_filter_mutex);
+			continue;
+		}
+		spin_unlock(&vfsmount_lock);
+		path_release(&nd);
+
+		list_for_each_entry(p, &list, mnt_list) {
+			failed = tag_chunk(p->mnt_root->d_inode, tree);
+			if (failed)
+				break;
+		}
+
+		if (failed) {
+			put_tree(tree);
+			mutex_lock(&audit_filter_mutex);
+			break;
+		}
+
+		mutex_lock(&audit_filter_mutex);
+		spin_lock(&hash_lock);
+		if (!tree->goner) {
+			list_del(&tree->list);
+			list_add(&tree->list, &tree_list);
+		}
+		spin_unlock(&hash_lock);
+		put_tree(tree);
+	}
+
+	while (barrier.prev != &tree_list) {
+		struct audit_tree *tree;
+
+		tree = container_of(barrier.prev, struct audit_tree, list);
+		get_tree(tree);
+		list_del(&tree->list);
+		list_add(&tree->list, &barrier);
+		mutex_unlock(&audit_filter_mutex);
+
+		if (!failed) {
+			struct node *node;
+			spin_lock(&hash_lock);
+			list_for_each_entry(node, &tree->chunks, list)
+				node->index &= ~(1U<<31);
+			spin_unlock(&hash_lock);
+		} else {
+			trim_marked(tree);
+		}
+
+		put_tree(tree);
+		mutex_lock(&audit_filter_mutex);
+	}
+	list_del(&barrier);
+	list_del(&cursor);
+	list_del(&list);
+	mutex_unlock(&audit_filter_mutex);
+	dput(dentry);
+	mntput(mnt);
+	drop_collected_mounts(tagged);
+	return failed;
+}
+
+/*
+ * That gets run when evict_chunk() ends up needing to kill audit_tree.
+ * Runs from a separate thread, with audit_cmd_mutex held.
+ */
+void audit_prune_trees(void)
+{
+	mutex_lock(&audit_filter_mutex);
+
+	while (!list_empty(&prune_list)) {
+		struct audit_tree *victim;
+
+		victim = list_entry(prune_list.next, struct audit_tree, list);
+		list_del_init(&victim->list);
+
+		mutex_unlock(&audit_filter_mutex);
+
+		prune_one(victim);
+
+		mutex_lock(&audit_filter_mutex);
+	}
+
+	mutex_unlock(&audit_filter_mutex);
+}
+
+/*
+ * Here comes the stuff asynchronous to auditctl operations
+ */
+
+/* inode->inotify_mutex is locked */
+static void evict_chunk(struct audit_chunk *chunk)
+{
+	struct audit_tree *owner;
+	int n;
+
+	if (chunk->dead)
+		return;
+
+	chunk->dead = 1;
+	mutex_lock(&audit_filter_mutex);
+	spin_lock(&hash_lock);
+	while (!list_empty(&chunk->trees)) {
+		owner = list_entry(chunk->trees.next,
+				   struct audit_tree, same_root);
+		owner->goner = 1;
+		owner->root = NULL;
+		list_del_init(&owner->same_root);
+		spin_unlock(&hash_lock);
+		kill_rules(owner);
+		list_move(&owner->list, &prune_list);
+		audit_schedule_prune();
+		spin_lock(&hash_lock);
+	}
+	list_del_rcu(&chunk->hash);
+	for (n = 0; n < chunk->count; n++)
+		list_del_init(&chunk->owners[n].list);
+	spin_unlock(&hash_lock);
+	mutex_unlock(&audit_filter_mutex);
+}
+
+static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
+			 u32 cookie, const char *dname, struct inode *inode)
+{
+	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
+
+	if (mask & IN_IGNORED) {
+		evict_chunk(chunk);
+		put_inotify_watch(watch);
+	}
+}
+
+static void destroy_watch(struct inotify_watch *watch)
+{
+	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
+	free_chunk(chunk);
+}
+
+static const struct inotify_operations rtree_inotify_ops = {
+	.handle_event = handle_event,
+	.destroy_watch = destroy_watch,
+};
+
+static int __init audit_tree_init(void)
+{
+	int i;
+
+	rtree_ih = inotify_init(&rtree_inotify_ops);
+	if (IS_ERR(rtree_ih))
+		audit_panic("cannot initialize inotify handle for rectree watches");
+
+	for (i = 0; i < HASH_SIZE; i++)
+		INIT_LIST_HEAD(&chunk_hash_heads[i]);
+
+	return 0;
+}
+__initcall(audit_tree_init);
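
A worked model of find_chunk() above may help: node.index keeps the slot number in its low bits (the MSB is the "may need pruning" mark), so masking the MSB off and stepping the node pointer back by that many slots lands on owners[0], from which container_of-style arithmetic recovers the enclosing chunk. A standalone sketch with simplified types (fixed-size array instead of the kernel's flexible member; chunk refcounting and locking omitted):

	#include <stddef.h>
	#include <stdio.h>

	struct node { unsigned index; };
	struct chunk { int count; struct node owners[4]; };

	/* Mirrors the kernel's find_chunk(): strip the mark bit, step
	 * back to owners[0], subtract the offset of the array. */
	static struct chunk *find_chunk(struct node *p)
	{
		unsigned index = p->index & ~(1U << 31);
		p -= index;
		return (struct chunk *)((char *)p - offsetof(struct chunk, owners));
	}

	int main(void)
	{
		struct chunk c = { .count = 3 };
		for (int i = 0; i < 3; i++)
			c.owners[i].index = i;
		c.owners[1].index |= 1U << 31;	/* mark slot 1 "will prune" */
		printf("%d\n", find_chunk(&c.owners[1]) == &c);	/* prints 1 */
		return 0;
	}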
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index df66a21fb360..5d96f2cc7be8 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -87,7 +87,7 @@ struct list_head audit_filter_list[AUDIT_NR_FILTERS] = {
 #endif
 };
 
-static DEFINE_MUTEX(audit_filter_mutex);
+DEFINE_MUTEX(audit_filter_mutex);
 
 /* Inotify handle */
 extern struct inotify_handle *audit_ih;
@@ -145,7 +145,7 @@ static inline void audit_free_rule(struct audit_entry *e)
 	kfree(e);
 }
 
-static inline void audit_free_rule_rcu(struct rcu_head *head)
+void audit_free_rule_rcu(struct rcu_head *head)
 {
 	struct audit_entry *e = container_of(head, struct audit_entry, rcu);
 	audit_free_rule(e);
@@ -217,7 +217,7 @@ static inline struct audit_entry *audit_init_entry(u32 field_count)
 
 /* Unpack a filter field's string representation from user-space
  * buffer. */
-static char *audit_unpack_string(void **bufp, size_t *remain, size_t len)
+char *audit_unpack_string(void **bufp, size_t *remain, size_t len)
 {
 	char *str;
 
@@ -247,7 +247,7 @@ static inline int audit_to_inode(struct audit_krule *krule,
 				 struct audit_field *f)
 {
 	if (krule->listnr != AUDIT_FILTER_EXIT ||
-	    krule->watch || krule->inode_f)
+	    krule->watch || krule->inode_f || krule->tree)
 		return -EINVAL;
 
 	krule->inode_f = f;
@@ -266,7 +266,7 @@ static int audit_to_watch(struct audit_krule *krule, char *path, int len,
 	if (path[0] != '/' || path[len-1] == '/' ||
 	    krule->listnr != AUDIT_FILTER_EXIT ||
 	    op & ~AUDIT_EQUAL ||
-	    krule->inode_f || krule->watch) /* 1 inode # per rule, for hash */
+	    krule->inode_f || krule->watch || krule->tree)
 		return -EINVAL;
 
 	watch = audit_init_watch(path);
@@ -622,6 +622,17 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
 				goto exit_free;
 			}
 			break;
+		case AUDIT_DIR:
+			str = audit_unpack_string(&bufp, &remain, f->val);
+			if (IS_ERR(str))
+				goto exit_free;
+			entry->rule.buflen += f->val;
+
+			err = audit_make_tree(&entry->rule, str, f->op);
+			kfree(str);
+			if (err)
+				goto exit_free;
+			break;
 		case AUDIT_INODE:
 			err = audit_to_inode(&entry->rule, f);
 			if (err)
@@ -668,7 +679,7 @@ exit_free:
 }
 
 /* Pack a filter field's string representation into data block. */
-static inline size_t audit_pack_string(void **bufp, char *str)
+static inline size_t audit_pack_string(void **bufp, const char *str)
 {
 	size_t len = strlen(str);
 
@@ -747,6 +758,11 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
 			data->buflen += data->values[i] =
 				audit_pack_string(&bufp, krule->watch->path);
 			break;
+		case AUDIT_DIR:
+			data->buflen += data->values[i] =
+				audit_pack_string(&bufp,
+						  audit_tree_path(krule->tree));
+			break;
 		case AUDIT_FILTERKEY:
 			data->buflen += data->values[i] =
 				audit_pack_string(&bufp, krule->filterkey);
@@ -795,6 +811,11 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)
 			if (strcmp(a->watch->path, b->watch->path))
 				return 1;
 			break;
+		case AUDIT_DIR:
+			if (strcmp(audit_tree_path(a->tree),
+				   audit_tree_path(b->tree)))
+				return 1;
+			break;
 		case AUDIT_FILTERKEY:
 			/* both filterkeys exist based on above type compare */
 			if (strcmp(a->filterkey, b->filterkey))
@@ -897,6 +918,14 @@ static struct audit_entry *audit_dupe_rule(struct audit_krule *old,
 	new->inode_f = old->inode_f;
 	new->watch = NULL;
 	new->field_count = old->field_count;
+	/*
+	 * note that we are OK with not refcounting here; audit_match_tree()
+	 * never dereferences tree and we can't get false positives there
+	 * since we'd have to have rule gone from the list *and* removed
+	 * before the chunks found by lookup had been allocated, i.e. before
+	 * the beginning of list scan.
+	 */
+	new->tree = old->tree;
 	memcpy(new->fields, old->fields, sizeof(struct audit_field) * fcount);
 
 	/* deep copy this information, updating the se_rule fields, because
@@ -1217,6 +1246,7 @@ static inline int audit_add_rule(struct audit_entry *entry,
 	struct audit_entry *e;
 	struct audit_field *inode_f = entry->rule.inode_f;
 	struct audit_watch *watch = entry->rule.watch;
+	struct audit_tree *tree = entry->rule.tree;
 	struct nameidata *ndp = NULL, *ndw = NULL;
 	int h, err;
 #ifdef CONFIG_AUDITSYSCALL
@@ -1238,6 +1268,9 @@ static inline int audit_add_rule(struct audit_entry *entry,
 	mutex_unlock(&audit_filter_mutex);
 	if (e) {
 		err = -EEXIST;
+		/* normally audit_add_tree_rule() will free it on failure */
+		if (tree)
+			audit_put_tree(tree);
 		goto error;
 	}
 
@@ -1259,6 +1292,13 @@ static inline int audit_add_rule(struct audit_entry *entry,
 		h = audit_hash_ino((u32)watch->ino);
 		list = &audit_inode_hash[h];
 	}
+	if (tree) {
+		err = audit_add_tree_rule(&entry->rule);
+		if (err) {
+			mutex_unlock(&audit_filter_mutex);
+			goto error;
+		}
+	}
 
 	if (entry->rule.flags & AUDIT_FILTER_PREPEND) {
 		list_add_rcu(&entry->list, list);
@@ -1292,6 +1332,7 @@ static inline int audit_del_rule(struct audit_entry *entry,
 	struct audit_entry *e;
 	struct audit_field *inode_f = entry->rule.inode_f;
 	struct audit_watch *watch, *tmp_watch = entry->rule.watch;
+	struct audit_tree *tree = entry->rule.tree;
 	LIST_HEAD(inotify_list);
 	int h, ret = 0;
 #ifdef CONFIG_AUDITSYSCALL
@@ -1336,6 +1377,9 @@ static inline int audit_del_rule(struct audit_entry *entry,
 		}
 	}
 
+	if (e->rule.tree)
+		audit_remove_tree_rule(&e->rule);
+
 	list_del_rcu(&e->list);
 	call_rcu(&e->rcu, audit_free_rule_rcu);
 
@@ -1354,6 +1398,8 @@ static inline int audit_del_rule(struct audit_entry *entry,
 out:
 	if (tmp_watch)
 		audit_put_watch(tmp_watch); /* match initial get */
+	if (tree)
+		audit_put_tree(tree); /* that's the temporary one */
 
 	return ret;
 }
@@ -1737,6 +1783,7 @@ int selinux_audit_rule_update(void)
 {
 	struct audit_entry *entry, *n, *nentry;
 	struct audit_watch *watch;
+	struct audit_tree *tree;
 	int i, err = 0;
 
 	/* audit_filter_mutex synchronizes the writers */
@@ -1748,6 +1795,7 @@ int selinux_audit_rule_update(void)
 			continue;
 
 		watch = entry->rule.watch;
+		tree = entry->rule.tree;
 		nentry = audit_dupe_rule(&entry->rule, watch);
 		if (unlikely(IS_ERR(nentry))) {
 			/* save the first error encountered for the
@@ -1763,7 +1811,9 @@ int selinux_audit_rule_update(void)
 				list_add(&nentry->rule.rlist,
 					 &watch->rules);
 				list_del(&entry->rule.rlist);
-			}
+			} else if (tree)
+				list_replace_init(&entry->rule.rlist,
+						  &nentry->rule.rlist);
 			list_replace_rcu(&entry->list, &nentry->list);
 		}
 		call_rcu(&entry->rcu, audit_free_rule_rcu);
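
The AUDIT_DIR cases above pack and unpack the directory path through the rule's shared string buffer, with the field's value holding the string length. Illustratively, userspace would build the matching audit_rule_data roughly as below (this assumes the AUDIT_DIR uapi constant this commit introduces outside the kernel/-limited diffstat; error handling, the remaining fields, and netlink transport are elided):

	#include <linux/audit.h>
	#include <stdlib.h>
	#include <string.h>

	/* Sketch only: one AUDIT_DIR field whose value is the path
	 * length, with the path appended to the shared buffer --
	 * the layout audit_unpack_string() consumes above. */
	static struct audit_rule_data *dir_rule(const char *path)
	{
		size_t len = strlen(path);
		struct audit_rule_data *d = calloc(1, sizeof(*d) + len);

		if (!d)
			return NULL;
		d->flags = AUDIT_FILTER_EXIT;
		d->action = AUDIT_ALWAYS;
		d->field_count = 1;
		d->fields[0] = AUDIT_DIR;
		d->fieldflags[0] = AUDIT_EQUAL;
		d->values[0] = len;
		d->buflen = len;
		memcpy(d->buf, path, len);
		return d;
	}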
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 8a85c203be12..80ecab0942ef 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -65,6 +65,7 @@
 #include <linux/binfmts.h>
 #include <linux/highmem.h>
 #include <linux/syscalls.h>
+#include <linux/inotify.h>
 
 #include "audit.h"
 
@@ -179,6 +180,11 @@ struct audit_aux_data_pids {
 	int			pid_count;
 };
 
+struct audit_tree_refs {
+	struct audit_tree_refs *next;
+	struct audit_chunk *c[31];
+};
+
 /* The per-task audit context. */
 struct audit_context {
 	int		    dummy;	/* must be the first element */
@@ -211,6 +217,9 @@ struct audit_context {
 	pid_t		    target_pid;
 	u32		    target_sid;
 
+	struct audit_tree_refs *trees, *first_trees;
+	int tree_count;
+
 #if AUDIT_DEBUG
 	int		    put_count;
 	int		    ino_count;
@@ -265,6 +274,117 @@ static int audit_match_perm(struct audit_context *ctx, int mask)
 	}
 }
 
+/*
+ * We keep a linked list of fixed-sized (31 pointer) arrays of audit_chunk *;
+ * ->first_trees points to its beginning, ->trees - to the current end of data.
+ * ->tree_count is the number of free entries in array pointed to by ->trees.
+ * Original condition is (NULL, NULL, 0); as soon as it grows we never revert to NULL,
+ * "empty" becomes (p, p, 31) afterwards.  We don't shrink the list (and seriously,
+ * it's going to remain 1-element for almost any setup) until we free context itself.
+ * References in it _are_ dropped - at the same time we free/drop aux stuff.
+ */
+
+#ifdef CONFIG_AUDIT_TREE
+static int put_tree_ref(struct audit_context *ctx, struct audit_chunk *chunk)
+{
+	struct audit_tree_refs *p = ctx->trees;
+	int left = ctx->tree_count;
+	if (likely(left)) {
+		p->c[--left] = chunk;
+		ctx->tree_count = left;
+		return 1;
+	}
+	if (!p)
+		return 0;
+	p = p->next;
+	if (p) {
+		p->c[30] = chunk;
+		ctx->trees = p;
+		ctx->tree_count = 30;
+		return 1;
+	}
+	return 0;
+}
+
+static int grow_tree_refs(struct audit_context *ctx)
+{
+	struct audit_tree_refs *p = ctx->trees;
+	ctx->trees = kzalloc(sizeof(struct audit_tree_refs), GFP_KERNEL);
+	if (!ctx->trees) {
+		ctx->trees = p;
+		return 0;
+	}
+	if (p)
+		p->next = ctx->trees;
+	else
+		ctx->first_trees = ctx->trees;
+	ctx->tree_count = 31;
+	return 1;
+}
+#endif
+
+static void unroll_tree_refs(struct audit_context *ctx,
+			     struct audit_tree_refs *p, int count)
+{
+#ifdef CONFIG_AUDIT_TREE
+	struct audit_tree_refs *q;
+	int n;
+	if (!p) {
+		/* we started with empty chain */
+		p = ctx->first_trees;
+		count = 31;
+		/* if the very first allocation has failed, nothing to do */
+		if (!p)
+			return;
+	}
+	n = count;
+	for (q = p; q != ctx->trees; q = q->next, n = 31) {
+		while (n--) {
+			audit_put_chunk(q->c[n]);
+			q->c[n] = NULL;
+		}
+	}
+	while (n-- > ctx->tree_count) {
+		audit_put_chunk(q->c[n]);
+		q->c[n] = NULL;
+	}
+	ctx->trees = p;
+	ctx->tree_count = count;
+#endif
+}
+
+static void free_tree_refs(struct audit_context *ctx)
+{
+	struct audit_tree_refs *p, *q;
+	for (p = ctx->first_trees; p; p = q) {
+		q = p->next;
+		kfree(p);
+	}
+}
+
+static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree)
+{
+#ifdef CONFIG_AUDIT_TREE
+	struct audit_tree_refs *p;
+	int n;
+	if (!tree)
+		return 0;
+	/* full ones */
+	for (p = ctx->first_trees; p != ctx->trees; p = p->next) {
+		for (n = 0; n < 31; n++)
+			if (audit_tree_match(p->c[n], tree))
+				return 1;
+	}
+	/* partial */
+	if (p) {
+		for (n = ctx->tree_count; n < 31; n++)
+			if (audit_tree_match(p->c[n], tree))
+				return 1;
+	}
+#endif
+	return 0;
+}
+
 /* Determine if any context name data matches a rule's watch data */
 /* Compare a task_struct with an audit_rule.  Return 1 on match, 0
  * otherwise. */
@@ -379,6 +499,10 @@ static int audit_filter_rules(struct task_struct *tsk,
 			result = (name->dev == rule->watch->dev &&
 				  name->ino == rule->watch->ino);
 			break;
+		case AUDIT_DIR:
+			if (ctx)
+				result = match_tree_refs(ctx, rule->tree);
+			break;
 		case AUDIT_LOGINUID:
 			result = 0;
 			if (ctx)
@@ -727,6 +851,8 @@ static inline void audit_free_context(struct audit_context *context)
 		       context->name_count, count);
 	}
 	audit_free_names(context);
+	unroll_tree_refs(context, NULL, 0);
+	free_tree_refs(context);
 	audit_free_aux(context);
 	kfree(context->filterkey);
 	kfree(context);
@@ -1270,6 +1396,7 @@ void audit_syscall_exit(int valid, long return_code)
 		tsk->audit_context = new_context;
 	} else {
 		audit_free_names(context);
+		unroll_tree_refs(context, NULL, 0);
 		audit_free_aux(context);
 		context->aux = NULL;
 		context->aux_pids = NULL;
@@ -1281,6 +1408,95 @@ void audit_syscall_exit(int valid, long return_code)
 	}
 }
 
+static inline void handle_one(const struct inode *inode)
+{
+#ifdef CONFIG_AUDIT_TREE
+	struct audit_context *context;
+	struct audit_tree_refs *p;
+	struct audit_chunk *chunk;
+	int count;
+	if (likely(list_empty(&inode->inotify_watches)))
+		return;
+	context = current->audit_context;
+	p = context->trees;
+	count = context->tree_count;
+	rcu_read_lock();
+	chunk = audit_tree_lookup(inode);
+	rcu_read_unlock();
+	if (!chunk)
+		return;
+	if (likely(put_tree_ref(context, chunk)))
+		return;
+	if (unlikely(!grow_tree_refs(context))) {
+		printk(KERN_WARNING "out of memory, audit has lost a tree reference");
+		audit_set_auditable(context);
+		audit_put_chunk(chunk);
+		unroll_tree_refs(context, p, count);
+		return;
+	}
+	put_tree_ref(context, chunk);
+#endif
+}
+
+static void handle_path(const struct dentry *dentry)
+{
+#ifdef CONFIG_AUDIT_TREE
+	struct audit_context *context;
+	struct audit_tree_refs *p;
+	const struct dentry *d, *parent;
+	struct audit_chunk *drop;
+	unsigned long seq;
+	int count;
+
+	context = current->audit_context;
+	p = context->trees;
+	count = context->tree_count;
+retry:
+	drop = NULL;
+	d = dentry;
+	rcu_read_lock();
+	seq = read_seqbegin(&rename_lock);
+	for(;;) {
+		struct inode *inode = d->d_inode;
+		if (inode && unlikely(!list_empty(&inode->inotify_watches))) {
+			struct audit_chunk *chunk;
+			chunk = audit_tree_lookup(inode);
+			if (chunk) {
+				if (unlikely(!put_tree_ref(context, chunk))) {
+					drop = chunk;
+					break;
+				}
+			}
+		}
+		parent = d->d_parent;
+		if (parent == d)
+			break;
+		d = parent;
+	}
+	if (unlikely(read_seqretry(&rename_lock, seq) || drop)) {  /* in this order */
+		rcu_read_unlock();
+		if (!drop) {
+			/* just a race with rename */
+			unroll_tree_refs(context, p, count);
+			goto retry;
+		}
+		audit_put_chunk(drop);
+		if (grow_tree_refs(context)) {
+			/* OK, got more space */
+			unroll_tree_refs(context, p, count);
+			goto retry;
+		}
+		/* too bad */
+		printk(KERN_WARNING
+			"out of memory, audit has lost a tree reference");
+		unroll_tree_refs(context, p, count);
+		audit_set_auditable(context);
+		return;
+	}
+	rcu_read_unlock();
+#endif
+}
+
 /**
  * audit_getname - add a name to the list
  * @name: name to add
@@ -1407,7 +1623,7 @@ void __audit_inode(const char *name, const struct dentry *dentry)
 {
 	int idx;
 	struct audit_context *context = current->audit_context;
-	const struct inode *inode = inode = dentry->d_inode;
+	const struct inode *inode = dentry->d_inode;
 
 	if (!context->in_syscall)
 		return;
@@ -1427,6 +1643,7 @@ void __audit_inode(const char *name, const struct dentry *dentry)
 		idx = context->name_count - 1;
 		context->names[idx].name = NULL;
 	}
+	handle_path(dentry);
 	audit_copy_inode(&context->names[idx], inode);
 }
 
@@ -1456,6 +1673,8 @@ void __audit_inode_child(const char *dname, const struct dentry *dentry,
 	if (!context->in_syscall)
 		return;
 
+	if (inode)
+		handle_one(inode);
 	/* determine matching parent */
 	if (!dname)
 		goto add_names;
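
Finally, the audit_tree_refs bookkeeping above is easy to misread: ->tree_count is the number of FREE slots in the current 31-entry array, and slots fill from index 30 downward. A small userspace model of put_tree_ref()'s fast path (chunks modelled as ints; the growth and unroll paths are omitted):

	#include <stdio.h>

	enum { SLOTS = 31 };

	struct refs {
		struct refs *next;
		int c[SLOTS];		/* the kernel stores audit_chunk pointers */
	};

	struct ctx {
		struct refs *trees, *first_trees;
		int tree_count;		/* free slots left in *trees */
	};

	static int put_ref(struct ctx *ctx, int chunk)
	{
		struct refs *p = ctx->trees;
		int left = ctx->tree_count;

		if (left) {
			p->c[--left] = chunk;	/* fill from the top down */
			ctx->tree_count = left;
			return 1;
		}
		return 0;	/* caller must grow the chain, as grow_tree_refs() does */
	}

	int main(void)
	{
		struct refs r = { 0 };
		struct ctx ctx = { &r, &r, SLOTS };

		put_ref(&ctx, 42);
		printf("slot 30 = %d, free = %d\n", r.c[30], ctx.tree_count);
		return 0;
	}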