author     Linus Torvalds <torvalds@linux-foundation.org>    2018-12-27 14:58:50 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2018-12-27 14:58:50 -0500
commit     047ce6d380e8e66cfb6cbc22e873af89dd0c216c (patch)
tree       725d5f911d34ed234a5df8b6ef07772ca6c678b9
parent     a3b5c1065f3fb934a87dd07d23def99916023d6f (diff)
parent     d406db524c32ca35bd85cada28a547fff3115715 (diff)
Merge tag 'audit-pr-20181224' of git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/audit
Pull audit updates from Paul Moore:
 "In the finest of holiday traditions, I have a number of gifts to share
  today. While most of them are re-gifts from others, unlike the typical
  re-gift, these are things you will want in and around your tree; I
  promise.

  This pull request is perhaps a bit larger than our typical PR, but most
  of it comes from Jan's rework of audit's fanotify code; a very welcome
  improvement. We ran this through our normal regression tests, as well
  as some newly created stress tests, and everything looks good.

  Richard added a few patches, mostly cleaning up a few things and
  shortening some of the audit records that we send to userspace; a
  change the userspace folks are quite happy about.

  Finally, YueHaibing and I kick in a few patches to simplify things a
  bit and make the code less prone to errors.

  Lastly, I want to say thanks one more time to everyone who has
  contributed patches, testing, and code reviews for the audit subsystem
  over the past year. The project is what it is due to your help and
  contributions - thank you"

* tag 'audit-pr-20181224' of git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/audit: (22 commits)
  audit: remove duplicated include from audit.c
  audit: shorten PATH cap values when zero
  audit: use current whenever possible
  audit: minimize our use of audit_log_format()
  audit: remove WATCH and TREE config options
  audit: use session_info helper
  audit: localize audit_log_session_info prototype
  audit: Use 'mark' name for fsnotify_mark variables
  audit: Replace chunk attached to mark instead of replacing mark
  audit: Simplify locking around untag_chunk()
  audit: Drop all unused chunk nodes during deletion
  audit: Guarantee forward progress of chunk untagging
  audit: Allocate fsnotify mark independently of chunk
  audit: Provide helper for dropping mark's chunk reference
  audit: Remove pointless check in insert_hash()
  audit: Factor out chunk replacement code
  audit: Make hash table insertion safe against concurrent lookups
  audit: Embed key into chunk
  audit: Fix possible tagging failures
  audit: Fix possible spurious -ENOSPC error
  ...
-rw-r--r--  drivers/tty/tty_audit.c            13
-rw-r--r--  include/linux/audit.h               8
-rw-r--r--  init/Kconfig                        9
-rw-r--r--  kernel/Makefile                     4
-rw-r--r--  kernel/audit.c                     62
-rw-r--r--  kernel/audit.h                     10
-rw-r--r--  kernel/audit_fsnotify.c             6
-rw-r--r--  kernel/audit_tree.c               498
-rw-r--r--  kernel/audit_watch.c                6
-rw-r--r--  kernel/auditsc.c                  150
-rw-r--r--  security/integrity/ima/ima_api.c    2
11 files changed, 395 insertions, 373 deletions
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index 50f567b6a66e..28f87fd6a28e 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -61,20 +61,19 @@ static void tty_audit_log(const char *description, dev_t dev,
                          unsigned char *data, size_t size)
 {
        struct audit_buffer *ab;
-       struct task_struct *tsk = current;
-       pid_t pid = task_pid_nr(tsk);
-       uid_t uid = from_kuid(&init_user_ns, task_uid(tsk));
-       uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk));
-       unsigned int sessionid = audit_get_sessionid(tsk);
+       pid_t pid = task_pid_nr(current);
+       uid_t uid = from_kuid(&init_user_ns, task_uid(current));
+       uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(current));
+       unsigned int sessionid = audit_get_sessionid(current);
 
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY);
        if (ab) {
-               char name[sizeof(tsk->comm)];
+               char name[sizeof(current->comm)];
 
                audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d"
                                 " minor=%d comm=", description, pid, uid,
                                 loginuid, sessionid, MAJOR(dev), MINOR(dev));
-               get_task_comm(name, tsk);
+               get_task_comm(name, current);
                audit_log_untrustedstring(ab, name);
                audit_log_format(ab, " data=");
                audit_log_n_hex(ab, data, size);
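
For readers less familiar with this record: tty_audit_log() emits one AUDIT_TTY line of key=value pairs about the calling task, ending with the typed data hex-encoded. A rough user-space sketch of that shape (plain C; the field names are copied from the hunk above, the description and auid/ses values are made up for illustration and none of the helpers here are kernel API):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Illustrative stand-in for audit_log_n_hex(): hex-encode "data". */
    static void log_n_hex(char *dst, const unsigned char *data, size_t size)
    {
        for (size_t i = 0; i < size; i++)
            sprintf(dst + 2 * i, "%02x", data[i]);
    }

    int main(void)
    {
        const unsigned char data[] = "ls -l\n";
        char hex[2 * sizeof(data) + 1];
        char record[256];

        log_n_hex(hex, data, sizeof(data) - 1);
        /* Same shape as the AUDIT_TTY record built above (values faked). */
        snprintf(record, sizeof(record),
                 "tty_data pid=%u uid=%u auid=%u ses=%u major=%d minor=%d comm=bash data=%s",
                 (unsigned)getpid(), (unsigned)getuid(), 1000u, 1u, 136, 0, hex);
        puts(record);
        return 0;
    }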
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 9334fbef7bae..a625c29a2ea2 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -115,8 +115,6 @@ extern int audit_classify_compat_syscall(int abi, unsigned syscall);
 
 struct filename;
 
-extern void audit_log_session_info(struct audit_buffer *ab);
-
 #define AUDIT_OFF      0
 #define AUDIT_ON       1
 #define AUDIT_LOCKED   2
@@ -153,8 +151,7 @@ extern void audit_log_link_denied(const char *operation);
 extern void audit_log_lost(const char *message);
 
 extern int audit_log_task_context(struct audit_buffer *ab);
-extern void audit_log_task_info(struct audit_buffer *ab,
-                               struct task_struct *tsk);
+extern void audit_log_task_info(struct audit_buffer *ab);
 
 extern int audit_update_lsm_rules(void);
 
@@ -202,8 +199,7 @@ static inline int audit_log_task_context(struct audit_buffer *ab)
 {
        return 0;
 }
-static inline void audit_log_task_info(struct audit_buffer *ab,
-                                      struct task_struct *tsk)
+static inline void audit_log_task_info(struct audit_buffer *ab)
 { }
 #define audit_enabled AUDIT_OFF
 #endif /* CONFIG_AUDIT */
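
The header change is twofold: the task argument disappears because audit_log_task_info() now always reports on current, and the !CONFIG_AUDIT branch keeps the usual empty static-inline stub so call sites need no #ifdefs. A minimal user-space sketch of that stub idiom (CONFIG_AUDIT here is just an illustrative macro, not the real Kconfig plumbing):

    #include <stdio.h>

    #define CONFIG_AUDIT 1  /* flip to 0 to "compile the feature out" */

    #if CONFIG_AUDIT
    static void audit_log_task_info(void)
    {
        printf("pid=%d comm=demo\n", 1234);
    }
    #else
    /* Empty stub: callers compile unchanged when the feature is disabled. */
    static inline void audit_log_task_info(void) { }
    #endif

    int main(void)
    {
        audit_log_task_info();  /* no #ifdef needed at the call site */
        return 0;
    }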
diff --git a/init/Kconfig b/init/Kconfig
index ed9352513c32..3e6be1694766 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -335,15 +335,6 @@ config HAVE_ARCH_AUDITSYSCALL
 config AUDITSYSCALL
        def_bool y
        depends on AUDIT && HAVE_ARCH_AUDITSYSCALL
-
-config AUDIT_WATCH
-       def_bool y
-       depends on AUDITSYSCALL
-       select FSNOTIFY
-
-config AUDIT_TREE
-       def_bool y
-       depends on AUDITSYSCALL
        select FSNOTIFY
 
 source "kernel/irq/Kconfig"
diff --git a/kernel/Makefile b/kernel/Makefile
index 7343b3a9bff0..9dc7f519129d 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -76,9 +76,7 @@ obj-$(CONFIG_IKCONFIG) += configs.o
 obj-$(CONFIG_SMP) += stop_machine.o
 obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
 obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
-obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
-obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o audit_fsnotify.o
-obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
+obj-$(CONFIG_AUDITSYSCALL) += auditsc.o audit_watch.o audit_fsnotify.o audit_tree.o
 obj-$(CONFIG_GCOV_KERNEL) += gcov/
 obj-$(CONFIG_KCOV) += kcov.o
 obj-$(CONFIG_KPROBES) += kprobes.o
diff --git a/kernel/audit.c b/kernel/audit.c
index 2a8058764aa6..632d36059556 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -60,7 +60,6 @@
 #include <linux/mutex.h>
 #include <linux/gfp.h>
 #include <linux/pid.h>
-#include <linux/slab.h>
 
 #include <linux/audit.h>
 
@@ -400,7 +399,7 @@ static int audit_log_config_change(char *function_name, u32 new, u32 old,
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
        if (unlikely(!ab))
                return rc;
-       audit_log_format(ab, "%s=%u old=%u", function_name, new, old);
+       audit_log_format(ab, "%s=%u old=%u ", function_name, new, old);
        audit_log_session_info(ab);
        rc = audit_log_task_context(ab);
        if (rc)
@@ -1067,7 +1066,7 @@ static void audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
        *ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
        if (unlikely(!*ab))
                return;
-       audit_log_format(*ab, "pid=%d uid=%u", pid, uid);
+       audit_log_format(*ab, "pid=%d uid=%u ", pid, uid);
        audit_log_session_info(*ab);
        audit_log_task_context(*ab);
 }
@@ -1096,10 +1095,11 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
 
        if (audit_enabled == AUDIT_OFF)
                return;
+
        ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_FEATURE_CHANGE);
        if (!ab)
                return;
-       audit_log_task_info(ab, current);
+       audit_log_task_info(ab);
        audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
                         audit_feature_names[which], !!old_feature, !!new_feature,
                         !!old_lock, !!new_lock, res);
@@ -2042,7 +2042,7 @@ void audit_log_session_info(struct audit_buffer *ab)
        unsigned int sessionid = audit_get_sessionid(current);
        uid_t auid = from_kuid(&init_user_ns, audit_get_loginuid(current));
 
-       audit_log_format(ab, " auid=%u ses=%u", auid, sessionid);
+       audit_log_format(ab, "auid=%u ses=%u", auid, sessionid);
 }
 
 void audit_log_key(struct audit_buffer *ab, char *key)
@@ -2058,11 +2058,13 @@ void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
 {
        int i;
 
-       audit_log_format(ab, " %s=", prefix);
-       CAP_FOR_EACH_U32(i) {
-               audit_log_format(ab, "%08x",
-                                cap->cap[CAP_LAST_U32 - i]);
+       if (cap_isclear(*cap)) {
+               audit_log_format(ab, " %s=0", prefix);
+               return;
        }
+       audit_log_format(ab, " %s=", prefix);
+       CAP_FOR_EACH_U32(i)
+               audit_log_format(ab, "%08x", cap->cap[CAP_LAST_U32 - i]);
 }
 
 static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
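
The effect of the audit_log_cap() hunk is purely on record size: an all-zero capability set used to be printed as a run of zero-padded %08x words and is now printed as a single "0". A small user-space sketch of the two formats (two 32-bit words stand in for kernel_cap_t, which was two u32s at the time of this series; these are not the kernel's types):

    #include <stdio.h>
    #include <stdint.h>

    #define CAP_WORDS 2  /* stand-in for the kernel's two-u32 kernel_cap_t */

    static void log_cap_old(const char *prefix, const uint32_t cap[CAP_WORDS])
    {
        printf(" %s=", prefix);
        for (int i = 0; i < CAP_WORDS; i++)
            printf("%08x", cap[CAP_WORDS - 1 - i]);  /* high word first */
        putchar('\n');
    }

    static void log_cap_new(const char *prefix, const uint32_t cap[CAP_WORDS])
    {
        if (cap[0] == 0 && cap[1] == 0) {  /* cap_isclear() analogue */
            printf(" %s=0\n", prefix);
            return;
        }
        log_cap_old(prefix, cap);
    }

    int main(void)
    {
        uint32_t empty[CAP_WORDS] = { 0, 0 };

        log_cap_old("cap_fp", empty);  /* cap_fp=0000000000000000 */
        log_cap_new("cap_fp", empty);  /* cap_fp=0 */
        return 0;
    }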
@@ -2177,22 +2179,21 @@ void audit_log_name(struct audit_context *context, struct audit_names *n,
        }
 
        /* log the audit_names record type */
-       audit_log_format(ab, " nametype=");
        switch(n->type) {
        case AUDIT_TYPE_NORMAL:
-               audit_log_format(ab, "NORMAL");
+               audit_log_format(ab, " nametype=NORMAL");
                break;
        case AUDIT_TYPE_PARENT:
-               audit_log_format(ab, "PARENT");
+               audit_log_format(ab, " nametype=PARENT");
                break;
        case AUDIT_TYPE_CHILD_DELETE:
-               audit_log_format(ab, "DELETE");
+               audit_log_format(ab, " nametype=DELETE");
                break;
        case AUDIT_TYPE_CHILD_CREATE:
-               audit_log_format(ab, "CREATE");
+               audit_log_format(ab, " nametype=CREATE");
                break;
        default:
-               audit_log_format(ab, "UNKNOWN");
+               audit_log_format(ab, " nametype=UNKNOWN");
                break;
        }
 
@@ -2247,15 +2248,15 @@ out_null:
                audit_log_format(ab, " exe=(null)");
 }
 
-struct tty_struct *audit_get_tty(struct task_struct *tsk)
+struct tty_struct *audit_get_tty(void)
 {
        struct tty_struct *tty = NULL;
        unsigned long flags;
 
-       spin_lock_irqsave(&tsk->sighand->siglock, flags);
-       if (tsk->signal)
-               tty = tty_kref_get(tsk->signal->tty);
-       spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
+       spin_lock_irqsave(&current->sighand->siglock, flags);
+       if (current->signal)
+               tty = tty_kref_get(current->signal->tty);
+       spin_unlock_irqrestore(&current->sighand->siglock, flags);
        return tty;
 }
 
@@ -2264,25 +2265,24 @@ void audit_put_tty(struct tty_struct *tty)
        tty_kref_put(tty);
 }
 
-void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
+void audit_log_task_info(struct audit_buffer *ab)
 {
        const struct cred *cred;
-       char comm[sizeof(tsk->comm)];
+       char comm[sizeof(current->comm)];
        struct tty_struct *tty;
 
        if (!ab)
                return;
 
-       /* tsk == current */
        cred = current_cred();
-       tty = audit_get_tty(tsk);
+       tty = audit_get_tty();
        audit_log_format(ab,
                         " ppid=%d pid=%d auid=%u uid=%u gid=%u"
                         " euid=%u suid=%u fsuid=%u"
                         " egid=%u sgid=%u fsgid=%u tty=%s ses=%u",
-                        task_ppid_nr(tsk),
-                        task_tgid_nr(tsk),
-                        from_kuid(&init_user_ns, audit_get_loginuid(tsk)),
+                        task_ppid_nr(current),
+                        task_tgid_nr(current),
+                        from_kuid(&init_user_ns, audit_get_loginuid(current)),
                         from_kuid(&init_user_ns, cred->uid),
                         from_kgid(&init_user_ns, cred->gid),
                         from_kuid(&init_user_ns, cred->euid),
@@ -2292,11 +2292,11 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
                         from_kgid(&init_user_ns, cred->sgid),
                         from_kgid(&init_user_ns, cred->fsgid),
                         tty ? tty_name(tty) : "(none)",
-                        audit_get_sessionid(tsk));
+                        audit_get_sessionid(current));
        audit_put_tty(tty);
        audit_log_format(ab, " comm=");
-       audit_log_untrustedstring(ab, get_task_comm(comm, tsk));
-       audit_log_d_path_exe(ab, tsk->mm);
+       audit_log_untrustedstring(ab, get_task_comm(comm, current));
+       audit_log_d_path_exe(ab, current->mm);
        audit_log_task_context(ab);
 }
 EXPORT_SYMBOL(audit_log_task_info);
@@ -2317,7 +2317,7 @@ void audit_log_link_denied(const char *operation)
        if (!ab)
                return;
        audit_log_format(ab, "op=%s", operation);
-       audit_log_task_info(ab, current);
+       audit_log_task_info(ab);
        audit_log_format(ab, " res=0");
        audit_log_end(ab);
 }
diff --git a/kernel/audit.h b/kernel/audit.h
index 214e14948370..91421679a168 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -210,6 +210,8 @@ struct audit_context {
 
 extern bool audit_ever_enabled;
 
+extern void audit_log_session_info(struct audit_buffer *ab);
+
 extern void audit_copy_inode(struct audit_names *name,
                             const struct dentry *dentry,
                             struct inode *inode);
@@ -262,11 +264,11 @@ extern struct audit_entry *audit_dupe_rule(struct audit_krule *old);
 extern void audit_log_d_path_exe(struct audit_buffer *ab,
                                 struct mm_struct *mm);
 
-extern struct tty_struct *audit_get_tty(struct task_struct *tsk);
+extern struct tty_struct *audit_get_tty(void);
 extern void audit_put_tty(struct tty_struct *tty);
 
 /* audit watch functions */
-#ifdef CONFIG_AUDIT_WATCH
+#ifdef CONFIG_AUDITSYSCALL
 extern void audit_put_watch(struct audit_watch *watch);
 extern void audit_get_watch(struct audit_watch *watch);
 extern int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op);
@@ -299,9 +301,9 @@ extern int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark
 #define audit_mark_compare(m, i, d) 0
 #define audit_exe_compare(t, m) (-EINVAL)
 #define audit_dupe_exe(n, o) (-EINVAL)
-#endif /* CONFIG_AUDIT_WATCH */
+#endif /* CONFIG_AUDITSYSCALL */
 
-#ifdef CONFIG_AUDIT_TREE
+#ifdef CONFIG_AUDITSYSCALL
 extern struct audit_chunk *audit_tree_lookup(const struct inode *inode);
 extern void audit_put_chunk(struct audit_chunk *chunk);
 extern bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree);
diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c
index fba78047fb37..cf4512a33675 100644
--- a/kernel/audit_fsnotify.c
+++ b/kernel/audit_fsnotify.c
@@ -130,10 +130,8 @@ static void audit_mark_log_rule_change(struct audit_fsnotify_mark *audit_mark, c
        ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
        if (unlikely(!ab))
                return;
-       audit_log_format(ab, "auid=%u ses=%u op=%s",
-                        from_kuid(&init_user_ns, audit_get_loginuid(current)),
-                        audit_get_sessionid(current), op);
-       audit_log_format(ab, " path=");
+       audit_log_session_info(ab);
+       audit_log_format(ab, " op=%s path=", op);
        audit_log_untrustedstring(ab, audit_mark->path);
        audit_log_key(ab, rule->filterkey);
        audit_log_format(ab, " list=%d res=1", rule->listnr);
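
This is the same consolidation applied to audit_watch.c further down: rather than every call site formatting auid/ses by hand, each CONFIG_CHANGE-style record now starts with the shared audit_log_session_info() helper. A user-space sketch of the pattern (string building only; the auid/ses, op, and list values are placeholders, not real audit fields from a running system):

    #include <stdio.h>
    #include <string.h>

    /* Shared helper: every record of this family starts the same way. */
    static void log_session_info(char *buf, size_t len)
    {
        snprintf(buf, len, "auid=%u ses=%u", 1000u, 3u);
    }

    static void log_rule_change(const char *op, const char *path)
    {
        char rec[256];
        size_t used;

        log_session_info(rec, sizeof(rec));
        used = strlen(rec);
        snprintf(rec + used, sizeof(rec) - used,
                 " op=%s path=%s list=%d res=1", op, path, 5);
        puts(rec);
    }

    int main(void)
    {
        log_rule_change("updated_rules", "/etc/passwd");
        return 0;
    }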
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index ea43181cde4a..d4af4d97f847 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -24,9 +24,9 @@ struct audit_tree {
24 24
25struct audit_chunk { 25struct audit_chunk {
26 struct list_head hash; 26 struct list_head hash;
27 struct fsnotify_mark mark; 27 unsigned long key;
28 struct fsnotify_mark *mark;
28 struct list_head trees; /* with root here */ 29 struct list_head trees; /* with root here */
29 int dead;
30 int count; 30 int count;
31 atomic_long_t refs; 31 atomic_long_t refs;
32 struct rcu_head head; 32 struct rcu_head head;
@@ -37,13 +37,25 @@ struct audit_chunk {
37 } owners[]; 37 } owners[];
38}; 38};
39 39
40struct audit_tree_mark {
41 struct fsnotify_mark mark;
42 struct audit_chunk *chunk;
43};
44
40static LIST_HEAD(tree_list); 45static LIST_HEAD(tree_list);
41static LIST_HEAD(prune_list); 46static LIST_HEAD(prune_list);
42static struct task_struct *prune_thread; 47static struct task_struct *prune_thread;
43 48
44/* 49/*
45 * One struct chunk is attached to each inode of interest. 50 * One struct chunk is attached to each inode of interest through
46 * We replace struct chunk on tagging/untagging. 51 * audit_tree_mark (fsnotify mark). We replace struct chunk on tagging /
52 * untagging, the mark is stable as long as there is chunk attached. The
53 * association between mark and chunk is protected by hash_lock and
54 * audit_tree_group->mark_mutex. Thus as long as we hold
55 * audit_tree_group->mark_mutex and check that the mark is alive by
56 * FSNOTIFY_MARK_FLAG_ATTACHED flag check, we are sure the mark points to
57 * the current chunk.
58 *
47 * Rules have pointer to struct audit_tree. 59 * Rules have pointer to struct audit_tree.
48 * Rules have struct list_head rlist forming a list of rules over 60 * Rules have struct list_head rlist forming a list of rules over
49 * the same tree. 61 * the same tree.
@@ -62,8 +74,12 @@ static struct task_struct *prune_thread;
62 * tree is refcounted; one reference for "some rules on rules_list refer to 74 * tree is refcounted; one reference for "some rules on rules_list refer to
63 * it", one for each chunk with pointer to it. 75 * it", one for each chunk with pointer to it.
64 * 76 *
65 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount 77 * chunk is refcounted by embedded .refs. Mark associated with the chunk holds
66 * of watch contributes 1 to .refs). 78 * one chunk reference. This reference is dropped either when a mark is going
79 * to be freed (corresponding inode goes away) or when chunk attached to the
80 * mark gets replaced. This reference must be dropped using
81 * audit_mark_put_chunk() to make sure the reference is dropped only after RCU
82 * grace period as it protects RCU readers of the hash table.
67 * 83 *
68 * node.index allows to get from node.list to containing chunk. 84 * node.index allows to get from node.list to containing chunk.
69 * MSB of that sucker is stolen to mark taggings that we might have to 85 * MSB of that sucker is stolen to mark taggings that we might have to
@@ -72,6 +88,7 @@ static struct task_struct *prune_thread;
72 */ 88 */
73 89
74static struct fsnotify_group *audit_tree_group; 90static struct fsnotify_group *audit_tree_group;
91static struct kmem_cache *audit_tree_mark_cachep __read_mostly;
75 92
76static struct audit_tree *alloc_tree(const char *s) 93static struct audit_tree *alloc_tree(const char *s)
77{ 94{
@@ -131,12 +148,43 @@ static void __put_chunk(struct rcu_head *rcu)
131 audit_put_chunk(chunk); 148 audit_put_chunk(chunk);
132} 149}
133 150
134static void audit_tree_destroy_watch(struct fsnotify_mark *entry) 151/*
152 * Drop reference to the chunk that was held by the mark. This is the reference
153 * that gets dropped after we've removed the chunk from the hash table and we
154 * use it to make sure chunk cannot be freed before RCU grace period expires.
155 */
156static void audit_mark_put_chunk(struct audit_chunk *chunk)
135{ 157{
136 struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
137 call_rcu(&chunk->head, __put_chunk); 158 call_rcu(&chunk->head, __put_chunk);
138} 159}
139 160
161static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
162{
163 return container_of(mark, struct audit_tree_mark, mark);
164}
165
166static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
167{
168 return audit_mark(mark)->chunk;
169}
170
171static void audit_tree_destroy_watch(struct fsnotify_mark *mark)
172{
173 kmem_cache_free(audit_tree_mark_cachep, audit_mark(mark));
174}
175
176static struct fsnotify_mark *alloc_mark(void)
177{
178 struct audit_tree_mark *amark;
179
180 amark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
181 if (!amark)
182 return NULL;
183 fsnotify_init_mark(&amark->mark, audit_tree_group);
184 amark->mark.mask = FS_IN_IGNORED;
185 return &amark->mark;
186}
187
140static struct audit_chunk *alloc_chunk(int count) 188static struct audit_chunk *alloc_chunk(int count)
141{ 189{
142 struct audit_chunk *chunk; 190 struct audit_chunk *chunk;
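
audit_mark() above is the standard container_of() accessor: the fsnotify core only ever hands callbacks the embedded fsnotify_mark pointer, and the audit code recovers its own wrapper structure (and from there the current chunk) from it. A stand-alone sketch of that pattern (toy struct names, not the kernel ones):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct mark {               /* stands in for struct fsnotify_mark */
        unsigned int mask;
    };

    struct tree_mark {          /* stands in for struct audit_tree_mark */
        struct mark mark;       /* embedded member handed out to the core */
        int chunk_id;           /* private data recovered via container_of */
    };

    static struct tree_mark *tree_mark_of(struct mark *m)
    {
        return container_of(m, struct tree_mark, mark);
    }

    int main(void)
    {
        struct tree_mark tm = { .mark = { .mask = 0x20 }, .chunk_id = 42 };
        struct mark *m = &tm.mark;   /* what a callback would receive */

        printf("chunk_id=%d\n", tree_mark_of(m)->chunk_id);
        return 0;
    }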
@@ -156,8 +204,6 @@ static struct audit_chunk *alloc_chunk(int count)
156 INIT_LIST_HEAD(&chunk->owners[i].list); 204 INIT_LIST_HEAD(&chunk->owners[i].list);
157 chunk->owners[i].index = i; 205 chunk->owners[i].index = i;
158 } 206 }
159 fsnotify_init_mark(&chunk->mark, audit_tree_group);
160 chunk->mark.mask = FS_IN_IGNORED;
161 return chunk; 207 return chunk;
162} 208}
163 209
@@ -172,36 +218,25 @@ static unsigned long inode_to_key(const struct inode *inode)
172 return (unsigned long)&inode->i_fsnotify_marks; 218 return (unsigned long)&inode->i_fsnotify_marks;
173} 219}
174 220
175/*
176 * Function to return search key in our hash from chunk. Key 0 is special and
177 * should never be present in the hash.
178 */
179static unsigned long chunk_to_key(struct audit_chunk *chunk)
180{
181 /*
182 * We have a reference to the mark so it should be attached to a
183 * connector.
184 */
185 if (WARN_ON_ONCE(!chunk->mark.connector))
186 return 0;
187 return (unsigned long)chunk->mark.connector->obj;
188}
189
190static inline struct list_head *chunk_hash(unsigned long key) 221static inline struct list_head *chunk_hash(unsigned long key)
191{ 222{
192 unsigned long n = key / L1_CACHE_BYTES; 223 unsigned long n = key / L1_CACHE_BYTES;
193 return chunk_hash_heads + n % HASH_SIZE; 224 return chunk_hash_heads + n % HASH_SIZE;
194} 225}
195 226
196/* hash_lock & entry->lock is held by caller */ 227/* hash_lock & mark->group->mark_mutex is held by caller */
197static void insert_hash(struct audit_chunk *chunk) 228static void insert_hash(struct audit_chunk *chunk)
198{ 229{
199 unsigned long key = chunk_to_key(chunk);
200 struct list_head *list; 230 struct list_head *list;
201 231
202 if (!(chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED)) 232 /*
203 return; 233 * Make sure chunk is fully initialized before making it visible in the
204 list = chunk_hash(key); 234 * hash. Pairs with a data dependency barrier in READ_ONCE() in
235 * audit_tree_lookup().
236 */
237 smp_wmb();
238 WARN_ON_ONCE(!chunk->key);
239 list = chunk_hash(chunk->key);
205 list_add_rcu(&chunk->hash, list); 240 list_add_rcu(&chunk->hash, list);
206} 241}
207 242
@@ -213,7 +248,11 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
213 struct audit_chunk *p; 248 struct audit_chunk *p;
214 249
215 list_for_each_entry_rcu(p, list, hash) { 250 list_for_each_entry_rcu(p, list, hash) {
216 if (chunk_to_key(p) == key) { 251 /*
252 * We use a data dependency barrier in READ_ONCE() to make sure
253 * the chunk we see is fully initialized.
254 */
255 if (READ_ONCE(p->key) == key) {
217 atomic_long_inc(&p->refs); 256 atomic_long_inc(&p->refs);
218 return p; 257 return p;
219 } 258 }
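
The barrier pairing in the two hunks above is the usual RCU publish pattern: fully initialize the chunk (including ->key), issue the write barrier, then link it into the hash, while lookups read ->key through READ_ONCE() so they can never observe a half-built chunk. A user-space analogue using C11 atomics, with a release store standing in for smp_wmb() plus list_add_rcu() and an acquire load standing in for the READ_ONCE() side; this illustrates the ordering only and is not the kernel primitives:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct chunk {
        unsigned long key;   /* must be fully written before publication */
    };

    static _Atomic(struct chunk *) slot;   /* stands in for the hash bucket */

    static void publish(unsigned long key)
    {
        struct chunk *c = malloc(sizeof(*c));

        c->key = key;
        /* release: everything written to *c is visible before the pointer */
        atomic_store_explicit(&slot, c, memory_order_release);
    }

    static struct chunk *lookup(unsigned long key)
    {
        /* acquire: if we see the pointer, we also see c->key */
        struct chunk *c = atomic_load_explicit(&slot, memory_order_acquire);

        return (c && c->key == key) ? c : NULL;
    }

    int main(void)
    {
        publish(0xabcdUL);
        printf("%s\n", lookup(0xabcdUL) ? "found" : "missing");
        return 0;
    }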
@@ -239,137 +278,159 @@ static struct audit_chunk *find_chunk(struct node *p)
239 return container_of(p, struct audit_chunk, owners[0]); 278 return container_of(p, struct audit_chunk, owners[0]);
240} 279}
241 280
242static void untag_chunk(struct node *p) 281static void replace_mark_chunk(struct fsnotify_mark *mark,
282 struct audit_chunk *chunk)
283{
284 struct audit_chunk *old;
285
286 assert_spin_locked(&hash_lock);
287 old = mark_chunk(mark);
288 audit_mark(mark)->chunk = chunk;
289 if (chunk)
290 chunk->mark = mark;
291 if (old)
292 old->mark = NULL;
293}
294
295static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)
243{ 296{
244 struct audit_chunk *chunk = find_chunk(p);
245 struct fsnotify_mark *entry = &chunk->mark;
246 struct audit_chunk *new = NULL;
247 struct audit_tree *owner; 297 struct audit_tree *owner;
248 int size = chunk->count - 1;
249 int i, j; 298 int i, j;
250 299
251 fsnotify_get_mark(entry); 300 new->key = old->key;
301 list_splice_init(&old->trees, &new->trees);
302 list_for_each_entry(owner, &new->trees, same_root)
303 owner->root = new;
304 for (i = j = 0; j < old->count; i++, j++) {
305 if (!old->owners[j].owner) {
306 i--;
307 continue;
308 }
309 owner = old->owners[j].owner;
310 new->owners[i].owner = owner;
311 new->owners[i].index = old->owners[j].index - j + i;
312 if (!owner) /* result of earlier fallback */
313 continue;
314 get_tree(owner);
315 list_replace_init(&old->owners[j].list, &new->owners[i].list);
316 }
317 replace_mark_chunk(old->mark, new);
318 /*
319 * Make sure chunk is fully initialized before making it visible in the
320 * hash. Pairs with a data dependency barrier in READ_ONCE() in
321 * audit_tree_lookup().
322 */
323 smp_wmb();
324 list_replace_rcu(&old->hash, &new->hash);
325}
252 326
253 spin_unlock(&hash_lock); 327static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)
328{
329 struct audit_tree *owner = p->owner;
330
331 if (owner->root == chunk) {
332 list_del_init(&owner->same_root);
333 owner->root = NULL;
334 }
335 list_del_init(&p->list);
336 p->owner = NULL;
337 put_tree(owner);
338}
254 339
255 if (size) 340static int chunk_count_trees(struct audit_chunk *chunk)
256 new = alloc_chunk(size); 341{
342 int i;
343 int ret = 0;
257 344
258 mutex_lock(&entry->group->mark_mutex); 345 for (i = 0; i < chunk->count; i++)
259 spin_lock(&entry->lock); 346 if (chunk->owners[i].owner)
347 ret++;
348 return ret;
349}
350
351static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark)
352{
353 struct audit_chunk *new;
354 int size;
355
356 mutex_lock(&audit_tree_group->mark_mutex);
260 /* 357 /*
261 * mark_mutex protects mark from getting detached and thus also from 358 * mark_mutex stabilizes chunk attached to the mark so we can check
262 * mark->connector->obj getting NULL. 359 * whether it didn't change while we've dropped hash_lock.
263 */ 360 */
264 if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) { 361 if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
265 spin_unlock(&entry->lock); 362 mark_chunk(mark) != chunk)
266 mutex_unlock(&entry->group->mark_mutex); 363 goto out_mutex;
267 if (new)
268 fsnotify_put_mark(&new->mark);
269 goto out;
270 }
271
272 owner = p->owner;
273 364
365 size = chunk_count_trees(chunk);
274 if (!size) { 366 if (!size) {
275 chunk->dead = 1;
276 spin_lock(&hash_lock); 367 spin_lock(&hash_lock);
277 list_del_init(&chunk->trees); 368 list_del_init(&chunk->trees);
278 if (owner->root == chunk)
279 owner->root = NULL;
280 list_del_init(&p->list);
281 list_del_rcu(&chunk->hash); 369 list_del_rcu(&chunk->hash);
370 replace_mark_chunk(mark, NULL);
282 spin_unlock(&hash_lock); 371 spin_unlock(&hash_lock);
283 spin_unlock(&entry->lock); 372 fsnotify_detach_mark(mark);
284 mutex_unlock(&entry->group->mark_mutex); 373 mutex_unlock(&audit_tree_group->mark_mutex);
285 fsnotify_destroy_mark(entry, audit_tree_group); 374 audit_mark_put_chunk(chunk);
286 goto out; 375 fsnotify_free_mark(mark);
376 return;
287 } 377 }
288 378
379 new = alloc_chunk(size);
289 if (!new) 380 if (!new)
290 goto Fallback; 381 goto out_mutex;
291 382
292 if (fsnotify_add_mark_locked(&new->mark, entry->connector->obj,
293 FSNOTIFY_OBJ_TYPE_INODE, 1)) {
294 fsnotify_put_mark(&new->mark);
295 goto Fallback;
296 }
297
298 chunk->dead = 1;
299 spin_lock(&hash_lock); 383 spin_lock(&hash_lock);
300 list_replace_init(&chunk->trees, &new->trees); 384 /*
301 if (owner->root == chunk) { 385 * This has to go last when updating chunk as once replace_chunk() is
302 list_del_init(&owner->same_root); 386 * called, new RCU readers can see the new chunk.
303 owner->root = NULL; 387 */
304 } 388 replace_chunk(new, chunk);
305
306 for (i = j = 0; j <= size; i++, j++) {
307 struct audit_tree *s;
308 if (&chunk->owners[j] == p) {
309 list_del_init(&p->list);
310 i--;
311 continue;
312 }
313 s = chunk->owners[j].owner;
314 new->owners[i].owner = s;
315 new->owners[i].index = chunk->owners[j].index - j + i;
316 if (!s) /* result of earlier fallback */
317 continue;
318 get_tree(s);
319 list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
320 }
321
322 list_replace_rcu(&chunk->hash, &new->hash);
323 list_for_each_entry(owner, &new->trees, same_root)
324 owner->root = new;
325 spin_unlock(&hash_lock);
326 spin_unlock(&entry->lock);
327 mutex_unlock(&entry->group->mark_mutex);
328 fsnotify_destroy_mark(entry, audit_tree_group);
329 fsnotify_put_mark(&new->mark); /* drop initial reference */
330 goto out;
331
332Fallback:
333 // do the best we can
334 spin_lock(&hash_lock);
335 if (owner->root == chunk) {
336 list_del_init(&owner->same_root);
337 owner->root = NULL;
338 }
339 list_del_init(&p->list);
340 p->owner = NULL;
341 put_tree(owner);
342 spin_unlock(&hash_lock); 389 spin_unlock(&hash_lock);
343 spin_unlock(&entry->lock); 390 mutex_unlock(&audit_tree_group->mark_mutex);
344 mutex_unlock(&entry->group->mark_mutex); 391 audit_mark_put_chunk(chunk);
345out: 392 return;
346 fsnotify_put_mark(entry); 393
347 spin_lock(&hash_lock); 394out_mutex:
395 mutex_unlock(&audit_tree_group->mark_mutex);
348} 396}
349 397
398/* Call with group->mark_mutex held, releases it */
350static int create_chunk(struct inode *inode, struct audit_tree *tree) 399static int create_chunk(struct inode *inode, struct audit_tree *tree)
351{ 400{
352 struct fsnotify_mark *entry; 401 struct fsnotify_mark *mark;
353 struct audit_chunk *chunk = alloc_chunk(1); 402 struct audit_chunk *chunk = alloc_chunk(1);
354 if (!chunk) 403
404 if (!chunk) {
405 mutex_unlock(&audit_tree_group->mark_mutex);
355 return -ENOMEM; 406 return -ENOMEM;
407 }
356 408
357 entry = &chunk->mark; 409 mark = alloc_mark();
358 if (fsnotify_add_inode_mark(entry, inode, 0)) { 410 if (!mark) {
359 fsnotify_put_mark(entry); 411 mutex_unlock(&audit_tree_group->mark_mutex);
412 kfree(chunk);
413 return -ENOMEM;
414 }
415
416 if (fsnotify_add_inode_mark_locked(mark, inode, 0)) {
417 mutex_unlock(&audit_tree_group->mark_mutex);
418 fsnotify_put_mark(mark);
419 kfree(chunk);
360 return -ENOSPC; 420 return -ENOSPC;
361 } 421 }
362 422
363 spin_lock(&entry->lock);
364 spin_lock(&hash_lock); 423 spin_lock(&hash_lock);
365 if (tree->goner) { 424 if (tree->goner) {
366 spin_unlock(&hash_lock); 425 spin_unlock(&hash_lock);
367 chunk->dead = 1; 426 fsnotify_detach_mark(mark);
368 spin_unlock(&entry->lock); 427 mutex_unlock(&audit_tree_group->mark_mutex);
369 fsnotify_destroy_mark(entry, audit_tree_group); 428 fsnotify_free_mark(mark);
370 fsnotify_put_mark(entry); 429 fsnotify_put_mark(mark);
430 kfree(chunk);
371 return 0; 431 return 0;
372 } 432 }
433 replace_mark_chunk(mark, chunk);
373 chunk->owners[0].index = (1U << 31); 434 chunk->owners[0].index = (1U << 31);
374 chunk->owners[0].owner = tree; 435 chunk->owners[0].owner = tree;
375 get_tree(tree); 436 get_tree(tree);
@@ -378,35 +439,49 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
378 tree->root = chunk; 439 tree->root = chunk;
379 list_add(&tree->same_root, &chunk->trees); 440 list_add(&tree->same_root, &chunk->trees);
380 } 441 }
442 chunk->key = inode_to_key(inode);
443 /*
444 * Inserting into the hash table has to go last as once we do that RCU
445 * readers can see the chunk.
446 */
381 insert_hash(chunk); 447 insert_hash(chunk);
382 spin_unlock(&hash_lock); 448 spin_unlock(&hash_lock);
383 spin_unlock(&entry->lock); 449 mutex_unlock(&audit_tree_group->mark_mutex);
384 fsnotify_put_mark(entry); /* drop initial reference */ 450 /*
451 * Drop our initial reference. When mark we point to is getting freed,
452 * we get notification through ->freeing_mark callback and cleanup
453 * chunk pointing to this mark.
454 */
455 fsnotify_put_mark(mark);
385 return 0; 456 return 0;
386} 457}
387 458
388/* the first tagged inode becomes root of tree */ 459/* the first tagged inode becomes root of tree */
389static int tag_chunk(struct inode *inode, struct audit_tree *tree) 460static int tag_chunk(struct inode *inode, struct audit_tree *tree)
390{ 461{
391 struct fsnotify_mark *old_entry, *chunk_entry; 462 struct fsnotify_mark *mark;
392 struct audit_tree *owner;
393 struct audit_chunk *chunk, *old; 463 struct audit_chunk *chunk, *old;
394 struct node *p; 464 struct node *p;
395 int n; 465 int n;
396 466
397 old_entry = fsnotify_find_mark(&inode->i_fsnotify_marks, 467 mutex_lock(&audit_tree_group->mark_mutex);
398 audit_tree_group); 468 mark = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group);
399 if (!old_entry) 469 if (!mark)
400 return create_chunk(inode, tree); 470 return create_chunk(inode, tree);
401 471
402 old = container_of(old_entry, struct audit_chunk, mark); 472 /*
403 473 * Found mark is guaranteed to be attached and mark_mutex protects mark
474 * from getting detached and thus it makes sure there is chunk attached
475 * to the mark.
476 */
404 /* are we already there? */ 477 /* are we already there? */
405 spin_lock(&hash_lock); 478 spin_lock(&hash_lock);
479 old = mark_chunk(mark);
406 for (n = 0; n < old->count; n++) { 480 for (n = 0; n < old->count; n++) {
407 if (old->owners[n].owner == tree) { 481 if (old->owners[n].owner == tree) {
408 spin_unlock(&hash_lock); 482 spin_unlock(&hash_lock);
409 fsnotify_put_mark(old_entry); 483 mutex_unlock(&audit_tree_group->mark_mutex);
484 fsnotify_put_mark(mark);
410 return 0; 485 return 0;
411 } 486 }
412 } 487 }
@@ -414,83 +489,38 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
414 489
415 chunk = alloc_chunk(old->count + 1); 490 chunk = alloc_chunk(old->count + 1);
416 if (!chunk) { 491 if (!chunk) {
417 fsnotify_put_mark(old_entry); 492 mutex_unlock(&audit_tree_group->mark_mutex);
493 fsnotify_put_mark(mark);
418 return -ENOMEM; 494 return -ENOMEM;
419 } 495 }
420 496
421 chunk_entry = &chunk->mark;
422
423 mutex_lock(&old_entry->group->mark_mutex);
424 spin_lock(&old_entry->lock);
425 /*
426 * mark_mutex protects mark from getting detached and thus also from
427 * mark->connector->obj getting NULL.
428 */
429 if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
430 /* old_entry is being shot, lets just lie */
431 spin_unlock(&old_entry->lock);
432 mutex_unlock(&old_entry->group->mark_mutex);
433 fsnotify_put_mark(old_entry);
434 fsnotify_put_mark(&chunk->mark);
435 return -ENOENT;
436 }
437
438 if (fsnotify_add_mark_locked(chunk_entry, old_entry->connector->obj,
439 FSNOTIFY_OBJ_TYPE_INODE, 1)) {
440 spin_unlock(&old_entry->lock);
441 mutex_unlock(&old_entry->group->mark_mutex);
442 fsnotify_put_mark(chunk_entry);
443 fsnotify_put_mark(old_entry);
444 return -ENOSPC;
445 }
446
447 /* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
448 spin_lock(&chunk_entry->lock);
449 spin_lock(&hash_lock); 497 spin_lock(&hash_lock);
450
451 /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
452 if (tree->goner) { 498 if (tree->goner) {
453 spin_unlock(&hash_lock); 499 spin_unlock(&hash_lock);
454 chunk->dead = 1; 500 mutex_unlock(&audit_tree_group->mark_mutex);
455 spin_unlock(&chunk_entry->lock); 501 fsnotify_put_mark(mark);
456 spin_unlock(&old_entry->lock); 502 kfree(chunk);
457 mutex_unlock(&old_entry->group->mark_mutex);
458
459 fsnotify_destroy_mark(chunk_entry, audit_tree_group);
460
461 fsnotify_put_mark(chunk_entry);
462 fsnotify_put_mark(old_entry);
463 return 0; 503 return 0;
464 } 504 }
465 list_replace_init(&old->trees, &chunk->trees); 505 p = &chunk->owners[chunk->count - 1];
466 for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
467 struct audit_tree *s = old->owners[n].owner;
468 p->owner = s;
469 p->index = old->owners[n].index;
470 if (!s) /* result of fallback in untag */
471 continue;
472 get_tree(s);
473 list_replace_init(&old->owners[n].list, &p->list);
474 }
475 p->index = (chunk->count - 1) | (1U<<31); 506 p->index = (chunk->count - 1) | (1U<<31);
476 p->owner = tree; 507 p->owner = tree;
477 get_tree(tree); 508 get_tree(tree);
478 list_add(&p->list, &tree->chunks); 509 list_add(&p->list, &tree->chunks);
479 list_replace_rcu(&old->hash, &chunk->hash);
480 list_for_each_entry(owner, &chunk->trees, same_root)
481 owner->root = chunk;
482 old->dead = 1;
483 if (!tree->root) { 510 if (!tree->root) {
484 tree->root = chunk; 511 tree->root = chunk;
485 list_add(&tree->same_root, &chunk->trees); 512 list_add(&tree->same_root, &chunk->trees);
486 } 513 }
514 /*
515 * This has to go last when updating chunk as once replace_chunk() is
516 * called, new RCU readers can see the new chunk.
517 */
518 replace_chunk(chunk, old);
487 spin_unlock(&hash_lock); 519 spin_unlock(&hash_lock);
488 spin_unlock(&chunk_entry->lock); 520 mutex_unlock(&audit_tree_group->mark_mutex);
489 spin_unlock(&old_entry->lock); 521 fsnotify_put_mark(mark); /* pair to fsnotify_find_mark */
490 mutex_unlock(&old_entry->group->mark_mutex); 522 audit_mark_put_chunk(old);
491 fsnotify_destroy_mark(old_entry, audit_tree_group); 523
492 fsnotify_put_mark(chunk_entry); /* drop initial reference */
493 fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
494 return 0; 524 return 0;
495} 525}
496 526
@@ -503,8 +533,7 @@ static void audit_tree_log_remove_rule(struct audit_krule *rule)
503 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE); 533 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
504 if (unlikely(!ab)) 534 if (unlikely(!ab))
505 return; 535 return;
506 audit_log_format(ab, "op=remove_rule"); 536 audit_log_format(ab, "op=remove_rule dir=");
507 audit_log_format(ab, " dir=");
508 audit_log_untrustedstring(ab, rule->tree->pathname); 537 audit_log_untrustedstring(ab, rule->tree->pathname);
509 audit_log_key(ab, rule->filterkey); 538 audit_log_key(ab, rule->filterkey);
510 audit_log_format(ab, " list=%d res=1", rule->listnr); 539 audit_log_format(ab, " list=%d res=1", rule->listnr);
@@ -534,22 +563,48 @@ static void kill_rules(struct audit_tree *tree)
534} 563}
535 564
536/* 565/*
537 * finish killing struct audit_tree 566 * Remove tree from chunks. If 'tagged' is set, remove tree only from tagged
567 * chunks. The function expects tagged chunks are all at the beginning of the
568 * chunks list.
538 */ 569 */
539static void prune_one(struct audit_tree *victim) 570static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
540{ 571{
541 spin_lock(&hash_lock); 572 spin_lock(&hash_lock);
542 while (!list_empty(&victim->chunks)) { 573 while (!list_empty(&victim->chunks)) {
543 struct node *p; 574 struct node *p;
575 struct audit_chunk *chunk;
576 struct fsnotify_mark *mark;
577
578 p = list_first_entry(&victim->chunks, struct node, list);
579 /* have we run out of marked? */
580 if (tagged && !(p->index & (1U<<31)))
581 break;
582 chunk = find_chunk(p);
583 mark = chunk->mark;
584 remove_chunk_node(chunk, p);
585 /* Racing with audit_tree_freeing_mark()? */
586 if (!mark)
587 continue;
588 fsnotify_get_mark(mark);
589 spin_unlock(&hash_lock);
544 590
545 p = list_entry(victim->chunks.next, struct node, list); 591 untag_chunk(chunk, mark);
592 fsnotify_put_mark(mark);
546 593
547 untag_chunk(p); 594 spin_lock(&hash_lock);
548 } 595 }
549 spin_unlock(&hash_lock); 596 spin_unlock(&hash_lock);
550 put_tree(victim); 597 put_tree(victim);
551} 598}
552 599
600/*
601 * finish killing struct audit_tree
602 */
603static void prune_one(struct audit_tree *victim)
604{
605 prune_tree_chunks(victim, false);
606}
607
553/* trim the uncommitted chunks from tree */ 608/* trim the uncommitted chunks from tree */
554 609
555static void trim_marked(struct audit_tree *tree) 610static void trim_marked(struct audit_tree *tree)
@@ -569,18 +624,11 @@ static void trim_marked(struct audit_tree *tree)
569 list_add(p, &tree->chunks); 624 list_add(p, &tree->chunks);
570 } 625 }
571 } 626 }
627 spin_unlock(&hash_lock);
572 628
573 while (!list_empty(&tree->chunks)) { 629 prune_tree_chunks(tree, true);
574 struct node *node;
575
576 node = list_entry(tree->chunks.next, struct node, list);
577
578 /* have we run out of marked? */
579 if (!(node->index & (1U<<31)))
580 break;
581 630
582 untag_chunk(node); 631 spin_lock(&hash_lock);
583 }
584 if (!tree->root && !tree->goner) { 632 if (!tree->root && !tree->goner) {
585 tree->goner = 1; 633 tree->goner = 1;
586 spin_unlock(&hash_lock); 634 spin_unlock(&hash_lock);
@@ -661,7 +709,7 @@ void audit_trim_trees(void)
661 /* this could be NULL if the watch is dying else where... */ 709 /* this could be NULL if the watch is dying else where... */
662 node->index |= 1U<<31; 710 node->index |= 1U<<31;
663 if (iterate_mounts(compare_root, 711 if (iterate_mounts(compare_root,
664 (void *)chunk_to_key(chunk), 712 (void *)(chunk->key),
665 root_mnt)) 713 root_mnt))
666 node->index &= ~(1U<<31); 714 node->index &= ~(1U<<31);
667 } 715 }
@@ -959,10 +1007,6 @@ static void evict_chunk(struct audit_chunk *chunk)
959 int need_prune = 0; 1007 int need_prune = 0;
960 int n; 1008 int n;
961 1009
962 if (chunk->dead)
963 return;
964
965 chunk->dead = 1;
966 mutex_lock(&audit_filter_mutex); 1010 mutex_lock(&audit_filter_mutex);
967 spin_lock(&hash_lock); 1011 spin_lock(&hash_lock);
968 while (!list_empty(&chunk->trees)) { 1012 while (!list_empty(&chunk->trees)) {
@@ -999,17 +1043,27 @@ static int audit_tree_handle_event(struct fsnotify_group *group,
999 return 0; 1043 return 0;
1000} 1044}
1001 1045
1002static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group) 1046static void audit_tree_freeing_mark(struct fsnotify_mark *mark,
1047 struct fsnotify_group *group)
1003{ 1048{
1004 struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark); 1049 struct audit_chunk *chunk;
1005 1050
1006 evict_chunk(chunk); 1051 mutex_lock(&mark->group->mark_mutex);
1052 spin_lock(&hash_lock);
1053 chunk = mark_chunk(mark);
1054 replace_mark_chunk(mark, NULL);
1055 spin_unlock(&hash_lock);
1056 mutex_unlock(&mark->group->mark_mutex);
1057 if (chunk) {
1058 evict_chunk(chunk);
1059 audit_mark_put_chunk(chunk);
1060 }
1007 1061
1008 /* 1062 /*
1009 * We are guaranteed to have at least one reference to the mark from 1063 * We are guaranteed to have at least one reference to the mark from
1010 * either the inode or the caller of fsnotify_destroy_mark(). 1064 * either the inode or the caller of fsnotify_destroy_mark().
1011 */ 1065 */
1012 BUG_ON(refcount_read(&entry->refcnt) < 1); 1066 BUG_ON(refcount_read(&mark->refcnt) < 1);
1013} 1067}
1014 1068
1015static const struct fsnotify_ops audit_tree_ops = { 1069static const struct fsnotify_ops audit_tree_ops = {
@@ -1022,6 +1076,8 @@ static int __init audit_tree_init(void)
1022{ 1076{
1023 int i; 1077 int i;
1024 1078
1079 audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC);
1080
1025 audit_tree_group = fsnotify_alloc_group(&audit_tree_ops); 1081 audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
1026 if (IS_ERR(audit_tree_group)) 1082 if (IS_ERR(audit_tree_group))
1027 audit_panic("cannot initialize fsnotify group for rectree watches"); 1083 audit_panic("cannot initialize fsnotify group for rectree watches");
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 787c7afdf829..20ef9ba134b0 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -245,10 +245,8 @@ static void audit_watch_log_rule_change(struct audit_krule *r, struct audit_watc
        ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
        if (!ab)
                return;
-       audit_log_format(ab, "auid=%u ses=%u op=%s",
-                        from_kuid(&init_user_ns, audit_get_loginuid(current)),
-                        audit_get_sessionid(current), op);
-       audit_log_format(ab, " path=");
+       audit_log_session_info(ab);
+       audit_log_format(ab, "op=%s path=", op);
        audit_log_untrustedstring(ab, w->path);
        audit_log_key(ab, r->filterkey);
        audit_log_format(ab, " list=%d res=1", r->listnr);
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index b2d1f043f17f..6593a5207fb0 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -200,7 +200,6 @@ static int audit_match_filetype(struct audit_context *ctx, int val)
200 * References in it _are_ dropped - at the same time we free/drop aux stuff. 200 * References in it _are_ dropped - at the same time we free/drop aux stuff.
201 */ 201 */
202 202
203#ifdef CONFIG_AUDIT_TREE
204static void audit_set_auditable(struct audit_context *ctx) 203static void audit_set_auditable(struct audit_context *ctx)
205{ 204{
206 if (!ctx->prio) { 205 if (!ctx->prio) {
@@ -245,12 +244,10 @@ static int grow_tree_refs(struct audit_context *ctx)
245 ctx->tree_count = 31; 244 ctx->tree_count = 31;
246 return 1; 245 return 1;
247} 246}
248#endif
249 247
250static void unroll_tree_refs(struct audit_context *ctx, 248static void unroll_tree_refs(struct audit_context *ctx,
251 struct audit_tree_refs *p, int count) 249 struct audit_tree_refs *p, int count)
252{ 250{
253#ifdef CONFIG_AUDIT_TREE
254 struct audit_tree_refs *q; 251 struct audit_tree_refs *q;
255 int n; 252 int n;
256 if (!p) { 253 if (!p) {
@@ -274,7 +271,6 @@ static void unroll_tree_refs(struct audit_context *ctx,
274 } 271 }
275 ctx->trees = p; 272 ctx->trees = p;
276 ctx->tree_count = count; 273 ctx->tree_count = count;
277#endif
278} 274}
279 275
280static void free_tree_refs(struct audit_context *ctx) 276static void free_tree_refs(struct audit_context *ctx)
@@ -288,7 +284,6 @@ static void free_tree_refs(struct audit_context *ctx)
288 284
289static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree) 285static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree)
290{ 286{
291#ifdef CONFIG_AUDIT_TREE
292 struct audit_tree_refs *p; 287 struct audit_tree_refs *p;
293 int n; 288 int n;
294 if (!tree) 289 if (!tree)
@@ -305,7 +300,6 @@ static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree)
305 if (audit_tree_match(p->c[n], tree)) 300 if (audit_tree_match(p->c[n], tree))
306 return 1; 301 return 1;
307 } 302 }
308#endif
309 return 0; 303 return 0;
310} 304}
311 305
@@ -836,44 +830,6 @@ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
836 rcu_read_unlock(); 830 rcu_read_unlock();
837} 831}
838 832
839/* Transfer the audit context pointer to the caller, clearing it in the tsk's struct */
840static inline struct audit_context *audit_take_context(struct task_struct *tsk,
841 int return_valid,
842 long return_code)
843{
844 struct audit_context *context = tsk->audit_context;
845
846 if (!context)
847 return NULL;
848 context->return_valid = return_valid;
849
850 /*
851 * we need to fix up the return code in the audit logs if the actual
852 * return codes are later going to be fixed up by the arch specific
853 * signal handlers
854 *
855 * This is actually a test for:
856 * (rc == ERESTARTSYS ) || (rc == ERESTARTNOINTR) ||
857 * (rc == ERESTARTNOHAND) || (rc == ERESTART_RESTARTBLOCK)
858 *
859 * but is faster than a bunch of ||
860 */
861 if (unlikely(return_code <= -ERESTARTSYS) &&
862 (return_code >= -ERESTART_RESTARTBLOCK) &&
863 (return_code != -ENOIOCTLCMD))
864 context->return_code = -EINTR;
865 else
866 context->return_code = return_code;
867
868 if (context->in_syscall && !context->dummy) {
869 audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]);
870 audit_filter_inodes(tsk, context);
871 }
872
873 audit_set_context(tsk, NULL);
874 return context;
875}
876
877static inline void audit_proctitle_free(struct audit_context *context) 833static inline void audit_proctitle_free(struct audit_context *context)
878{ 834{
879 kfree(context->proctitle.value); 835 kfree(context->proctitle.value);
@@ -1107,7 +1063,7 @@ static void audit_log_execve_info(struct audit_context *context,
1107 } 1063 }
1108 1064
1109 /* write as much as we can to the audit log */ 1065 /* write as much as we can to the audit log */
1110 if (len_buf > 0) { 1066 if (len_buf >= 0) {
1111 /* NOTE: some magic numbers here - basically if we 1067 /* NOTE: some magic numbers here - basically if we
1112 * can't fit a reasonable amount of data into the 1068 * can't fit a reasonable amount of data into the
1113 * existing audit buffer, flush it and start with 1069 * existing audit buffer, flush it and start with
@@ -1302,15 +1258,18 @@ static inline int audit_proctitle_rtrim(char *proctitle, int len)
1302 return len; 1258 return len;
1303} 1259}
1304 1260
1305static void audit_log_proctitle(struct task_struct *tsk, 1261static void audit_log_proctitle(void)
1306 struct audit_context *context)
1307{ 1262{
1308 int res; 1263 int res;
1309 char *buf; 1264 char *buf;
1310 char *msg = "(null)"; 1265 char *msg = "(null)";
1311 int len = strlen(msg); 1266 int len = strlen(msg);
1267 struct audit_context *context = audit_context();
1312 struct audit_buffer *ab; 1268 struct audit_buffer *ab;
1313 1269
1270 if (!context || context->dummy)
1271 return;
1272
1314 ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE); 1273 ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE);
1315 if (!ab) 1274 if (!ab)
1316 return; /* audit_panic or being filtered */ 1275 return; /* audit_panic or being filtered */
@@ -1323,7 +1282,7 @@ static void audit_log_proctitle(struct task_struct *tsk,
1323 if (!buf) 1282 if (!buf)
1324 goto out; 1283 goto out;
1325 /* Historically called this from procfs naming */ 1284 /* Historically called this from procfs naming */
1326 res = get_cmdline(tsk, buf, MAX_PROCTITLE_AUDIT_LEN); 1285 res = get_cmdline(current, buf, MAX_PROCTITLE_AUDIT_LEN);
1327 if (res == 0) { 1286 if (res == 0) {
1328 kfree(buf); 1287 kfree(buf);
1329 goto out; 1288 goto out;
@@ -1343,15 +1302,15 @@ out:
1343 audit_log_end(ab); 1302 audit_log_end(ab);
1344} 1303}
1345 1304
1346static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) 1305static void audit_log_exit(void)
1347{ 1306{
1348 int i, call_panic = 0; 1307 int i, call_panic = 0;
1308 struct audit_context *context = audit_context();
1349 struct audit_buffer *ab; 1309 struct audit_buffer *ab;
1350 struct audit_aux_data *aux; 1310 struct audit_aux_data *aux;
1351 struct audit_names *n; 1311 struct audit_names *n;
1352 1312
1353 /* tsk == current */ 1313 context->personality = current->personality;
1354 context->personality = tsk->personality;
1355 1314
1356 ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL); 1315 ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL);
1357 if (!ab) 1316 if (!ab)
@@ -1373,7 +1332,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
1373 context->argv[3], 1332 context->argv[3],
1374 context->name_count); 1333 context->name_count);
1375 1334
1376 audit_log_task_info(ab, tsk); 1335 audit_log_task_info(ab);
1377 audit_log_key(ab, context->filterkey); 1336 audit_log_key(ab, context->filterkey);
1378 audit_log_end(ab); 1337 audit_log_end(ab);
1379 1338
@@ -1462,7 +1421,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
1462 audit_log_name(context, n, NULL, i++, &call_panic); 1421 audit_log_name(context, n, NULL, i++, &call_panic);
1463 } 1422 }
1464 1423
1465 audit_log_proctitle(tsk, context); 1424 audit_log_proctitle();
1466 1425
1467 /* Send end of event record to help user space know we are finished */ 1426 /* Send end of event record to help user space know we are finished */
1468 ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE); 1427 ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE);
@@ -1480,22 +1439,31 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
1480 */ 1439 */
1481void __audit_free(struct task_struct *tsk) 1440void __audit_free(struct task_struct *tsk)
1482{ 1441{
1483 struct audit_context *context; 1442 struct audit_context *context = tsk->audit_context;
1484 1443
1485 context = audit_take_context(tsk, 0, 0);
1486 if (!context) 1444 if (!context)
1487 return; 1445 return;
1488 1446
1489 /* Check for system calls that do not go through the exit 1447 /* We are called either by do_exit() or the fork() error handling code;
1490 * function (e.g., exit_group), then free context block. 1448 * in the former case tsk == current and in the latter tsk is a
1491 * We use GFP_ATOMIC here because we might be doing this 1449 * random task_struct that doesn't doesn't have any meaningful data we
1492 * in the context of the idle thread */ 1450 * need to log via audit_log_exit().
1493 /* that can happen only if we are called from do_exit() */ 1451 */
1494 if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT) 1452 if (tsk == current && !context->dummy && context->in_syscall) {
1495 audit_log_exit(context, tsk); 1453 context->return_valid = 0;
1454 context->return_code = 0;
1455
1456 audit_filter_syscall(tsk, context,
1457 &audit_filter_list[AUDIT_FILTER_EXIT]);
1458 audit_filter_inodes(tsk, context);
1459 if (context->current_state == AUDIT_RECORD_CONTEXT)
1460 audit_log_exit();
1461 }
1462
1496 if (!list_empty(&context->killed_trees)) 1463 if (!list_empty(&context->killed_trees))
1497 audit_kill_trees(&context->killed_trees); 1464 audit_kill_trees(&context->killed_trees);
1498 1465
1466 audit_set_context(tsk, NULL);
1499 audit_free_context(context); 1467 audit_free_context(context);
1500} 1468}
1501 1469
@@ -1565,17 +1533,40 @@ void __audit_syscall_exit(int success, long return_code)
1565{ 1533{
1566 struct audit_context *context; 1534 struct audit_context *context;
1567 1535
1568 if (success) 1536 context = audit_context();
1569 success = AUDITSC_SUCCESS;
1570 else
1571 success = AUDITSC_FAILURE;
1572
1573 context = audit_take_context(current, success, return_code);
1574 if (!context) 1537 if (!context)
1575 return; 1538 return;
1576 1539
1577 if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT) 1540 if (!context->dummy && context->in_syscall) {
1578 audit_log_exit(context, current); 1541 if (success)
1542 context->return_valid = AUDITSC_SUCCESS;
1543 else
1544 context->return_valid = AUDITSC_FAILURE;
1545
1546 /*
1547 * we need to fix up the return code in the audit logs if the
1548 * actual return codes are later going to be fixed up by the
1549 * arch specific signal handlers
1550 *
1551 * This is actually a test for:
1552 * (rc == ERESTARTSYS ) || (rc == ERESTARTNOINTR) ||
1553 * (rc == ERESTARTNOHAND) || (rc == ERESTART_RESTARTBLOCK)
1554 *
1555 * but is faster than a bunch of ||
1556 */
1557 if (unlikely(return_code <= -ERESTARTSYS) &&
1558 (return_code >= -ERESTART_RESTARTBLOCK) &&
1559 (return_code != -ENOIOCTLCMD))
1560 context->return_code = -EINTR;
1561 else
1562 context->return_code = return_code;
1563
1564 audit_filter_syscall(current, context,
1565 &audit_filter_list[AUDIT_FILTER_EXIT]);
1566 audit_filter_inodes(current, context);
1567 if (context->current_state == AUDIT_RECORD_CONTEXT)
1568 audit_log_exit();
1569 }
1579 1570
1580 context->in_syscall = 0; 1571 context->in_syscall = 0;
1581 context->prio = context->state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0; 1572 context->prio = context->state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0;
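
The comment in the hunk above notes that the single range comparison stands in for four equality tests against the kernel's internal restart codes. Those codes are contiguous (512 through 516, with ENOIOCTLCMD sitting in the middle of the range), which is what makes the trick work. A quick userspace sketch of the equivalence, with the constants mirrored locally from include/linux/errno.h rather than pulled from kernel headers:

#include <assert.h>
#include <stdio.h>

/* Values mirrored from include/linux/errno.h (kernel-internal error codes). */
#define ERESTARTSYS		512
#define ERESTARTNOINTR		513
#define ERESTARTNOHAND		514
#define ENOIOCTLCMD		515
#define ERESTART_RESTARTBLOCK	516

/* The test spelled out in the comment: four explicit comparisons. */
static int restart_chained(long rc)
{
	return rc == -ERESTARTSYS || rc == -ERESTARTNOINTR ||
	       rc == -ERESTARTNOHAND || rc == -ERESTART_RESTARTBLOCK;
}

/* The single range test used in __audit_syscall_exit() above. */
static int restart_range(long rc)
{
	return rc <= -ERESTARTSYS && rc >= -ERESTART_RESTARTBLOCK &&
	       rc != -ENOIOCTLCMD;
}

int main(void)
{
	long rc;

	/* The two predicates agree for every return code in a wide range. */
	for (rc = -1024; rc <= 0; rc++)
		assert(restart_chained(rc) == restart_range(rc));
	printf("range check matches the chained comparisons\n");
	return 0;
}
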
@@ -1597,12 +1588,10 @@ void __audit_syscall_exit(int success, long return_code)
1597 kfree(context->filterkey); 1588 kfree(context->filterkey);
1598 context->filterkey = NULL; 1589 context->filterkey = NULL;
1599 } 1590 }
1600 audit_set_context(current, context);
1601} 1591}
1602 1592
1603static inline void handle_one(const struct inode *inode) 1593static inline void handle_one(const struct inode *inode)
1604{ 1594{
1605#ifdef CONFIG_AUDIT_TREE
1606 struct audit_context *context; 1595 struct audit_context *context;
1607 struct audit_tree_refs *p; 1596 struct audit_tree_refs *p;
1608 struct audit_chunk *chunk; 1597 struct audit_chunk *chunk;
@@ -1627,12 +1616,10 @@ static inline void handle_one(const struct inode *inode)
1627 return; 1616 return;
1628 } 1617 }
1629 put_tree_ref(context, chunk); 1618 put_tree_ref(context, chunk);
1630#endif
1631} 1619}
1632 1620
1633static void handle_path(const struct dentry *dentry) 1621static void handle_path(const struct dentry *dentry)
1634{ 1622{
1635#ifdef CONFIG_AUDIT_TREE
1636 struct audit_context *context; 1623 struct audit_context *context;
1637 struct audit_tree_refs *p; 1624 struct audit_tree_refs *p;
1638 const struct dentry *d, *parent; 1625 const struct dentry *d, *parent;
@@ -1685,7 +1672,6 @@ retry:
1685 return; 1672 return;
1686 } 1673 }
1687 rcu_read_unlock(); 1674 rcu_read_unlock();
1688#endif
1689} 1675}
1690 1676
1691static struct audit_names *audit_alloc_name(struct audit_context *context, 1677static struct audit_names *audit_alloc_name(struct audit_context *context,
@@ -2035,7 +2021,7 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
2035 uid = from_kuid(&init_user_ns, task_uid(current)); 2021 uid = from_kuid(&init_user_ns, task_uid(current));
2036 oldloginuid = from_kuid(&init_user_ns, koldloginuid); 2022 oldloginuid = from_kuid(&init_user_ns, koldloginuid);
2037 loginuid = from_kuid(&init_user_ns, kloginuid), 2023 loginuid = from_kuid(&init_user_ns, kloginuid),
2038 tty = audit_get_tty(current); 2024 tty = audit_get_tty();
2039 2025
2040 audit_log_format(ab, "pid=%d uid=%u", task_tgid_nr(current), uid); 2026 audit_log_format(ab, "pid=%d uid=%u", task_tgid_nr(current), uid);
2041 audit_log_task_context(ab); 2027 audit_log_task_context(ab);
@@ -2056,7 +2042,6 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
2056 */ 2042 */
2057int audit_set_loginuid(kuid_t loginuid) 2043int audit_set_loginuid(kuid_t loginuid)
2058{ 2044{
2059 struct task_struct *task = current;
2060 unsigned int oldsessionid, sessionid = AUDIT_SID_UNSET; 2045 unsigned int oldsessionid, sessionid = AUDIT_SID_UNSET;
2061 kuid_t oldloginuid; 2046 kuid_t oldloginuid;
2062 int rc; 2047 int rc;
@@ -2075,8 +2060,8 @@ int audit_set_loginuid(kuid_t loginuid)
2075 sessionid = (unsigned int)atomic_inc_return(&session_id); 2060 sessionid = (unsigned int)atomic_inc_return(&session_id);
2076 } 2061 }
2077 2062
2078 task->sessionid = sessionid; 2063 current->sessionid = sessionid;
2079 task->loginuid = loginuid; 2064 current->loginuid = loginuid;
2080out: 2065out:
2081 audit_log_set_loginuid(oldloginuid, loginuid, oldsessionid, sessionid, rc); 2066 audit_log_set_loginuid(oldloginuid, loginuid, oldsessionid, sessionid, rc);
2082 return rc; 2067 return rc;
@@ -2513,10 +2498,9 @@ void audit_seccomp_actions_logged(const char *names, const char *old_names,
2513 if (unlikely(!ab)) 2498 if (unlikely(!ab))
2514 return; 2499 return;
2515 2500
2516 audit_log_format(ab, "op=seccomp-logging"); 2501 audit_log_format(ab,
2517 audit_log_format(ab, " actions=%s", names); 2502 "op=seccomp-logging actions=%s old-actions=%s res=%d",
2518 audit_log_format(ab, " old-actions=%s", old_names); 2503 names, old_names, res);
2519 audit_log_format(ab, " res=%d", res);
2520 audit_log_end(ab); 2504 audit_log_end(ab);
2521} 2505}
2522 2506
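
The seccomp-logging hunk above folds four audit_log_format() calls into one. Each call appends formatted text to the same audit record buffer, so the emitted record is unchanged; only the number of formatting passes drops. A minimal userspace sketch of that equivalence, with plain snprintf() standing in for the audit buffer API and made-up sample action names:

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *names = "kill_process", *old_names = "kill_thread";
	int res = 1;
	char one_call[256], piecewise[256];
	size_t off = 0;

	/* Single consolidated format, as in the new code. */
	snprintf(one_call, sizeof(one_call),
		 "op=seccomp-logging actions=%s old-actions=%s res=%d",
		 names, old_names, res);

	/* Four successive appends, as in the old code. */
	off += snprintf(piecewise + off, sizeof(piecewise) - off, "op=seccomp-logging");
	off += snprintf(piecewise + off, sizeof(piecewise) - off, " actions=%s", names);
	off += snprintf(piecewise + off, sizeof(piecewise) - off, " old-actions=%s", old_names);
	off += snprintf(piecewise + off, sizeof(piecewise) - off, " res=%d", res);

	/* Same record text either way; only the number of format passes differs. */
	assert(strcmp(one_call, piecewise) == 0);
	puts(one_call);
	return 0;
}
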
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 99dd1d53fc35..af134588ab4e 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -336,7 +336,7 @@ void ima_audit_measurement(struct integrity_iint_cache *iint,
336 audit_log_untrustedstring(ab, filename); 336 audit_log_untrustedstring(ab, filename);
337 audit_log_format(ab, " hash=\"%s:%s\"", algo_name, hash); 337 audit_log_format(ab, " hash=\"%s:%s\"", algo_name, hash);
338 338
339 audit_log_task_info(ab, current); 339 audit_log_task_info(ab);
340 audit_log_end(ab); 340 audit_log_end(ab);
341 341
342 iint->flags |= IMA_AUDITED; 342 iint->flags |= IMA_AUDITED;
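
Several of the hunks above (audit_get_tty(), audit_log_task_info(), and this IMA caller) follow the same refactor: a task_struct parameter that was only ever passed `current` is dropped, the function reports on the calling task itself, and call sites shrink accordingly. A minimal userspace analogue of the pattern, using toy types and names purely for illustration, not the kernel API:

#include <stdio.h>

/* Toy stand-ins for task_struct and the kernel's `current`. */
struct task {
	int pid;
	const char *comm;
};

static _Thread_local struct task *current;

/* Old shape: the caller supplied a task, though it was always `current`. */
static void log_task_info_old(FILE *ab, struct task *tsk)
{
	fprintf(ab, " pid=%d comm=\"%s\"", tsk->pid, tsk->comm);
}

/* New shape: the task argument is gone and the function always reports on
 * the calling task, mirroring the audit_log_task_info(ab) change above. */
static void log_task_info(FILE *ab)
{
	fprintf(ab, " pid=%d comm=\"%s\"", current->pid, current->comm);
}

int main(void)
{
	struct task me = { .pid = 1234, .comm = "demo" };

	current = &me;
	log_task_info_old(stdout, current);	/* old call site */
	log_task_info(stdout);			/* simplified call site */
	putchar('\n');
	return 0;
}
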